@@ -0,0 +1,166 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.k8s

import io.fabric8.kubernetes.api.model.Pod

import org.apache.spark.SparkConf
import org.apache.spark.deploy.k8s.Config._
import org.apache.spark.deploy.k8s.Constants._
import org.apache.spark.deploy.k8s.submit.{JavaMainAppResource, MainAppResource}
import org.apache.spark.internal.config.ConfigEntry

private[spark] sealed trait KubernetesRoleSpecificConf

private[spark] case class KubernetesDriverSpecificConf(
    mainAppResource: Option[MainAppResource],
    mainClass: String,
    appName: String,
    appArgs: Seq[String],
    appId: String) extends KubernetesRoleSpecificConf

private[spark] case class KubernetesExecutorSpecificConf(
    executorId: String, driverPod: Pod)
  extends KubernetesRoleSpecificConf

private[spark] class KubernetesConf[T <: KubernetesRoleSpecificConf](
    private val sparkConf: SparkConf,
    val roleSpecificConf: T,
    val appResourceNamePrefix: String,
    val appId: String,
    val roleLabels: Map[String, String],
    val roleAnnotations: Map[String, String],
    val roleSecretNamesToMountPaths: Map[String, String]) {

  def namespace(): String = sparkConf.get(KUBERNETES_NAMESPACE)

  def sparkJars(): Seq[String] = sparkConf
    .getOption("spark.jars")
    .map(str => str.split(",").toSeq)
    .getOrElse(Seq.empty[String])

[Reviewer] additionalMainAppJar?

[Owner Author] Plan is to have the caller inject the main app resource into the spark.jars field.

[Owner Author] Change of plans - see KubernetesConf.createDriverConf.

  def sparkFiles(): Seq[String] = sparkConf
    .getOption("spark.files")
    .map(str => str.split(",").toSeq)
    .getOrElse(Seq.empty[String])

  def driverCustomEnvs(): Seq[(String, String)] =
    sparkConf.getAllWithPrefix(KUBERNETES_DRIVER_ENV_KEY).toSeq

  def imagePullPolicy(): String = sparkConf.get(CONTAINER_IMAGE_PULL_POLICY)

  def nodeSelector(): Map[String, String] =
    KubernetesUtils.parsePrefixedKeyValuePairs(sparkConf, KUBERNETES_NODE_SELECTOR_PREFIX)

  def getSparkConf(): SparkConf = sparkConf.clone()

  def get[T](config: ConfigEntry[T]): T = sparkConf.get(config)

  def get(conf: String, defaultValue: String): String = sparkConf.get(conf, defaultValue)

  def getOption(key: String): Option[String] = sparkConf.getOption(key)
}

private[spark] object KubernetesConf {
  def createDriverConf(
      sparkConf: SparkConf,
      appName: String,
      appResourceNamePrefix: String,
      appId: String,
      mainAppResource: Option[MainAppResource],
      mainClass: String,
      appArgs: Array[String]): KubernetesConf[KubernetesDriverSpecificConf] = {
    val sparkConfWithMainAppJar = sparkConf.clone()
    mainAppResource.foreach {
      case JavaMainAppResource(res) =>
        val previousJars = sparkConf
          .getOption("spark.jars")
          .map(_.split(","))
          .getOrElse(Array.empty)
        if (!previousJars.contains(res)) {
          sparkConfWithMainAppJar.setJars(previousJars ++ Seq(res))
        }
    }
    val driverCustomLabels = KubernetesUtils.parsePrefixedKeyValuePairs(
      sparkConf,
      KUBERNETES_DRIVER_LABEL_PREFIX)
    require(!driverCustomLabels.contains(SPARK_APP_ID_LABEL), "Label with key " +
      s"$SPARK_APP_ID_LABEL is not allowed as it is reserved for Spark bookkeeping " +
      "operations.")
    require(!driverCustomLabels.contains(SPARK_ROLE_LABEL), "Label with key " +
      s"$SPARK_ROLE_LABEL is not allowed as it is reserved for Spark bookkeeping " +
      "operations.")
    val driverLabels = driverCustomLabels ++ Map(
      SPARK_APP_ID_LABEL -> appId,
      SPARK_ROLE_LABEL -> SPARK_POD_DRIVER_ROLE)
    val driverAnnotations =
      KubernetesUtils.parsePrefixedKeyValuePairs(sparkConf, KUBERNETES_DRIVER_ANNOTATION_PREFIX)
    val driverSecretNamesToMountPaths =
      KubernetesUtils.parsePrefixedKeyValuePairs(sparkConf, KUBERNETES_DRIVER_SECRETS_PREFIX)
    new KubernetesConf(
      sparkConfWithMainAppJar,
      // Arguments ordered to match KubernetesDriverSpecificConf's declaration:
      // mainClass before appName (both are Strings, so a swap would still compile).
      KubernetesDriverSpecificConf(
        mainAppResource,
        mainClass,
        appName,
        appArgs,
        appId),
      appResourceNamePrefix,
      appId,
      driverLabels,
      driverAnnotations,
      driverSecretNamesToMountPaths)
  }

  def createExecutorConf(
      sparkConf: SparkConf,
      executorId: String,
      appId: String,
      driverPod: Pod): KubernetesConf[KubernetesExecutorSpecificConf] = {
    val executorCustomLabels = KubernetesUtils.parsePrefixedKeyValuePairs(
      sparkConf,
      KUBERNETES_EXECUTOR_LABEL_PREFIX)
    require(
      !executorCustomLabels.contains(SPARK_APP_ID_LABEL),
      s"Custom executor labels cannot contain $SPARK_APP_ID_LABEL as it is reserved for Spark.")
    require(
      !executorCustomLabels.contains(SPARK_EXECUTOR_ID_LABEL),
      s"Custom executor labels cannot contain $SPARK_EXECUTOR_ID_LABEL as it is reserved for" +
        " Spark.")
    require(
      !executorCustomLabels.contains(SPARK_ROLE_LABEL),
      s"Custom executor labels cannot contain $SPARK_ROLE_LABEL as it is reserved for Spark.")
    val executorLabels = Map(
      SPARK_EXECUTOR_ID_LABEL -> executorId,
      SPARK_APP_ID_LABEL -> appId,
      SPARK_ROLE_LABEL -> SPARK_POD_EXECUTOR_ROLE) ++
      executorCustomLabels
    val executorAnnotations =
      KubernetesUtils.parsePrefixedKeyValuePairs(sparkConf, KUBERNETES_EXECUTOR_ANNOTATION_PREFIX)
    val executorSecrets =
      KubernetesUtils.parsePrefixedKeyValuePairs(sparkConf, KUBERNETES_EXECUTOR_SECRETS_PREFIX)
    new KubernetesConf(
      sparkConf.clone(),
      KubernetesExecutorSpecificConf(executorId, driverPod),
      sparkConf.get(KUBERNETES_EXECUTOR_POD_NAME_PREFIX),
      appId,
      executorLabels,
      executorAnnotations,
      executorSecrets)
  }
}
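
For orientation, a minimal usage sketch of createDriverConf (not part of this diff). It illustrates the jar-merging behavior discussed in the review thread above; the app name, jar paths, and IDs are invented for illustration.

  // Hypothetical caller-side usage; all values below are made up.
  val sparkConf = new SparkConf(loadDefaults = false)
    .setJars(Seq("local:///opt/deps/dep.jar"))
  val driverConf = KubernetesConf.createDriverConf(
    sparkConf,
    appName = "spark-pi",
    appResourceNamePrefix = "spark-pi-1554", // hypothetical resource name prefix
    appId = "spark-application-12345",       // hypothetical application id
    mainAppResource = Some(JavaMainAppResource("local:///opt/app/main.jar")),
    mainClass = "org.apache.spark.examples.SparkPi",
    appArgs = Array("1000"))
  // Because the main app resource was not already in spark.jars, createDriverConf
  // clones the conf and appends it, so sparkJars() yields both entries:
  assert(driverConf.sparkJars() ==
    Seq("local:///opt/deps/dep.jar", "local:///opt/app/main.jar"))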
@@ -0,0 +1,31 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.k8s

import io.fabric8.kubernetes.api.model.HasMetadata

private[k8s] case class KubernetesSpec(
    pod: SparkPod,
    additionalDriverKubernetesResources: Seq[HasMetadata],
    podJavaSystemProperties: Map[String, String])

private[k8s] object KubernetesSpec {
  def initialSpec(initialProps: Map[String, String]): KubernetesSpec = KubernetesSpec(
    SparkPod.initialPod(),
    Seq.empty,
    initialProps)
}
@@ -0,0 +1,29 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.k8s

import io.fabric8.kubernetes.api.model.{Container, ContainerBuilder, Pod, PodBuilder}

private[k8s] case class SparkPod(pod: Pod, container: Container)

private[k8s] object SparkPod {
  def initialPod(): SparkPod = {
    SparkPod(
      new PodBuilder().withNewMetadata().endMetadata().withNewSpec().endSpec().build(),
      new ContainerBuilder().build())
  }
}
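
The KubernetesFeatureConfigStep trait that BasicDriverFeatureStep extends below is not included in this diff. Inferred from the three methods the step overrides, it presumably looks roughly like the following sketch (not the authoritative definition):

  package org.apache.spark.deploy.k8s.features

  import io.fabric8.kubernetes.api.model.HasMetadata

  import org.apache.spark.deploy.k8s.SparkPod

  // Sketch only: signatures reconstructed from the overrides in BasicDriverFeatureStep.
  private[spark] trait KubernetesFeatureConfigStep {
    // Transforms the pod and container under construction and returns the result.
    def configurePod(pod: SparkPod): SparkPod

    // Additional Java system properties to set on the submitted application.
    def getAdditionalPodSystemProperties(): Map[String, String]

    // Additional Kubernetes resources to create alongside the pod.
    def getAdditionalKubernetesResources(): Seq[HasMetadata]
  }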
@@ -0,0 +1,150 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.k8s.features

import scala.collection.JavaConverters._
import scala.collection.mutable

import io.fabric8.kubernetes.api.model.{ContainerBuilder, EnvVarBuilder, EnvVarSourceBuilder, HasMetadata, PodBuilder, QuantityBuilder}

import org.apache.spark.SparkException
import org.apache.spark.deploy.k8s.{KubernetesConf, KubernetesDriverSpecificConf, KubernetesUtils, SparkPod}
import org.apache.spark.deploy.k8s.Config._
import org.apache.spark.deploy.k8s.Constants._
import org.apache.spark.internal.config._
import org.apache.spark.launcher.SparkLauncher

private[spark] class BasicDriverFeatureStep(
    kubernetesConf: KubernetesConf[KubernetesDriverSpecificConf])
  extends KubernetesFeatureConfigStep {

  private val driverPodName = kubernetesConf
    .get(KUBERNETES_DRIVER_POD_NAME)
    .getOrElse(s"${kubernetesConf.appResourceNamePrefix}-driver")

  private val driverExtraClasspath = kubernetesConf.get(DRIVER_CLASS_PATH)

  private val driverContainerImage = kubernetesConf
    .get(DRIVER_CONTAINER_IMAGE)
    .getOrElse(throw new SparkException("Must specify the driver container image"))

  // CPU settings
  private val driverCpuCores = kubernetesConf.getOption("spark.driver.cores").getOrElse("1")
  private val driverLimitCores = kubernetesConf.get(KUBERNETES_DRIVER_LIMIT_CORES)

  // Memory settings
  private val driverMemoryMiB = kubernetesConf.get(DRIVER_MEMORY)
  private val memoryOverheadMiB = kubernetesConf
    .get(DRIVER_MEMORY_OVERHEAD)
    .getOrElse(math.max((MEMORY_OVERHEAD_FACTOR * driverMemoryMiB).toInt, MEMORY_OVERHEAD_MIN_MIB))
  private val driverMemoryWithOverheadMiB = driverMemoryMiB + memoryOverheadMiB

  override def configurePod(pod: SparkPod): SparkPod = {
    val driverExtraClasspathEnv = driverExtraClasspath.map { classPath =>
      new EnvVarBuilder()
        .withName(ENV_CLASSPATH)
        .withValue(classPath)
        .build()
    }

    val driverCustomEnvs = kubernetesConf.driverCustomEnvs()
      .map { env =>
        new EnvVarBuilder()
          .withName(env._1)
          .withValue(env._2)
          .build()
      }

    val driverCpuQuantity = new QuantityBuilder(false)
      .withAmount(driverCpuCores)
      .build()
    val driverMemoryQuantity = new QuantityBuilder(false)
      .withAmount(s"${driverMemoryMiB}Mi")
      .build()
    val driverMemoryLimitQuantity = new QuantityBuilder(false)
      .withAmount(s"${driverMemoryWithOverheadMiB}Mi")
      .build()
    val maybeCpuLimitQuantity = driverLimitCores.map { limitCores =>
      ("cpu", new QuantityBuilder(false).withAmount(limitCores).build())
    }

    val driverContainerWithoutArgs = new ContainerBuilder(pod.container)
      .withName(DRIVER_CONTAINER_NAME)
      .withImage(driverContainerImage)
      .withImagePullPolicy(kubernetesConf.imagePullPolicy())
      .addAllToEnv(driverCustomEnvs.asJava)
      .addToEnv(driverExtraClasspathEnv.toSeq: _*)
      .addNewEnv()
        .withName(ENV_DRIVER_BIND_ADDRESS)
        .withValueFrom(new EnvVarSourceBuilder()
          .withNewFieldRef("v1", "status.podIP")
          .build())
        .endEnv()
      .withNewResources()
        .addToRequests("cpu", driverCpuQuantity)
        .addToRequests("memory", driverMemoryQuantity)
        .addToLimits("memory", driverMemoryLimitQuantity)
        .addToLimits(maybeCpuLimitQuantity.toMap.asJava)
        .endResources()
      .addToArgs("driver")
      .addToArgs("--properties-file", SPARK_CONF_PATH)
      .addToArgs("--class", kubernetesConf.roleSpecificConf.mainClass)
      // The user application jar is merged into the spark.jars list and managed through that
      // property, so there is no need to reference it explicitly here.
      .addToArgs(SparkLauncher.NO_RESOURCE)

    val driverContainer = kubernetesConf.roleSpecificConf.appArgs.toList match {
      case "" :: Nil | Nil => driverContainerWithoutArgs.build()
      case resolvedAppArgs => driverContainerWithoutArgs.addToArgs(resolvedAppArgs: _*).build()
    }

    val driverPod = new PodBuilder(pod.pod)
      .editOrNewMetadata()
        .withName(driverPodName)
        .addToLabels(kubernetesConf.roleLabels.asJava)
        .addToAnnotations(kubernetesConf.roleAnnotations.asJava)
        .endMetadata()
      .withNewSpec()
        .withRestartPolicy("Never")
        .withNodeSelector(kubernetesConf.nodeSelector().asJava)
        .endSpec()
      .build()
    SparkPod(driverPod, driverContainer)
  }

  override def getAdditionalPodSystemProperties(): Map[String, String] = {
    val additionalProps = mutable.Map(
      KUBERNETES_DRIVER_POD_NAME.key -> driverPodName,
      "spark.app.id" -> kubernetesConf.appId,
      KUBERNETES_EXECUTOR_POD_NAME_PREFIX.key -> kubernetesConf.appResourceNamePrefix,
      KUBERNETES_DRIVER_SUBMIT_CHECK.key -> "true")

    val resolvedSparkJars = KubernetesUtils.resolveFileUrisAndPath(
      kubernetesConf.sparkJars())
    val resolvedSparkFiles = KubernetesUtils.resolveFileUrisAndPath(
      kubernetesConf.sparkFiles())
    if (resolvedSparkJars.nonEmpty) {
      additionalProps.put("spark.jars", resolvedSparkJars.mkString(","))
    }
    if (resolvedSparkFiles.nonEmpty) {
      additionalProps.put("spark.files", resolvedSparkFiles.mkString(","))
    }
    additionalProps.toMap
  }

  override def getAdditionalKubernetesResources(): Seq[HasMetadata] = Seq.empty
}
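
To tie the pieces together, a hypothetical orchestration sketch (the orchestrator itself is not in this diff, so treat this as illustrative only): starting from KubernetesSpec.initialSpec, a driver builder would fold in each feature step's three contributions. driverConf here is the conf from the earlier sketch, with a driver container image assumed to be set on it.

  // Illustrative composition of the types introduced in this PR.
  val spec = KubernetesSpec.initialSpec(initialProps = Map.empty)
  val step = new BasicDriverFeatureStep(driverConf)
  val configuredSpec = KubernetesSpec(
    pod = step.configurePod(spec.pod),
    additionalDriverKubernetesResources =
      spec.additionalDriverKubernetesResources ++ step.getAdditionalKubernetesResources(),
    podJavaSystemProperties =
      spec.podJavaSystemProperties ++ step.getAdditionalPodSystemProperties())
  // configuredSpec.pod now carries the driver container image, resource requests/limits,
  // and args, plus the pod metadata (name, labels, annotations) applied by the step.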