[SPARK-18278] [Scheduler] Spark on Kubernetes - Basic Scheduler Backend #19468
@@ -0,0 +1,47 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.spark.scheduler.cluster

import org.apache.spark.SparkConf
import org.apache.spark.internal.config.{DYN_ALLOCATION_MAX_EXECUTORS, DYN_ALLOCATION_MIN_EXECUTORS, EXECUTOR_INSTANCES}
import org.apache.spark.util.Utils

private[spark] object SchedulerBackendUtils {
  val DEFAULT_NUMBER_EXECUTORS = 2

  /**
   * Getting the initial target number of executors depends on whether dynamic allocation is
   * enabled. If dynamic allocation is not enabled, the number of executors requested by the
   * user is used.
   */
  def getInitialTargetExecutorNumber(
      conf: SparkConf,
      numExecutors: Int = DEFAULT_NUMBER_EXECUTORS): Int = {
    if (Utils.isDynamicAllocationEnabled(conf)) {
      val minNumExecutors = conf.get(DYN_ALLOCATION_MIN_EXECUTORS)
      val initialNumExecutors = Utils.getDynamicAllocationInitialExecutors(conf)
      val maxNumExecutors = conf.get(DYN_ALLOCATION_MAX_EXECUTORS)
      require(initialNumExecutors >= minNumExecutors && initialNumExecutors <= maxNumExecutors,
        s"initial executor number $initialNumExecutors must be between min executor number " +
          s"$minNumExecutors and max executor number $maxNumExecutors")

      initialNumExecutors
    } else {
      conf.get(EXECUTOR_INSTANCES).getOrElse(numExecutors)
    }
  }
}
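For context, the following is a minimal sketch (not part of this diff) of how the helper resolves the initial executor count under static versus dynamic allocation. Because the object is private[spark], the sketch is placed in the same package; the object name and the SparkConf values are illustrative assumptions, not code from this PR.

package org.apache.spark.scheduler.cluster

import org.apache.spark.SparkConf

// Hypothetical example, not part of Spark; illustrates SchedulerBackendUtils usage.
object InitialExecutorCountSketch {
  def main(args: Array[String]): Unit = {
    // Static allocation: spark.executor.instances is honored.
    val staticConf = new SparkConf(loadDefaults = false)
      .set("spark.executor.instances", "8")
    println(SchedulerBackendUtils.getInitialTargetExecutorNumber(staticConf)) // prints 8

    // Dynamic allocation: the initial value must lie between min and max.
    val dynamicConf = new SparkConf(loadDefaults = false)
      .set("spark.dynamicAllocation.enabled", "true")
      .set("spark.dynamicAllocation.minExecutors", "2")
      .set("spark.dynamicAllocation.initialExecutors", "4")
      .set("spark.dynamicAllocation.maxExecutors", "10")
    println(SchedulerBackendUtils.getInitialTargetExecutorNumber(dynamicConf)) // prints 4
  }
}

If neither spark.executor.instances nor dynamic allocation is configured, the method falls back to DEFAULT_NUMBER_EXECUTORS (2).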
@@ -531,6 +531,14 @@ def __hash__(self):
sbt_test_goals=["mesos/test"] | ||
) | ||
|
||
kubernetes = Module( | ||
name="kubernetes", | ||
dependencies=[], | ||
source_file_regexes=["resource-managers/kubernetes/core"], | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. remove 'core' |
||
build_profile_flags=["-Pkubernetes"], | ||
sbt_test_goals=["kubernetes/test"] | ||
) | ||

# The root module is a dummy module which is used to run all of the tests.
# No other modules should directly depend on this module.
root = Module(
@@ -2648,6 +2648,13 @@
    </modules>
  </profile>

  <profile>
    <id>kubernetes</id>
    <modules>
      <module>resource-managers/kubernetes/core</module>
Review comment: both yarn and mesos don't have a sub-directory called core.
Reply: See #19468 (comment)
    </modules>
  </profile>

  <profile>
    <id>hive-thriftserver</id>
    <modules>
@@ -0,0 +1,100 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
  ~ Licensed to the Apache Software Foundation (ASF) under one or more
  ~ contributor license agreements. See the NOTICE file distributed with
  ~ this work for additional information regarding copyright ownership.
  ~ The ASF licenses this file to You under the Apache License, Version 2.0
  ~ (the "License"); you may not use this file except in compliance with
  ~ the License. You may obtain a copy of the License at
  ~
  ~    http://www.apache.org/licenses/LICENSE-2.0
  ~
  ~ Unless required by applicable law or agreed to in writing, software
  ~ distributed under the License is distributed on an "AS IS" BASIS,
  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  ~ See the License for the specific language governing permissions and
  ~ limitations under the License.
  -->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>
  <parent>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-parent_2.11</artifactId>
    <version>2.3.0-SNAPSHOT</version>
    <relativePath>../../../pom.xml</relativePath>
  </parent>

  <artifactId>spark-kubernetes_2.11</artifactId>
  <packaging>jar</packaging>
  <name>Spark Project Kubernetes</name>
  <properties>
    <sbt.project.name>kubernetes</sbt.project.name>
    <kubernetes.client.version>3.0.0</kubernetes.client.version>
Review comment: Last time I asked about the client version, there were concerns regarding the maturity/stability of 3.x compared to the 2.2 (IIRC) version which was in use - were they resolved?
Reply: We have been using version 3.0.0 in our forked repo for a couple of months now. We haven't seen any issues with it.
  </properties>

  <dependencies>
    <dependency>
      <groupId>org.apache.spark</groupId>
      <artifactId>spark-core_${scala.binary.version}</artifactId>
      <version>${project.version}</version>
    </dependency>

    <dependency>
      <groupId>org.apache.spark</groupId>
      <artifactId>spark-core_${scala.binary.version}</artifactId>
      <version>${project.version}</version>
      <type>test-jar</type>
      <scope>test</scope>
    </dependency>

    <dependency>
      <groupId>io.fabric8</groupId>
      <artifactId>kubernetes-client</artifactId>
Review comment: these dependencies need to be listed, please see
Review comment: The dependency needs to be added to
Reply: Listed in f8e3249.
      <version>${kubernetes.client.version}</version>
      <exclusions>
        <exclusion>
          <groupId>com.fasterxml.jackson.core</groupId>
          <artifactId>*</artifactId>
        </exclusion>
        <exclusion>
          <groupId>com.fasterxml.jackson.dataformat</groupId>
          <artifactId>jackson-dataformat-yaml</artifactId>
        </exclusion>
      </exclusions>
    </dependency>

    <!-- Required by kubernetes-client but we exclude it -->
    <dependency>
      <groupId>com.fasterxml.jackson.dataformat</groupId>
      <artifactId>jackson-dataformat-yaml</artifactId>
      <version>${fasterxml.jackson.version}</version>
    </dependency>

    <!-- Explicitly depend on shaded dependencies from the parent, since shaded deps aren't transitive -->
    <dependency>
      <groupId>com.google.guava</groupId>
      <artifactId>guava</artifactId>
    </dependency>
    <!-- End of shaded deps. -->

    <dependency>
      <groupId>org.mockito</groupId>
      <artifactId>mockito-core</artifactId>
      <scope>test</scope>
    </dependency>

    <dependency>
      <groupId>com.squareup.okhttp3</groupId>
      <artifactId>okhttp</artifactId>
      <version>3.8.1</version>
    </dependency>

  </dependencies>

  <build>
    <outputDirectory>target/scala-${scala.binary.version}/classes</outputDirectory>
    <testOutputDirectory>target/scala-${scala.binary.version}/test-classes</testOutputDirectory>
  </build>

</project>
@@ -0,0 +1,123 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.spark.deploy.k8s

import org.apache.spark.internal.Logging
import org.apache.spark.internal.config.ConfigBuilder
import org.apache.spark.network.util.ByteUnit

private[spark] object Config extends Logging {

  val KUBERNETES_NAMESPACE =
    ConfigBuilder("spark.kubernetes.namespace")
      .doc("The namespace that will be used for running the driver and executor pods. When using " +
        "spark-submit in cluster mode, this can also be passed to spark-submit via the " +
        "--kubernetes-namespace command line argument.")
      .stringConf
      .createWithDefault("default")

  val EXECUTOR_DOCKER_IMAGE =
    ConfigBuilder("spark.kubernetes.executor.docker.image")
      .doc("Docker image to use for the executors. Specify this using the standard Docker tag " +
        "format.")
      .stringConf
      .createOptional

  val DOCKER_IMAGE_PULL_POLICY =
    ConfigBuilder("spark.kubernetes.docker.image.pullPolicy")
      .doc("Kubernetes image pull policy. Valid values are Always, Never, and IfNotPresent.")
      .stringConf
      .checkValues(Set("Always", "Never", "IfNotPresent"))
      .createWithDefault("IfNotPresent")

  val APISERVER_AUTH_DRIVER_CONF_PREFIX =
    "spark.kubernetes.authenticate.driver"
  val APISERVER_AUTH_DRIVER_MOUNTED_CONF_PREFIX =
    "spark.kubernetes.authenticate.driver.mounted"
  val OAUTH_TOKEN_CONF_SUFFIX = "oauthToken"
  val OAUTH_TOKEN_FILE_CONF_SUFFIX = "oauthTokenFile"
  val CLIENT_KEY_FILE_CONF_SUFFIX = "clientKeyFile"
  val CLIENT_CERT_FILE_CONF_SUFFIX = "clientCertFile"
  val CA_CERT_FILE_CONF_SUFFIX = "caCertFile"

  val KUBERNETES_SERVICE_ACCOUNT_NAME =
    ConfigBuilder(s"$APISERVER_AUTH_DRIVER_CONF_PREFIX.serviceAccountName")
      .doc("Service account that is used when running the driver pod. The driver pod uses " +
        "this service account when requesting executor pods from the API server. If specific " +
        "credentials are given for the driver pod to use, the driver will favor " +
        "using those credentials instead.")
      .stringConf
      .createOptional

  // Note that while we set a default for this when we start up the
  // scheduler, the specific default value is dynamically determined
  // based on the executor memory.
  val KUBERNETES_EXECUTOR_MEMORY_OVERHEAD =
    ConfigBuilder("spark.kubernetes.executor.memoryOverhead")
      .doc("The amount of off-heap memory (in megabytes) to be allocated per executor. This " +
        "is memory that accounts for things like VM overheads, interned strings, other native " +
        "overheads, etc. This tends to grow with the executor size (typically 6-10%).")
      .bytesConf(ByteUnit.MiB)
      .createOptional

  val KUBERNETES_EXECUTOR_LABEL_PREFIX = "spark.kubernetes.executor.label."
  val KUBERNETES_EXECUTOR_ANNOTATION_PREFIX = "spark.kubernetes.executor.annotation."

  val KUBERNETES_DRIVER_POD_NAME =
    ConfigBuilder("spark.kubernetes.driver.pod.name")
      .doc("Name of the driver pod.")
      .stringConf
      .createOptional

  val KUBERNETES_EXECUTOR_POD_NAME_PREFIX =
    ConfigBuilder("spark.kubernetes.executor.podNamePrefix")
      .doc("Prefix to use in front of the executor pod names.")
      .internal()
      .stringConf
      .createWithDefault("spark")

  val KUBERNETES_ALLOCATION_BATCH_SIZE =
    ConfigBuilder("spark.kubernetes.allocation.batch.size")
      .doc("Number of pods to launch at once in each round of executor allocation.")
      .intConf
      .checkValue(value => value > 0, "Allocation batch size should be a positive integer")
      .createWithDefault(5)

  val KUBERNETES_ALLOCATION_BATCH_DELAY =
    ConfigBuilder("spark.kubernetes.allocation.batch.delay")
      .doc("Number of seconds to wait between each round of executor allocation.")
      .longConf
      .checkValue(value => value > 0, "Allocation batch delay should be a positive integer")
      .createWithDefault(1)

  val KUBERNETES_EXECUTOR_LIMIT_CORES =
    ConfigBuilder("spark.kubernetes.executor.limit.cores")
      .doc("Specify the hard CPU limit for a single executor pod.")
      .stringConf
      .createOptional

  val KUBERNETES_EXECUTOR_LOST_REASON_CHECK_MAX_ATTEMPTS =
    ConfigBuilder("spark.kubernetes.executor.lostCheck.maxAttempts")
      .doc("Maximum number of attempts allowed for checking the reason of an executor loss " +
        "before it is assumed that the executor failed.")
      .intConf
      .checkValue(value => value > 0, "Maximum attempts of checks of executor lost reason " +
        "must be a positive integer")
      .createWithDefault(10)

  val KUBERNETES_NODE_SELECTOR_PREFIX = "spark.kubernetes.node.selector."
}
Review comment: Should we remove this val from YarnSparkHadoopUtil?
Reply: Done