diff --git a/CHANGELOG.md b/CHANGELOG.md index b1aad75f..3f55b6a4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,12 +4,19 @@ All notable changes to this project will be documented in this file. ## [Unreleased] +### Changed + +- Various documentation improvements for the CRD ([#319]). +- [BREAKING] Removed the version field. Several attributes are now mandatory. While this change is + technically breaking, existing Spark jobs would not have worked before, as these attributes were already required ([#319]). + ### Fixed - Add missing `deletecollection` RBAC permission for Spark drivers. Previously this caused confusing error messages in the spark driver log (`User "system:serviceaccount:default:my-spark-app" cannot deletecollection resource "configmaps" in API group "" in the namespace "default".`) ([#313]). [#313]: https://github.com/stackabletech/spark-k8s-operator/pull/313 +[#319]: https://github.com/stackabletech/spark-k8s-operator/pull/319 ## [23.11.0] - 2023-11-24 diff --git a/Cargo.lock b/Cargo.lock index d836e415..d6206a22 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -673,12 +673,6 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" -[[package]] -name = "hashbrown" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" - [[package]] name = "hashbrown" version = "0.14.1" @@ -840,16 +834,6 @@ dependencies = [ "unicode-normalization", ] -[[package]] -name = "indexmap" -version = "1.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" -dependencies = [ - "autocfg", - "hashbrown 0.12.3", -] - [[package]] name = "indexmap" version = "2.0.2" @@ -857,7 +841,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8adf3ddd720272c6ea8bf59463c04e0f93d0bbf7c5439b691bca2987e0270897" dependencies = [ "equivalent", - "hashbrown 0.14.1", + "hashbrown", ] [[package]] @@ -1045,7 +1029,7 @@ dependencies = [ "backoff", "derivative", "futures 0.3.28", - "hashbrown 0.14.1", + "hashbrown", "json-patch", "k8s-openapi", "kube-client", @@ -1205,71 +1189,62 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "opentelemetry" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9591d937bc0e6d2feb6f71a559540ab300ea49955229c347a517a28d27784c54" +checksum = "1e32339a5dc40459130b3bd269e9892439f55b33e772d2a9d402a789baaf4e8a" dependencies = [ - "opentelemetry_api", - "opentelemetry_sdk", + "futures-core", + "futures-sink", + "indexmap", + "js-sys", + "once_cell", + "pin-project-lite", + "thiserror", + "urlencoding", ] [[package]] name = "opentelemetry-jaeger" -version = "0.19.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "876958ba9084f390f913fcf04ddf7bbbb822898867bb0a51cc28f2b9e5c1b515" +checksum = "e617c66fd588e40e0dbbd66932fdc87393095b125d4459b1a3a10feb1712f8a1" dependencies = [ "async-trait", "futures-core", "futures-util", "opentelemetry", "opentelemetry-semantic-conventions", + "opentelemetry_sdk", "thrift", "tokio", ] [[package]] name = "opentelemetry-semantic-conventions" -version = "0.12.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"73c9f9340ad135068800e7f1b24e9e09ed9e7143f5bf8518ded3d3ec69789269" +checksum = "f5774f1ef1f982ef2a447f6ee04ec383981a3ab99c8e77a1a7b30182e65bbc84" dependencies = [ "opentelemetry", ] -[[package]] -name = "opentelemetry_api" -version = "0.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a81f725323db1b1206ca3da8bb19874bbd3f57c3bcd59471bfb04525b265b9b" -dependencies = [ - "futures-channel", - "futures-util", - "indexmap 1.9.3", - "js-sys", - "once_cell", - "pin-project-lite", - "thiserror", - "urlencoding", -] - [[package]] name = "opentelemetry_sdk" -version = "0.20.0" +version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa8e705a0612d48139799fcbaba0d4a90f06277153e43dd2bdc16c6f0edd8026" +checksum = "968ba3f2ca03e90e5187f5e4f46c791ef7f2c163ae87789c8ce5f5ca3b7b7de5" dependencies = [ "async-trait", "crossbeam-channel", "futures-channel", "futures-executor", "futures-util", + "glob", "once_cell", - "opentelemetry_api", - "ordered-float 3.9.1", + "opentelemetry", + "ordered-float 4.2.0", "percent-encoding", "rand", - "regex", "thiserror", "tokio", "tokio-stream", @@ -1286,9 +1261,9 @@ dependencies = [ [[package]] name = "ordered-float" -version = "3.9.1" +version = "4.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a54938017eacd63036332b4ae5c8a49fc8c0c1d6d629893057e4f13609edd06" +checksum = "a76df7075c7d4d01fdcb46c912dd17fba5b60c78ea480b475f2b6ab6f666584e" dependencies = [ "num-traits", ] @@ -1802,7 +1777,7 @@ version = "1.0.107" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" dependencies = [ - "indexmap 2.0.2", + "indexmap", "itoa", "ryu", "serde", @@ -1823,7 +1798,7 @@ version = "0.9.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a49e178e4452f45cb61d0cd8cebc1b0fafd3e41929e996cef79aa3aca91f574" dependencies = [ - "indexmap 2.0.2", + "indexmap", "itoa", "ryu", "serde", @@ -1945,8 +1920,8 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "stackable-operator" -version = "0.56.1" -source = "git+https://github.com/stackabletech/operator-rs.git?tag=0.56.1#beeb39436024fa5f61d840402c26ee56fc5fbd29" +version = "0.58.1" +source = "git+https://github.com/stackabletech/operator-rs.git?tag=0.58.1#ab309d577e1937834f2adcbcd647822aa9c2ae43" dependencies = [ "chrono", "clap", @@ -1961,10 +1936,12 @@ dependencies = [ "lazy_static", "opentelemetry", "opentelemetry-jaeger", + "opentelemetry_sdk", "product-config", "rand", "regex", "schemars", + "semver", "serde", "serde_json", "serde_yaml", @@ -1976,12 +1953,13 @@ dependencies = [ "tracing", "tracing-opentelemetry", "tracing-subscriber", + "url", ] [[package]] name = "stackable-operator-derive" -version = "0.56.1" -source = "git+https://github.com/stackabletech/operator-rs.git?tag=0.56.1#beeb39436024fa5f61d840402c26ee56fc5fbd29" +version = "0.58.1" +source = "git+https://github.com/stackabletech/operator-rs.git?tag=0.58.1#ab309d577e1937834f2adcbcd647822aa9c2ae43" dependencies = [ "darling", "proc-macro2", @@ -2248,7 +2226,7 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.0.2", + "indexmap", "serde", "serde_spanned", "toml_datetime", @@ -2362,20 +2340,33 @@ dependencies = [ "tracing-core", ] +[[package]] +name = "tracing-log" 
+version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + [[package]] name = "tracing-opentelemetry" -version = "0.21.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75327c6b667828ddc28f5e3f169036cb793c3f588d83bf0f262a7f062ffed3c8" +checksum = "c67ac25c5407e7b961fafc6f7e9aa5958fd297aada2d20fa2ae1737357e55596" dependencies = [ + "js-sys", "once_cell", "opentelemetry", "opentelemetry_sdk", "smallvec", "tracing", "tracing-core", - "tracing-log", + "tracing-log 0.2.0", "tracing-subscriber", + "web-time", ] [[package]] @@ -2393,7 +2384,7 @@ dependencies = [ "thread_local", "tracing", "tracing-core", - "tracing-log", + "tracing-log 0.1.3", ] [[package]] @@ -2582,6 +2573,16 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "web-time" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57099a701fb3a8043f993e8228dc24229c7b942e2b009a1b962e54489ba1d3bf" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + [[package]] name = "winapi" version = "0.3.9" diff --git a/Cargo.toml b/Cargo.toml index dc5d6550..fe5ad768 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,7 +20,7 @@ serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" serde_yaml = "0.9" snafu = "0.7" -stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", tag = "0.56.1" } +stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", tag = "0.58.1" } strum = { version = "0.25", features = ["derive"] } tokio = { version = "1.29", features = ["full"] } tracing = "0.1" diff --git a/deploy/helm/spark-k8s-operator/crds/crds.yaml b/deploy/helm/spark-k8s-operator/crds/crds.yaml index 581ea19e..2e4f7ce4 100644 --- a/deploy/helm/spark-k8s-operator/crds/crds.yaml +++ b/deploy/helm/spark-k8s-operator/crds/crds.yaml @@ -23,37 +23,52 @@ spec: description: Auto-generated derived type for SparkApplicationSpec via `CustomResource` properties: spec: + description: |- + A Spark cluster stacklet. This resource is managed by the Stackable operator for Apache Spark. Find more information on how to use it and the resources that the operator generates in the [operator documentation](https://docs.stackable.tech/home/nightly/spark-k8s/). + + The SparkApplication CRD looks a little different than the CRDs of the other products on the Stackable Data Platform. properties: args: + default: [] + description: Arguments passed directly to the job artifact. items: type: string - nullable: true type: array deps: - nullable: true + default: + requirements: [] + packages: [] + repositories: [] + excludePackages: [] + description: 'Job dependencies: a list of python packages that will be installed via pip, a list of packages or repositories that is passed directly to spark-submit, or a list of excluded packages (also passed directly to spark-submit).' properties: excludePackages: + default: [] + description: A list of excluded packages that is passed directly to `spark-submit`. items: type: string - nullable: true type: array packages: + default: [] + description: A list of packages that is passed directly to `spark-submit`. items: type: string - nullable: true type: array repositories: + default: [] + description: A list of repositories that is passed directly to `spark-submit`. 
items: type: string - nullable: true type: array requirements: + default: [] + description: 'Under the `requirements` you can specify Python dependencies that will be installed with `pip`. Example: `tabulate==0.8.9`' items: type: string - nullable: true type: array type: object driver: + description: The driver role specifies the configuration that, together with the driver pod template, is used by Spark to create driver pods. nullable: true properties: cliOverrides: @@ -70,6 +85,7 @@ spec: podAntiAffinity: null nodeAffinity: null nodeSelector: null + description: These configuration settings control [Pod placement](https://docs.stackable.tech/home/nightly/concepts/operations/pod_placement). properties: nodeAffinity: description: Node affinity is a group of node affinity scheduling rules. @@ -539,7 +555,7 @@ spec: default: enableVectorAgent: null containers: {} - description: Logging configuration + description: Logging configuration, learn more in the [logging concept documentation](https://docs.stackable.tech/home/nightly/concepts/logging). properties: containers: additionalProperties: @@ -612,10 +628,10 @@ spec: description: Configuration per logger type: object type: object - description: Log configuration per container + description: Log configuration per container. type: object enableVectorAgent: - description: Wether or not to deploy a container with the Vector log agent + description: Wether or not to deploy a container with the Vector log agent. nullable: true type: boolean type: object @@ -628,6 +644,7 @@ spec: min: null max: null storage: {} + description: Resource usage is configured here, this includes CPU usage, memory usage and disk storage usage, if this role needs any. properties: cpu: default: @@ -635,21 +652,22 @@ spec: max: null properties: max: - description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation." + description: The maximum amount of CPU cores that can be requested by Pods. Equivalent to the `limit` for Pod resource configuration. Cores are specified either as a decimal point number or as milli units. For example:`1.5` will be 1.5 cores, also written as `1500m`. nullable: true type: string min: - description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation." + description: The minimal amount of CPU cores that Pods need to run. Equivalent to the `request` for Pod resource configuration. Cores are specified either as a decimal point number or as milli units. For example:`1.5` will be 1.5 cores, also written as `1500m`. nullable: true type: string type: object memory: properties: limit: - description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . 
::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation." + description: 'The maximum amount of memory that should be available to the Pod. Specified as a byte [Quantity](https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/), which means these suffixes are supported: E, P, T, G, M, k. You can also use the power-of-two equivalents: Ei, Pi, Ti, Gi, Mi, Ki. For example, the following represent roughly the same value: `128974848, 129e6, 129M, 128974848000m, 123Mi`' nullable: true type: string runtimeLimits: + description: Additional options that can be specified. type: object type: object storage: @@ -690,15 +708,17 @@ spec: type: string type: object default: {} + description: The `configOverrides` can be used to configure properties in product config files that are not exposed in the CRD. Read the [config overrides documentation](https://docs.stackable.tech/home/nightly/concepts/overrides#config-overrides) and consult the operator specific usage guide documentation for details on the available config files and settings for the specific product. type: object envOverrides: additionalProperties: type: string default: {} + description: '`envOverrides` configure environment variables to be set in the Pods. It is a map from strings to strings - environment variables and the value to set. Read the [environment variable overrides documentation](https://docs.stackable.tech/home/nightly/concepts/overrides#env-overrides) for more information and consult the operator specific usage guide to find out about the product specific environment variables that are available.' 
type: object podOverrides: default: {} - description: See PodTemplateSpec (https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#podtemplatespec-v1-core) for more details + description: In the `podOverrides` property you can define a [PodTemplateSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#podtemplatespec-v1-core) to override any property that can be set on a Kubernetes Pod. Read the [Pod overrides documentation](https://docs.stackable.tech/home/nightly/concepts/overrides#pod-overrides) for more information. properties: metadata: properties: @@ -3496,6 +3516,8 @@ spec: type: object type: object env: + default: [] + description: A list of environment variables that will be set in the job pod and the driver and executor pod templates. items: description: EnvVar represents an environment variable present in a Container. properties: @@ -3569,9 +3591,9 @@ spec: required: - name type: object - nullable: true type: array executor: + description: The executor role specifies the configuration that, together with the driver pod template, is used by Spark to create the executor pods. nullable: true properties: cliOverrides: @@ -3588,6 +3610,7 @@ spec: podAntiAffinity: null nodeAffinity: null nodeSelector: null + description: These configuration settings control [Pod placement](https://docs.stackable.tech/home/nightly/concepts/operations/pod_placement). properties: nodeAffinity: description: Node affinity is a group of node affinity scheduling rules. @@ -4057,7 +4080,7 @@ spec: default: enableVectorAgent: null containers: {} - description: Logging configuration + description: Logging configuration, learn more in the [logging concept documentation](https://docs.stackable.tech/home/nightly/concepts/logging). properties: containers: additionalProperties: @@ -4130,10 +4153,10 @@ spec: description: Configuration per logger type: object type: object - description: Log configuration per container + description: Log configuration per container. type: object enableVectorAgent: - description: Wether or not to deploy a container with the Vector log agent + description: Wether or not to deploy a container with the Vector log agent. nullable: true type: boolean type: object @@ -4146,6 +4169,7 @@ spec: min: null max: null storage: {} + description: Resource usage is configured here, this includes CPU usage, memory usage and disk storage usage, if this role needs any. properties: cpu: default: @@ -4153,21 +4177,22 @@ spec: max: null properties: max: - description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) 
This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation." + description: The maximum amount of CPU cores that can be requested by Pods. Equivalent to the `limit` for Pod resource configuration. Cores are specified either as a decimal point number or as milli units. For example:`1.5` will be 1.5 cores, also written as `1500m`. nullable: true type: string min: - description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation." + description: The minimal amount of CPU cores that Pods need to run. Equivalent to the `request` for Pod resource configuration. Cores are specified either as a decimal point number or as milli units. For example:`1.5` will be 1.5 cores, also written as `1500m`. nullable: true type: string type: object memory: properties: limit: - description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation." + description: 'The maximum amount of memory that should be available to the Pod. Specified as a byte [Quantity](https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/), which means these suffixes are supported: E, P, T, G, M, k. You can also use the power-of-two equivalents: Ei, Pi, Ti, Gi, Mi, Ki. For example, the following represent roughly the same value: `128974848, 129e6, 129M, 128974848000m, 123Mi`' nullable: true type: string runtimeLimits: + description: Additional options that can be specified. type: object type: object storage: @@ -4208,15 +4233,17 @@ spec: type: string type: object default: {} + description: The `configOverrides` can be used to configure properties in product config files that are not exposed in the CRD. 
Read the [config overrides documentation](https://docs.stackable.tech/home/nightly/concepts/overrides#config-overrides) and consult the operator specific usage guide documentation for details on the available config files and settings for the specific product. type: object envOverrides: additionalProperties: type: string default: {} + description: '`envOverrides` configure environment variables to be set in the Pods. It is a map from strings to strings - environment variables and the value to set. Read the [environment variable overrides documentation](https://docs.stackable.tech/home/nightly/concepts/overrides#env-overrides) for more information and consult the operator specific usage guide to find out about the product specific environment variables that are available.' type: object podOverrides: default: {} - description: See PodTemplateSpec (https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#podtemplatespec-v1-core) for more details + description: In the `podOverrides` property you can define a [PodTemplateSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#podtemplatespec-v1-core) to override any property that can be set on a Kubernetes Pod. Read the [Pod overrides documentation](https://docs.stackable.tech/home/nightly/concepts/overrides#pod-overrides) for more information. properties: metadata: properties: @@ -7050,9 +7077,11 @@ spec: type: object type: object image: + description: User-supplied image containing spark-job dependencies that will be copied to the specified volume mount. See the [examples](https://docs.stackable.tech/home/nightly/spark-k8s/usage-guide/examples). nullable: true type: string job: + description: The job builds a spark-submit command, complete with arguments and referenced dependencies such as templates, and passes it on to Spark. nullable: true properties: cliOverrides: @@ -7067,7 +7096,7 @@ spec: default: enableVectorAgent: null containers: {} - description: Logging configuration + description: Logging configuration, learn more in the [logging concept documentation](https://docs.stackable.tech/home/nightly/concepts/logging). properties: containers: additionalProperties: @@ -7140,10 +7169,10 @@ spec: description: Configuration per logger type: object type: object - description: Log configuration per container + description: Log configuration per container. type: object enableVectorAgent: - description: Wether or not to deploy a container with the Vector log agent + description: Wether or not to deploy a container with the Vector log agent. nullable: true type: boolean type: object @@ -7156,6 +7185,7 @@ spec: min: null max: null storage: {} + description: Resource usage is configured here, this includes CPU usage, memory usage and disk storage usage, if this role needs any. properties: cpu: default: @@ -7163,21 +7193,22 @@ spec: max: null properties: max: - description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . 
::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation." + description: The maximum amount of CPU cores that can be requested by Pods. Equivalent to the `limit` for Pod resource configuration. Cores are specified either as a decimal point number or as milli units. For example:`1.5` will be 1.5 cores, also written as `1500m`. nullable: true type: string min: - description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation." + description: The minimal amount of CPU cores that Pods need to run. Equivalent to the `request` for Pod resource configuration. Cores are specified either as a decimal point number or as milli units. For example:`1.5` will be 1.5 cores, also written as `1500m`. nullable: true type: string type: object memory: properties: limit: - description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation." + description: 'The maximum amount of memory that should be available to the Pod. 
Specified as a byte [Quantity](https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/), which means these suffixes are supported: E, P, T, G, M, k. You can also use the power-of-two equivalents: Ei, Pi, Ti, Gi, Mi, Ki. For example, the following represent roughly the same value: `128974848, 129e6, 129M, 128974848000m, 123Mi`' nullable: true type: string runtimeLimits: + description: Additional options that can be specified. type: object type: object storage: @@ -7190,15 +7221,17 @@ spec: type: string type: object default: {} + description: The `configOverrides` can be used to configure properties in product config files that are not exposed in the CRD. Read the [config overrides documentation](https://docs.stackable.tech/home/nightly/concepts/overrides#config-overrides) and consult the operator specific usage guide documentation for details on the available config files and settings for the specific product. type: object envOverrides: additionalProperties: type: string default: {} + description: '`envOverrides` configure environment variables to be set in the Pods. It is a map from strings to strings - environment variables and the value to set. Read the [environment variable overrides documentation](https://docs.stackable.tech/home/nightly/concepts/overrides#env-overrides) for more information and consult the operator specific usage guide to find out about the product specific environment variables that are available.' type: object podOverrides: default: {} - description: See PodTemplateSpec (https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#podtemplatespec-v1-core) for more details + description: In the `podOverrides` property you can define a [PodTemplateSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#podtemplatespec-v1-core) to override any property that can be set on a Kubernetes Pod. Read the [Pod overrides documentation](https://docs.stackable.tech/home/nightly/concepts/overrides#pod-overrides) for more information. properties: metadata: properties: @@ -9996,6 +10029,7 @@ spec: type: object type: object logFileDirectory: + description: The log file directory definition used by the Spark history server. Currently only S3 buckets are supported. nullable: true oneOf: - required: @@ -10004,7 +10038,7 @@ spec: s3: properties: bucket: - description: Operators are expected to define fields for this type in order to work with S3 buckets. + description: An S3 bucket definition, it can either be a reference to an explicit S3Bucket object, or it can be an inline defintion of a bucket. Read the [S3 resources concept documentation](https://docs.stackable.tech/home/nightly/concepts/s3) to learn more. oneOf: - required: - inline @@ -10012,13 +10046,14 @@ spec: - reference properties: inline: - description: S3 bucket specification containing only the bucket name and an inlined or referenced connection specification. + description: An inline definition, containing the S3 bucket properties. properties: bucketName: + description: The name of the S3 bucket. nullable: true type: string connection: - description: Operators are expected to define fields for this type in order to work with S3 connections. + description: The definition of an S3 connection, either inline or as a reference. nullable: true oneOf: - required: @@ -10027,47 +10062,50 @@ spec: - reference properties: inline: - description: S3 connection definition as CRD. + description: Inline definition of an S3 connection. 
properties: accessStyle: - description: Which access style to use. Defaults to virtual hosted-style as most of the data products out there. Have a look at the official documentation on + description: Which access style to use. Defaults to virtual hosted-style as most of the data products out there. Have a look at the [AWS documentation](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html). enum: - Path - VirtualHosted nullable: true type: string credentials: - description: If the S3 uses authentication you have to specify you S3 credentials. In the most cases a SecretClass providing `accessKey` and `secretKey` is sufficient. + description: If the S3 uses authentication you have to specify you S3 credentials. In the most cases a [SecretClass](https://docs.stackable.tech/home/nightly/secret-operator/secretclass) providing `accessKey` and `secretKey` is sufficient. nullable: true properties: scope: - description: '[Scope](https://docs.stackable.tech/secret-operator/scope.html) of the [SecretClass](https://docs.stackable.tech/secret-operator/secretclass.html)' + description: '[Scope](https://docs.stackable.tech/home/nightly/secret-operator/scope) of the [SecretClass](https://docs.stackable.tech/home/nightly/secret-operator/secretclass).' nullable: true properties: node: default: false + description: The node scope is resolved to the name of the Kubernetes Node object that the Pod is running on. This will typically be the DNS name of the node. type: boolean pod: default: false + description: The pod scope is resolved to the name of the Kubernetes Pod. This allows the secret to differentiate between StatefulSet replicas. type: boolean services: default: [] + description: The service scope allows Pod objects to specify custom scopes. This should typically correspond to Service objects that the Pod participates in. items: type: string type: array type: object secretClass: - description: '[SecretClass](https://docs.stackable.tech/secret-operator/secretclass.html) containing the LDAP bind credentials' + description: '[SecretClass](https://docs.stackable.tech/home/nightly/secret-operator/secretclass) containing the LDAP bind credentials.' type: string required: - secretClass type: object host: - description: Hostname of the S3 server without any protocol or port + description: 'Hostname of the S3 server without any protocol or port. For example: `west1.my-cloud.com`.' nullable: true type: string port: - description: Port the S3 server listens on. If not specified the products will determine the port to use. + description: Port the S3 server listens on. If not specified the product will determine the port to use. format: uint16 minimum: 0.0 nullable: true @@ -10077,7 +10115,7 @@ spec: nullable: true properties: verification: - description: The verification method used to verify the certificates of the server and/or the client + description: The verification method used to verify the certificates of the server and/or the client. oneOf: - required: - none @@ -10085,13 +10123,13 @@ spec: - server properties: none: - description: Use TLS but don't verify certificates + description: Use TLS but don't verify certificates. type: object server: - description: Use TLS and ca certificate to verify the server + description: Use TLS and a CA certificate to verify the server. properties: caCert: - description: Ca cert to verify the server + description: CA cert to verify the server. 
oneOf: - required: - webPki @@ -10099,10 +10137,10 @@ spec: - secretClass properties: secretClass: - description: Name of the SecretClass which will provide the ca cert. Note that a SecretClass does not need to have a key but can also work with just a ca cert. So if you got provided with a ca cert but don't have access to the key you can still use this method. + description: Name of the [SecretClass](https://docs.stackable.tech/home/nightly/secret-operator/secretclass) which will provide the CA certificate. Note that a SecretClass does not need to have a key but can also work with just a CA certificate, so if you got provided with a CA cert but don't have access to the key you can still use this method. type: string webPki: - description: Use TLS and the ca certificates trusted by the common web browsers to verify the server. This can be useful when you e.g. use public AWS S3 or other public available services. + description: Use TLS and the CA certificates trusted by the common web browsers to verify the server. This can be useful when you e.g. use public AWS S3 or other public available services. type: object type: object required: @@ -10114,10 +10152,12 @@ spec: type: object type: object reference: + description: A reference to an S3Connection resource. type: string type: object type: object reference: + description: A reference to an S3 bucket object. This is simply the name of the `S3Bucket` resource. type: string type: object prefix: @@ -10128,16 +10168,20 @@ spec: type: object type: object mainApplicationFile: - nullable: true + description: The actual application file that will be called by `spark-submit`. type: string mainClass: + description: The main class - i.e. entry point - for JVM artifacts. nullable: true type: string mode: - nullable: true + description: 'Mode: cluster or client. Currently only cluster is supported.' + enum: + - cluster + - client type: string s3connection: - description: Operators are expected to define fields for this type in order to work with S3 connections. + description: Configure an S3 connection that the SparkApplication has access to. Read more in the [Spark S3 usage guide](https://docs.stackable.tech/home/nightly/spark-k8s/usage-guide/s3). nullable: true oneOf: - required: @@ -10146,47 +10190,50 @@ spec: - reference properties: inline: - description: S3 connection definition as CRD. + description: Inline definition of an S3 connection. properties: accessStyle: - description: Which access style to use. Defaults to virtual hosted-style as most of the data products out there. Have a look at the official documentation on + description: Which access style to use. Defaults to virtual hosted-style as most of the data products out there. Have a look at the [AWS documentation](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html). enum: - Path - VirtualHosted nullable: true type: string credentials: - description: If the S3 uses authentication you have to specify you S3 credentials. In the most cases a SecretClass providing `accessKey` and `secretKey` is sufficient. + description: If the S3 uses authentication you have to specify you S3 credentials. In the most cases a [SecretClass](https://docs.stackable.tech/home/nightly/secret-operator/secretclass) providing `accessKey` and `secretKey` is sufficient. 
nullable: true properties: scope: - description: '[Scope](https://docs.stackable.tech/secret-operator/scope.html) of the [SecretClass](https://docs.stackable.tech/secret-operator/secretclass.html)' + description: '[Scope](https://docs.stackable.tech/home/nightly/secret-operator/scope) of the [SecretClass](https://docs.stackable.tech/home/nightly/secret-operator/secretclass).' nullable: true properties: node: default: false + description: The node scope is resolved to the name of the Kubernetes Node object that the Pod is running on. This will typically be the DNS name of the node. type: boolean pod: default: false + description: The pod scope is resolved to the name of the Kubernetes Pod. This allows the secret to differentiate between StatefulSet replicas. type: boolean services: default: [] + description: The service scope allows Pod objects to specify custom scopes. This should typically correspond to Service objects that the Pod participates in. items: type: string type: array type: object secretClass: - description: '[SecretClass](https://docs.stackable.tech/secret-operator/secretclass.html) containing the LDAP bind credentials' + description: '[SecretClass](https://docs.stackable.tech/home/nightly/secret-operator/secretclass) containing the LDAP bind credentials.' type: string required: - secretClass type: object host: - description: Hostname of the S3 server without any protocol or port + description: 'Hostname of the S3 server without any protocol or port. For example: `west1.my-cloud.com`.' nullable: true type: string port: - description: Port the S3 server listens on. If not specified the products will determine the port to use. + description: Port the S3 server listens on. If not specified the product will determine the port to use. format: uint16 minimum: 0.0 nullable: true @@ -10196,7 +10243,7 @@ spec: nullable: true properties: verification: - description: The verification method used to verify the certificates of the server and/or the client + description: The verification method used to verify the certificates of the server and/or the client. oneOf: - required: - none @@ -10204,13 +10251,13 @@ spec: - server properties: none: - description: Use TLS but don't verify certificates + description: Use TLS but don't verify certificates. type: object server: - description: Use TLS and ca certificate to verify the server + description: Use TLS and a CA certificate to verify the server. properties: caCert: - description: Ca cert to verify the server + description: CA cert to verify the server. oneOf: - required: - webPki @@ -10218,10 +10265,10 @@ spec: - secretClass properties: secretClass: - description: Name of the SecretClass which will provide the ca cert. Note that a SecretClass does not need to have a key but can also work with just a ca cert. So if you got provided with a ca cert but don't have access to the key you can still use this method. + description: Name of the [SecretClass](https://docs.stackable.tech/home/nightly/secret-operator/secretclass) which will provide the CA certificate. Note that a SecretClass does not need to have a key but can also work with just a CA certificate, so if you got provided with a CA cert but don't have access to the key you can still use this method. type: string webPki: - description: Use TLS and the ca certificates trusted by the common web browsers to verify the server. This can be useful when you e.g. use public AWS S3 or other public available services. 
+ description: Use TLS and the CA certificates trusted by the common web browsers to verify the server. This can be useful when you e.g. use public AWS S3 or other public available services. type: object type: object required: @@ -10233,12 +10280,14 @@ spec: type: object type: object reference: + description: A reference to an S3Connection resource. type: string type: object sparkConf: additionalProperties: type: string - nullable: true + default: {} + description: A map of key/value strings that will be passed directly to spark-submit. type: object sparkImage: anyOf: @@ -10247,6 +10296,10 @@ spec: - productVersion - required: - productVersion + description: |- + Specify which image to use, the easiest way is to only configure the `productVersion`. You can also configure a custom image registry to pull from, as well as completely custom images. + + Consult the [Product image selection documentation](https://docs.stackable.tech/home/nightly/concepts/product_image_selection) for details. properties: custom: description: Overwrite the docker image. Specify the full docker image name, e.g. `docker.stackable.tech/stackable/superset:1.4.1-stackable2.1.0` @@ -10256,14 +10309,14 @@ spec: type: string pullPolicy: default: Always - description: '[Pull policy](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy) used when pulling the Images' + description: '[Pull policy](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy) used when pulling the image.' enum: - IfNotPresent - Always - Never type: string pullSecrets: - description: '[Image pull secrets](https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod) to pull images from a private registry' + description: '[Image pull secrets](https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod) to pull images from a private registry.' items: description: LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. properties: @@ -10282,17 +10335,13 @@ spec: nullable: true type: string type: object - stopped: - nullable: true - type: boolean vectorAggregatorConfigMapName: - description: Name of the Vector aggregator discovery ConfigMap. It must contain the key `ADDRESS` with the address of the Vector aggregator. - nullable: true - type: string - version: + description: Name of the Vector aggregator [discovery ConfigMap](https://docs.stackable.tech/home/nightly/concepts/service_discovery). It must contain the key `ADDRESS` with the address of the Vector aggregator. Follow the [logging tutorial](https://docs.stackable.tech/home/nightly/tutorials/logging-vector-aggregator) to learn how to configure log aggregation with Vector. nullable: true type: string volumes: + default: [] + description: A list of volumes that can be made available to the job, driver or executors via their volume mounts. items: description: Volume represents a named volume in a pod that may be accessed by any container in the pod. properties: @@ -11366,9 +11415,10 @@ spec: required: - name type: object - nullable: true type: array required: + - mainApplicationFile + - mode - sparkImage type: object status: @@ -11412,11 +11462,12 @@ spec: description: Auto-generated derived type for SparkHistoryServerSpec via `CustomResource` properties: spec: + description: A Spark cluster history server component. This resource is managed by the Stackable operator for Apache Spark. 
Find more information on how to use it in the [operator documentation](https://docs.stackable.tech/home/nightly/spark-k8s/usage-guide/history-server). properties: clusterConfig: default: listenerClass: cluster-internal - description: Global Spark history server configuration that applies to all roles and role groups + description: Global Spark history server configuration that applies to all roles and role groups. properties: listenerClass: default: cluster-internal @@ -11443,6 +11494,10 @@ spec: - productVersion - required: - productVersion + description: |- + Specify which image to use, the easiest way is to only configure the `productVersion`. You can also configure a custom image registry to pull from, as well as completely custom images. + + Consult the [Product image selection documentation](https://docs.stackable.tech/home/nightly/concepts/product_image_selection) for details. properties: custom: description: Overwrite the docker image. Specify the full docker image name, e.g. `docker.stackable.tech/stackable/superset:1.4.1-stackable2.1.0` @@ -11452,14 +11507,14 @@ spec: type: string pullPolicy: default: Always - description: '[Pull policy](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy) used when pulling the Images' + description: '[Pull policy](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy) used when pulling the image.' enum: - IfNotPresent - Always - Never type: string pullSecrets: - description: '[Image pull secrets](https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod) to pull images from a private registry' + description: '[Image pull secrets](https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod) to pull images from a private registry.' items: description: LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. properties: @@ -11479,6 +11534,7 @@ spec: type: string type: object logFileDirectory: + description: The log file directory definition used by the Spark history server. Currently only S3 buckets are supported. oneOf: - required: - s3 @@ -11486,7 +11542,7 @@ spec: s3: properties: bucket: - description: Operators are expected to define fields for this type in order to work with S3 buckets. + description: An S3 bucket definition, it can either be a reference to an explicit S3Bucket object, or it can be an inline defintion of a bucket. Read the [S3 resources concept documentation](https://docs.stackable.tech/home/nightly/concepts/s3) to learn more. oneOf: - required: - inline @@ -11494,13 +11550,14 @@ spec: - reference properties: inline: - description: S3 bucket specification containing only the bucket name and an inlined or referenced connection specification. + description: An inline definition, containing the S3 bucket properties. properties: bucketName: + description: The name of the S3 bucket. nullable: true type: string connection: - description: Operators are expected to define fields for this type in order to work with S3 connections. + description: The definition of an S3 connection, either inline or as a reference. nullable: true oneOf: - required: @@ -11509,47 +11566,50 @@ spec: - reference properties: inline: - description: S3 connection definition as CRD. + description: Inline definition of an S3 connection. properties: accessStyle: - description: Which access style to use. Defaults to virtual hosted-style as most of the data products out there. 
Have a look at the official documentation on + description: Which access style to use. Defaults to virtual hosted-style as most of the data products out there. Have a look at the [AWS documentation](https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html). enum: - Path - VirtualHosted nullable: true type: string credentials: - description: If the S3 uses authentication you have to specify you S3 credentials. In the most cases a SecretClass providing `accessKey` and `secretKey` is sufficient. + description: If the S3 uses authentication you have to specify you S3 credentials. In the most cases a [SecretClass](https://docs.stackable.tech/home/nightly/secret-operator/secretclass) providing `accessKey` and `secretKey` is sufficient. nullable: true properties: scope: - description: '[Scope](https://docs.stackable.tech/secret-operator/scope.html) of the [SecretClass](https://docs.stackable.tech/secret-operator/secretclass.html)' + description: '[Scope](https://docs.stackable.tech/home/nightly/secret-operator/scope) of the [SecretClass](https://docs.stackable.tech/home/nightly/secret-operator/secretclass).' nullable: true properties: node: default: false + description: The node scope is resolved to the name of the Kubernetes Node object that the Pod is running on. This will typically be the DNS name of the node. type: boolean pod: default: false + description: The pod scope is resolved to the name of the Kubernetes Pod. This allows the secret to differentiate between StatefulSet replicas. type: boolean services: default: [] + description: The service scope allows Pod objects to specify custom scopes. This should typically correspond to Service objects that the Pod participates in. items: type: string type: array type: object secretClass: - description: '[SecretClass](https://docs.stackable.tech/secret-operator/secretclass.html) containing the LDAP bind credentials' + description: '[SecretClass](https://docs.stackable.tech/home/nightly/secret-operator/secretclass) containing the LDAP bind credentials.' type: string required: - secretClass type: object host: - description: Hostname of the S3 server without any protocol or port + description: 'Hostname of the S3 server without any protocol or port. For example: `west1.my-cloud.com`.' nullable: true type: string port: - description: Port the S3 server listens on. If not specified the products will determine the port to use. + description: Port the S3 server listens on. If not specified the product will determine the port to use. format: uint16 minimum: 0.0 nullable: true @@ -11559,7 +11619,7 @@ spec: nullable: true properties: verification: - description: The verification method used to verify the certificates of the server and/or the client + description: The verification method used to verify the certificates of the server and/or the client. oneOf: - required: - none @@ -11567,13 +11627,13 @@ spec: - server properties: none: - description: Use TLS but don't verify certificates + description: Use TLS but don't verify certificates. type: object server: - description: Use TLS and ca certificate to verify the server + description: Use TLS and a CA certificate to verify the server. properties: caCert: - description: Ca cert to verify the server + description: CA cert to verify the server. oneOf: - required: - webPki @@ -11581,10 +11641,10 @@ spec: - secretClass properties: secretClass: - description: Name of the SecretClass which will provide the ca cert. 
Note that a SecretClass does not need to have a key but can also work with just a ca cert. So if you got provided with a ca cert but don't have access to the key you can still use this method. + description: Name of the [SecretClass](https://docs.stackable.tech/home/nightly/secret-operator/secretclass) which will provide the CA certificate. Note that a SecretClass does not need to have a key but can also work with just a CA certificate, so if you got provided with a CA cert but don't have access to the key you can still use this method. type: string webPki: - description: Use TLS and the ca certificates trusted by the common web browsers to verify the server. This can be useful when you e.g. use public AWS S3 or other public available services. + description: Use TLS and the CA certificates trusted by the common web browsers to verify the server. This can be useful when you e.g. use public AWS S3 or other public available services. type: object type: object required: @@ -11596,10 +11656,12 @@ spec: type: object type: object reference: + description: A reference to an S3Connection resource. type: string type: object type: object reference: + description: A reference to an S3 bucket object. This is simply the name of the `S3Bucket` resource. type: string type: object prefix: @@ -11610,7 +11672,7 @@ spec: type: object type: object nodes: - description: This struct represents a role - e.g. HDFS datanodes or Trino workers. It has a [`HashMap`] containing all the roleGroups that are part of this role. Additionally, there is a `config`, which is configurable at the role *and* roleGroup level. Everything at roleGroup level is merged on top of what is configured on role level using the [`Merge`] trait. There is also a second form of config, which can only be configured at role level, the `roleConfig`. + description: A history server node role definition. properties: cliOverrides: additionalProperties: @@ -11626,6 +11688,7 @@ spec: podAntiAffinity: null nodeAffinity: null nodeSelector: null + description: These configuration settings control [Pod placement](https://docs.stackable.tech/home/nightly/concepts/operations/pod_placement). properties: nodeAffinity: description: Node affinity is a group of node affinity scheduling rules. @@ -12098,7 +12161,7 @@ spec: default: enableVectorAgent: null containers: {} - description: Logging configuration + description: Logging configuration, learn more in the [logging concept documentation](https://docs.stackable.tech/home/nightly/concepts/logging). properties: containers: additionalProperties: @@ -12171,10 +12234,10 @@ spec: description: Configuration per logger type: object type: object - description: Log configuration per container + description: Log configuration per container. type: object enableVectorAgent: - description: Wether or not to deploy a container with the Vector log agent + description: Wether or not to deploy a container with the Vector log agent. nullable: true type: boolean type: object @@ -12187,6 +12250,7 @@ spec: min: null max: null storage: {} + description: Resource usage is configured here, this includes CPU usage, memory usage and disk storage usage, if this role needs any. properties: cpu: default: @@ -12194,21 +12258,22 @@ spec: max: null properties: max: - description: "Quantity is a fixed-point representation of a number. 
It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation." + description: The maximum amount of CPU cores that can be requested by Pods. Equivalent to the `limit` for Pod resource configuration. Cores are specified either as a decimal point number or as milli units. For example:`1.5` will be 1.5 cores, also written as `1500m`. nullable: true type: string min: - description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation." + description: The minimal amount of CPU cores that Pods need to run. Equivalent to the `request` for Pod resource configuration. Cores are specified either as a decimal point number or as milli units. For example:`1.5` will be 1.5 cores, also written as `1500m`. nullable: true type: string type: object memory: properties: limit: - description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation." + description: 'The maximum amount of memory that should be available to the Pod. 
Specified as a byte [Quantity](https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/), which means these suffixes are supported: E, P, T, G, M, k. You can also use the power-of-two equivalents: Ei, Pi, Ti, Gi, Mi, Ki. For example, the following represent roughly the same value: `128974848, 129e6, 129M, 128974848000m, 123Mi`' nullable: true type: string runtimeLimits: + description: Additional options that can be specified. type: object type: object storage: @@ -12221,15 +12286,17 @@ spec: type: string type: object default: {} + description: The `configOverrides` can be used to configure properties in product config files that are not exposed in the CRD. Read the [config overrides documentation](https://docs.stackable.tech/home/nightly/concepts/overrides#config-overrides) and consult the operator specific usage guide documentation for details on the available config files and settings for the specific product. type: object envOverrides: additionalProperties: type: string default: {} + description: '`envOverrides` configure environment variables to be set in the Pods. It is a map from strings to strings - environment variables and the value to set. Read the [environment variable overrides documentation](https://docs.stackable.tech/home/nightly/concepts/overrides#env-overrides) for more information and consult the operator specific usage guide to find out about the product specific environment variables that are available.' type: object podOverrides: default: {} - description: See PodTemplateSpec (https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#podtemplatespec-v1-core) for more details + description: In the `podOverrides` property you can define a [PodTemplateSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#podtemplatespec-v1-core) to override any property that can be set on a Kubernetes Pod. Read the [Pod overrides documentation](https://docs.stackable.tech/home/nightly/concepts/overrides#pod-overrides) for more information. properties: metadata: properties: @@ -15036,7 +15103,12 @@ spec: default: enabled: true maxUnavailable: null - description: 'This struct is used to configure: 1.) If PodDisruptionBudgets are created by the operator 2.) The allowed number of Pods to be unavailable (`maxUnavailable`)' + description: |- + This struct is used to configure: + + 1. If PodDisruptionBudgets are created by the operator 2. The allowed number of Pods to be unavailable (`maxUnavailable`) + + Learn more in the [allowed Pod disruptions documentation](https://docs.stackable.tech/home/nightly/concepts/operations/pod_disruptions). properties: enabled: default: true @@ -15067,6 +15139,7 @@ spec: podAntiAffinity: null nodeAffinity: null nodeSelector: null + description: These configuration settings control [Pod placement](https://docs.stackable.tech/home/nightly/concepts/operations/pod_placement). properties: nodeAffinity: description: Node affinity is a group of node affinity scheduling rules. @@ -15539,7 +15612,7 @@ spec: default: enableVectorAgent: null containers: {} - description: Logging configuration + description: Logging configuration, learn more in the [logging concept documentation](https://docs.stackable.tech/home/nightly/concepts/logging). properties: containers: additionalProperties: @@ -15612,10 +15685,10 @@ spec: description: Configuration per logger type: object type: object - description: Log configuration per container + description: Log configuration per container. 
type: object enableVectorAgent: - description: Wether or not to deploy a container with the Vector log agent + description: Wether or not to deploy a container with the Vector log agent. nullable: true type: boolean type: object @@ -15628,6 +15701,7 @@ spec: min: null max: null storage: {} + description: Resource usage is configured here, this includes CPU usage, memory usage and disk storage usage, if this role needs any. properties: cpu: default: @@ -15635,21 +15709,22 @@ spec: max: null properties: max: - description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation." + description: The maximum amount of CPU cores that can be requested by Pods. Equivalent to the `limit` for Pod resource configuration. Cores are specified either as a decimal point number or as milli units. For example:`1.5` will be 1.5 cores, also written as `1500m`. nullable: true type: string min: - description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . 
::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation." + description: The minimal amount of CPU cores that Pods need to run. Equivalent to the `request` for Pod resource configuration. Cores are specified either as a decimal point number or as milli units. For example:`1.5` will be 1.5 cores, also written as `1500m`. nullable: true type: string type: object memory: properties: limit: - description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` ::= \n\n\t(Note that may be empty, from the \"\" case in .)\n\n ::= 0 | 1 | ... | 9 ::= | ::= | . | . | . ::= \"+\" | \"-\" ::= | ::= | | ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n ::= \"e\" | \"E\" ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation." + description: 'The maximum amount of memory that should be available to the Pod. Specified as a byte [Quantity](https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/), which means these suffixes are supported: E, P, T, G, M, k. You can also use the power-of-two equivalents: Ei, Pi, Ti, Gi, Mi, Ki. For example, the following represent roughly the same value: `128974848, 129e6, 129M, 128974848000m, 123Mi`' nullable: true type: string runtimeLimits: + description: Additional options that can be specified. type: object type: object storage: @@ -15662,15 +15737,17 @@ spec: type: string type: object default: {} + description: The `configOverrides` can be used to configure properties in product config files that are not exposed in the CRD. Read the [config overrides documentation](https://docs.stackable.tech/home/nightly/concepts/overrides#config-overrides) and consult the operator specific usage guide documentation for details on the available config files and settings for the specific product. type: object envOverrides: additionalProperties: type: string default: {} + description: '`envOverrides` configure environment variables to be set in the Pods. It is a map from strings to strings - environment variables and the value to set. Read the [environment variable overrides documentation](https://docs.stackable.tech/home/nightly/concepts/overrides#env-overrides) for more information and consult the operator specific usage guide to find out about the product specific environment variables that are available.' type: object podOverrides: default: {} - description: See PodTemplateSpec (https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#podtemplatespec-v1-core) for more details + description: In the `podOverrides` property you can define a [PodTemplateSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#podtemplatespec-v1-core) to override any property that can be set on a Kubernetes Pod. Read the [Pod overrides documentation](https://docs.stackable.tech/home/nightly/concepts/overrides#pod-overrides) for more information. properties: metadata: properties: @@ -18511,6 +18588,7 @@ spec: additionalProperties: type: string default: {} + description: A map of key/value strings that will be passed directly to Spark when deploying the history server. type: object vectorAggregatorConfigMapName: description: Name of the Vector aggregator discovery ConfigMap. It must contain the key `ADDRESS` with the address of the Vector aggregator. 
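The CRD changes above make `spec.mode`, `spec.mainApplicationFile` and `spec.sparkImage` mandatory, drop `spec.version` and `spec.stopped`, and give `sparkConf`, `args`, `volumes` and `env` empty defaults. A minimal manifest that satisfies the new schema could look like the sketch below; the name, file path and `v1alpha1` API version are illustrative assumptions, not taken from this diff. Only `cluster` mode is accepted today, matching the new `SparkMode` enum introduced in `roles.rs`.

```yaml
# Minimal SparkApplication under the updated schema: spec.version and
# spec.stopped are gone, while mode, mainApplicationFile and sparkImage
# are now required. Name, file path and apiVersion are illustrative.
apiVersion: spark.stackable.tech/v1alpha1
kind: SparkApplication
metadata:
  name: spark-pi
spec:
  mode: cluster  # only `cluster` is currently supported
  mainApplicationFile: local:///stackable/spark/examples/src/main/python/pi.py  # placeholder path
  sparkImage:
    productVersion: 3.5.0
  sparkConf:  # optional, defaults to {}
    spark.kubernetes.submission.waitAppCompletion: "false"
```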
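The `s3connection` block documented above accepts either a `reference` to an S3Connection object or an `inline` definition. A hedged fragment of a SparkApplication spec showing the inline form with SecretClass-based credentials and WebPKI server verification follows; host, port and SecretClass name are placeholders, and the `tls` wrapper key is assumed from the verification schema shown above.

```yaml
# Fragment of a SparkApplication spec: an inline S3 connection with
# SecretClass-based credentials and WebPKI server verification.
# Host, port and SecretClass name are placeholders.
spec:
  s3connection:
    inline:
      host: west1.my-cloud.com
      port: 9000
      accessStyle: Path            # or VirtualHosted (the default)
      credentials:
        secretClass: spark-s3-credentials
      tls:                         # wrapper key assumed from the verification schema
        verification:
          server:
            caCert:
              webPki: {}
```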
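For the history server, `logFileDirectory` currently supports only S3. The sketch below combines the inline bucket definition with a referenced connection and shows the CPU and memory notation described in the resources schema; bucket, connection and role-group names are placeholders, and the `roleGroups`/`replicas` layout is assumed to follow the usual Stackable role structure.

```yaml
# Sketch of a SparkHistoryServer: S3 log file directory plus a node role
# with explicit resources. Names are placeholders; the roleGroups layout
# follows the usual Stackable role/roleGroup structure (assumption).
apiVersion: spark.stackable.tech/v1alpha1
kind: SparkHistoryServer
metadata:
  name: spark-history
spec:
  image:
    productVersion: 3.5.0
  logFileDirectory:
    s3:
      prefix: eventlogs/
      bucket:
        inline:
          bucketName: spark-logs
          connection:
            reference: minio-connection   # name of an S3Connection resource
  nodes:
    roleGroups:
      default:
        replicas: 1
        config:
          resources:
            cpu:
              min: 500m   # milli units; `1.5` and `1500m` denote the same amount
              max: "1"
            memory:
              limit: 1Gi  # byte Quantity, e.g. 129M or 123Mi
```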
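The `configOverrides`, `envOverrides` and `podOverrides` hooks documented above are also available on the history server nodes role. A brief hedged fragment; the config file name, property and environment variable are invented for illustration.

```yaml
# Hedged fragment showing the override hooks on the history server nodes role.
# The config file name and all values are illustrative assumptions.
nodes:
  configOverrides:
    spark-defaults.conf:        # assumed product config file name
      spark.history.fs.cleaner.enabled: "true"
  envOverrides:
    HTTP_PROXY: http://proxy.example.com:8080
  podOverrides:
    spec:
      terminationGracePeriodSeconds: 120
```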
diff --git a/docs/modules/spark-k8s/examples/example-encapsulated.yaml b/docs/modules/spark-k8s/examples/example-encapsulated.yaml index bdc6d3f4..e8edac3c 100644 --- a/docs/modules/spark-k8s/examples/example-encapsulated.yaml +++ b/docs/modules/spark-k8s/examples/example-encapsulated.yaml @@ -4,7 +4,6 @@ kind: SparkApplication metadata: name: spark-pi spec: - version: "1.0" sparkImage: productVersion: 3.5.0 # <1> mode: cluster diff --git a/docs/modules/spark-k8s/examples/example-history-app.yaml b/docs/modules/spark-k8s/examples/example-history-app.yaml index 95ed5350..cfd37074 100644 --- a/docs/modules/spark-k8s/examples/example-history-app.yaml +++ b/docs/modules/spark-k8s/examples/example-history-app.yaml @@ -4,7 +4,6 @@ kind: SparkApplication metadata: name: spark-pi-s3-1 spec: - version: "1.0" sparkImage: productVersion: 3.5.0 pullPolicy: IfNotPresent diff --git a/docs/modules/spark-k8s/examples/example-sparkapp-configmap.yaml b/docs/modules/spark-k8s/examples/example-sparkapp-configmap.yaml index a5c90a8f..27173724 100644 --- a/docs/modules/spark-k8s/examples/example-sparkapp-configmap.yaml +++ b/docs/modules/spark-k8s/examples/example-sparkapp-configmap.yaml @@ -5,7 +5,6 @@ metadata: name: ny-tlc-report-configmap namespace: default spec: - version: "1.0" sparkImage: productVersion: 3.5.0 mode: cluster diff --git a/docs/modules/spark-k8s/examples/example-sparkapp-external-dependencies.yaml b/docs/modules/spark-k8s/examples/example-sparkapp-external-dependencies.yaml index b1c798c5..c9a700ba 100644 --- a/docs/modules/spark-k8s/examples/example-sparkapp-external-dependencies.yaml +++ b/docs/modules/spark-k8s/examples/example-sparkapp-external-dependencies.yaml @@ -5,7 +5,6 @@ metadata: name: example-sparkapp-external-dependencies namespace: default spec: - version: "1.0" sparkImage: productVersion: 3.5.0 mode: cluster diff --git a/docs/modules/spark-k8s/examples/example-sparkapp-image.yaml b/docs/modules/spark-k8s/examples/example-sparkapp-image.yaml index 13e7fe9c..15428794 100644 --- a/docs/modules/spark-k8s/examples/example-sparkapp-image.yaml +++ b/docs/modules/spark-k8s/examples/example-sparkapp-image.yaml @@ -5,7 +5,6 @@ metadata: name: example-sparkapp-image namespace: default spec: - version: "1.0" image: docker.stackable.tech/stackable/ny-tlc-report:0.1.0 # <1> sparkImage: productVersion: 3.5.0 diff --git a/docs/modules/spark-k8s/examples/example-sparkapp-pvc.yaml b/docs/modules/spark-k8s/examples/example-sparkapp-pvc.yaml index 0bce8d0f..322fd315 100644 --- a/docs/modules/spark-k8s/examples/example-sparkapp-pvc.yaml +++ b/docs/modules/spark-k8s/examples/example-sparkapp-pvc.yaml @@ -5,7 +5,6 @@ metadata: name: example-sparkapp-pvc namespace: default spec: - version: "1.0" sparkImage: productVersion: 3.5.0 mode: cluster diff --git a/docs/modules/spark-k8s/examples/example-sparkapp-s3-private.yaml b/docs/modules/spark-k8s/examples/example-sparkapp-s3-private.yaml index b9b06157..0c0b6167 100644 --- a/docs/modules/spark-k8s/examples/example-sparkapp-s3-private.yaml +++ b/docs/modules/spark-k8s/examples/example-sparkapp-s3-private.yaml @@ -4,7 +4,6 @@ kind: SparkApplication metadata: name: example-sparkapp-s3-private spec: - version: "1.0" sparkImage: productVersion: 3.5.0 mode: cluster diff --git a/docs/modules/spark-k8s/examples/example-sparkapp-streaming.yaml b/docs/modules/spark-k8s/examples/example-sparkapp-streaming.yaml index 34213995..572bb7e5 100644 --- a/docs/modules/spark-k8s/examples/example-sparkapp-streaming.yaml +++ 
b/docs/modules/spark-k8s/examples/example-sparkapp-streaming.yaml @@ -5,7 +5,6 @@ metadata: name: pyspark-streaming namespace: default spec: - version: "1.0" sparkImage: productVersion: 3.5.0 mode: cluster diff --git a/docs/modules/spark-k8s/examples/getting_started/getting_started.sh b/docs/modules/spark-k8s/examples/getting_started/getting_started.sh index db4b9127..e6b1ef29 100755 --- a/docs/modules/spark-k8s/examples/getting_started/getting_started.sh +++ b/docs/modules/spark-k8s/examples/getting_started/getting_started.sh @@ -59,7 +59,6 @@ metadata: name: pyspark-pi namespace: default spec: - version: "1.0" sparkImage: productVersion: 3.5.0 mode: cluster diff --git a/docs/modules/spark-k8s/examples/getting_started/getting_started.sh.j2 b/docs/modules/spark-k8s/examples/getting_started/getting_started.sh.j2 index b2fbc2fc..a7e2a954 100755 --- a/docs/modules/spark-k8s/examples/getting_started/getting_started.sh.j2 +++ b/docs/modules/spark-k8s/examples/getting_started/getting_started.sh.j2 @@ -59,7 +59,6 @@ metadata: name: pyspark-pi namespace: default spec: - version: "1.0" sparkImage: productVersion: 3.5.0 mode: cluster diff --git a/docs/modules/spark-k8s/pages/crd-reference.adoc b/docs/modules/spark-k8s/pages/crd-reference.adoc index 7a8b1135..98b235b5 100644 --- a/docs/modules/spark-k8s/pages/crd-reference.adoc +++ b/docs/modules/spark-k8s/pages/crd-reference.adoc @@ -12,49 +12,46 @@ Below are listed the CRD fields that can be defined by the user: |`SparkApplication` |`metadata.name` -|Application name - -|`spec.version` -|Application version +|Application name. Mandatory. |`spec.mode` -| `cluster` or `client`. Currently only `cluster` is supported +| `cluster` or `client`. Currently only `cluster` is supported. Mandatory. |`spec.image` -|User-supplied image containing spark-job dependencies that will be copied to the specified volume mount +|User-supplied image containing spark-job dependencies that will be copied to the specified volume mount. |`spec.sparkImage` -| Spark image which will be deployed to driver and executor pods, which must contain spark environment needed by the job e.g. `docker.stackable.tech/stackable/spark-k8s:3.5.0-stackable0.0.0-dev` +| Spark image which will be deployed to driver and executor pods, which must contain spark environment needed by the job e.g. `docker.stackable.tech/stackable/spark-k8s:3.5.0-stackable0.0.0-dev`. Mandatory. |`spec.sparkImagePullPolicy` -| Optional Enum (one of `Always`, `IfNotPresent` or `Never`) that determines the pull policy of the spark job image +| Optional Enum (one of `Always`, `IfNotPresent` or `Never`) that determines the pull policy of the spark job image. |`spec.sparkImagePullSecrets` -| An optional list of references to secrets in the same namespace to use for pulling any of the images used by a `SparkApplication` resource. Each reference has a single property (`name`) that must contain a reference to a valid secret +| An optional list of references to secrets in the same namespace to use for pulling any of the images used by a `SparkApplication` resource. Each reference has a single property (`name`) that must contain a reference to a valid secret. |`spec.mainApplicationFile` -|The actual application file that will be called by `spark-submit` +|The actual application file that will be called by `spark-submit`. Mandatory. |`spec.mainClass` -|The main class i.e. entry point for JVM artifacts +|The main class/entry point for JVM artifacts. 
|`spec.args` -|Arguments passed directly to the job artifact +|Arguments passed directly to the job artifact. |`spec.s3connection` |S3 connection specification. See the xref:concepts:s3.adoc[] for more details. |`spec.sparkConf` -|A map of key/value strings that will be passed directly to `spark-submit` +|A map of key/value strings that will be passed directly to `spark-submit`. |`spec.deps.requirements` -|A list of python packages that will be installed via `pip` +|A list of python packages that will be installed via `pip`. |`spec.deps.packages` -|A list of packages that is passed directly to `spark-submit` +|A list of packages that is passed directly to `spark-submit`. |`spec.deps.excludePackages` -|A list of excluded packages that is passed directly to `spark-submit` +|A list of excluded packages that is passed directly to `spark-submit`. |`spec.deps.repositories` |A list of repositories that is passed directly to `spark-submit` @@ -92,23 +89,23 @@ Below are listed the CRD fields that can be defined by the user: |`spec.executor.resources` |Resources specification for the executor Pods -|`spec.executor.instances` -|Number of executor instances launched for this job +|`spec.executor.replicas` +|Number of executor instances launched for this job. |`spec.executor.volumeMounts` -|A list of mounted volumes for each executor +|A list of mounted volumes for each executor. |`spec.executor.volumeMounts.name` -|Name of mount +|Name of mount. |`spec.executor.volumeMounts.mountPath` -|Volume mount path +|Volume mount path. |`spec.executor.affinity` |Driver Pod placement affinity. See xref:usage-guide/operations/pod-placement.adoc[] for details. |`spec.executor.logging` -|Logging aggregation for the executor Pods. See xref:concepts:logging.adoc[] for details +|Logging aggregation for the executor Pods. See xref:concepts:logging.adoc[] for details. |`spec.logFileDirectory.bucket` |S3 bucket definition where applications should publish events for the Spark History server. diff --git a/examples/ny-tlc-report-external-dependencies.yaml b/examples/ny-tlc-report-external-dependencies.yaml index f5d3bc69..965e3122 100644 --- a/examples/ny-tlc-report-external-dependencies.yaml +++ b/examples/ny-tlc-report-external-dependencies.yaml @@ -5,7 +5,6 @@ metadata: name: spark-ny-ext namespace: default spec: - version: "1.0" sparkImage: productVersion: 3.5.0 pullPolicy: IfNotPresent diff --git a/examples/ny-tlc-report-image.yaml b/examples/ny-tlc-report-image.yaml index d2a8dfa6..5adb3328 100644 --- a/examples/ny-tlc-report-image.yaml +++ b/examples/ny-tlc-report-image.yaml @@ -5,7 +5,6 @@ metadata: name: spark-ny-image namespace: default spec: - version: "1.0" # everything under /jobs will be copied to /stackable/spark/jobs image: docker.stackable.tech/stackable/ny-tlc-report:0.1.0 sparkImage: docker.stackable.tech/stackable/spark-k8s:3.5.0-stackable0.0.0-dev diff --git a/examples/ny-tlc-report.yaml b/examples/ny-tlc-report.yaml index dfd14139..eda7a0cd 100644 --- a/examples/ny-tlc-report.yaml +++ b/examples/ny-tlc-report.yaml @@ -12,7 +12,6 @@ kind: SparkApplication metadata: name: spark-ny-cm spec: - version: "1.0" sparkImage: productVersion: 3.5.0 mode: cluster diff --git a/rust/crd/src/history.rs b/rust/crd/src/history.rs index 026b7cef..2de608b2 100644 --- a/rust/crd/src/history.rs +++ b/rust/crd/src/history.rs @@ -49,6 +49,9 @@ pub enum Error { CannotRetrieveRoleGroup { role_group: String }, } +/// A Spark cluster history server component.
This resource is managed by the Stackable operator +/// for Apache Spark. Find more information on how to use it in the +/// [operator documentation](DOCS_BASE_URL_PLACEHOLDER/spark-k8s/usage-guide/history-server). #[derive(Clone, CustomResource, Debug, Deserialize, JsonSchema, Serialize)] #[kube( group = "spark.stackable.tech", @@ -65,16 +68,25 @@ pub enum Error { #[serde(rename_all = "camelCase")] pub struct SparkHistoryServerSpec { pub image: ProductImage, - /// Global Spark history server configuration that applies to all roles and role groups + + /// Global Spark history server configuration that applies to all roles and role groups. #[serde(default)] pub cluster_config: SparkHistoryServerClusterConfig, + /// Name of the Vector aggregator discovery ConfigMap. /// It must contain the key `ADDRESS` with the address of the Vector aggregator. #[serde(skip_serializing_if = "Option::is_none")] pub vector_aggregator_config_map_name: Option, + + /// The log file directory definition used by the Spark history server. + /// Currently only S3 buckets are supported. pub log_file_directory: LogFileDirectorySpec, + + /// A map of key/value strings that will be passed directly to Spark when deploying the history server. #[serde(default)] pub spark_conf: BTreeMap, + + /// A history server node role definition. pub nodes: Role, } diff --git a/rust/crd/src/lib.rs b/rust/crd/src/lib.rs index ca771d0c..2a12958b 100644 --- a/rust/crd/src/lib.rs +++ b/rust/crd/src/lib.rs @@ -82,6 +82,12 @@ pub struct SparkApplicationStatus { pub phase: String, } +/// A Spark cluster stacklet. This resource is managed by the Stackable operator for Apache Spark. +/// Find more information on how to use it and the resources that the operator generates in the +/// [operator documentation](DOCS_BASE_URL_PLACEHOLDER/spark-k8s/). +/// +/// The SparkApplication CRD looks a little different than the CRDs of the other products on the +/// Stackable Data Platform. #[derive(Clone, CustomResource, Debug, Deserialize, JsonSchema, Serialize)] #[kube( group = "spark.stackable.tech", @@ -98,41 +104,76 @@ pub struct SparkApplicationStatus { )] #[serde(rename_all = "camelCase")] pub struct SparkApplicationSpec { - #[serde(default, skip_serializing_if = "Option::is_none")] - pub version: Option, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub mode: Option, + /// Mode: cluster or client. Currently only cluster is supported. + pub mode: SparkMode, + + /// The main class - i.e. entry point - for JVM artifacts. #[serde(default, skip_serializing_if = "Option::is_none")] pub main_class: Option, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub main_application_file: Option, + + /// The actual application file that will be called by `spark-submit`. + pub main_application_file: String, + + /// User-supplied image containing spark-job dependencies that will be copied to the specified volume mount. + /// See the [examples](DOCS_BASE_URL_PLACEHOLDER/spark-k8s/usage-guide/examples). #[serde(default, skip_serializing_if = "Option::is_none")] pub image: Option, + + // no doc - docs in ProductImage struct. pub spark_image: ProductImage, - /// Name of the Vector aggregator discovery ConfigMap. + + /// Name of the Vector aggregator [discovery ConfigMap](DOCS_BASE_URL_PLACEHOLDER/concepts/service_discovery). /// It must contain the key `ADDRESS` with the address of the Vector aggregator. 
+ /// Follow the [logging tutorial](DOCS_BASE_URL_PLACEHOLDER/tutorials/logging-vector-aggregator) + /// to learn how to configure log aggregation with Vector. #[serde(skip_serializing_if = "Option::is_none")] pub vector_aggregator_config_map_name: Option, + + /// The job builds a spark-submit command, complete with arguments and referenced dependencies + /// such as templates, and passes it on to Spark. #[serde(default, skip_serializing_if = "Option::is_none")] pub job: Option>, + + /// The driver role specifies the configuration that, together with the driver pod template, is used by + /// Spark to create driver pods. #[serde(default, skip_serializing_if = "Option::is_none")] pub driver: Option>, + + /// The executor role specifies the configuration that, together with the driver pod template, is used by + /// Spark to create the executor pods. #[serde(default, skip_serializing_if = "Option::is_none")] pub executor: Option>, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub stopped: Option, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub spark_conf: Option>, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub deps: Option, + + /// A map of key/value strings that will be passed directly to spark-submit. + #[serde(default)] + pub spark_conf: HashMap, + + /// Job dependencies: a list of python packages that will be installed via pip, a list of packages + /// or repositories that is passed directly to spark-submit, or a list of excluded packages + /// (also passed directly to spark-submit). + #[serde(default)] + pub deps: JobDependencies, + + /// Configure an S3 connection that the SparkApplication has access to. + /// Read more in the [Spark S3 usage guide](DOCS_BASE_URL_PLACEHOLDER/spark-k8s/usage-guide/s3). #[serde(default, skip_serializing_if = "Option::is_none")] pub s3connection: Option, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub args: Option>, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub volumes: Option>, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub env: Option>, + + /// Arguments passed directly to the job artifact. + #[serde(default)] + pub args: Vec, + + /// A list of volumes that can be made available to the job, driver or executors via their volume mounts. + #[serde(default)] + pub volumes: Vec, + + /// A list of environment variables that will be set in the job pod and the driver and executor + /// pod templates. + #[serde(default)] + pub env: Vec, + + /// The log file directory definition used by the Spark history server. + /// Currently only S3 buckets are supported. #[serde(default, skip_serializing_if = "Option::is_none")] pub log_file_directory: Option, } @@ -140,14 +181,22 @@ pub struct SparkApplicationSpec { #[derive(Clone, Debug, Default, Deserialize, JsonSchema, PartialEq, Eq, Serialize)] #[serde(rename_all = "camelCase")] pub struct JobDependencies { - #[serde(default, skip_serializing_if = "Option::is_none")] - pub requirements: Option>, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub packages: Option>, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub repositories: Option>, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub exclude_packages: Option>, + /// Under the `requirements` you can specify Python dependencies that will be installed with `pip`. + /// Example: `tabulate==0.8.9` + #[serde(default)] + pub requirements: Vec, + + /// A list of packages that is passed directly to `spark-submit`. 
+ #[serde(default)] + pub packages: Vec, + + /// A list of repositories that is passed directly to `spark-submit`. + #[serde(default)] + pub repositories: Vec, + + /// A list of excluded packages that is passed directly to `spark-submit`. + #[serde(default)] + pub exclude_packages: Vec, } impl SparkApplication { @@ -159,36 +208,23 @@ impl SparkApplication { format!("{app_name}-{role}-pod-template", app_name = self.name_any()) } - pub fn mode(&self) -> Option<&str> { - self.spec.mode.as_deref() - } - pub fn image(&self) -> Option<&str> { self.spec.image.as_deref() } - pub fn version(&self) -> Option<&str> { - self.spec.version.as_deref() - } - - pub fn application_artifact(&self) -> Option<&str> { - self.spec.main_application_file.as_deref() + pub fn application_artifact(&self) -> &str { + self.spec.main_application_file.as_ref() } pub fn requirements(&self) -> Option { - self.spec - .deps - .as_ref() - .and_then(|deps| deps.requirements.as_ref()) - .map(|req| req.join(" ")) + if !self.spec.deps.requirements.is_empty() { + return Some(self.spec.deps.requirements.join(" ")); + } + None } pub fn packages(&self) -> Vec { - self.spec - .deps - .as_ref() - .and_then(|deps| deps.packages.clone()) - .unwrap_or_default() + self.spec.deps.packages.clone() } pub fn volumes( @@ -197,14 +233,7 @@ impl SparkApplication { s3logdir: &Option, log_config_map: &str, ) -> Vec { - let mut result: Vec = self - .spec - .volumes - .as_ref() - .iter() - .flat_map(|v| v.iter()) - .cloned() - .collect(); + let mut result: Vec = self.spec.volumes.clone(); if self.spec.image.is_some() { result.push( @@ -378,11 +407,15 @@ impl SparkApplication { mounts } - pub fn build_recommended_labels<'a>(&'a self, role: &'a str) -> ObjectLabels { + pub fn build_recommended_labels<'a>( + &'a self, + app_version: &'a str, + role: &'a str, + ) -> ObjectLabels { ObjectLabels { owner: self, app_name: APP_NAME, - app_version: self.version().unwrap(), + app_version, operator_name: OPERATOR_NAME, controller_name: CONTROLLER_NAME, role, @@ -398,7 +431,7 @@ impl SparkApplication { spark_image: &str, ) -> Result, Error> { // mandatory properties - let mode = self.mode().context(ObjectHasNoDeployModeSnafu)?; + let mode = &self.spec.mode; let name = self.metadata.name.clone().context(ObjectHasNoNameSnafu)?; let mut submit_cmd: Vec = vec![]; @@ -477,15 +510,18 @@ impl SparkApplication { ]); // repositories and packages arguments - if let Some(deps) = self.spec.deps.clone() { - submit_cmd.extend( - deps.repositories - .map(|r| format!("--repositories {}", r.join(","))), - ); - submit_cmd.extend( - deps.packages - .map(|p| format!("--conf spark.jars.packages={}", p.join(","))), - ); + if !self.spec.deps.repositories.is_empty() { + submit_cmd.extend(vec![format!( + "--repositories {}", + self.spec.deps.repositories.join(",") + )]); + } + + if !self.spec.deps.packages.is_empty() { + submit_cmd.extend(vec![format!( + "--conf spark.jars.packages={}", + self.spec.deps.packages.join(",") + )]); } // some command elements need to be initially stored in a map (to allow overwrites) and @@ -530,9 +566,8 @@ impl SparkApplication { } // conf arguments: these should follow - and thus override - values set from resource limits above - if let Some(spark_conf) = self.spec.spark_conf.clone() { - submit_conf.extend(spark_conf); - } + submit_conf.extend(self.spec.spark_conf.clone()); + // ...before being added to the command collection for (key, value) in submit_conf { submit_cmd.push(format!("--conf \"{key}={value}\"")); @@ -545,14 +580,10 @@ impl SparkApplication 
{
                 .map(|mc| format! {"--class {mc}"}),
         );
-        let artifact = self
-            .application_artifact()
-            .context(ObjectHasNoArtifactSnafu)?;
+        let artifact = self.application_artifact();
         submit_cmd.push(artifact.to_string());
-        if let Some(job_args) = self.spec.args.clone() {
-            submit_cmd.extend(job_args);
-        }
+        submit_cmd.extend(self.spec.args.clone());
         Ok(submit_cmd)
     }
@@ -562,8 +593,7 @@ impl SparkApplication {
         s3conn: &Option,
         s3logdir: &Option,
     ) -> Vec {
-        let tmp = self.spec.env.as_ref();
-        let mut e: Vec = tmp.iter().flat_map(|e| e.iter()).cloned().collect();
+        let mut e: Vec = self.spec.env.clone();
         if self.requirements().is_some() {
             e.push(EnvVar {
                 name: "PYTHONPATH".to_string(),
@@ -912,6 +942,8 @@ mod tests {
         metadata:
           name: spark-examples
         spec:
+          mode: cluster
+          mainApplicationFile: test.py
           sparkImage:
             productVersion: 1.2.3
         "})
@@ -939,6 +971,8 @@ mod tests {
         metadata:
           name: spark-examples
         spec:
+          mode: cluster
+          mainApplicationFile: test.py
           sparkImage:
             productVersion: 1.2.3
           job:
@@ -1112,6 +1146,8 @@ mod tests {
         metadata:
           name: spark-examples
         spec:
+          mode: cluster
+          mainApplicationFile: test.py
           sparkImage:
             productVersion: 1.2.3
         "#})
diff --git a/rust/crd/src/roles.rs b/rust/crd/src/roles.rs
index 42f0e275..9ac3241f 100644
--- a/rust/crd/src/roles.rs
+++ b/rust/crd/src/roles.rs
@@ -88,6 +88,13 @@ pub enum SparkContainer {
     Vector,
     Tls,
 }
+#[derive(Clone, Debug, Deserialize, Display, JsonSchema, PartialEq, Serialize)]
+#[serde(rename_all = "lowercase")]
+#[strum(serialize_all = "lowercase")]
+pub enum SparkMode {
+    Cluster,
+    Client,
+}
 
 #[derive(Clone, Debug, Default, Fragment, JsonSchema, PartialEq)]
 #[fragment_attrs(
diff --git a/rust/operator-binary/src/main.rs b/rust/operator-binary/src/main.rs
index 65045550..96f4d248 100644
--- a/rust/operator-binary/src/main.rs
+++ b/rust/operator-binary/src/main.rs
@@ -51,8 +51,8 @@ async fn main() -> anyhow::Result<()> {
     let opts = Opts::parse();
     match opts.cmd {
         Command::Crd => {
-            SparkApplication::print_yaml_schema()?;
-            SparkHistoryServer::print_yaml_schema()?;
+            SparkApplication::print_yaml_schema(built_info::CARGO_PKG_VERSION)?;
+            SparkHistoryServer::print_yaml_schema(built_info::CARGO_PKG_VERSION)?;
         }
         Command::Run(ProductOperatorRun {
             product_config,
diff --git a/rust/operator-binary/src/spark_k8s_controller.rs b/rust/operator-binary/src/spark_k8s_controller.rs
index 9e8dfb54..f1c6b72d 100644
--- a/rust/operator-binary/src/spark_k8s_controller.rs
+++ b/rust/operator-binary/src/spark_k8s_controller.rs
@@ -191,7 +191,8 @@ pub async fn reconcile(spark_application: Arc, ctx: Arc)
         .validated_role_config(&resolved_product_image, &ctx.product_config)
         .context(InvalidProductConfigSnafu)?;
 
-    let (serviceaccount, rolebinding) = build_spark_role_serviceaccount(&spark_application)?;
+    let (serviceaccount, rolebinding) =
+        build_spark_role_serviceaccount(&spark_application, &resolved_product_image)?;
     client
         .apply_patch(CONTROLLER_NAME, &serviceaccount, &serviceaccount)
         .await
@@ -298,6 +299,7 @@ pub async fn reconcile(spark_application: Arc, ctx: Arc)
         submit_product_config,
         vector_aggregator_address.as_deref(),
         &submit_config.logging,
+        &resolved_product_image,
     )?;
     client
         .apply_patch(
@@ -482,7 +484,10 @@ fn pod_template(
             // cleanly (specifically driver pods and related config maps) when the spark application is deleted.
             .ownerreference_from_resource(spark_application, None, None)
             .context(ObjectMissingMetadataForOwnerRefSnafu)?
-            .with_recommended_labels(spark_application.build_recommended_labels(&container_name))
+            .with_recommended_labels(
+                spark_application
+                    .build_recommended_labels(&spark_image.app_version_label, &container_name),
+            )
             .build(),
     )
     .add_container(cb.build())
@@ -580,7 +585,8 @@ fn pod_template_config_map(
             .ownerreference_from_resource(spark_application, None, Some(true))
             .context(ObjectMissingMetadataForOwnerRefSnafu)?
             .with_recommended_labels(
-                spark_application.build_recommended_labels("pod-templates"),
+                spark_application
+                    .build_recommended_labels(&spark_image.app_version_label, "pod-templates"),
             )
             .build(),
     )
@@ -628,6 +634,7 @@ fn submit_job_config_map(
     product_config: Option<&HashMap>>,
     vector_aggregator_address: Option<&str>,
     logging: &Logging,
+    spark_image: &ResolvedProductImage,
 ) -> Result {
     let cm_name = spark_application.submit_job_config_map_name();
 
@@ -639,7 +646,10 @@ fn submit_job_config_map(
             .name(&cm_name)
             .ownerreference_from_resource(spark_application, None, Some(true))
             .context(ObjectMissingMetadataForOwnerRefSnafu)?
-            .with_recommended_labels(spark_application.build_recommended_labels("spark-submit"))
+            .with_recommended_labels(
+                spark_application
+                    .build_recommended_labels(&spark_image.app_version_label, "spark-submit"),
+            )
             .build(),
     );
 
@@ -767,25 +777,27 @@ fn spark_job(
         ));
     }
 
-    let mut pod = PodTemplateSpec {
-        metadata: Some(
-            ObjectMetaBuilder::new()
-                .name("spark-submit")
-                .with_recommended_labels(
-                    spark_application.build_recommended_labels("spark-job-template"),
-                )
-                .build(),
-        ),
-        spec: Some(PodSpec {
-            containers,
-            restart_policy: Some("Never".to_string()),
-            service_account_name: serviceaccount.metadata.name.clone(),
-            volumes: Some(volumes),
-            image_pull_secrets: spark_image.pull_secrets.clone(),
-            security_context: Some(security_context()),
-            ..PodSpec::default()
-        }),
-    };
+    let mut pod =
+        PodTemplateSpec {
+            metadata: Some(
+                ObjectMetaBuilder::new()
+                    .name("spark-submit")
+                    .with_recommended_labels(spark_application.build_recommended_labels(
+                        &spark_image.app_version_label,
+                        "spark-job-template",
+                    ))
+                    .build(),
+            ),
+            spec: Some(PodSpec {
+                containers,
+                restart_policy: Some("Never".to_string()),
+                service_account_name: serviceaccount.metadata.name.clone(),
+                volumes: Some(volumes),
+                image_pull_secrets: spark_image.pull_secrets.clone(),
+                security_context: Some(security_context()),
+                ..PodSpec::default()
+            }),
+        };
 
     if let Some(submit_pod_overrides) =
         spark_application.pod_overrides(SparkApplicationRole::Submit)
@@ -798,7 +810,10 @@ fn spark_job(
             .name_and_namespace(spark_application)
             .ownerreference_from_resource(spark_application, None, Some(true))
             .context(ObjectMissingMetadataForOwnerRefSnafu)?
-            .with_recommended_labels(spark_application.build_recommended_labels("spark-job"))
+            .with_recommended_labels(
+                spark_application
+                    .build_recommended_labels(&spark_image.app_version_label, "spark-job"),
+            )
             .build(),
         spec: Some(JobSpec {
             template: pod,
@@ -817,6 +832,7 @@ fn spark_job(
 /// They are deleted when the job is deleted.
 fn build_spark_role_serviceaccount(
     spark_app: &SparkApplication,
+    spark_image: &ResolvedProductImage,
 ) -> Result<(ServiceAccount, RoleBinding)> {
     let sa_name = spark_app.metadata.name.as_ref().unwrap().to_string();
     let sa = ServiceAccount {
@@ -825,7 +841,10 @@ fn build_spark_role_serviceaccount(
             .name(&sa_name)
             .ownerreference_from_resource(spark_app, None, Some(true))
             .context(ObjectMissingMetadataForOwnerRefSnafu)?
-            .with_recommended_labels(spark_app.build_recommended_labels("service-account"))
+            .with_recommended_labels(
+                spark_app
+                    .build_recommended_labels(&spark_image.app_version_label, "service-account"),
+            )
             .build(),
         ..ServiceAccount::default()
     };
@@ -836,7 +855,9 @@ fn build_spark_role_serviceaccount(
             .name(binding_name)
             .ownerreference_from_resource(spark_app, None, Some(true))
             .context(ObjectMissingMetadataForOwnerRefSnafu)?
-            .with_recommended_labels(spark_app.build_recommended_labels("role-binding"))
+            .with_recommended_labels(
+                spark_app.build_recommended_labels(&spark_image.app_version_label, "role-binding"),
+            )
             .build(),
         role_ref: RoleRef {
             api_group: ClusterRole::GROUP.to_string(),
diff --git a/tests/templates/kuttl/iceberg/10-deploy-spark-app.yaml.j2 b/tests/templates/kuttl/iceberg/10-deploy-spark-app.yaml.j2
index 943e63e0..ede72f0d 100644
--- a/tests/templates/kuttl/iceberg/10-deploy-spark-app.yaml.j2
+++ b/tests/templates/kuttl/iceberg/10-deploy-spark-app.yaml.j2
@@ -4,7 +4,6 @@ kind: SparkApplication
 metadata:
   name: pyspark-iceberg
 spec:
-  version: "1.0"
 {% if lookup('env', 'VECTOR_AGGREGATOR') %}
   vectorAggregatorConfigMapName: vector-aggregator-discovery
 {% endif %}
diff --git a/tests/templates/kuttl/logging/05-deploy-automatic-log-config-spark-app.yaml.j2 b/tests/templates/kuttl/logging/05-deploy-automatic-log-config-spark-app.yaml.j2
index 353f1ca6..1ea2e310 100644
--- a/tests/templates/kuttl/logging/05-deploy-automatic-log-config-spark-app.yaml.j2
+++ b/tests/templates/kuttl/logging/05-deploy-automatic-log-config-spark-app.yaml.j2
@@ -4,7 +4,6 @@ kind: SparkApplication
 metadata:
   name: spark-automatic-log-config
 spec:
-  version: "1.0"
   sparkImage:
 {% if test_scenario['values']['spark'].find(",") > 0 %}
     custom: "{{ test_scenario['values']['spark'].split(',')[1] }}"
diff --git a/tests/templates/kuttl/logging/06-deploy-custom-log-config-spark-app.yaml.j2 b/tests/templates/kuttl/logging/06-deploy-custom-log-config-spark-app.yaml.j2
index ac323f21..a5227b37 100644
--- a/tests/templates/kuttl/logging/06-deploy-custom-log-config-spark-app.yaml.j2
+++ b/tests/templates/kuttl/logging/06-deploy-custom-log-config-spark-app.yaml.j2
@@ -38,7 +38,6 @@ kind: SparkApplication
 metadata:
   name: spark-custom-log-config
 spec:
-  version: "1.0"
   sparkImage:
 {% if test_scenario['values']['spark'].find(",") > 0 %}
     custom: "{{ test_scenario['values']['spark'].split(',')[1] }}"
diff --git a/tests/templates/kuttl/logging/07-deploy-automatic-log-config-pyspark-app.yaml.j2 b/tests/templates/kuttl/logging/07-deploy-automatic-log-config-pyspark-app.yaml.j2
index f87b1160..85d903cd 100644
--- a/tests/templates/kuttl/logging/07-deploy-automatic-log-config-pyspark-app.yaml.j2
+++ b/tests/templates/kuttl/logging/07-deploy-automatic-log-config-pyspark-app.yaml.j2
@@ -4,7 +4,6 @@ kind: SparkApplication
 metadata:
   name: pyspark-automatic-log-config
 spec:
-  version: "1.0"
   sparkImage:
 {% if test_scenario['values']['spark'].find(",") > 0 %}
     custom: "{{ test_scenario['values']['spark'].split(',')[1] }}"
diff --git a/tests/templates/kuttl/logging/08-deploy-custom-log-config-pyspark-app.yaml.j2 b/tests/templates/kuttl/logging/08-deploy-custom-log-config-pyspark-app.yaml.j2
index c3646e0a..f85328a1 100644
--- a/tests/templates/kuttl/logging/08-deploy-custom-log-config-pyspark-app.yaml.j2
+++ b/tests/templates/kuttl/logging/08-deploy-custom-log-config-pyspark-app.yaml.j2
@@ -38,7 +38,6 @@ kind: SparkApplication
 metadata:
   name: pyspark-custom-log-config
 spec:
-  version: "1.0"
   sparkImage:
 {% if test_scenario['values']['spark'].find(",") > 0 %}
     custom: "{{ test_scenario['values']['spark'].split(',')[1] }}"
diff --git a/tests/templates/kuttl/pod_overrides/10-deploy-spark-app.yaml.j2 b/tests/templates/kuttl/pod_overrides/10-deploy-spark-app.yaml.j2
index b606aa8a..cf66b309 100644
--- a/tests/templates/kuttl/pod_overrides/10-deploy-spark-app.yaml.j2
+++ b/tests/templates/kuttl/pod_overrides/10-deploy-spark-app.yaml.j2
@@ -4,7 +4,6 @@ kind: SparkApplication
 metadata:
   name: spark-pi-s3-1
 spec:
-  version: "1.0"
   sparkImage:
 {% if test_scenario['values']['spark'].find(",") > 0 %}
     custom: "{{ test_scenario['values']['spark'].split(',')[1] }}"
diff --git a/tests/templates/kuttl/pyspark-ny-public-s3-image/10-deploy-spark-app.yaml.j2 b/tests/templates/kuttl/pyspark-ny-public-s3-image/10-deploy-spark-app.yaml.j2
index 8c1d9a34..ce607ef2 100644
--- a/tests/templates/kuttl/pyspark-ny-public-s3-image/10-deploy-spark-app.yaml.j2
+++ b/tests/templates/kuttl/pyspark-ny-public-s3-image/10-deploy-spark-app.yaml.j2
@@ -4,7 +4,6 @@ kind: SparkApplication
 metadata:
   name: pyspark-ny-public-s3-image
 spec:
-  version: "1.0"
   # everything under /jobs will be copied to /stackable/spark/jobs
   image: "docker.stackable.tech/stackable/ny-tlc-report:{{ test_scenario['values']['ny-tlc-report'] }}"
 {% if lookup('env', 'VECTOR_AGGREGATOR') %}
diff --git a/tests/templates/kuttl/pyspark-ny-public-s3/10-deploy-spark-app.yaml.j2 b/tests/templates/kuttl/pyspark-ny-public-s3/10-deploy-spark-app.yaml.j2
index 7cc98360..26f9a795 100644
--- a/tests/templates/kuttl/pyspark-ny-public-s3/10-deploy-spark-app.yaml.j2
+++ b/tests/templates/kuttl/pyspark-ny-public-s3/10-deploy-spark-app.yaml.j2
@@ -4,7 +4,6 @@ kind: SparkApplication
 metadata:
   name: pyspark-ny-public-s3
 spec:
-  version: "1.0"
 {% if lookup('env', 'VECTOR_AGGREGATOR') %}
   vectorAggregatorConfigMapName: vector-aggregator-discovery
 {% endif %}
diff --git a/tests/templates/kuttl/resources/10-deploy-spark-app.yaml.j2 b/tests/templates/kuttl/resources/10-deploy-spark-app.yaml.j2
index dd2acd53..23f885d2 100644
--- a/tests/templates/kuttl/resources/10-deploy-spark-app.yaml.j2
+++ b/tests/templates/kuttl/resources/10-deploy-spark-app.yaml.j2
@@ -4,7 +4,6 @@ kind: SparkApplication
 metadata:
   name: resources-crd
 spec:
-  version: "1.0"
 {% if lookup('env', 'VECTOR_AGGREGATOR') %}
   vectorAggregatorConfigMapName: vector-aggregator-discovery
 {% endif %}
diff --git a/tests/templates/kuttl/resources/12-deploy-spark-app.yaml.j2 b/tests/templates/kuttl/resources/12-deploy-spark-app.yaml.j2
index dc48fe9e..d04aec51 100644
--- a/tests/templates/kuttl/resources/12-deploy-spark-app.yaml.j2
+++ b/tests/templates/kuttl/resources/12-deploy-spark-app.yaml.j2
@@ -4,7 +4,6 @@ kind: SparkApplication
 metadata:
   name: resources-sparkconf
 spec:
-  version: "1.0"
 {% if lookup('env', 'VECTOR_AGGREGATOR') %}
   vectorAggregatorConfigMapName: vector-aggregator-discovery
 {% endif %}
diff --git a/tests/templates/kuttl/resources/20-assert.yaml.j2 b/tests/templates/kuttl/resources/20-assert.yaml.j2
deleted file mode 100644
index 0fd00fea..00000000
--- a/tests/templates/kuttl/resources/20-assert.yaml.j2
+++ /dev/null
@@ -1,7 +0,0 @@
----
-apiVersion: kuttl.dev/v1beta1
-kind: TestAssert
-timeout: 120
-commands:
-  - script: kubectl get pods -n $NAMESPACE resources-crd-exec-1 -o json | jq '.spec.containers[].env[] | select(.name == "SPARK_EXECUTOR_CORES").value' | grep -w "2"
-  - script: kubectl get pods -n $NAMESPACE resources-sparkconf-exec-1 -o json | jq '.spec.containers[].env[] | select(.name == "SPARK_EXECUTOR_CORES").value' | grep -w "2"
diff --git a/tests/templates/kuttl/smoke/10-deploy-spark-app.yaml.j2 b/tests/templates/kuttl/smoke/10-deploy-spark-app.yaml.j2
index bd26ac35..b806109c 100644
--- a/tests/templates/kuttl/smoke/10-deploy-spark-app.yaml.j2
+++ b/tests/templates/kuttl/smoke/10-deploy-spark-app.yaml.j2
@@ -4,7 +4,6 @@ kind: SparkApplication
 metadata:
   name: spark-pi-s3-1
 spec:
-  version: "1.0"
 {% if lookup('env', 'VECTOR_AGGREGATOR') %}
   vectorAggregatorConfigMapName: vector-aggregator-discovery
 {% endif %}
diff --git a/tests/templates/kuttl/spark-examples/10-deploy-spark-app.yaml.j2 b/tests/templates/kuttl/spark-examples/10-deploy-spark-app.yaml.j2
index 2bab38db..0658c426 100644
--- a/tests/templates/kuttl/spark-examples/10-deploy-spark-app.yaml.j2
+++ b/tests/templates/kuttl/spark-examples/10-deploy-spark-app.yaml.j2
@@ -4,7 +4,6 @@ kind: SparkApplication
 metadata:
   name: spark-examples
 spec:
-  version: "1.0"
 {% if lookup('env', 'VECTOR_AGGREGATOR') %}
   vectorAggregatorConfigMapName: vector-aggregator-discovery
 {% endif %}
diff --git a/tests/templates/kuttl/spark-history-server/10-deploy-spark-app.yaml.j2 b/tests/templates/kuttl/spark-history-server/10-deploy-spark-app.yaml.j2
index 93a96d25..54508763 100644
--- a/tests/templates/kuttl/spark-history-server/10-deploy-spark-app.yaml.j2
+++ b/tests/templates/kuttl/spark-history-server/10-deploy-spark-app.yaml.j2
@@ -4,7 +4,6 @@ kind: SparkApplication
 metadata:
   name: spark-pi-s3-1
 spec:
-  version: "1.0"
 {% if lookup('env', 'VECTOR_AGGREGATOR') %}
   vectorAggregatorConfigMapName: vector-aggregator-discovery
 {% endif %}
diff --git a/tests/templates/kuttl/spark-history-server/12-deploy-spark-app.yaml.j2 b/tests/templates/kuttl/spark-history-server/12-deploy-spark-app.yaml.j2
index 286e1a56..9bc1f4fc 100644
--- a/tests/templates/kuttl/spark-history-server/12-deploy-spark-app.yaml.j2
+++ b/tests/templates/kuttl/spark-history-server/12-deploy-spark-app.yaml.j2
@@ -4,7 +4,6 @@ kind: SparkApplication
 metadata:
   name: spark-pi-s3-2
 spec:
-  version: "1.0"
 {% if lookup('env', 'VECTOR_AGGREGATOR') %}
   vectorAggregatorConfigMapName: vector-aggregator-discovery
 {% endif %}
diff --git a/tests/templates/kuttl/spark-ny-public-s3/10-deploy-spark-app.yaml.j2 b/tests/templates/kuttl/spark-ny-public-s3/10-deploy-spark-app.yaml.j2
index 99510674..756e13fa 100644
--- a/tests/templates/kuttl/spark-ny-public-s3/10-deploy-spark-app.yaml.j2
+++ b/tests/templates/kuttl/spark-ny-public-s3/10-deploy-spark-app.yaml.j2
@@ -12,7 +12,6 @@ kind: SparkApplication
 metadata:
   name: spark-ny-cm
 spec:
-  version: "1.0"
 {% if lookup('env', 'VECTOR_AGGREGATOR') %}
   vectorAggregatorConfigMapName: vector-aggregator-discovery
 {% endif %}
diff --git a/tests/templates/kuttl/spark-pi-private-s3/10-deploy-spark-app.yaml.j2 b/tests/templates/kuttl/spark-pi-private-s3/10-deploy-spark-app.yaml.j2
index 4adda1e8..953f3945 100644
--- a/tests/templates/kuttl/spark-pi-private-s3/10-deploy-spark-app.yaml.j2
+++ b/tests/templates/kuttl/spark-pi-private-s3/10-deploy-spark-app.yaml.j2
@@ -4,7 +4,6 @@ kind: SparkApplication
 metadata:
   name: spark-pi-private-s3
 spec:
-  version: "1.0"
 {% if lookup('env', 'VECTOR_AGGREGATOR') %}
   vectorAggregatorConfigMapName: vector-aggregator-discovery
 {% endif %}
diff --git a/tests/templates/kuttl/spark-pi-public-s3/10-deploy-spark-app.yaml.j2 b/tests/templates/kuttl/spark-pi-public-s3/10-deploy-spark-app.yaml.j2
index aa38ed4a..a5dcf506 100644
--- a/tests/templates/kuttl/spark-pi-public-s3/10-deploy-spark-app.yaml.j2
+++ b/tests/templates/kuttl/spark-pi-public-s3/10-deploy-spark-app.yaml.j2
@@ -4,7 +4,6 @@ kind: SparkApplication
 metadata:
   name: spark-pi-public-s3
 spec:
-  version: "1.0"
 {% if lookup('env', 'VECTOR_AGGREGATOR') %}
   vectorAggregatorConfigMapName: vector-aggregator-discovery
 {% endif %}
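For reference, a minimal sketch of a SparkApplication manifest under the updated CRD, mirroring the unit-test fixture in this patch: the `version` field is gone, while `mode` and `mainApplicationFile` are required. The `apiVersion` line and the concrete values are illustrative assumptions, not part of the diff.

```yaml
---
# Minimal SparkApplication after the CRD change in this patch (sketch only).
apiVersion: spark.stackable.tech/v1alpha1
kind: SparkApplication
metadata:
  name: spark-examples          # any name; taken from the test fixture above
spec:
  mode: cluster                 # now mandatory (SparkMode: cluster or client)
  mainApplicationFile: test.py  # now mandatory; placeholder artifact
  sparkImage:
    productVersion: 1.2.3       # placeholder version from the test fixture
```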