From bc05d558bf02ca3df0fff0f4ae8f45a58f49e546 Mon Sep 17 00:00:00 2001 From: Anton Ippolitov Date: Fri, 17 Mar 2023 16:55:50 +0100 Subject: [PATCH 01/87] Fix signal handling for non-leader processes (#1680) --- main.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/main.go b/main.go index 2bb5496a68..db4861b2e6 100644 --- a/main.go +++ b/main.go @@ -219,7 +219,11 @@ func main() { if *enableLeaderElection { glog.Info("Waiting to be elected leader before starting application controller goroutines") - <-startCh + select { + case <-signalCh: + os.Exit(0) + case <-startCh: + } } glog.Info("Starting application controller goroutines") From e7361ba2b4dab90566f9e16ab50dad682df5266a Mon Sep 17 00:00:00 2001 From: Harry Su Date: Fri, 17 Mar 2023 08:57:12 -0700 Subject: [PATCH 02/87] add lifecycle to executor (#1674) --- docs/user-guide.md | 2 +- .../sparkoperator.k8s.io/v1beta2/types.go | 3 ++ pkg/webhook/patch.go | 9 ++++-- pkg/webhook/patch_test.go | 31 +++++++++++++++++++ 4 files changed, 42 insertions(+), 3 deletions(-) diff --git a/docs/user-guide.md b/docs/user-guide.md index 3cc65efd9d..1aeeb1248e 100644 --- a/docs/user-guide.md +++ b/docs/user-guide.md @@ -614,7 +614,7 @@ spec: ``` ### Using Container LifeCycle Hooks -A Spark Application can optionally specify a [Container Lifecycle Hooks](https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks) for a driver. It is useful in cases where you need a PreStop or PostStart hooks to driver. +A Spark Application can optionally specify a [Container Lifecycle Hooks](https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks) for a driver. It is useful in cases where you need a PreStop or PostStart hooks to driver and executor. ```yaml spec: diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go b/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go index bd4aee27e4..2c89fe9482 100644 --- a/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go +++ b/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go @@ -580,6 +580,9 @@ type ExecutorSpec struct { // GC settings or other logging. // +optional JavaOptions *string `json:"javaOptions,omitempty"` + // Lifecycle for running preStop or postStart commands + // +optional + Lifecycle *apiv1.Lifecycle `json:"lifecycle,omitempty"` // DeleteOnTermination specify whether executor pods should be deleted in case of failure or normal termination. // Maps to `spark.kubernetes.executor.deleteOnTermination` that is available since Spark 3.0. // +optional diff --git a/pkg/webhook/patch.go b/pkg/webhook/patch.go index 3034ebd4dd..bc9a748172 100644 --- a/pkg/webhook/patch.go +++ b/pkg/webhook/patch.go @@ -765,8 +765,13 @@ func addTerminationGracePeriodSeconds(pod *corev1.Pod, app *v1beta2.SparkApplica func addPodLifeCycleConfig(pod *corev1.Pod, app *v1beta2.SparkApplication) *patchOperation { var lifeCycle *corev1.Lifecycle + var containerName string if util.IsDriverPod(pod) { lifeCycle = app.Spec.Driver.Lifecycle + containerName = config.SparkDriverContainerName + } else if util.IsExecutorPod(pod) { + lifeCycle = app.Spec.Executor.Lifecycle + containerName = config.SparkExecutorContainerName } if lifeCycle == nil { return nil @@ -775,12 +780,12 @@ func addPodLifeCycleConfig(pod *corev1.Pod, app *v1beta2.SparkApplication) *patc i := 0 // Find the driver container in the pod. 
for ; i < len(pod.Spec.Containers); i++ { - if pod.Spec.Containers[i].Name == config.SparkDriverContainerName { + if pod.Spec.Containers[i].Name == containerName { break } } if i == len(pod.Spec.Containers) { - glog.Warningf("Spark driver container not found in pod %s", pod.Name) + glog.Warningf("Spark container %s not found in pod %s", containerName, pod.Name) return nil } diff --git a/pkg/webhook/patch_test.go b/pkg/webhook/patch_test.go index 9efa740fd9..4b66b62a7e 100644 --- a/pkg/webhook/patch_test.go +++ b/pkg/webhook/patch_test.go @@ -1752,6 +1752,9 @@ func TestPatchSparkPod_Lifecycle(t *testing.T) { preStopTest := &corev1.ExecAction{ Command: []string{"/bin/sh", "-c", "echo Hello from the pre stop handler > /usr/share/message"}, } + postStartTest := &corev1.ExecAction{ + Command: []string{"/bin/sh", "-c", "echo Hello from the post start handler > /usr/share/message"}, + } app := &v1beta2.SparkApplication{ ObjectMeta: metav1.ObjectMeta{ Name: "spark-test", @@ -1763,6 +1766,11 @@ func TestPatchSparkPod_Lifecycle(t *testing.T) { PreStop: &corev1.LifecycleHandler{Exec: preStopTest}, }, }, + Executor: v1beta2.ExecutorSpec{ + Lifecycle: &corev1.Lifecycle{ + PostStart: &corev1.LifecycleHandler{Exec: postStartTest}, + }, + }, }, } @@ -1784,11 +1792,34 @@ func TestPatchSparkPod_Lifecycle(t *testing.T) { }, } + executorPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "spark-executor", + Labels: map[string]string{ + config.SparkRoleLabel: config.SparkExecutorRole, + config.LaunchedBySparkOperatorLabel: "true", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: config.SparkExecutorContainerName, + Image: "spark-executor:latest", + }, + }, + }, + } + modifiedDriverPod, err := getModifiedPod(driverPod, app) if err != nil { t.Fatal(err) } + modifiedExecutorPod, err := getModifiedPod(executorPod, app) + if err != nil { + t.Fatal(err) + } assert.Equal(t, preStopTest, modifiedDriverPod.Spec.Containers[0].Lifecycle.PreStop.Exec) + assert.Equal(t, postStartTest, modifiedExecutorPod.Spec.Containers[0].Lifecycle.PostStart.Exec) } func getModifiedPod(pod *corev1.Pod, app *v1beta2.SparkApplication) (*corev1.Pod, error) { From fff3ba6219765f2f32e2698f4624916991867956 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Claudino?= <102805681+claudino-kognita@users.noreply.github.com> Date: Fri, 17 Mar 2023 12:59:08 -0300 Subject: [PATCH 03/87] Add Kognita to "Who is using" (#1637) --- docs/who-is-using.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/who-is-using.md b/docs/who-is-using.md index 2a6167e343..76a609d476 100644 --- a/docs/who-is-using.md +++ b/docs/who-is-using.md @@ -39,3 +39,4 @@ | [DiDi](https://www.didiglobal.com) | @Run-Lin | Evaluation | Data Infrastructure | | [DeepCure](https://www.deepcure.ai) | @mschroering | Production | Spark / ML | | [C2FO](https://www.c2fo.com/) | @vanhoale | Production | Data Platform / Data Infrastructure | +| [Kognita](https://kognita.com.br/) | @andreclaudino | Production | MLOps, Data Platform / Data Infrastructure, ML/AI | From 3dc776580112638069cfe8b2a9f3423b2c74bc9c Mon Sep 17 00:00:00 2001 From: ArshiA Akhavan <45825003+ArshiAAkhavan@users.noreply.github.com> Date: Fri, 17 Mar 2023 19:30:36 +0330 Subject: [PATCH 04/87] Add support for `ephemeral.volumeClaimTemplate` in helm chart CRDs (#1661) --- ...tor.k8s.io_scheduledsparkapplications.yaml | 23 +++++++++++++++++++ ...parkoperator.k8s.io_sparkapplications.yaml | 23 +++++++++++++++++++ 2 files changed, 46 insertions(+) diff --git 
a/charts/spark-operator-chart/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml b/charts/spark-operator-chart/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml index 3ee45fb2a4..1d23c4d8f9 100644 --- a/charts/spark-operator-chart/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml +++ b/charts/spark-operator-chart/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml @@ -3936,6 +3936,29 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true type: object + ephemeral: + properties: + volumeClaimTemplate: + properties: + spec: + properties: + accessModes: + items: + type: string + type: array + resources: + properties: + requests: + properties: + storage: + type: string + type: object + type: object + storageClassName: + type: string + type: object + type: object + type: object fc: properties: fsType: diff --git a/charts/spark-operator-chart/crds/sparkoperator.k8s.io_sparkapplications.yaml b/charts/spark-operator-chart/crds/sparkoperator.k8s.io_sparkapplications.yaml index fbb52e2aa0..5ca31001cb 100644 --- a/charts/spark-operator-chart/crds/sparkoperator.k8s.io_sparkapplications.yaml +++ b/charts/spark-operator-chart/crds/sparkoperator.k8s.io_sparkapplications.yaml @@ -3924,6 +3924,29 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true type: object + ephemeral: + properties: + volumeClaimTemplate: + properties: + spec: + properties: + accessModes: + items: + type: string + type: array + resources: + properties: + requests: + properties: + storage: + type: string + type: object + type: object + storageClassName: + type: string + type: object + type: object + type: object fc: properties: fsType: From e5c45e293586286050c4a2b17b102bf754b47372 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Bauer?= Date: Fri, 17 Mar 2023 17:11:22 +0100 Subject: [PATCH 05/87] add dependabot (#1629) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: André Bauer --- .github/dependabot.yml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 .github/dependabot.yml diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000..b6b6a6ccc6 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,16 @@ +version: 2 +updates: + - package-ecosystem: "gomod" + directory: "/" + schedule: + interval: "weekly" + + - package-ecosystem: "docker" + directory: "/" + schedule: + interval: "weekly" + + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "weekly" From a733749052766938df6a06297334336589084399 Mon Sep 17 00:00:00 2001 From: Zhiming Date: Sat, 18 Mar 2023 00:14:20 +0800 Subject: [PATCH 06/87] fix tolerations block in wrong segment for webhook jobs (#1633) --- charts/spark-operator-chart/templates/webhook-cleanup-job.yaml | 2 +- charts/spark-operator-chart/templates/webhook-init-job.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/charts/spark-operator-chart/templates/webhook-cleanup-job.yaml b/charts/spark-operator-chart/templates/webhook-cleanup-job.yaml index 0ab1350d00..dadd1df17b 100644 --- a/charts/spark-operator-chart/templates/webhook-cleanup-job.yaml +++ b/charts/spark-operator-chart/templates/webhook-cleanup-job.yaml @@ -45,7 +45,6 @@ spec: -H \"Content-Type: application/json\" \ --data 
\"{\\\"kind\\\":\\\"DeleteOptions\\\",\\\"apiVersion\\\":\\\"batch/v1\\\",\\\"propagationPolicy\\\":\\\"Foreground\\\"}\" \ https://kubernetes.default.svc/apis/batch/v1/namespaces/{{ .Release.Namespace }}/jobs/{{ include "spark-operator.fullname" . }}-webhook-init" -{{ end }} {{- with .Values.tolerations }} tolerations: {{- toYaml . | nindent 8 }} @@ -54,3 +53,4 @@ spec: nodeSelector: {{- toYaml . | nindent 8 }} {{- end }} +{{ end }} diff --git a/charts/spark-operator-chart/templates/webhook-init-job.yaml b/charts/spark-operator-chart/templates/webhook-init-job.yaml index 10fab8f5e2..e944839095 100644 --- a/charts/spark-operator-chart/templates/webhook-init-job.yaml +++ b/charts/spark-operator-chart/templates/webhook-init-job.yaml @@ -35,7 +35,6 @@ spec: "-r", "{{ include "spark-operator.fullname" . }}-webhook-certs", "-p" ] -{{ end }} {{- with .Values.tolerations }} tolerations: {{- toYaml . | nindent 8 }} @@ -44,3 +43,4 @@ spec: nodeSelector: {{- toYaml . | nindent 8 }} {{- end }} +{{ end }} From bd2eda216b79733c992861f257b28dad36bb4c62 Mon Sep 17 00:00:00 2001 From: ordukhanian Date: Sat, 18 Mar 2023 00:37:49 +0400 Subject: [PATCH 07/87] Added permissions for leader election (#1647) Added new RBAC permissions needed by default for leader election for the coordination/v1 API. Required after upgrade to golang:1.19.2. In k8s.io/client-go@v0.25.3/tools/leaderelection/resourcelock/interface.go:166 `configMapsResourceLock` was removed and should be replaced by `ConfigMapsLeasesResourceLock`. --- charts/spark-operator-chart/Chart.yaml | 2 +- .../spark-operator-chart/templates/rbac.yaml | 20 ++++++++++++++++++- 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/charts/spark-operator-chart/Chart.yaml b/charts/spark-operator-chart/Chart.yaml index 4fad2b551c..da9a2e4220 100644 --- a/charts/spark-operator-chart/Chart.yaml +++ b/charts/spark-operator-chart/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: spark-operator description: A Helm chart for Spark on Kubernetes operator -version: 1.1.26 +version: 1.1.27 appVersion: v1beta2-1.3.8-3.1.1 keywords: - spark diff --git a/charts/spark-operator-chart/templates/rbac.yaml b/charts/spark-operator-chart/templates/rbac.yaml index 56ddc1e081..6f5d97c0d6 100644 --- a/charts/spark-operator-chart/templates/rbac.yaml +++ b/charts/spark-operator-chart/templates/rbac.yaml @@ -105,7 +105,25 @@ rules: verbs: - delete {{- end }} - + {{- if gt (int .Values.replicaCount) 1 }} +- apiGroups: + - coordination.k8s.io + resources: + - leases + resourceNames: + - {{ .Values.leaderElection.lockName }} + verbs: + - get + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + {{- end }} --- apiVersion: rbac.authorization.k8s.io/v1 From 72481c02ebfca1409beba44e07c45a4474626d7d Mon Sep 17 00:00:00 2001 From: Koen van Zuijlen <8818390+kvanzuijlen@users.noreply.github.com> Date: Thu, 23 Mar 2023 19:45:09 +0100 Subject: [PATCH 08/87] Extra helm chart labels (#1669) * Added support for setting extra commonLabels * Added support for podLabels on cleanup and init job * Fixed templating errors * Added documentation --- charts/spark-operator-chart/README.md | 3 +++ charts/spark-operator-chart/templates/_helpers.tpl | 3 +++ .../templates/webhook-cleanup-job.yaml | 4 ++++ .../spark-operator-chart/templates/webhook-init-job.yaml | 4 ++++ charts/spark-operator-chart/values.yaml | 7 +++++++ 5 files changed, 21 insertions(+) diff --git a/charts/spark-operator-chart/README.md b/charts/spark-operator-chart/README.md index 
c00d9aaf4c..2314c82be6 100644 --- a/charts/spark-operator-chart/README.md +++ b/charts/spark-operator-chart/README.md @@ -81,6 +81,7 @@ All charts linted successfully |-----|------|---------|-------------| | affinity | object | `{}` | Affinity for pod assignment | | batchScheduler.enable | bool | `false` | Enable batch scheduler for spark jobs scheduling. If enabled, users can specify batch scheduler name in spark application | +| commonLabels | object | `{}` | Common labels to add to the resources | | controllerThreads | int | `10` | Operator concurrency, higher values might increase memory usage | | fullnameOverride | string | `""` | String to override release name | | image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | @@ -126,8 +127,10 @@ All charts linted successfully | tolerations | list | `[]` | List of node taints to tolerate | | uiService.enable | bool | `true` | Enable UI service creation for Spark application | | webhook.cleanupAnnotations | object | `{"helm.sh/hook":"pre-delete, pre-upgrade","helm.sh/hook-delete-policy":"hook-succeeded"}` | The annotations applied to the cleanup job, required for helm lifecycle hooks | +| webhook.cleanupPodLabels | object | `{}` | The podLabels applied to the pod of the cleanup job | | webhook.enable | bool | `false` | Enable webhook server | | webhook.initAnnotations | object | `{"helm.sh/hook":"pre-install, pre-upgrade","helm.sh/hook-weight":"50"}` | The annotations applied to init job, required to restore certs deleted by the cleanup job during upgrade | +| webhook.initPodLabels | object | `{}` | The podLabels applied to the pod of the init job | | webhook.namespaceSelector | string | `""` | The webhook server will only operate on namespaces with this label, specified in the form key1=value1,key2=value2. Empty string (default) will operate on all namespaces | | webhook.port | int | `8080` | Webhook service port | | webhook.timeout | int | `30` | | diff --git a/charts/spark-operator-chart/templates/_helpers.tpl b/charts/spark-operator-chart/templates/_helpers.tpl index 2954e89690..8e884ee9db 100644 --- a/charts/spark-operator-chart/templates/_helpers.tpl +++ b/charts/spark-operator-chart/templates/_helpers.tpl @@ -37,6 +37,9 @@ Common labels {{- define "spark-operator.labels" -}} helm.sh/chart: {{ include "spark-operator.chart" . }} {{ include "spark-operator.selectorLabels" . }} +{{- if .Values.commonLabels }} +{{ toYaml .Values.commonLabels }} +{{- end }} {{- if .Chart.AppVersion }} app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} {{- end }} diff --git a/charts/spark-operator-chart/templates/webhook-cleanup-job.yaml b/charts/spark-operator-chart/templates/webhook-cleanup-job.yaml index dadd1df17b..a6f2c1ea96 100644 --- a/charts/spark-operator-chart/templates/webhook-cleanup-job.yaml +++ b/charts/spark-operator-chart/templates/webhook-cleanup-job.yaml @@ -15,6 +15,10 @@ spec: annotations: "sidecar.istio.io/inject": "false" {{- end }} + {{- if .Values.webhook.cleanupPodLabels }} + labels: + {{- toYaml .Values.webhook.cleanupPodLabels | nindent 8 }} + {{- end }} spec: serviceAccountName: {{ include "spark-operator.serviceAccountName" . 
}} restartPolicy: OnFailure diff --git a/charts/spark-operator-chart/templates/webhook-init-job.yaml b/charts/spark-operator-chart/templates/webhook-init-job.yaml index e944839095..009879d997 100644 --- a/charts/spark-operator-chart/templates/webhook-init-job.yaml +++ b/charts/spark-operator-chart/templates/webhook-init-job.yaml @@ -15,6 +15,10 @@ spec: annotations: "sidecar.istio.io/inject": "false" {{- end }} + {{- if .Values.webhook.initPodLabels }} + labels: + {{- toYaml .Values.webhook.initPodLabels | nindent 8 }} + {{- end }} spec: serviceAccountName: {{ include "spark-operator.serviceAccountName" . }} restartPolicy: OnFailure diff --git a/charts/spark-operator-chart/values.yaml b/charts/spark-operator-chart/values.yaml index 891b45f6bb..5007b7ea68 100644 --- a/charts/spark-operator-chart/values.yaml +++ b/charts/spark-operator-chart/values.yaml @@ -2,6 +2,9 @@ # This is a YAML-formatted file. # Declare variables to be passed into your templates. +# -- Common labels to add to the resources +commonLabels: {} + # replicaCount -- Desired number of pods, leaderElection will be enabled # if this is greater than 1 replicaCount: 1 @@ -92,11 +95,15 @@ webhook: initAnnotations: "helm.sh/hook": pre-install, pre-upgrade "helm.sh/hook-weight": "50" + # -- The podLabels applied to the pod of the init job + initPodLabels: {} # -- The annotations applied to the cleanup job, required for helm lifecycle hooks cleanupAnnotations: "helm.sh/hook": pre-delete, pre-upgrade "helm.sh/hook-delete-policy": hook-succeeded # -- Webhook Timeout in seconds + # -- The podLabels applied to the pod of the cleanup job + cleanupPodLabels: {} timeout: 30 metrics: From 5f2efd4ff97e7c0bfdb726a066118d3401576730 Mon Sep 17 00:00:00 2001 From: Ashish Pushp Date: Tue, 4 Apr 2023 23:29:46 +0530 Subject: [PATCH 09/87] Molex started using spark K8 operator. (#1714) Used to run Analytics Jobs and ETL pipelines along with AI/ML jobs. 
--- docs/who-is-using.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/who-is-using.md b/docs/who-is-using.md index 76a609d476..f0c5759a85 100644 --- a/docs/who-is-using.md +++ b/docs/who-is-using.md @@ -40,3 +40,4 @@ | [DeepCure](https://www.deepcure.ai) | @mschroering | Production | Spark / ML | | [C2FO](https://www.c2fo.com/) | @vanhoale | Production | Data Platform / Data Infrastructure | | [Kognita](https://kognita.com.br/) | @andreclaudino | Production | MLOps, Data Platform / Data Infrastructure, ML/AI | +| [Molex](https://www.molex.com/) | @AshishPushpSingh | Evaluation/Production | Data Platform | From 5b7a6e3ae8a4e239a76e49bdac0b726c33871ba9 Mon Sep 17 00:00:00 2001 From: Mat Schaffer <115565899+matschaffer-roblox@users.noreply.github.com> Date: Thu, 26 Oct 2023 09:22:25 -0700 Subject: [PATCH 10/87] Roblox who-is (#1784) --- docs/who-is-using.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/who-is-using.md b/docs/who-is-using.md index f0c5759a85..c9c2fd49e6 100644 --- a/docs/who-is-using.md +++ b/docs/who-is-using.md @@ -41,3 +41,4 @@ | [C2FO](https://www.c2fo.com/) | @vanhoale | Production | Data Platform / Data Infrastructure | | [Kognita](https://kognita.com.br/) | @andreclaudino | Production | MLOps, Data Platform / Data Infrastructure, ML/AI | | [Molex](https://www.molex.com/) | @AshishPushpSingh | Evaluation/Production | Data Platform | +| [Roblox](https://www.roblox.com/) | @matschaffer-roblox | Evaluation | Data Infrastructure | From d596205c5b2fe6c7dd366c81f5a5fdf3c6b03e35 Mon Sep 17 00:00:00 2001 From: Zhao Yuanjie Date: Fri, 27 Oct 2023 00:22:58 +0800 Subject: [PATCH 11/87] Optional sidecars for operator pod (#1754) * add sidecars for operator * bumping chart version --- charts/spark-operator-chart/Chart.yaml | 2 +- charts/spark-operator-chart/templates/deployment.yaml | 3 +++ charts/spark-operator-chart/values.yaml | 3 +++ 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/charts/spark-operator-chart/Chart.yaml b/charts/spark-operator-chart/Chart.yaml index da9a2e4220..437804b35b 100644 --- a/charts/spark-operator-chart/Chart.yaml +++ b/charts/spark-operator-chart/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: spark-operator description: A Helm chart for Spark on Kubernetes operator -version: 1.1.27 +version: 1.1.28 appVersion: v1beta2-1.3.8-3.1.1 keywords: - spark diff --git a/charts/spark-operator-chart/templates/deployment.yaml b/charts/spark-operator-chart/templates/deployment.yaml index 127a2dd5e2..96c37ec529 100644 --- a/charts/spark-operator-chart/templates/deployment.yaml +++ b/charts/spark-operator-chart/templates/deployment.yaml @@ -98,6 +98,9 @@ spec: {{- with .Values.volumeMounts }} {{- toYaml . | nindent 10 }} {{- end }} + {{- with .Values.sidecars }} + {{- toYaml . 
| nindent 6 }} + {{- end }} {{- if or .Values.webhook.enable (ne (len .Values.volumes) 0 ) }} volumes: {{- end }} diff --git a/charts/spark-operator-chart/values.yaml b/charts/spark-operator-chart/values.yaml index 5007b7ea68..e850d7fdaf 100644 --- a/charts/spark-operator-chart/values.yaml +++ b/charts/spark-operator-chart/values.yaml @@ -77,6 +77,9 @@ podSecurityContext: {} # securityContext -- Operator container security context securityContext: {} +# sidecars -- Sidecar containers +sidecars: [] + # volumes - Operator volumes volumes: [] From 0ad755c1eb87c2d87fd1914e8dae976dfb358851 Mon Sep 17 00:00:00 2001 From: Mat Schaffer <115565899+matschaffer-roblox@users.noreply.github.com> Date: Thu, 26 Oct 2023 09:24:56 -0700 Subject: [PATCH 12/87] Expand ingress docs a bit (#1806) This notes the controller intended to be used with the operator-managed ingress resources. When setting up my own cluster I also tried https://docs.nginx.com/nginx-ingress-controller/ and https://kubernetes-sigs.github.io/aws-load-balancer-controller/ but the path format generated by the operator won't work with those. --- docs/quick-start-guide.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/docs/quick-start-guide.md b/docs/quick-start-guide.md index 22e8cbb34a..8913666909 100644 --- a/docs/quick-start-guide.md +++ b/docs/quick-start-guide.md @@ -238,10 +238,20 @@ and deleting the pods outside the operator might lead to incorrect metric values ## Driver UI Access and Ingress The operator, by default, makes the Spark UI accessible by creating a service of type `ClusterIP` which exposes the UI. This is only accessible from within the cluster. + The operator also supports creating an optional Ingress for the UI. This can be turned on by setting the `ingress-url-format` command-line flag. The `ingress-url-format` should be a template like `{{$appName}}.{ingress_suffix}/{{$appNamespace}}/{{$appName}}`. The `{ingress_suffix}` should be replaced by the user to indicate the cluster's ingress url and the operator will replace the `{{$appName}}` & `{{$appNamespace}}` with the appropriate value. Please note that Ingress support requires that cluster's ingress url routing is correctly set-up. For e.g. if the `ingress-url-format` is `{{$appName}}.ingress.cluster.com`, it requires that anything `*ingress.cluster.com` should be routed to the ingress-controller on the K8s cluster. The operator also sets both `WebUIAddress` which is accessible from within the cluster as well as `WebUIIngressAddress` as part of the `DriverInfo` field of the `SparkApplication`. +The operator generates ingress resources intended for use with the [Ingress NGINX Controller](https://kubernetes.github.io/ingress-nginx/). Include this in your application spec for the controller to ensure it recognizes the ingress and provides appropriate routes to your Spark UI. + +```yaml +spec: + sparkUIOptions: + ingressAnnotations: + kubernetes.io/ingress.class: nginx +``` + ## About the Mutating Admission Webhook The Kubernetes Operator for Apache Spark comes with an optional mutating admission webhook for customizing Spark driver and executor pods based on the specification in `SparkApplication` objects, e.g., mounting user-specified ConfigMaps and volumes, and setting pod affinity/anti-affinity, and adding tolerations. 
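To make the ingress pieces above concrete: the operator-level `ingress-url-format` flag controls the hostname the operator generates, and the per-application `sparkUIOptions.ingressAnnotations` mark the resulting ingress for the NGINX controller. A minimal sketch combining the two, assuming the cluster routes `*.ingress.cluster.com` to ingress-nginx (the suffix and application name are illustrative, and the usual image, mainApplicationFile, driver, and executor fields are omitted):

```yaml
# Operator side (illustrative suffix): start the operator with
#   --ingress-url-format={{$appName}}.ingress.cluster.com
# Application side: annotate the generated ingress for the NGINX controller.
apiVersion: sparkoperator.k8s.io/v1beta2
kind: SparkApplication
metadata:
  name: spark-pi        # UI hostname would resolve to spark-pi.ingress.cluster.com
  namespace: default
spec:
  sparkUIOptions:
    ingressAnnotations:
      kubernetes.io/ingress.class: nginx
```

With both in place, the operator publishes the resulting URL in the `WebUIIngressAddress` field of `DriverInfo`, alongside the in-cluster `WebUIAddress`.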
From 1b842a8ab27bcee00de69a5ce31425ad49cb6036 Mon Sep 17 00:00:00 2001 From: Mat Schaffer <115565899+matschaffer-roblox@users.noreply.github.com> Date: Thu, 26 Oct 2023 09:25:11 -0700 Subject: [PATCH 13/87] Add envFrom to operator deployment (#1785) * Add envFrom to operator deployment Useful to when env vars are used for auth when downloading `spark.archives` from S3. * Fix over-indenting --- charts/spark-operator-chart/templates/deployment.yaml | 2 ++ charts/spark-operator-chart/values.yaml | 3 +++ 2 files changed, 5 insertions(+) diff --git a/charts/spark-operator-chart/templates/deployment.yaml b/charts/spark-operator-chart/templates/deployment.yaml index 96c37ec529..9deacae43f 100644 --- a/charts/spark-operator-chart/templates/deployment.yaml +++ b/charts/spark-operator-chart/templates/deployment.yaml @@ -47,6 +47,8 @@ spec: - name: {{ .Chart.Name }} image: {{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }} imagePullPolicy: {{ .Values.image.pullPolicy }} + envFrom: + {{- toYaml .Values.envFrom | nindent 10 }} securityContext: {{- toYaml .Values.securityContext | nindent 10 }} {{- if .Values.metrics.enable }} diff --git a/charts/spark-operator-chart/values.yaml b/charts/spark-operator-chart/values.yaml index e850d7fdaf..c7d672f60c 100644 --- a/charts/spark-operator-chart/values.yaml +++ b/charts/spark-operator-chart/values.yaml @@ -71,6 +71,9 @@ ingressUrlFormat: "" # -- Set higher levels for more verbose logging logLevel: 2 +# -- Pod environment variable sources +envFrom: [] + # podSecurityContext -- Pod security context podSecurityContext: {} From 463ed1ec7fd3d0b55df2486542ce4d48577dd24a Mon Sep 17 00:00:00 2001 From: Kohei Watanabe Date: Fri, 27 Oct 2023 01:25:55 +0900 Subject: [PATCH 14/87] Improve getMasterURL() to add [] to IPv6 if needed (#1825) Resolves #1344 Spark 3.4 supports IPv6: - https://github.com/apache/spark/pull/36868 So I want to make the operator support IPv6. I can confirm that this can submit the spark-job in IPv6-only environment. 
Although it is necessary to add the following environment variables to the operator ```yaml apiVersion: apps/v1 kind: Deployment metadata: name: spark-on-k8s-spark-operator spec: template: spec: containers: - name: spark-operator env: - name: _JAVA_OPTIONS value: "-Djava.net.preferIPv6Addresses=true" - name: KUBERNETES_DISABLE_HOSTNAME_VERIFICATION value: "true" ``` --- pkg/controller/sparkapplication/submission.go | 4 ++ .../sparkapplication/submission_test.go | 51 +++++++++++++++++++ 2 files changed, 55 insertions(+) diff --git a/pkg/controller/sparkapplication/submission.go b/pkg/controller/sparkapplication/submission.go index b8e4e1b8de..98bb882e05 100644 --- a/pkg/controller/sparkapplication/submission.go +++ b/pkg/controller/sparkapplication/submission.go @@ -207,6 +207,10 @@ func getMasterURL() (string, error) { if kubernetesServicePort == "" { return "", fmt.Errorf("environment variable %s is not found", kubernetesServicePortEnvVar) } + // check if the host is IPv6 address + if strings.Contains(kubernetesServiceHost, ":") && !strings.HasPrefix(kubernetesServiceHost, "[") { + return fmt.Sprintf("k8s://https://[%s]:%s", kubernetesServiceHost, kubernetesServicePort), nil + } return fmt.Sprintf("k8s://https://%s:%s", kubernetesServiceHost, kubernetesServicePort), nil } diff --git a/pkg/controller/sparkapplication/submission_test.go b/pkg/controller/sparkapplication/submission_test.go index 9f32bbb15f..20e247a01b 100644 --- a/pkg/controller/sparkapplication/submission_test.go +++ b/pkg/controller/sparkapplication/submission_test.go @@ -583,3 +583,54 @@ func TestProxyUserArg(t *testing.T) { assert.Equal(t, "--proxy-user", args[4]) assert.Equal(t, "foo", args[5]) } + +func Test_getMasterURL(t *testing.T) { + setEnv := func(host string, port string) { + if err := os.Setenv(kubernetesServiceHostEnvVar, host); err != nil { + t.Fatal(err) + } + if err := os.Setenv(kubernetesServicePortEnvVar, port); err != nil { + t.Fatal(err) + } + } + + tests := []struct { + name string + host string + port string + want string + wantErr assert.ErrorAssertionFunc + }{ + { + name: "should return a valid master url when IPv4 address is used", + host: "localhost", + port: "6443", + want: "k8s://https://localhost:6443", + wantErr: assert.NoError, + }, + { + name: "should return a valid master url when IPv6 address is used", + host: "::1", + port: "6443", + want: "k8s://https://[::1]:6443", + wantErr: assert.NoError, + }, + { + name: "should throw an error when the host is empty", + host: "", + port: "6443", + want: "", + wantErr: assert.Error, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + setEnv(tt.host, tt.port) + got, err := getMasterURL() + if !tt.wantErr(t, err, fmt.Sprintf("getMasterURL()")) { + return + } + assert.Equalf(t, tt.want, got, "getMasterURL()") + }) + } +} From 223ec8d3177ad740d6c094cf62b418916d14ff4b Mon Sep 17 00:00:00 2001 From: Alexander Ryabov <73594+haron@users.noreply.github.com> Date: Thu, 26 Oct 2023 20:26:13 +0400 Subject: [PATCH 15/87] Link to helm upgrade docs fixed (#1783) --- docs/quick-start-guide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/quick-start-guide.md b/docs/quick-start-guide.md index 8913666909..b473e3533e 100644 --- a/docs/quick-start-guide.md +++ b/docs/quick-start-guide.md @@ -162,7 +162,7 @@ To upgrade the the operator, e.g., to use a newer version container image with a $ helm upgrade --set image.repository=org/image --set image.tag=newTag ``` -Refer to the Helm 
[documentation](https://docs.helm.sh/helm/#helm-upgrade) for more details on `helm upgrade`. +Refer to the Helm [documentation](https://helm.sh/docs/helm/helm_upgrade/) for more details on `helm upgrade`. ## About the Spark Job Namespace From 93a9aa88df6696008036167a018ea2feb8d849a3 Mon Sep 17 00:00:00 2001 From: JunaidChaudry <49313431+JunaidChaudry@users.noreply.github.com> Date: Thu, 26 Oct 2023 12:29:29 -0400 Subject: [PATCH 16/87] Updating webhook docs to also mention eks (#1763) --- docs/quick-start-guide.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/quick-start-guide.md b/docs/quick-start-guide.md index b473e3533e..01dd103576 100644 --- a/docs/quick-start-guide.md +++ b/docs/quick-start-guide.md @@ -18,7 +18,7 @@ For a more detailed guide on how to use, compose, and work with `SparkApplicatio - [Work Queue Metrics](#work-queue-metrics) - [Driver UI Access and Ingress](#driver-ui-access-and-ingress) - [About the Mutating Admission Webhook](#about-the-mutating-admission-webhook) - - [Mutating Admission Webhooks on a private GKE cluster](#mutating-admission-webhooks-on-a-private-gke-cluster) + - [Mutating Admission Webhooks on a private GKE or EKS cluster](#mutating-admission-webhooks-on-a-private-gke-or-eks-cluster) ## Installation @@ -267,9 +267,9 @@ $ kubectl apply -f manifest/spark-operator-with-webhook.yaml This will create a Deployment named `sparkoperator` and a Service named `spark-webhook` for the webhook in namespace `spark-operator`. -### Mutating Admission Webhooks on a private GKE cluster +### Mutating Admission Webhooks on a private GKE or EKS cluster -If you are deploying the operator on a GKE cluster with the [Private cluster](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters) setting enabled, and you wish to deploy the cluster with the [Mutating Admission Webhook](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/), then make sure to change the `webhookPort` to `443`. Alternatively you can choose to allow connections to the default port (8080). +If you are deploying the operator on a GKE cluster with the [Private cluster](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters) setting enabled, or on an enterprise AWS EKS cluster and you wish to deploy the cluster with the [Mutating Admission Webhook](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/), then make sure to change the `webhookPort` to `443`. Alternatively you can choose to allow connections to the default port (8080). > By default, firewall rules restrict your cluster master to only initiate TCP connections to your nodes on ports 443 (HTTPS) and 10250 (kubelet). For some Kubernetes features, you might need to add firewall rules to allow access on additional ports. For example, in Kubernetes 1.9 and older, kubectl top accesses heapster, which needs a firewall rule to allow TCP connections on port 8080. To grant such access, you can add firewall rules. [From the docs](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters#add_firewall_rules) From 86d71c828bd05fb0486f4190efacf839771eba16 Mon Sep 17 00:00:00 2001 From: ordukhanian Date: Thu, 26 Oct 2023 20:30:27 +0400 Subject: [PATCH 17/87] * Added support for `seccompProfile` in Spark application CRD. It is necessary for Kubernetes Pod Security Standards Restricted profile. (#1768) https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted * Fixed pre-commit jobs. 
`build-helm-chart` and `integration-test` were failing with: `Run manusa/actions-setup-minikube@v2.4.2 Error: Unsupported OS, action only works in Ubuntu 18 or 20` --- .github/workflows/main.yaml | 4 +- ...tor.k8s.io_scheduledsparkapplications.yaml | 30 +++++++++++ ...parkoperator.k8s.io_sparkapplications.yaml | 30 +++++++++++ docs/api-docs.md | 14 +++++ ...tor.k8s.io_scheduledsparkapplications.yaml | 53 +++++++++++++++++++ ...parkoperator.k8s.io_sparkapplications.yaml | 53 +++++++++++++++++++ 6 files changed, 182 insertions(+), 2 deletions(-) diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index c896e0a82a..9f2afd4f30 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -98,7 +98,7 @@ jobs: done build-helm-chart: - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 steps: - name: Checkout source code uses: actions/checkout@v2 @@ -153,7 +153,7 @@ jobs: ct install integration-test: - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 steps: - name: Checkout source code uses: actions/checkout@v2 diff --git a/charts/spark-operator-chart/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml b/charts/spark-operator-chart/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml index 1d23c4d8f9..20e1e00126 100644 --- a/charts/spark-operator-chart/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml +++ b/charts/spark-operator-chart/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml @@ -919,6 +919,11 @@ spec: type: object securityContext: properties: + seccompProfile: + type: object + properties: + type: + type: string allowPrivilegeEscalation: type: boolean capabilities: @@ -1272,6 +1277,11 @@ spec: type: array securityContext: properties: + seccompProfile: + type: object + properties: + type: + type: string allowPrivilegeEscalation: type: boolean capabilities: @@ -1704,6 +1714,11 @@ spec: type: object securityContext: properties: + seccompProfile: + type: object + properties: + type: + type: string allowPrivilegeEscalation: type: boolean capabilities: @@ -2731,6 +2746,11 @@ spec: type: object securityContext: properties: + seccompProfile: + type: object + properties: + type: + type: string allowPrivilegeEscalation: type: boolean capabilities: @@ -2980,6 +3000,11 @@ spec: type: array securityContext: properties: + seccompProfile: + type: object + properties: + type: + type: string allowPrivilegeEscalation: type: boolean capabilities: @@ -3408,6 +3433,11 @@ spec: type: object securityContext: properties: + seccompProfile: + type: object + properties: + type: + type: string allowPrivilegeEscalation: type: boolean capabilities: diff --git a/charts/spark-operator-chart/crds/sparkoperator.k8s.io_sparkapplications.yaml b/charts/spark-operator-chart/crds/sparkoperator.k8s.io_sparkapplications.yaml index 5ca31001cb..63e70b276e 100644 --- a/charts/spark-operator-chart/crds/sparkoperator.k8s.io_sparkapplications.yaml +++ b/charts/spark-operator-chart/crds/sparkoperator.k8s.io_sparkapplications.yaml @@ -905,6 +905,11 @@ spec: type: object securityContext: properties: + seccompProfile: + type: object + properties: + type: + type: string allowPrivilegeEscalation: type: boolean capabilities: @@ -1258,6 +1263,11 @@ spec: type: array securityContext: properties: + seccompProfile: + type: object + properties: + type: + type: string allowPrivilegeEscalation: type: boolean capabilities: @@ -1690,6 +1700,11 @@ spec: type: object securityContext: properties: + seccompProfile: + type: object + properties: + type: + type: string allowPrivilegeEscalation: type: 
boolean capabilities: @@ -2717,6 +2732,11 @@ spec: type: object securityContext: properties: + seccompProfile: + type: object + properties: + type: + type: string allowPrivilegeEscalation: type: boolean capabilities: @@ -2966,6 +2986,11 @@ spec: type: array securityContext: properties: + seccompProfile: + type: object + properties: + type: + type: string allowPrivilegeEscalation: type: boolean capabilities: @@ -3394,6 +3419,11 @@ spec: type: object securityContext: properties: + seccompProfile: + type: object + properties: + type: + type: string allowPrivilegeEscalation: type: boolean capabilities: diff --git a/docs/api-docs.md b/docs/api-docs.md index 5889e415ed..0403b6e587 100644 --- a/docs/api-docs.md +++ b/docs/api-docs.md @@ -1293,6 +1293,20 @@ GC settings or other logging.
+lifecycle
+Kubernetes core/v1.Lifecycle
+(Optional)
+Lifecycle for running preStop or postStart commands

deleteOnTermination
bool diff --git a/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml b/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml index 3ee45fb2a4..20e1e00126 100644 --- a/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml +++ b/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml @@ -919,6 +919,11 @@ spec: type: object securityContext: properties: + seccompProfile: + type: object + properties: + type: + type: string allowPrivilegeEscalation: type: boolean capabilities: @@ -1272,6 +1277,11 @@ spec: type: array securityContext: properties: + seccompProfile: + type: object + properties: + type: + type: string allowPrivilegeEscalation: type: boolean capabilities: @@ -1704,6 +1714,11 @@ spec: type: object securityContext: properties: + seccompProfile: + type: object + properties: + type: + type: string allowPrivilegeEscalation: type: boolean capabilities: @@ -2731,6 +2746,11 @@ spec: type: object securityContext: properties: + seccompProfile: + type: object + properties: + type: + type: string allowPrivilegeEscalation: type: boolean capabilities: @@ -2980,6 +3000,11 @@ spec: type: array securityContext: properties: + seccompProfile: + type: object + properties: + type: + type: string allowPrivilegeEscalation: type: boolean capabilities: @@ -3408,6 +3433,11 @@ spec: type: object securityContext: properties: + seccompProfile: + type: object + properties: + type: + type: string allowPrivilegeEscalation: type: boolean capabilities: @@ -3936,6 +3966,29 @@ spec: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true type: object + ephemeral: + properties: + volumeClaimTemplate: + properties: + spec: + properties: + accessModes: + items: + type: string + type: array + resources: + properties: + requests: + properties: + storage: + type: string + type: object + type: object + storageClassName: + type: string + type: object + type: object + type: object fc: properties: fsType: diff --git a/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml b/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml index fbb52e2aa0..63e70b276e 100644 --- a/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml +++ b/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml @@ -905,6 +905,11 @@ spec: type: object securityContext: properties: + seccompProfile: + type: object + properties: + type: + type: string allowPrivilegeEscalation: type: boolean capabilities: @@ -1258,6 +1263,11 @@ spec: type: array securityContext: properties: + seccompProfile: + type: object + properties: + type: + type: string allowPrivilegeEscalation: type: boolean capabilities: @@ -1690,6 +1700,11 @@ spec: type: object securityContext: properties: + seccompProfile: + type: object + properties: + type: + type: string allowPrivilegeEscalation: type: boolean capabilities: @@ -2717,6 +2732,11 @@ spec: type: object securityContext: properties: + seccompProfile: + type: object + properties: + type: + type: string allowPrivilegeEscalation: type: boolean capabilities: @@ -2966,6 +2986,11 @@ spec: type: array securityContext: properties: + seccompProfile: + type: object + properties: + type: + type: string allowPrivilegeEscalation: type: boolean capabilities: @@ -3394,6 +3419,11 @@ spec: type: object securityContext: properties: + seccompProfile: + type: object + properties: + type: + type: string allowPrivilegeEscalation: type: boolean capabilities: @@ -3924,6 +3954,29 @@ spec: pattern: 
^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true type: object + ephemeral: + properties: + volumeClaimTemplate: + properties: + spec: + properties: + accessModes: + items: + type: string + type: array + resources: + properties: + requests: + properties: + storage: + type: string + type: object + type: object + storageClassName: + type: string + type: object + type: object + type: object fc: properties: fsType: From f21a4e51e791562becddea32fb8b1f0da8a6129d Mon Sep 17 00:00:00 2001 From: Xuefeng Chen Date: Thu, 26 Oct 2023 09:31:03 -0700 Subject: [PATCH 18/87] allowing optional annotation on rbac (#1770) --- charts/spark-operator-chart/README.md | 1 + charts/spark-operator-chart/templates/rbac.yaml | 3 +++ charts/spark-operator-chart/values.yaml | 2 ++ 3 files changed, 6 insertions(+) diff --git a/charts/spark-operator-chart/README.md b/charts/spark-operator-chart/README.md index 2314c82be6..50c7cd8406 100644 --- a/charts/spark-operator-chart/README.md +++ b/charts/spark-operator-chart/README.md @@ -112,6 +112,7 @@ All charts linted successfully | rbac.create | bool | `false` | **DEPRECATED** use `createRole` and `createClusterRole` | | rbac.createClusterRole | bool | `true` | Create and use RBAC `ClusterRole` resources | | rbac.createRole | bool | `true` | Create and use RBAC `Role` resources | +| rbac.annotations | object | `{}` | Optional annotations for the spark rbac | | replicaCount | int | `1` | Desired number of pods, leaderElection will be enabled if this is greater than 1 | | resourceQuotaEnforcement.enable | bool | `false` | Whether to enable the ResourceQuota enforcement for SparkApplication resources. Requires the webhook to be enabled by setting `webhook.enable` to true. Ref: https://github.com/GoogleCloudPlatform/spark-on-k8s-operator/blob/master/docs/user-guide.md#enabling-resource-quota-enforcement. | | resources | object | `{}` | Pod resource requests and limits Note, that each job submission will spawn a JVM within the Spark Operator Pod using "/usr/local/openjdk-11/bin/java -Xmx128m". Kubernetes may kill these Java processes at will to enforce resource limits. When that happens, you will see the following error: 'failed to run spark-submit for SparkApplication [...]: signal: killed' - when this happens, you may want to increase memory limits. | diff --git a/charts/spark-operator-chart/templates/rbac.yaml b/charts/spark-operator-chart/templates/rbac.yaml index 6f5d97c0d6..c78a73df94 100644 --- a/charts/spark-operator-chart/templates/rbac.yaml +++ b/charts/spark-operator-chart/templates/rbac.yaml @@ -7,6 +7,9 @@ metadata: "helm.sh/hook": pre-install, pre-upgrade "helm.sh/hook-delete-policy": hook-failed, before-hook-creation "helm.sh/hook-weight": "-10" +{{- with .Values.rbac.annotations }} +{{ toYaml . | indent 4 }} +{{- end }} labels: {{- include "spark-operator.labels" . 
| nindent 4 }} rules: diff --git a/charts/spark-operator-chart/values.yaml b/charts/spark-operator-chart/values.yaml index c7d672f60c..3436a8f758 100644 --- a/charts/spark-operator-chart/values.yaml +++ b/charts/spark-operator-chart/values.yaml @@ -33,6 +33,8 @@ rbac: createRole: true # -- Create and use RBAC `ClusterRole` resources createClusterRole: true + # -- Optional annotations for rbac + annotations: {} serviceAccounts: spark: From 678d313650c1ae68376e1a46d2c4af6f6933ead9 Mon Sep 17 00:00:00 2001 From: Jose Soto Date: Thu, 26 Oct 2023 13:31:55 -0300 Subject: [PATCH 19/87] Add qualytics to who is using (#1736) Co-authored-by: Yinan Li --- docs/who-is-using.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/who-is-using.md b/docs/who-is-using.md index c9c2fd49e6..4f5e41f0da 100644 --- a/docs/who-is-using.md +++ b/docs/who-is-using.md @@ -41,4 +41,5 @@ | [C2FO](https://www.c2fo.com/) | @vanhoale | Production | Data Platform / Data Infrastructure | | [Kognita](https://kognita.com.br/) | @andreclaudino | Production | MLOps, Data Platform / Data Infrastructure, ML/AI | | [Molex](https://www.molex.com/) | @AshishPushpSingh | Evaluation/Production | Data Platform | +| [Qualytics](https://www.qualytics.co/) | @josecsotomorales | Production | Data Quality Platform | | [Roblox](https://www.roblox.com/) | @matschaffer-roblox | Evaluation | Data Infrastructure | From e1468489592076a558d86cd0d92eda65cb6cc484 Mon Sep 17 00:00:00 2001 From: Volodymyr Kot Date: Thu, 26 Oct 2023 17:33:58 +0100 Subject: [PATCH 20/87] Fix docs for Volcano integration (#1719) Co-authored-by: Volodymyr Kot --- docs/volcano-integration.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/volcano-integration.md b/docs/volcano-integration.md index 30cd91f8da..ecb325d3b5 100644 --- a/docs/volcano-integration.md +++ b/docs/volcano-integration.md @@ -16,7 +16,7 @@ same environment, please refer [Quick Start Guide](https://github.com/volcano-sh Within the help of Helm chart, Kubernetes Operator for Apache Spark with Volcano can be easily installed with the command below: ```bash $ helm repo add spark-operator https://googlecloudplatform.github.io/spark-on-k8s-operator -$ helm install my-release spark-operator/spark-operator --namespace spark-operator --set enableBatchScheduler=true --set enableWebhook=true +$ helm install my-release spark-operator/spark-operator --namespace spark-operator --set batchScheduler.enable=true --set webhook.enable=true ``` # Run Spark Application with Volcano scheduler From cd78251daf70d4c257c9c099bb81098193a7323f Mon Sep 17 00:00:00 2001 From: Wilton Rodrigues Date: Fri, 27 Oct 2023 16:17:38 -0300 Subject: [PATCH 21/87] Handle invalid API resources in discovery (#1758) --- pkg/util/capabilities.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/pkg/util/capabilities.go b/pkg/util/capabilities.go index 27b7f712b0..5040da6e96 100644 --- a/pkg/util/capabilities.go +++ b/pkg/util/capabilities.go @@ -19,6 +19,8 @@ package util import ( "strings" + "github.com/golang/glog" + "k8s.io/client-go/discovery" "k8s.io/client-go/kubernetes" ) @@ -44,7 +46,11 @@ func getPreferredAvailableAPIs(client kubernetes.Interface, kind string) (Capabi discoveryclient := client.Discovery() lists, err := discoveryclient.ServerPreferredResources() if err != nil { - return nil, err + if discovery.IsGroupDiscoveryFailedError(err) { + glog.Infof("There is an orphaned API service. 
Server reports: %s", err) + } else { + return nil, err + } } caps := Capabilities{} From b49b1b48d8292940216c0784b7c23c2754cc3856 Mon Sep 17 00:00:00 2001 From: Jacob Salway Date: Fri, 10 Nov 2023 07:34:20 +1100 Subject: [PATCH 22/87] Add Rokt to who-is-using.md (#1867) --- docs/who-is-using.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/who-is-using.md b/docs/who-is-using.md index 4f5e41f0da..ca953f8135 100644 --- a/docs/who-is-using.md +++ b/docs/who-is-using.md @@ -43,3 +43,4 @@ | [Molex](https://www.molex.com/) | @AshishPushpSingh | Evaluation/Production | Data Platform | | [Qualytics](https://www.qualytics.co/) | @josecsotomorales | Production | Data Quality Platform | | [Roblox](https://www.roblox.com/) | @matschaffer-roblox | Evaluation | Data Infrastructure | +| [Rokt](https://www.rokt.com) | @jacobsalway | Production | Data Infrastructure | From 8cb6c8035eabd73c045f4078b27fa35767e2a52c Mon Sep 17 00:00:00 2001 From: disaster37 Date: Thu, 9 Nov 2023 21:43:12 +0100 Subject: [PATCH 23/87] fix: fix issue #1723 about spark-operator not working with volcano on OCP (#1724) * fix: fix issue #1723 about spark-operator not working with volcano on OCP Signed-off-by: disaster37 * Update volcano_scheduler.go --------- Signed-off-by: disaster37 --- charts/spark-operator-chart/templates/rbac.yaml | 2 ++ manifest/spark-operator-install/spark-operator-rbac.yaml | 2 +- pkg/batchscheduler/volcano/volcano_scheduler.go | 7 +++++-- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/charts/spark-operator-chart/templates/rbac.yaml b/charts/spark-operator-chart/templates/rbac.yaml index c78a73df94..6f9b57292a 100644 --- a/charts/spark-operator-chart/templates/rbac.yaml +++ b/charts/spark-operator-chart/templates/rbac.yaml @@ -85,8 +85,10 @@ rules: resources: - sparkapplications - sparkapplications/status + - sparkapplications/finalizers - scheduledsparkapplications - scheduledsparkapplications/status + - scheduledsparkapplications/finalizers verbs: - "*" {{- if .Values.batchScheduler.enable }} diff --git a/manifest/spark-operator-install/spark-operator-rbac.yaml b/manifest/spark-operator-install/spark-operator-rbac.yaml index ec613ad323..71a053b487 100644 --- a/manifest/spark-operator-install/spark-operator-rbac.yaml +++ b/manifest/spark-operator-install/spark-operator-rbac.yaml @@ -58,7 +58,7 @@ rules: resources: ["mutatingwebhookconfigurations", "validatingwebhookconfigurations"] verbs: ["create", "get", "update", "delete"] - apiGroups: ["sparkoperator.k8s.io"] - resources: ["sparkapplications", "scheduledsparkapplications", "sparkapplications/status", "scheduledsparkapplications/status"] + resources: ["sparkapplications", "scheduledsparkapplications", "sparkapplications/status", "scheduledsparkapplications/status", "sparkapplications/finalizers", "scheduledsparkapplications/finalizers"] verbs: ["*"] - apiGroups: ["scheduling.volcano.sh"] resources: ["podgroups", "queues", "queues/status"] diff --git a/pkg/batchscheduler/volcano/volcano_scheduler.go b/pkg/batchscheduler/volcano/volcano_scheduler.go index 1e14953351..31179ebf2a 100644 --- a/pkg/batchscheduler/volcano/volcano_scheduler.go +++ b/pkg/batchscheduler/volcano/volcano_scheduler.go @@ -115,9 +115,12 @@ func (v *VolcanoBatchScheduler) getAppPodGroupName(app *v1beta2.SparkApplication } func (v *VolcanoBatchScheduler) syncPodGroup(app *v1beta2.SparkApplication, size int32, minResource corev1.ResourceList) error { - var err error + var ( + err error + pg *v1beta1.PodGroup + ) podGroupName := v.getAppPodGroupName(app) - if pg, 
err := v.volcanoClient.SchedulingV1beta1().PodGroups(app.Namespace).Get(context.TODO(), podGroupName, metav1.GetOptions{}); err != nil { + if pg, err = v.volcanoClient.SchedulingV1beta1().PodGroups(app.Namespace).Get(context.TODO(), podGroupName, metav1.GetOptions{}); err != nil { if !errors.IsNotFound(err) { return err } From 0a448230027c8d7dee4c0fecf32882236bbfe52c Mon Sep 17 00:00:00 2001 From: James Liu <37026441+zijianjoy@users.noreply.github.com> Date: Fri, 15 Mar 2024 10:20:01 -0700 Subject: [PATCH 24/87] Create OWNERS https://github.com/kubeflow/community/issues/684 --- OWNERS | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 OWNERS diff --git a/OWNERS b/OWNERS new file mode 100644 index 0000000000..dc9b5fe9f0 --- /dev/null +++ b/OWNERS @@ -0,0 +1,5 @@ +approvers: + - andreyvelich + - mwielgus + - yuchaoran2011 + - vara-bonthu From ec1c2e314ff9e01751e7e09ca1bdec09fa1a6ae0 Mon Sep 17 00:00:00 2001 From: karbyshevds Date: Tue, 2 Apr 2024 19:17:22 +0300 Subject: [PATCH 25/87] Allow to set webhook job resource limits (#1429,#1300) (#1946) Signed-off-by: Dmitriy Karbyshev --- charts/spark-operator-chart/README.md | 2 ++ .../templates/webhook-cleanup-job.yaml | 2 ++ .../templates/webhook-init-job.yaml | 2 ++ charts/spark-operator-chart/values.yaml | 17 +++++++++++++++++ 4 files changed, 23 insertions(+) diff --git a/charts/spark-operator-chart/README.md b/charts/spark-operator-chart/README.md index 50c7cd8406..004d1248e7 100644 --- a/charts/spark-operator-chart/README.md +++ b/charts/spark-operator-chart/README.md @@ -129,9 +129,11 @@ All charts linted successfully | uiService.enable | bool | `true` | Enable UI service creation for Spark application | | webhook.cleanupAnnotations | object | `{"helm.sh/hook":"pre-delete, pre-upgrade","helm.sh/hook-delete-policy":"hook-succeeded"}` | The annotations applied to the cleanup job, required for helm lifecycle hooks | | webhook.cleanupPodLabels | object | `{}` | The podLabels applied to the pod of the cleanup job | +| webhook.cleanupResources | object | `{}` | Cleanup job Pod resource requests and limits | | webhook.enable | bool | `false` | Enable webhook server | | webhook.initAnnotations | object | `{"helm.sh/hook":"pre-install, pre-upgrade","helm.sh/hook-weight":"50"}` | The annotations applied to init job, required to restore certs deleted by the cleanup job during upgrade | | webhook.initPodLabels | object | `{}` | The podLabels applied to the pod of the init job | +| webhook.initResources | object | `{}` | Init job Pod resource requests and limits | | webhook.namespaceSelector | string | `""` | The webhook server will only operate on namespaces with this label, specified in the form key1=value1,key2=value2. Empty string (default) will operate on all namespaces | | webhook.port | int | `8080` | Webhook service port | | webhook.timeout | int | `30` | | diff --git a/charts/spark-operator-chart/templates/webhook-cleanup-job.yaml b/charts/spark-operator-chart/templates/webhook-cleanup-job.yaml index a6f2c1ea96..f115d955c5 100644 --- a/charts/spark-operator-chart/templates/webhook-cleanup-job.yaml +++ b/charts/spark-operator-chart/templates/webhook-cleanup-job.yaml @@ -49,6 +49,8 @@ spec: -H \"Content-Type: application/json\" \ --data \"{\\\"kind\\\":\\\"DeleteOptions\\\",\\\"apiVersion\\\":\\\"batch/v1\\\",\\\"propagationPolicy\\\":\\\"Foreground\\\"}\" \ https://kubernetes.default.svc/apis/batch/v1/namespaces/{{ .Release.Namespace }}/jobs/{{ include "spark-operator.fullname" . 
}}-webhook-init" + resources: + {{- toYaml .Values.webhook.cleanupResources | nindent 10 }} {{- with .Values.tolerations }} tolerations: {{- toYaml . | nindent 8 }} diff --git a/charts/spark-operator-chart/templates/webhook-init-job.yaml b/charts/spark-operator-chart/templates/webhook-init-job.yaml index 009879d997..09f398c4c1 100644 --- a/charts/spark-operator-chart/templates/webhook-init-job.yaml +++ b/charts/spark-operator-chart/templates/webhook-init-job.yaml @@ -39,6 +39,8 @@ spec: "-r", "{{ include "spark-operator.fullname" . }}-webhook-certs", "-p" ] + resources: + {{- toYaml .Values.webhook.initResources | nindent 10 }} {{- with .Values.tolerations }} tolerations: {{- toYaml . | nindent 8 }} diff --git a/charts/spark-operator-chart/values.yaml b/charts/spark-operator-chart/values.yaml index 3436a8f758..ba20921d31 100644 --- a/charts/spark-operator-chart/values.yaml +++ b/charts/spark-operator-chart/values.yaml @@ -105,6 +105,14 @@ webhook: "helm.sh/hook-weight": "50" # -- The podLabels applied to the pod of the init job initPodLabels: {} + # -- Resources applied to init job + initResources: {} + # limits: + # cpu: 100m + # memory: 300Mi + # requests: + # cpu: 100m + # memory: 300Mi # -- The annotations applied to the cleanup job, required for helm lifecycle hooks cleanupAnnotations: "helm.sh/hook": pre-delete, pre-upgrade @@ -112,6 +120,15 @@ webhook: # -- Webhook Timeout in seconds # -- The podLabels applied to the pod of the cleanup job cleanupPodLabels: {} + # -- Resources applied to cleanup job + cleanupResources: {} + # limits: + # cpu: 100m + # memory: 300Mi + # requests: + # cpu: 100m + # memory: 300Mi + timeout: 30 metrics: From 717a3eccb1763709bcb887ecd23d2a07ce2716f4 Mon Sep 17 00:00:00 2001 From: Yang Date: Sun, 7 Apr 2024 01:32:25 +0800 Subject: [PATCH 26/87] Add PVC permission to Operator role (#1889) --- charts/spark-operator-chart/Chart.yaml | 2 +- charts/spark-operator-chart/templates/rbac.yaml | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/charts/spark-operator-chart/Chart.yaml b/charts/spark-operator-chart/Chart.yaml index 437804b35b..d41ef4b0e6 100644 --- a/charts/spark-operator-chart/Chart.yaml +++ b/charts/spark-operator-chart/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: spark-operator description: A Helm chart for Spark on Kubernetes operator -version: 1.1.28 +version: 1.1.29 appVersion: v1beta2-1.3.8-3.1.1 keywords: - spark diff --git a/charts/spark-operator-chart/templates/rbac.yaml b/charts/spark-operator-chart/templates/rbac.yaml index 6f9b57292a..f19e5f1cd1 100644 --- a/charts/spark-operator-chart/templates/rbac.yaml +++ b/charts/spark-operator-chart/templates/rbac.yaml @@ -17,6 +17,7 @@ rules: - "" resources: - pods + - persistentvolumeclaims verbs: - "*" - apiGroups: From 0dd08b220efaad5733bc98b576fd83fb4710bed7 Mon Sep 17 00:00:00 2001 From: Taeyeop Kim Date: Sun, 7 Apr 2024 02:33:25 +0900 Subject: [PATCH 27/87] remove unmatched quotes from user-guide.md (#1584) --- docs/user-guide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/user-guide.md b/docs/user-guide.md index 1aeeb1248e..4d2e7102de 100644 --- a/docs/user-guide.md +++ b/docs/user-guide.md @@ -130,7 +130,7 @@ spec: sparkConf: spark.ui.port: "4045" spark.eventLog.enabled: "true" - spark.eventLog.dir": "hdfs://hdfs-namenode-1:8020/spark/spark-events" + spark.eventLog.dir: "hdfs://hdfs-namenode-1:8020/spark/spark-events" ``` ### Specifying Hadoop Configuration From 14a39e63a0244209cf41b5fb4e0934564dff9dc2 Mon Sep 17 00:00:00 2001 From: Ataf Fazledin 
Ahamed
Date: Sat, 6 Apr 2024 23:34:25 +0600
Subject: [PATCH 28/87] BUGFIX: Added cancel method to fix context leak
 (#1917)

Signed-off-by: fazledyn-or
---
 sparkctl/cmd/event.go | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/sparkctl/cmd/event.go b/sparkctl/cmd/event.go
index 5b645b7c5e..c0c2a5a81c 100644
--- a/sparkctl/cmd/event.go
+++ b/sparkctl/cmd/event.go
@@ -154,7 +154,8 @@
 	table = prepareNewTable()
 	table.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false})
 	ctx := context.TODO()
-	ctx, _ = context.WithTimeout(ctx, watchExpire)
+	ctx, cancel := context.WithTimeout(ctx, watchExpire)
+	defer cancel()
 	_, err := clientWatch.UntilWithoutRetry(ctx, events, func(ev watch.Event) (bool, error) {
 		if event, isEvent := ev.Object.(*v1.Event); isEvent {
 			// Ensure to display events which are newer than last creation time of SparkApplication

From 43f489dd9a1cc2669c42d1c90d7f278ca4e67f5e Mon Sep 17 00:00:00 2001
From: shahsiddharth08 <83676694+shahsiddharth08@users.noreply.github.com>
Date: Sat, 6 Apr 2024 14:43:25 -0400
Subject: [PATCH 29/87] README for installing operator using kustomize with
 custom namespace and service name (#1778)

* Installing operator using kustomize and custom namespace and service name

* update quick start guide with suggested changes.
---
 docs/quick-start-guide.md | 70 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 70 insertions(+)

diff --git a/docs/quick-start-guide.md b/docs/quick-start-guide.md
index 01dd103576..9c2ec3edb9 100644
--- a/docs/quick-start-guide.md
+++ b/docs/quick-start-guide.md
@@ -47,7 +47,77 @@ Now you should see the operator running in the cluster by checking the status of
 ```bash
 $ helm status --namespace spark-operator my-release
 ```
+### Installation using kustomize
+You can also install `spark-operator` using [kustomize](https://github.com/kubernetes-sigs/kustomize). Run:
+
+```
+kubectl apply -k {manifest_directory}
+```
+The default kustomize manifest directory is part of the repo [here](https://github.com/GoogleCloudPlatform/spark-on-k8s-operator/tree/master/manifest/spark-operator-with-webhook-install).
+
+The manifest directory contains primarily the `crds` and `spark-operator-with-webhook.yaml`, which holds the configuration of the Spark operator init job, a webhook service, and finally a deployment.
+
+With the above manifests, `spark-operator` is installed in the default namespace `spark-operator` with the default webhook service `spark-webhook`. If you wish to install `spark-operator` in a namespace other than `spark-operator`, with a webhook service name other than `spark-webhook`, pass the desired namespace name and service name as arguments in the `command` field under `containers`, as the manifests below show.
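+Before the manifests, a rough end-to-end sketch of a customized install may help. It is illustrative only: the directory is the repo's default manifest directory linked above, the `myorg-*` names are the placeholders used in the manifests that follow, and only standard `kubectl` subcommands are used.
+
+```bash
+# Apply the edited manifests; adjust the path to wherever your copy of the
+# kustomization lives (the repo default directory is assumed here).
+kubectl apply -k manifest/spark-operator-with-webhook-install
+
+# Once the cert-generation init job completes, the operator deployment and
+# the webhook service should both exist in the custom namespace.
+kubectl -n myorg-spark-operator get jobs,deployments,services
+```
+
+With that flow in mind, the customized `Job` manifest looks like below.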
+
+```
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: sparkoperator-init
+  namespace: myorg-spark-operator
+  labels:
+    app.kubernetes.io/name: sparkoperator
+    app.kubernetes.io/version: v2.4.0-v1beta1
+spec:
+  backoffLimit: 3
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/name: sparkoperator
+        app.kubernetes.io/version: v2.4.0-v1beta1
+    spec:
+      serviceAccountName: sparkoperator
+      restartPolicy: Never
+      containers:
+      - name: main
+        image: gcr.io/spark-operator/spark-operator:v2.4.0-v1beta1-latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/gencerts.sh", "-p", "--namespace", "myorg-spark-operator", "--service", "myorg-spark-webhook"]
+```
+And the `Service` will be:
+
+```
+kind: Service
+apiVersion: v1
+metadata:
+  name: myorg-spark-webhook
+...
+```
+
+And the `args` in the `Deployment` will look like:
+
+```
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: sparkoperator
+...
+
+        args:
+        - -logtostderr
+        - -enable-webhook=true
+        - -v=2
+        - -webhook-svc-namespace=myorg-spark-operator
+        - -webhook-svc-name=myorg-spark-webhook
+```
+
+This will install `spark-operator` in the `myorg-spark-operator` namespace, and the webhook service will be called `myorg-spark-webhook`.
+
+To uninstall the operator, run
+```
+kustomize build '{manifest_directory}' | kubectl delete -f -
+```
 
 ## Running the Examples
 
 To run the Spark Pi example, run the following command:

From c9bfa816630206c414e8a2b96a702f45212bd63c Mon Sep 17 00:00:00 2001
From: Andrii Chubatiuk
Date: Sun, 7 Apr 2024 08:48:26 +0300
Subject: [PATCH 30/87] upgraded golang and dependencies (#1954)

Signed-off-by: Andrew Chubatiuk
---
 go.mod                 |  156 +++--
 go.sum                 | 1224 ++++++++++++++++++++++++++++++++++++----
 sparkctl/cmd/create.go |    2 +-
 sparkctl/cmd/gcs.go    |    4 +-
 sparkctl/cmd/s3.go     |   41 +-
 5 files changed, 1245 insertions(+), 182 deletions(-)

diff --git a/go.mod b/go.mod
index 80dd79f6e2..b07b1bb18c 100644
--- a/go.mod
+++ b/go.mod
@@ -1,75 +1,100 @@
 module github.com/GoogleCloudPlatform/spark-on-k8s-operator
 
-go 1.19
+go 1.22
 
 require (
-	cloud.google.com/go/storage v1.10.0
-	github.com/aws/aws-sdk-go v1.38.49
-	github.com/evanphx/json-patch v4.12.0+incompatible
-	github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
-	github.com/google/go-cloud v0.1.1
-	github.com/google/uuid v1.1.2
-	github.com/olekukonko/tablewriter v0.0.4
+	cloud.google.com/go/storage v1.40.0
+	github.com/aws/aws-sdk-go-v2 v1.26.1
+	github.com/aws/aws-sdk-go-v2/config v1.27.11
+	github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1
+	github.com/evanphx/json-patch v5.9.0+incompatible
+	github.com/golang/glog v1.2.1
+	github.com/google/uuid v1.6.0
+	github.com/olekukonko/tablewriter v0.0.5
 	github.com/pkg/errors v0.9.1
-	github.com/prometheus/client_golang v1.12.1
-	github.com/prometheus/client_model v0.2.0
+	github.com/prometheus/client_golang v1.19.0
+	github.com/prometheus/client_model v0.6.1
 	github.com/robfig/cron v1.2.0
-	github.com/spf13/cobra v1.4.0
-	github.com/stretchr/testify v1.8.0
-	golang.org/x/net v0.0.0-20220722155237-a158d28d115b
-	golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4
-	golang.org/x/time v0.0.0-20220210224613-90d013bbcef8
-	k8s.io/api v0.25.3
-	k8s.io/apiextensions-apiserver v0.25.3
-	k8s.io/apimachinery v0.25.3
-	k8s.io/client-go v0.25.3
+	github.com/spf13/cobra v1.8.0
+	github.com/stretchr/testify v1.9.0
+	gocloud.dev v0.37.0
+	golang.org/x/net v0.24.0
+	golang.org/x/sync v0.7.0
+	golang.org/x/time v0.5.0
+	k8s.io/api v0.29.3
+	k8s.io/apiextensions-apiserver v0.29.3
+	k8s.io/apimachinery v0.29.3
+	k8s.io/client-go v1.5.2
 	k8s.io/kubectl v0.25.3
- k8s.io/kubernetes v1.25.3 - k8s.io/utils v0.0.0-20221012122500-cfd413dd9e85 + k8s.io/kubernetes v1.29.3 + k8s.io/utils v0.0.0-20240310230437-4693a0247e57 volcano.sh/volcano v1.1.0 ) require ( - cloud.google.com/go v0.97.0 // indirect + cloud.google.com/go v0.112.2 // indirect + cloud.google.com/go/compute v1.25.1 // indirect + cloud.google.com/go/compute/metadata v0.2.3 // indirect + cloud.google.com/go/iam v1.1.7 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect - github.com/Azure/go-autorest/autorest v0.11.27 // indirect - github.com/Azure/go-autorest/autorest/adal v0.9.20 // indirect + github.com/Azure/go-autorest/autorest v0.11.29 // indirect + github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/PuerkitoBio/purell v1.1.1 // indirect - github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect + github.com/aws/aws-sdk-go v1.51.16 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.11 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 // indirect + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.15 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.20.5 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 // indirect + github.com/aws/smithy-go v1.20.2 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/elazarl/goproxy v0.0.0-20200421181703-e76ad31c14f6 // indirect - github.com/emicklei/go-restful/v3 v3.8.0 // indirect + github.com/emicklei/go-restful/v3 v3.12.0 // indirect github.com/fatih/camelcase v1.0.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-errors/errors v1.0.1 // indirect - github.com/go-logr/logr v1.2.3 // indirect - github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.19.5 // indirect - github.com/go-openapi/swag v0.19.14 // indirect + github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-openapi/swag v0.23.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-jwt/jwt/v4 v4.2.0 // indirect + github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.2 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.0.1 // indirect - 
github.com/google/gnostic v0.5.7-v3refs // indirect - github.com/google/go-cmp v0.5.8 // indirect - github.com/google/gofuzz v1.1.0 // indirect + github.com/google/gnostic v0.7.0 // indirect + github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/s2a-go v0.1.7 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect - github.com/googleapis/gax-go/v2 v2.1.1 // indirect + github.com/google/wire v0.6.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect + github.com/googleapis/gax-go/v2 v2.12.3 // indirect github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc // indirect - github.com/imdario/mergo v0.3.7 // indirect - github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/imdario/mergo v0.3.16 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect - github.com/mailru/easyjson v0.7.6 // indirect - github.com/mattn/go-runewidth v0.0.7 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-runewidth v0.0.15 // indirect github.com/moby/spdystream v0.2.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect @@ -77,34 +102,41 @@ require ( github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/common v0.32.1 // indirect - github.com/prometheus/procfs v0.7.3 // indirect + github.com/prometheus/common v0.52.2 // indirect + github.com/prometheus/procfs v0.13.0 // indirect + github.com/rivo/uniseg v0.4.7 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/xlab/treeprint v1.1.0 // indirect - go.opencensus.io v0.23.0 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.50.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0 // indirect + go.opentelemetry.io/otel v1.25.0 // indirect + go.opentelemetry.io/otel/metric v1.25.0 // indirect + go.opentelemetry.io/otel/trace v1.25.0 // indirect go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect - golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd // indirect - golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect - golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f // indirect - golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect - golang.org/x/text v0.3.7 // indirect - golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect - google.golang.org/api v0.60.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect - google.golang.org/grpc v1.47.0 // indirect - google.golang.org/protobuf v1.28.0 // indirect + golang.org/x/crypto v0.22.0 // indirect + golang.org/x/oauth2 v0.19.0 // indirect + golang.org/x/sys v0.19.0 // indirect + golang.org/x/term v0.19.0 // indirect + golang.org/x/text v0.14.0 // 
indirect + golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect + google.golang.org/api v0.172.0 // indirect + google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240401170217-c3f982113cda // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect + google.golang.org/grpc v1.63.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/cli-runtime v0.25.3 // indirect - k8s.io/klog/v2 v2.80.1 // indirect - k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect - sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect + k8s.io/klog/v2 v2.120.1 // indirect + k8s.io/kube-openapi v0.0.0-20240403164606-bc84c2ddaf99 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/kustomize/api v0.12.1 // indirect sigs.k8s.io/kustomize/kyaml v0.13.9 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect - sigs.k8s.io/yaml v1.2.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect ) replace ( diff --git a/go.sum b/go.sum index 8765d422d8..f4d211cf11 100644 --- a/go.sum +++ b/go.sum @@ -1,10 +1,10 @@ bitbucket.org/bertimus9/systemstat v0.0.0-20180207000608-0eeff89b0690/go.mod h1:Ulb78X89vxKYgdL24HMTiXYHlyHEvruOj1ZPlqeNEZM= -cloud.google.com/go v0.25.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= @@ -18,6 +18,7 @@ cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOY cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= @@ -27,29 +28,584 @@ cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWc cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0 h1:3DXvAyifywvq64LfkKaMOmkWPS1CikIQdMe2lY9vxU8= cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go 
v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= +cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +cloud.google.com/go v0.112.2 h1:ZaGT6LiG7dBzi6zNOvVZwacaXlmf3lRqnC4DQzqyRQw= +cloud.google.com/go v0.112.2/go.mod h1:iEqjp//KquGIJV/m+Pk3xecgKNhV+ry+vVTsy4TbDms= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM= +cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= +cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ= +cloud.google.com/go/aiplatform v1.36.1/go.mod h1:WTm12vJRPARNvJ+v6P52RDHCNe4AhvjcIZ/9/RRHy/k= +cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/analytics v0.17.0/go.mod h1:WXFa3WSym4IZ+JiKmavYdJwGG/CvpqiqczmL59bTD9M= +cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE= +cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= +cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= +cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY= +cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM= +cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= +cloud.google.com/go/apikeys 
v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU= +cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI= +cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8= +cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= +cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= +cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84= +cloud.google.com/go/appengine v1.7.0/go.mod h1:eZqpbHFCqRGa2aCdope7eC0SWLV1j0neb/QnMJVWx6A= +cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/area120 v0.7.0/go.mod h1:a3+8EUD1SX5RUcCs3MY5YasiO1z6yLiNLRiFrykbynY= +cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/artifactregistry v1.11.1/go.mod h1:lLYghw+Itq9SONbCa1YWBoWs1nOucMH0pwXN1rOBZFI= +cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ= +cloud.google.com/go/artifactregistry v1.12.0/go.mod h1:o6P3MIvtzTOnmvGagO9v/rOjjA0HmhJ+/6KAXrmYDCI= +cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= +cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo= +cloud.google.com/go/asset v1.12.0/go.mod h1:h9/sFOa4eDIyKmH6QMpm4eUK3pDojWnUhTgJlk762Hg= +cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= +cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= 
+cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= +cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= +cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= +cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= +cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= +cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM= +cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= +cloud.google.com/go/bigquery v1.47.0/go.mod h1:sA9XOgy0A8vQK9+MWhEQTY6Tix87M/ZurWFIxmF9I/E= +cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= +cloud.google.com/go/bigquery v1.49.0/go.mod h1:Sv8hMmTFFYBlt/ftw2uN6dFdQPzBlREY9yBh7Oy7/4Q= +cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= +cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= +cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss= +cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= +cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= +cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= +cloud.google.com/go/certificatemanager v1.6.0/go.mod 
h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= +cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= +cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE= +cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= +cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/cloudbuild v1.6.0/go.mod h1:UIbc/w9QCbH12xX+ezUsgblrWv+Cv4Tw83GiSMHOn9M= +cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg= +cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= +cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= +cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= +cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y= +cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= +cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= +cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= +cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= +cloud.google.com/go/compute v1.25.1 h1:ZRpHJedLtTpKgr3RV1Fx23NuaAEN1Zfx9hw1u4aJdjU= +cloud.google.com/go/compute v1.25.1/go.mod h1:oopOIR53ly6viBYxaDhBfJwzUAxf1zE//uf3IB011ls= +cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute/metadata v0.2.3 
h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= +cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= +cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= +cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4= +cloud.google.com/go/container v1.14.0/go.mod h1:3AoJMPhHfLDxLvrlVWaK57IXzaPnLaZq63WX59aQBfM= +cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI= +cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= +cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M= +cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0= +cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= +cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA= +cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= +cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= +cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= +cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataplex v1.4.0/go.mod 
h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ= +cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= +cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= +cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= +cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= +cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs= +cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= +cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= +cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI= +cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM= +cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4= +cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= +cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= +cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= +cloud.google.com/go/documentai v1.16.0/go.mod 
h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM= +cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc= +cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= +cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= +cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= +cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= +cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= +cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw= +cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= +cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= +cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs= +cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= +cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= +cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw= +cloud.google.com/go/functions v1.12.0/go.mod h1:AXWGrF3e2C/5ehvwYo/GH6O5s09tOPksiKhz+hH8WkA= +cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= +cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= +cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= +cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= +cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +cloud.google.com/go/gkeconnect 
v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E= +cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= +cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= +cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= +cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= +cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= +cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= +cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= +cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= +cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +cloud.google.com/go/iam v1.1.7 h1:z4VHOhwKLF/+UYXAJDFwGtNF0b6gjsW1Pk9Ml0U/IoM= +cloud.google.com/go/iam v1.1.7/go.mod h1:J4PMPg8TtyurAUvSmPj8FF3EDgY1SPRZxcUGrn7WXGA= +cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= +cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= +cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= +cloud.google.com/go/iap v1.7.0/go.mod h1:beqQx56T9O1G1yNPph+spKpNibDlYIiIixiqsQXxLIo= +cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= +cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= +cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= +cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= +cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o= +cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= +cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= +cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= +cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= 
+cloud.google.com/go/kms v1.8.0/go.mod h1:4xFEhYFqvW+4VMELtZyxomGSYtSQKzM178ylFW4jMAg= +cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= +cloud.google.com/go/kms v1.10.0/go.mod h1:ng3KTUtQQU9bPX3+QGLsflZIHlkbn8amFAMY63m8d24= +cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= +cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= +cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= +cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= +cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= +cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= +cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= +cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= +cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw= +cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= +cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= +cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= +cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= +cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= +cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= +cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= 
+cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= +cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= +cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= +cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E= +cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= +cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= +cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= +cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= +cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k= +cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= +cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= +cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= +cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE= +cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= +cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= +cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= +cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= +cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= +cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= +cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= +cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= +cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= +cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= +cloud.google.com/go/oslogin v1.5.0/go.mod 
h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU=
+cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70=
+cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo=
+cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs=
+cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0=
+cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA=
+cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk=
+cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg=
+cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE=
+cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw=
+cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc=
+cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0=
+cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI=
+cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg=
+cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs=
 cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
 cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
 cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
 cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI=
+cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0=
+cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8=
+cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4=
+cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg=
+cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k=
+cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM=
+cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4=
+cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o=
+cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk=
+cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo=
+cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE=
+cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U=
+cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA=
+cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c=
+cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg=
+cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4=
+cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac=
+cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg=
+cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c=
+cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs=
+cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70=
+cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ=
+cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y=
+cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A=
+cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA=
+cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM=
+cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ=
+cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA=
+cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0=
+cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots=
+cloud.google.com/go/resourcemanager v1.6.0/go.mod h1:YcpXGRs8fDzcUl1Xw8uOVmI8JEadvhRIkoXXUNVYcVo=
+cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI=
+cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU=
+cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg=
+cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA=
+cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4=
+cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY=
+cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc=
+cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y=
+cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14=
+cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do=
+cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo=
+cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM=
+cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg=
+cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s=
+cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI=
+cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk=
+cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44=
+cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc=
+cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc=
+cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA=
+cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4=
+cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4=
+cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU=
+cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4=
+cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0=
+cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU=
+cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q=
+cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA=
+cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8=
+cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0=
+cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU=
+cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc=
+cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk=
+cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk=
+cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0=
+cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag=
+cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU=
+cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s=
+cloud.google.com/go/servicecontrol v1.10.0/go.mod h1:pQvyvSRh7YzUF2efw7H87V92mxU8FnFDawMClGCNuAA=
+cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc=
+cloud.google.com/go/servicecontrol v1.11.1/go.mod h1:aSnNNlwEFBY+PWGQ2DoM0JJ/QUXqV5/ZD9DOLB7SnUk=
+cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs=
+cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg=
+cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4=
+cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U=
+cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY=
+cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s=
+cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco=
+cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo=
+cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc=
+cloud.google.com/go/servicemanagement v1.8.0/go.mod h1:MSS2TDlIEQD/fzsSGfCdJItQveu9NXnUniTrq/L8LK4=
+cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E=
+cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU=
+cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec=
+cloud.google.com/go/serviceusage v1.6.0/go.mod h1:R5wwQcbOWsyuOfbP9tGdAnCAc6B9DRwPG1xtWMDeuPA=
+cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4=
+cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw=
+cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A=
+cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos=
+cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk=
+cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M=
+cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM=
+cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ=
+cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0=
+cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco=
+cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0=
+cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI=
 cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
 cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
 cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
 cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
-cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA=
 cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-contrib.go.opencensus.io/exporter/stackdriver v0.0.0-20180421005815-665cf5131b71/go.mod h1:QeFzMJDAw8TXt5+aRaSuE8l5BwaMIOIlaVkBOPRuMuw=
+cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
+cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
+cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc=
+cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s=
+cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y=
+cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4=
+cloud.google.com/go/storage v1.40.0 h1:VEpDQV5CJxFmJ6ueWNsKxcr1QAYOXEgxDa+sBbJahPw=
+cloud.google.com/go/storage v1.40.0/go.mod h1:Rrj7/hKlG87BLqDJYtwR0fbPld8uJPbQ2ucUMY7Ir0g=
+cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w=
+cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I=
+cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4=
+cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw=
+cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw=
+cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g=
+cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM=
+cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA=
+cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c=
+cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8=
+cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4=
+cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc=
+cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ=
+cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg=
+cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM=
+cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28=
+cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y=
+cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA=
+cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk=
+cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs=
+cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg=
+cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0=
+cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos=
+cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos=
+cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk=
+cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw=
+cloud.google.com/go/video v1.12.0/go.mod h1:MLQew95eTuaNDEGriQdcYn0dTwf9oWiA4uYebxM5kdg=
+cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk=
+cloud.google.com/go/video v1.14.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ=
+cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ=
+cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU=
+cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4=
+cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M=
+cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU=
+cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU=
+cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0=
+cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo=
+cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo=
+cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY=
+cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E=
+cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY=
+cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0=
+cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE=
+cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g=
+cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc=
+cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY=
+cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208=
+cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8=
+cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY=
+cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w=
+cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8=
+cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes=
+cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE=
+cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg=
+cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc=
+cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A=
+cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg=
+cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo=
+cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ=
+cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng=
+cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0=
+cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M=
+cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M=
+cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA=
+cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8=
+git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc=
 github.com/Azure/azure-sdk-for-go v43.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
 github.com/Azure/azure-sdk-for-go v55.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
 github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
@@ -58,13 +614,16 @@ github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK
 github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
 github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
 github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
-github.com/Azure/go-autorest/autorest v0.11.27 h1:F3R3q42aWytozkV8ihzcgMO4OA4cuqr3bNlsEuF6//A=
 github.com/Azure/go-autorest/autorest v0.11.27/go.mod h1:7l8ybrIdUmGqZMTD0sRtAr8NvbHjfofbf8RSP2q7w7U=
+github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw=
+github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs=
 github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
 github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
 github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
-github.com/Azure/go-autorest/autorest/adal v0.9.20 h1:gJ3E98kMpFB1MFqQCvA1yFab8vthOeD4VlFRQULxahg=
 github.com/Azure/go-autorest/autorest/adal v0.9.20/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
+github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk=
+github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8=
+github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c=
 github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
 github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
 github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
@@ -86,11 +645,10 @@ github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUM
 github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20180321230639-1e456b1c68cb/go.mod h1:aJ4qN3TfrelA6NZ6AXsXRfmEVaYin3EDbSPJrKS8OXo=
 github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20200415212048-7901bc822317/go.mod h1:DF8FZRxMHMGv/vP2lQP6h+dYzzjpuRn24VeRiYn3qjQ=
 github.com/GoogleCloudPlatform/k8s-cloud-provider v1.18.1-0.20220218231025-f11817397a1b/go.mod h1:FNj4KYEAAHfYu68kRYolGoxkaJn+6mdEsaM12VTwuI0=
 github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod h1:3VYc5hodBMJ5+l/7J4xAyMeuM2PNuepvHlGs8yilUCA=
-github.com/Jeffail/gabs v0.0.0-20180420203615-7a0fed31069a/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc=
+github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk=
 github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
 github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
 github.com/Microsoft/hcsshim v0.8.10-0.20200715222032-5eafd1556990/go.mod h1:ay/0dTb7NsG8QMDfsRfLHgZo/6xAJShLe1+ePPflihk=
@@ -99,21 +657,26 @@ github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMo
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
 github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
-github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
 github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
 github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
+github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY=
+github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk=
 github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
+github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
 github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
+github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
 github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20220418222510-f25a4f6275ed/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
+github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0=
+github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI=
+github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU=
 github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
 github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
@@ -124,11 +687,48 @@ github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:l
 github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
 github.com/auth0/go-jwt-middleware v0.0.0-20170425171159-5493cabe49f7/go.mod h1:LWMyo4iOLWXHGdBki7NIht1kHru/0wM179h+d3g8ATM=
 github.com/aws/aws-sdk-go v1.6.10/go.mod h1:ZRmQr0FajVIyZ4ZzBYKG5P3ZqPz9IHG41ZoMu1ADI3k=
-github.com/aws/aws-sdk-go v1.13.20/go.mod h1:ZRmQr0FajVIyZ4ZzBYKG5P3ZqPz9IHG41ZoMu1ADI3k=
 github.com/aws/aws-sdk-go v1.28.2/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go v1.38.49 h1:E31vxjCe6a5I+mJLmUGaZobiWmg9KdWaud9IfceYeYQ=
 github.com/aws/aws-sdk-go v1.38.49/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-xray-sdk-go v1.0.0-rc.5/go.mod h1:XtMKdBQfpVut+tJEwI7+dJFRxxRdxHDyVNp2tHXRq04=
+github.com/aws/aws-sdk-go v1.51.16 h1:vnWKK8KjbftEkuPX8bRj3WHsLy1uhotn0eXptpvrxJI=
+github.com/aws/aws-sdk-go v1.51.16/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
+github.com/aws/aws-sdk-go-v2 v1.26.1 h1:5554eUqIYVWpU0YmeeYZ0wU64H2VLBs8TlhRB2L+EkA=
+github.com/aws/aws-sdk-go-v2 v1.26.1/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 h1:x6xsQXGSmW6frevwDA+vi/wqhp1ct18mVXYN08/93to=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2/go.mod h1:lPprDr1e6cJdyYeGXnRaJoP4Md+cDBvi2eOj00BlGmg=
+github.com/aws/aws-sdk-go-v2/config v1.27.11 h1:f47rANd2LQEYHda2ddSCKYId18/8BhSRM4BULGmfgNA=
+github.com/aws/aws-sdk-go-v2/config v1.27.11/go.mod h1:SMsV78RIOYdve1vf36z8LmnszlRWkwMQtomCAI0/mIE=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.11 h1:YuIB1dJNf1Re822rriUOTxopaHHvIq0l/pX3fwO+Tzs=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.11/go.mod h1:AQtFPsDH9bI2O+71anW6EKL+NcD7LG3dpKGMV4SShgo=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 h1:FVJ0r5XTHSmIHJV6KuDmdYhEpvlHpiSd38RQWhut5J4=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1/go.mod h1:zusuAeqezXzAB24LGuzuekqMAEgWkVYukBec3kr3jUg=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.15 h1:7Zwtt/lP3KNRkeZre7soMELMGNoBrutx8nobg1jKWmo=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.15/go.mod h1:436h2adoHb57yd+8W+gYPrrA9U/R/SuAuOO42Ushzhw=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 h1:aw39xVGeRWlWx9EzGVnhOR4yOjQDHPQ6o6NmBlscyQg=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5/go.mod h1:FSaRudD0dXiMPK2UjknVwwTYyZMRsHv3TtkabsZih5I=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 h1:PG1F3OD1szkuQPzDw3CIQsRIrtTlUC3lP84taWzHlq0=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5/go.mod h1:jU1li6RFryMz+so64PpKtudI+QzbKoIEivqdf6LNpOc=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 h1:81KE7vaZzrl7yHBYHVEzYB8sypz11NMOZ40YlWvPxsU=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5/go.mod h1:LIt2rg7Mcgn09Ygbdh/RdIm0rQ+3BNkbP1gyVMFtRK0=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 h1:Ji0DY1xUsUr3I8cHps0G+XM3WWU16lP6yG8qu1GAZAs=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2/go.mod h1:5CsjAbs3NlGQyZNFACh+zztPDI7fU6eW9QsxjfnuBKg=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 h1:ZMeFZ5yk+Ek+jNr1+uwCd2tG89t6oTS5yVWpa6yy2es=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7/go.mod h1:mxV05U+4JiHqIpGqqYXOHLPKUC6bDXC44bsUhNjOEwY=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 h1:ogRAwT1/gxJBcSWDMZlgyFUM962F51A5CRhDLbxLdmo=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7/go.mod h1:YCsIZhXfRPLFFCl5xxY+1T9RKzOKjCut+28JSX2DnAk=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 h1:f9RyWNtS8oH7cZlbn+/JNPpjUk5+5fLd5lM9M0i49Ys=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5/go.mod h1:h5CoMZV2VF297/VLhRhO1WF+XYWOzXo+4HsObA4HjBQ=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1 h1:6cnno47Me9bRykw9AEv9zkXE+5or7jz8TsskTTccbgc=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1/go.mod h1:qmdkIIAC+GCLASF7R2whgNrJADz0QZPX+Seiw/i4S3o=
+github.com/aws/aws-sdk-go-v2/service/sso v1.20.5 h1:vN8hEbpRnL7+Hopy9dzmRle1xmDc7o8tmY0klsr175w=
+github.com/aws/aws-sdk-go-v2/service/sso v1.20.5/go.mod h1:qGzynb/msuZIE8I75DVRCUXw3o3ZyBmUvMwQ2t/BrGM=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 h1:Jux+gDDyi1Lruk+KHF91tK2KCuY61kzoCpvtvJJBtOE=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4/go.mod h1:mUYPBhaF2lGiukDEjJX2BLRRKTmoUSitGDUgM4tRxak=
+github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 h1:cwIxeBttqPN3qkaAjcEcsh8NYr8n2HZPkcKgPAi1phU=
+github.com/aws/aws-sdk-go-v2/service/sts v1.28.6/go.mod h1:FZf1/nKNEkHdGGJP/cI2MoIMquumuRK6ol3QQJNDxmw=
+github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q=
+github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
 github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
 github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -142,16 +742,22 @@ github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnweb
 github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
 github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
 github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
+github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
+github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
+github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
 github.com/caddyserver/caddy v1.0.3/go.mod h1:G+ouvOY32gENkJC+jhgl62TyhvqEsFaDiZ4uw0RzP1E=
 github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
-github.com/census-ecosystem/opencensus-go-exporter-aws v0.0.0-20180411051634-41633bc1ff6b/go.mod h1:icwlHTP1AjScKRxD/s/Qinb7mpbcoUPpqaiBvrSS/QI=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
 github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
 github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
 github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA=
 github.com/checkpoint-restore/go-criu/v4 v4.0.2/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
 github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ=
@@ -168,10 +774,14 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX
 github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
 github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
 github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
 github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
 github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
 github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
@@ -210,6 +820,7 @@ github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwc
 github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
@@ -220,8 +831,6 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
 github.com/daviddengcn/go-colortext v1.0.0/go.mod h1:zDqEI5NVUop5QPpVJUxE9UO10hRnmkD5G4Pmri9+m4c=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/dnaeon/go-vcr v0.0.0-20180504081357-f8a7e8b9c630/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
-github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY=
 github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
 github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
@@ -239,8 +848,9 @@ github.com/elazarl/goproxy v0.0.0-20200421181703-e76ad31c14f6/go.mod h1:Ro8st/El
 github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8=
 github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
 github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw=
 github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emicklei/go-restful/v3 v3.12.0 h1:y2DdzBAURM29NFF94q6RaY4vjIH1rtwDapwQtU84iWk=
+github.com/emicklei/go-restful/v3 v3.12.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
@@ -248,20 +858,29 @@ github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5y
 github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
 github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
 github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
+github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
 github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
+github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo=
+github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w=
 github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw=
 github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
 github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls=
+github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
 github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8=
 github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0=
 github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
 github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
+github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
 github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
@@ -276,14 +895,20 @@ github.com/go-acme/lego v2.5.0+incompatible/go.mod h1:yzMNe9CasVUhkquNvti5nAtPmG
 github.com/go-bindata/go-bindata v3.1.1+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo=
 github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w=
 github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
+github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g=
+github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks=
+github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
+github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
+github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY=
 github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
 github.com/go-ini/ini v1.9.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
-github.com/go-ini/ini v1.37.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
+github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U=
+github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk=
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
 github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
@@ -291,8 +916,11 @@ github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7
 github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
 github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
 github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
+github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4=
 github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
 github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
@@ -307,15 +935,17 @@ github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwds
 github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
 github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
 github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
 github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
+github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
 github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
 github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
 github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
 github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
 github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
-github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM=
 github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
+github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
+github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
 github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
 github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
 github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
@@ -338,15 +968,18 @@ github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/
 github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
 github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
 github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng=
 github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
+github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
 github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
 github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
 github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
 github.com/go-ozzo/ozzo-validation v3.5.0+incompatible/go.mod h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU=
-github.com/go-sql-driver/mysql v0.0.0-20180308100310-1a676ac6e4dc/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M=
+github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
+github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
 github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
@@ -356,11 +989,14 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
 github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
-github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8eU=
 github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
+github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
+github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
 github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
+github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4=
+github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
 github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -377,7 +1013,6 @@ github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt
 github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
 github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
 github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
-github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -394,9 +1029,12 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
 github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
 github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
 github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
 github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAONeMXrxql8uvOKuAZSu8aM5RUGv+1C6IJaEho=
 github.com/golangplus/bytes v1.0.0/go.mod h1:AdRaCFwmc/00ZzELMWb01soso6W1R/++O1XL80yAn+A=
 github.com/golangplus/fmt v1.0.0/go.mod h1:zpM0OfbMCjPtd2qkTD/jX2MgiFCqklhSUFyDW44gVQE=
@@ -407,10 +1045,12 @@ github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
 github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
 github.com/google/cadvisor v0.37.3/go.mod h1:BalYQhwl2UV8lpB3oFssiaW8Uj6sqfFDxw5nEs9sBgU=
 github.com/google/cel-go v0.12.5/go.mod h1:Jk7ljRzLBhkmiAwBoUxB1sZSCVBAzkqPF25olK/iRDw=
-github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54=
+github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
 github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
-github.com/google/go-cloud v0.1.1 h1:fxL7UyMhjMR1S9UucUPme5iFmLzT9HZ/1Un7zZQoo1A=
-github.com/google/go-cloud v0.1.1/go.mod h1:zmCjQNIQ3IDpNX1J/JbFOPyYAX/1BSZv31coUzx5HHw=
+github.com/google/gnostic v0.7.0 h1:d7EpuFp8vVdML+y0JJJYiKeOLjKTdH/GvVkLOBWqJpw=
+github.com/google/gnostic v0.7.0/go.mod h1:IAcUyMl6vtC95f60EZ8oXyqTsOersP6HbwjeG7EyDPM=
+github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 h1:0VpGH+cDhbDtdcweoyCVsF3fhN8kejK6rFe/2FFX2nU=
+github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49/go.mod h1:BkkQ4L1KS1xMt2aWSPStnn55ChGC0DPOn2FQYj+f25M=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -423,17 +1063,26 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
+github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
 github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-replayers/grpcreplay v1.1.0 h1:S5+I3zYyZ+GQz68OfbURDdt/+cSMqCK1wrvNx7WBzTE=
+github.com/google/go-replayers/grpcreplay v1.1.0/go.mod h1:qzAvJ8/wi57zq7gWqaE6AwLM6miiXUQwP1S+I9icmhk=
+github.com/google/go-replayers/httpreplay v1.2.0 h1:VM1wEyyjaoU53BwrOnaf9VhAyQQEEioJvFYxYcLRKzk=
+github.com/google/go-replayers/httpreplay v1.2.0/go.mod h1:WahEFFZZ7a1P4VM1qEeHy+tME4bwyqPcwWbNlUI1Mcg=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
 github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
 github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
 github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ=
 github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
+github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw=
+github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
 github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
@@ -443,6 +1092,7 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf
 github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
 github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
@@ -450,24 +1100,45 @@ github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe
 github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
+github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
+github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
 github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/gax-go v1.0.0/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/wire v0.6.0 h1:HBkoIh4BdSxoyo9PveV8giw7ZsaBOvzWKfcg/6MrVwI=
+github.com/google/wire v0.6.0/go.mod h1:F4QhpQ9EDIdJ1Mbop/NZBRB+5yrR6qg3BnctaoUk6NA=
+github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
+github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
+github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg=
+github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
+github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
+github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
+github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
 github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
-github.com/googleapis/gax-go/v2 v2.1.1 h1:dp3bWCh+PPO1zjRRiCSczJav13sBvG4UhNyVTa1KqdU=
 github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
+github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
+github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
+github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
+github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo=
+github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY=
+github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8=
+github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI=
+github.com/googleapis/gax-go/v2 v2.12.3 h1:5/zPPDvw8Q1SuXjrqrZslrqT7dL/uJT2CQii/cLCKqA=
+github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4=
 github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
+github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
+github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
 github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
-github.com/gopherjs/gopherjs v0.0.0-20180424202546-8dffc02ea1cb/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
-github.com/gorilla/mux v1.6.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
 github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
 github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
 github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
@@ -483,6 +1154,8 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf
 github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
 github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w=
 github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
 github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -507,13 +1180,15 @@ github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/J
 github.com/heketi/heketi v9.0.1-0.20190917153846-c2e2a4ab7ab9+incompatible/go.mod h1:bB9ly3RchcQqsQ9CpyaQwvva7RS5ytVoSoholZQON6o=
 github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6/go.mod h1:xGMAM8JLi7UkZt1i4FQeQy0R2T8GLUwQhOP5M1gBhy4=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
 github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI=
-github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
+github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
+github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
 github.com/ishidawataru/sctp v0.0.0-20190723014705-7c296d48a2b5/go.mod h1:DM4VvS+hD/kDi1U1QsX2fnZowwBhqD0Dk3bRPKF/Oc8=
 github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a/go.mod h1:wK6yTYYcgjHE1Z1QtXACPDjcFJyBskHEdagmnq3vsP8=
 github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
@@ -535,17 +1210,21 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
 github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
 github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/jtolds/gls v0.0.0-20170503224851-77f18212c9c7/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
 github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
 github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
 github.com/karrick/godirwalk v1.7.5/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
 github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
 github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE=
+github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
 github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
+github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -553,6 +1232,10 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
@@ -568,32 +1251,40 @@ github.com/lucas-clemente/aes12 v0.0.0-20171027163421-cd47fb39b79f/go.mod h1:JpH
 github.com/lucas-clemente/quic-clients v0.1.0/go.mod h1:y5xVIEoObKqULIKivu+gD/LU90pL73bTdtQjPBvtCBk=
 github.com/lucas-clemente/quic-go v0.10.2/go.mod h1:hvaRS9IHjFLMq76puFJeWNfmn+H70QZ/CXoxqw9bzao=
 github.com/lucas-clemente/quic-go-certificates v0.0.0-20160823095156-d2f86524cced/go.mod h1:NCcRLrOTZbzhZvixZLlERbJtDtYsmMw8Jc4vS8Z0g58=
+github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
+github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
 github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
 github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
 github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20180723221831-d5012789d665/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
-github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA=
 github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
 github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk=
 github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
 github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
 github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
 github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54=
 github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
+github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
 github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
 github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2/go.mod h1:g4cOPxcjV0oFq3qwpjSA30LReKD8AoIfwAY9VvG35NY=
 github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
 github.com/miekg/dns v1.1.3/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
 github.com/miekg/dns v1.1.4/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
 github.com/mindprince/gonvml v0.0.0-20190828220739-9ebdce4bb989/go.mod h1:2eu9pRWp8mo84xCg6KswZ+USQHjwgRhNp06sozOdsTY=
+github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY=
+github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE=
 github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
 github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
 github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
@@ -633,14 +1324,14 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
 github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0=
 github.com/naoina/toml v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E=
-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
 github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
 github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
 github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
 github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.4 h1:vHD/YYe1Wolo78koG299f7V/VAS08c6IpCLn+Ejf/w8= github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -678,47 +1369,60 @@ github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtP github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= +github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= -github.com/pquerna/ffjson v0.0.0-20180717144149-af8b230fcd20/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= +github.com/prometheus/client_golang v1.19.0/go.mod 
h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.52.2 h1:LW8Vk7BccEdONfrJBDffQGRtpSzi5CQaRZGtboOO2ck= +github.com/prometheus/common v0.52.2/go.mod h1:lrWtQx+iDfn2mbH5GUzlH9TSHyfZpHkSiG1W7y3sF2Q= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.13.0 h1:GqzLlQyfsPbaEHaQkO7tbDlriv/4o5Hudv6OXHGKX7o= +github.com/prometheus/procfs v0.13.0/go.mod h1:cd4PFCR54QLnGKPaKGA6l+cfuNXtht43ZKY6tow0Y1g= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/quobyte/api v0.1.2/go.mod h1:jL7lIHrmqQ7yh05OJ+eEEdHr0u/kmT1Ff9iHd+4H6VI= github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/robfig/cron v1.1.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ= github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= @@ 
-726,11 +1430,17 @@ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6So github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= github.com/russross/blackfriday v0.0.0-20170610170232-067529f716f4/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= +github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= @@ -744,24 +1454,24 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/smartystreets/assertions v0.0.0-20180301161246-7678a5452ebe/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/smartystreets/gunit v0.0.0-20180314194857-6f0d6275bdcd/go.mod h1:XUKj4gbqj2QvJk/OdLWzyZ3FYli0f+MdpngyryX0gcw= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.9.2/go.mod 
h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= -github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q= github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -777,9 +1487,10 @@ github.com/storageos/go-api v0.0.0-20180912212459-343b3eff91fc/go.mod h1:ZrLn+e0 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -787,18 +1498,15 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/thecodeteam/goscaleio v0.1.0/go.mod h1:68sdkZAsK8bvEwBlbQnlLS+xU+hvLYM/iQ8KXej1AwM= -github.com/tidwall/gjson v1.1.2 h1:2cScOmQ0oRDK1idscWbg9Va8xvQ88Lqb73rkgg8scEo= 
-github.com/tidwall/gjson v1.1.2/go.mod h1:c/nTNbUr0E0OrXEhq1pwa8iEgc2DOt4ZZqAt1HtCkPA= -github.com/tidwall/match v0.0.0-20171002075945-1731857f09b1 h1:pWIN9LOlFRCJFqWIOEbHLvY0WWJddsjH2FQ6N0HKZdU= -github.com/tidwall/match v0.0.0-20171002075945-1731857f09b1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tidwall/sjson v1.0.0 h1:hOrzQPtGKlKAudQVmU43GkxEgG8TOgKyiKUyb7sE0rs= -github.com/tidwall/sjson v1.0.0/go.mod h1:bURseu1nuBkFpIES5cz6zBtjmYeOQmEESshn7VpF15Y= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -813,6 +1521,9 @@ github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYp github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vishvananda/netns v0.0.0-20200520041808-52d707b772fe/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v1.1.0 h1:G/1DjNkPpfZCFt9CSh6b5/nY4VimlbHF3Rh4obvtzDk= github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= @@ -824,6 +1535,8 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= @@ -838,27 +1551,40 @@ go.etcd.io/etcd/server/v3 v3.5.4/go.mod h1:S5/YTU15KxymM5l3T6b09sNOHPXqGYIZStpuu go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.opencensus.io v0.12.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod 
h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.50.0 h1:zvpPXY7RfYAGSdYQLjp6zxdJNSYD/+FFoCTQN9IPxBs= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.50.0/go.mod h1:BMn8NB1vsxTljvuorms2hyOs8IBuuBEq0pl7ltOfy30= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0 h1:cEPbyTSEHlQR89XVlyo78gqluF8Y3oMeBkXGWzQsfXY= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0/go.mod h1:DKdbWcT4GH1D0Y3Sqt/PFXt2naRKDWtU+eE6oLdFNA8= go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= +go.opentelemetry.io/otel v1.25.0 h1:gldB5FfhRl7OJQbUHt/8s0a7cE8fbsPAtdpRaApKy4k= +go.opentelemetry.io/otel v1.25.0/go.mod h1:Wa2ds5NOXEMkCmUou1WA7ZBfLTHWIsp034OVD7AO+Vg= go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= +go.opentelemetry.io/otel/metric v1.25.0 h1:LUKbS7ArpFL/I2jJHdJcqMGxkRdxpPHE0VU/D4NuEwA= +go.opentelemetry.io/otel/metric v1.25.0/go.mod h1:rkDLUSd2lC5lq2dFNrX9LGAbINP5B7WBkC78RXCpH5s= go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= +go.opentelemetry.io/otel/sdk v1.22.0 h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw= +go.opentelemetry.io/otel/sdk v1.22.0/go.mod h1:iu7luyVGYovrRpe2fmj3CVKouQNdTOkxtLzPvPz1DOc= go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= +go.opentelemetry.io/otel/trace v1.25.0 h1:tqukZGLwQYRIFtSQM2u2+yfMVTgGVeqRLPUYx1Dq6RM= +go.opentelemetry.io/otel/trace v1.25.0/go.mod h1:hCCs70XM/ljO+BeQkyFnbK28SBIJ/Emuha+ccrCRT7I= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 h1:+FNtrFTmVw0YZGpBGX56XDee331t6JAXeK2bcyhLOOc= go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -871,6 +1597,8 @@ go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9i go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap 
v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +gocloud.dev v0.37.0 h1:XF1rN6R0qZI/9DYjN16Uy0durAmSlf58DHOcb28GPro= +gocloud.dev v0.37.0/go.mod h1:7/O4kqdInCNsc6LqgmuFnS0GRew4XNNYWpA44yQnwco= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -888,11 +1616,18 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd h1:XcWmESyNjXJMLahc3mqVQJcgSTDxFxhETVlfk9uGc38= golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= +golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -901,15 +1636,27 @@ golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp 
v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -933,9 +1680,14 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/net v0.0.0-20180702212446-ed29d75add3d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -982,6 +1734,7 @@ golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= @@ -989,14 +1742,32 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/oauth2 v0.0.0-20180603041954-1e0a3fa8ba9a/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= 
+golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1013,8 +1784,21 @@ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= +golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= +golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/oauth2 v0.19.0 h1:9+E/EZBCbTLNrbN35fHv/a/d/mOBatymz1zbtQrXpIg= +golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1026,9 +1810,15 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180329131831-378d26f46672/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1096,12 +1886,15 @@ golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1110,20 +1903,53 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= +golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1132,17 +1958,29 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 
h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180314180217-d853e8088c62/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1166,6 +2004,7 @@ golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1199,31 +2038,47 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod 
h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= gonum.org/v1/gonum v0.6.2/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= +gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= +gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= -google.golang.org/api v0.0.0-20180606215403-8e9de5a6de6d/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= +gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -1255,17 +2110,44 @@ google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6 google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.56.0/go.mod 
h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/api v0.60.0 h1:eq/zs5WPH4J9undYM9IP1O7dSr7Yh8Y0GtSCpzGzIUk= google.golang.org/api v0.60.0/go.mod h1:d7rl65NZAkEQ90JFzqBjcRq1TVeG5ZoGV3sSpEnnVb4= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= +google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= +google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= +google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= +google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= +google.golang.org/api v0.172.0 h1:/1OcMZGPmW1rX2LCu2CmGUD1KXK1+pfzxotxyRUCCdk= +google.golang.org/api v0.172.0/go.mod h1:+fJZq6QXWfa9pXhnIzsjx4yI22d4aI9ZpLb58gvXjis= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod 
h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180627194029-ff3583edef7d/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1303,10 +2185,13 @@ google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= @@ -1325,9 +2210,90 @@ google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEc google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211021150943-2b146023228c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 h1:hrbNEivu7Zn1pxvHk6MBrq9iE22woVILTHqexqBxe6I= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= 
+google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/grpc v1.13.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto 
v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= +google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230112194545-e10362b5ecf9/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230123190316-2c411cf9d197/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= +google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230223222841-637eb2293923/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto 
v0.0.0-20230303212802-e74f57abe488/go.mod h1:TvhZT5f700eVlTNwND1xoEZQeWTB2RY/65kplwl/bFA= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= +google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda h1:wu/KJm9KJwpfHWhkkZGohVC6KRrc1oJNr4jwtQMOQXw= +google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda/go.mod h1:g2LLCvCeCSir/JJSWosk19BR4NVxGqHUC6rxIRsd7Aw= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= +google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= +google.golang.org/genproto/googleapis/api v0.0.0-20240401170217-c3f982113cda h1:b6F6WIV4xHHD0FA4oIyzU6mHWg2WI2X1RBehwa5QN38= +google.golang.org/genproto/googleapis/api v0.0.0-20240401170217-c3f982113cda/go.mod h1:AHcE/gZH76Bk/ROZhQphlRoWo5xKDEtz3eVEO1LfA8c= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1:LI5DOvAxUPMv/50agcLLoo+AdWc1irS9Rzz4vPuD1V4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1355,9 +2321,22 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0/go.mod 
h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= +google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/grpc v1.63.0 h1:WjKe+dnvABXyPJMD7KDNLxtoGk5tgk+YFWN6cBWjZE8= +google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -1372,21 +2351,25 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.37.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.51.0/go.mod 
h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/mcuadros/go-syslog.v2 v2.2.1/go.mod h1:l5LPIyOOyIdQquNg+oU6Z3524YwrcqEm0aKH+5zpt2U= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= @@ -1419,6 +2402,7 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= k8s.io/api v0.25.3 h1:Q1v5UFfYe87vi5H7NU0p4RXC26PPMT8KOpr1TLQbCMQ= k8s.io/api v0.25.3/go.mod h1:o42gKscFrEVjHdQnyRenACrMtbuJsVdP+WVjqejfzmI= k8s.io/apiextensions-apiserver v0.25.3 h1:bfI4KS31w2f9WM1KLGwnwuVlW3RSRPuIsfNF/3HzR0k= @@ -1447,14 +2431,15 @@ k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= -k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-aggregator v0.25.3/go.mod h1:w87nqmzJMf7S73FRYcnexqfYW0AFiLJiCkvVCwM3feE= k8s.io/kube-controller-manager v0.25.3/go.mod h1:InfGO/O9vIPxpbgd0gUK22xVDsaGnJAUsATzwKk6BNg= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20220401212409-b28bf2818661/go.mod h1:daOouuuwd9JXpv1L7Y34iV3yf6nxzipkKMWWlqlvK9M= -k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA= k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU= +k8s.io/kube-openapi v0.0.0-20240403164606-bc84c2ddaf99 h1:w6nThEmGo9zcL+xH1Tu6pjxJ3K1jXFW+V0u4peqN8ks= +k8s.io/kube-openapi v0.0.0-20240403164606-bc84c2ddaf99/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= k8s.io/kube-proxy v0.25.3/go.mod h1:A/aOKVIY+tivIHk/i6hEF6IyLSDHKGooLnedg4dBJa8= k8s.io/kube-scheduler v0.25.3/go.mod h1:0EKmWTnwNaHnmWwan4bABGQm4XyYpc146XyFWX4ey5E= k8s.io/kubectl v0.25.3 h1:HnWJziEtmsm4JaJiKT33kG0kadx68MXxUE8UEbXnN4U= @@ -1472,20 +2457,55 @@ k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/ k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20221012122500-cfd413dd9e85 h1:cTdVh7LYu82xeClmfzGtgyspNh6UxpwLWGi8R4sspNo= -k8s.io/utils v0.0.0-20221012122500-cfd413dd9e85/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240310230437-4693a0247e57 h1:gbqbevonBh57eILzModw6mrkbwM0gQBEuevE/AaBsHY= +k8s.io/utils v0.0.0-20240310230437-4693a0247e57/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +lukechampine.com/uint128 
v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= +modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= +modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= +modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= +modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= +modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= +modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A= +modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU= +modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= +modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= +modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s= modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= +modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= +modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= +modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= +modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= +modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= +modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.33/go.mod 
h1:soWkSNf2tZC7aMibXEqVhCd73GOY5fJikn8qbdzemB0= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= sigs.k8s.io/kustomize/api v0.12.1 h1:7YM7gW3kYBwtKvoY216ZzY+8hM+lV53LUayghNRJ0vM= sigs.k8s.io/kustomize/api v0.12.1/go.mod h1:y3JUhimkZkR6sbLNwfJHxvo1TCLwuwm14sCYnkH6S1s= @@ -1495,10 +2515,12 @@ sigs.k8s.io/kustomize/kyaml v0.13.9 h1:Qz53EAaFFANyNgyOEJbT/yoIHygK40/ZcvU3rgry2 sigs.k8s.io/kustomize/kyaml v0.13.9/go.mod h1:QsRbD0/KcU+wdk0/L0fIp2KLnohkVzs6fQ85/nOXac4= sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= volcano.sh/volcano v1.1.0 h1:jxLaQEMpvToYrSEOTwBU7R5Vg+tsASdbAdmbqoZY2DY= volcano.sh/volcano v1.1.0/go.mod h1:zddAnaLKfnKMYkyMbdhlb8J3HwGeqvryeLl1tux/G4M= diff --git a/sparkctl/cmd/create.go b/sparkctl/cmd/create.go index 21b56aa5d2..9fb8313a9e 100644 --- a/sparkctl/cmd/create.go +++ b/sparkctl/cmd/create.go @@ -26,8 +26,8 @@ import ( "reflect" "unicode/utf8" - "github.com/google/go-cloud/blob" "github.com/spf13/cobra" + "gocloud.dev/blob" apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" diff --git a/sparkctl/cmd/gcs.go b/sparkctl/cmd/gcs.go index cc5475ce71..e91f87066e 100644 --- a/sparkctl/cmd/gcs.go +++ b/sparkctl/cmd/gcs.go @@ -20,8 +20,8 @@ import ( "fmt" "cloud.google.com/go/storage" - "github.com/google/go-cloud/blob/gcsblob" - "github.com/google/go-cloud/gcp" + "gocloud.dev/blob/gcsblob" + "gocloud.dev/gcp" "golang.org/x/net/context" ) diff --git a/sparkctl/cmd/s3.go b/sparkctl/cmd/s3.go index 8b9544d51f..8bfe586d47 100644 --- a/sparkctl/cmd/s3.go +++ b/sparkctl/cmd/s3.go @@ -20,14 +20,14 @@ import ( "context" "fmt" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/google/go-cloud/blob/s3blob" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/s3" + "gocloud.dev/blob/s3blob" ) type blobS3 struct { - s *session.Session + client *s3.Client } func (blob blobS3) setPublicACL( @@ -35,9 +35,7 @@ func (blob blobS3) setPublicACL( bucket string, filePath string) error { acl := "public-read" - svc := s3.New(blob.s) - - if _, err := 
svc.PutObjectAcl(&s3.PutObjectAclInput{Bucket: &bucket, Key: &filePath, ACL: &acl}); err != nil {
+	if _, err := blob.client.PutObjectAcl(&s3.PutObjectAclInput{Bucket: &bucket, Key: &filePath, ACL: &acl}); err != nil {
 		return fmt.Errorf("failed to set ACL on S3 object %s: %v", filePath, err)
 	}
 
@@ -49,18 +47,29 @@ func newS3Blob(
 	bucket string,
 	endpoint string,
 	region string,
-	forcePathStyle bool) (*uploadHandler, error) {
+	usePathStyle bool) (*uploadHandler, error) {
 	// AWS SDK does require specifying regions, thus set it to default S3 region
 	if region == "" {
 		region = "us-east1"
 	}
-	c := &aws.Config{
-		Region:           aws.String(region),
-		Endpoint:         aws.String(endpoint),
-		S3ForcePathStyle: aws.Bool(forcePathStyle),
-	}
-	sess := session.Must(session.NewSession(c))
-	b, err := s3blob.OpenBucket(ctx, sess, bucket)
+	endpointResolver := aws.EndpointResolverWithOptionsFunc(func(service, region string, options ...interface{}) (aws.Endpoint, error) {
+		if service == s3.ServiceID && endpoint != "" {
+			return aws.Endpoint{
+				PartitionID:   "aws",
+				URL:           endpoint,
+				SigningRegion: region,
+			}, nil
+		}
+		return aws.Endpoint{}, &aws.EndpointNotFoundError{}
+	})
+	conf, err := config.LoadDefaultConfig(
+		ctx, config.WithRegion(region),
+		config.WithEndpointResolverWithOptions(endpointResolver),
+	)
+	if err != nil {
+		return nil, fmt.Errorf("failed to load default AWS config: %v", err)
+	}
+	client := s3.NewFromConfig(conf, func(o *s3.Options) {
+		o.UsePathStyle = usePathStyle
+	})
+	b, err := s3blob.OpenBucketV2(ctx, client, bucket, nil)
 	return &uploadHandler{
-		blob:           blobS3{s: sess},
+		blob:           blobS3{client: client},
 		ctx:            ctx,
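
A minimal, self-contained sketch of the aws-sdk-go-v2 pattern the sparkctl change above adopts: a custom endpoint resolver for S3-compatible backends, path-style addressing, and a bucket opened through gocloud.dev's OpenBucketV2. This is illustrative only and not part of the patch; the endpoint URL, bucket name, and object key are hypothetical placeholders.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"gocloud.dev/blob/s3blob"
)

func main() {
	ctx := context.Background()

	// Hypothetical S3-compatible endpoint (e.g. a local MinIO). With an empty
	// endpoint the resolver falls through to the SDK's default resolution.
	endpoint := "http://localhost:9000"

	resolver := aws.EndpointResolverWithOptionsFunc(
		func(service, region string, options ...interface{}) (aws.Endpoint, error) {
			if service == s3.ServiceID && endpoint != "" {
				return aws.Endpoint{PartitionID: "aws", URL: endpoint, SigningRegion: region}, nil
			}
			return aws.Endpoint{}, &aws.EndpointNotFoundError{}
		})

	cfg, err := config.LoadDefaultConfig(ctx,
		config.WithRegion("us-east-1"),
		config.WithEndpointResolverWithOptions(resolver),
	)
	if err != nil {
		log.Fatalf("failed to load AWS config: %v", err)
	}

	// Non-AWS S3 implementations usually require path-style addressing.
	client := s3.NewFromConfig(cfg, func(o *s3.Options) { o.UsePathStyle = true })

	// gocloud.dev's V2 entry point accepts the SDK v2 client directly.
	bucket, err := s3blob.OpenBucketV2(ctx, client, "my-bucket", nil)
	if err != nil {
		log.Fatalf("failed to open bucket: %v", err)
	}
	defer bucket.Close()

	if err := bucket.WriteAll(ctx, "spark-app.jar", []byte("payload"), nil); err != nil {
		log.Fatalf("upload failed: %v", err)
	}
	fmt.Println("uploaded spark-app.jar")
}
```

Note that the resolver only overrides S3 and only when an endpoint is set; every other call falls through to the SDK's normal resolution, which is why plain AWS usage keeps working when no custom endpoint is configured.

From 8a31a4638cf9a047661568250147f81023c4d1f4 Mon Sep 17 00:00:00 2001
From: Cian Gallagher
Date: Sun, 7 Apr 2024 17:58:26 +0100
Subject: [PATCH 31/87] feat: add support for custom service labels (#1952)

* feat: add support for custom service labels

Signed-off-by: Cian Gallagher

* chore: correctly format files

Signed-off-by: Cian Gallagher

* chore: bump chart version to 1.1.30

Signed-off-by: Cian Gallagher

---------

Signed-off-by: Cian Gallagher
---
 charts/spark-operator-chart/Chart.yaml        |  2 +-
 ...tor.k8s.io_scheduledsparkapplications.yaml |  4 ++
 ...parkoperator.k8s.io_sparkapplications.yaml |  4 ++
 docs/api-docs.md                              | 12 ++++
 examples/spark-pi.yaml                        |  3 +
 go.sum                                        |  1 +
 ...tor.k8s.io_scheduledsparkapplications.yaml |  4 ++
 ...parkoperator.k8s.io_sparkapplications.yaml |  4 ++
 .../sparkoperator.k8s.io/v1beta2/types.go     |  3 +
 .../sparkapplication/sparkapp_util.go         | 10 +++
 pkg/controller/sparkapplication/sparkui.go    |  8 +++
 .../sparkapplication/sparkui_test.go          | 64 +++++++++++++++++++
 test/e2e/basic_test.go                        |  2 +
 13 files changed, 120 insertions(+), 1 deletion(-)

diff --git a/charts/spark-operator-chart/Chart.yaml b/charts/spark-operator-chart/Chart.yaml
index d41ef4b0e6..45fc7de437 100644
--- a/charts/spark-operator-chart/Chart.yaml
+++ b/charts/spark-operator-chart/Chart.yaml
@@ -1,7 +1,7 @@
 apiVersion: v2
 name: spark-operator
 description: A Helm chart for Spark on Kubernetes operator
-version: 1.1.29
+version: 1.1.30
 appVersion: v1beta2-1.3.8-3.1.1
 keywords:
   - spark
diff --git a/charts/spark-operator-chart/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml b/charts/spark-operator-chart/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml
index 20e1e00126..9f04e1dba3 100644
--- a/charts/spark-operator-chart/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml
+++ b/charts/spark-operator-chart/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml
@@ -3750,6 +3750,10 @@ spec:
             additionalProperties:
               type: string
             type: object
+          serviceLabels:
+            additionalProperties:
+              type: string
+            type: object
           ingressAnnotations: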
additionalProperties: type: string diff --git a/charts/spark-operator-chart/crds/sparkoperator.k8s.io_sparkapplications.yaml b/charts/spark-operator-chart/crds/sparkoperator.k8s.io_sparkapplications.yaml index 63e70b276e..fb71683a8b 100644 --- a/charts/spark-operator-chart/crds/sparkoperator.k8s.io_sparkapplications.yaml +++ b/charts/spark-operator-chart/crds/sparkoperator.k8s.io_sparkapplications.yaml @@ -3736,6 +3736,10 @@ spec: additionalProperties: type: string type: object + serviceLabels: + additionalProperties: + type: string + type: object ingressAnnotations: additionalProperties: type: string diff --git a/docs/api-docs.md b/docs/api-docs.md index 0403b6e587..fbedbb8fbd 100644 --- a/docs/api-docs.md +++ b/docs/api-docs.md @@ -3120,6 +3120,18 @@ map[string]string +serviceLabels
+
+map[string]string
+
+(Optional)
+
+ServiceLabels is a map of key,value pairs of labels that might be added to the service object.
+
 ingressAnnotations
map[string]string

diff --git a/examples/spark-pi.yaml b/examples/spark-pi.yaml
index 1f7fafae43..986fe505ff 100644
--- a/examples/spark-pi.yaml
+++ b/examples/spark-pi.yaml
@@ -26,6 +26,9 @@ spec:
   mainClass: org.apache.spark.examples.SparkPi
   mainApplicationFile: "local:///opt/spark/examples/jars/spark-examples_2.12-3.1.1.jar"
   sparkVersion: "3.1.1"
+  sparkUIOptions:
+    serviceLabels:
+      test-label/v1: 'true'
   restartPolicy:
     type: Never
   volumes:
diff --git a/go.sum b/go.sum
index f4d211cf11..fc7758ecd8 100644
--- a/go.sum
+++ b/go.sum
@@ -2427,6 +2427,7 @@ k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8
 k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
 k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
 k8s.io/heapster v1.2.0-beta.1/go.mod h1:h1uhptVXMwC8xtZBYsPXKVi8fpdlYkTs6k949KozGrM=
+k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
 k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
 k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
 k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
diff --git a/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml b/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml
index 20e1e00126..9f04e1dba3 100644
--- a/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml
+++ b/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml
@@ -3750,6 +3750,10 @@ spec:
             additionalProperties:
               type: string
             type: object
+          serviceLabels:
+            additionalProperties:
+              type: string
+            type: object
           ingressAnnotations:
             additionalProperties:
               type: string
diff --git a/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml b/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml
index 63e70b276e..fb71683a8b 100644
--- a/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml
+++ b/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml
@@ -3736,6 +3736,10 @@ spec:
             additionalProperties:
               type: string
             type: object
+          serviceLabels:
+            additionalProperties:
+              type: string
+            type: object
           ingressAnnotations:
             additionalProperties:
               type: string
diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go b/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go
index 2c89fe9482..616c2fc681 100644
--- a/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go
+++ b/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go
@@ -317,6 +317,9 @@ type SparkUIConfiguration struct {
 	// ServiceAnnotations is a map of key,value pairs of annotations that might be added to the service object.
 	// +optional
 	ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"`
+	// ServiceLabels is a map of key,value pairs of labels that might be added to the service object.
+	// +optional
+	ServiceLabels map[string]string `json:"serviceLabels,omitempty"`
 	// IngressAnnotations is a map of key,value pairs of annotations that might be added to the ingress object. i.e.
specify nginx as ingress.class
 	// +optional
 	IngressAnnotations map[string]string `json:"ingressAnnotations,omitempty"`
diff --git a/pkg/controller/sparkapplication/sparkapp_util.go b/pkg/controller/sparkapplication/sparkapp_util.go
index ef41ade98c..30cce2c1c7 100644
--- a/pkg/controller/sparkapplication/sparkapp_util.go
+++ b/pkg/controller/sparkapplication/sparkapp_util.go
@@ -90,6 +90,16 @@ func getServiceAnnotations(app *v1beta2.SparkApplication) map[string]string {
 	return serviceAnnotations
 }
 
+func getServiceLabels(app *v1beta2.SparkApplication) map[string]string {
+	serviceLabels := map[string]string{}
+	if app.Spec.SparkUIOptions != nil && app.Spec.SparkUIOptions.ServiceLabels != nil {
+		for key, value := range app.Spec.SparkUIOptions.ServiceLabels {
+			serviceLabels[key] = value
+		}
+	}
+	return serviceLabels
+}
+
 func getIngressResourceAnnotations(app *v1beta2.SparkApplication) map[string]string {
 	ingressAnnotations := map[string]string{}
 	if app.Spec.SparkUIOptions != nil && app.Spec.SparkUIOptions.IngressAnnotations != nil {
diff --git a/pkg/controller/sparkapplication/sparkui.go b/pkg/controller/sparkapplication/sparkui.go
index c3c856aafa..533c3e953d 100644
--- a/pkg/controller/sparkapplication/sparkui.go
+++ b/pkg/controller/sparkapplication/sparkui.go
@@ -71,6 +71,7 @@ type SparkService struct {
 	targetPort         intstr.IntOrString
 	serviceIP          string
 	serviceAnnotations map[string]string
+	serviceLabels      map[string]string
 }
 
 // SparkIngress encapsulates information about the driver UI ingress.
@@ -285,6 +286,12 @@ func createSparkUIService(
 		service.ObjectMeta.Annotations = serviceAnnotations
 	}
 
+	serviceLabels := getServiceLabels(app)
+	if len(serviceLabels) != 0 {
+		glog.Infof("Adding labels %v to the Spark UI service %s", serviceLabels, service.Name)
+		service.ObjectMeta.Labels = serviceLabels
+	}
+
 	glog.Infof("Creating a service %s for the Spark UI for application %s", service.Name, app.Name)
 	service, err = kubeClient.CoreV1().Services(app.Namespace).Create(context.TODO(), service, metav1.CreateOptions{})
 	if err != nil {
@@ -299,6 +306,7 @@ func createSparkUIService(
 		targetPort: service.Spec.Ports[0].TargetPort,
 		serviceIP:  service.Spec.ClusterIP,
 		serviceAnnotations: serviceAnnotations,
+		serviceLabels:      serviceLabels,
 	}, nil
 }
diff --git a/pkg/controller/sparkapplication/sparkui_test.go b/pkg/controller/sparkapplication/sparkui_test.go
index acec9b7afc..80d8e1075e 100644
--- a/pkg/controller/sparkapplication/sparkui_test.go
+++ b/pkg/controller/sparkapplication/sparkui_test.go
@@ -87,6 +87,10 @@ func TestCreateSparkUIService(t *testing.T) {
 		if !reflect.DeepEqual(serviceAnnotations, test.expectedService.serviceAnnotations) {
 			t.Errorf("%s: unexpected annotations wanted %s got %s", test.name, test.expectedService.serviceAnnotations, serviceAnnotations)
 		}
+		serviceLabels := service.ObjectMeta.Labels
+		if !reflect.DeepEqual(serviceLabels, test.expectedService.serviceLabels) {
+			t.Errorf("%s: unexpected labels wanted %s got %s", test.name, test.expectedService.serviceLabels, serviceLabels)
+		}
 	}
 	defaultPort := defaultSparkWebUIPort
 	defaultPortName := defaultSparkWebUIPortName
@@ -205,6 +209,25 @@ func TestCreateSparkUIService(t *testing.T) {
 			ExecutionAttempts:  1,
 		},
 	}
+	app8 := &v1beta2.SparkApplication{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "foo8",
+			Namespace: "default",
+			UID:       "foo-123",
+		},
+		Spec: v1beta2.SparkApplicationSpec{
+			SparkUIOptions: &v1beta2.SparkUIConfiguration{
+				ServiceLabels: map[string]string{
+					"sparkoperator.k8s.io/app-name": "foo8",
+					"key":                           "value",
+
}, + }, + Status: v1beta2.SparkApplicationStatus{ + SparkApplicationID: "foo-8", + ExecutionAttempts: 1, + }, + } testcases := []testcase{ { name: "service with custom serviceport and serviceport and target port are same", @@ -214,6 +237,9 @@ func TestCreateSparkUIService(t *testing.T) { serviceType: apiv1.ServiceTypeClusterIP, servicePortName: defaultPortName, servicePort: 4041, + serviceLabels: map[string]string{ + "sparkoperator.k8s.io/app-name": "foo1", + }, targetPort: intstr.IntOrString{ Type: intstr.Int, IntVal: int32(4041), @@ -233,6 +259,9 @@ func TestCreateSparkUIService(t *testing.T) { serviceType: apiv1.ServiceTypeClusterIP, servicePortName: defaultPortName, servicePort: int32(defaultPort), + serviceLabels: map[string]string{ + "sparkoperator.k8s.io/app-name": "foo2", + }, }, expectedSelector: map[string]string{ config.SparkAppNameLabel: "foo2", @@ -248,6 +277,9 @@ func TestCreateSparkUIService(t *testing.T) { serviceType: apiv1.ServiceTypeClusterIP, servicePortName: defaultPortName, servicePort: 80, + serviceLabels: map[string]string{ + "sparkoperator.k8s.io/app-name": "foo4", + }, targetPort: intstr.IntOrString{ Type: intstr.Int, IntVal: int32(4041), @@ -267,6 +299,9 @@ func TestCreateSparkUIService(t *testing.T) { serviceType: apiv1.ServiceTypeNodePort, servicePortName: defaultPortName, servicePort: int32(defaultPort), + serviceLabels: map[string]string{ + "sparkoperator.k8s.io/app-name": "foo5", + }, }, expectedSelector: map[string]string{ config.SparkAppNameLabel: "foo5", @@ -282,6 +317,9 @@ func TestCreateSparkUIService(t *testing.T) { serviceType: apiv1.ServiceTypeClusterIP, servicePortName: "http-spark-test", servicePort: int32(80), + serviceLabels: map[string]string{ + "sparkoperator.k8s.io/app-name": "foo6", + }, }, expectedSelector: map[string]string{ config.SparkAppNameLabel: "foo6", @@ -300,6 +338,9 @@ func TestCreateSparkUIService(t *testing.T) { serviceAnnotations: map[string]string{ "key": "value", }, + serviceLabels: map[string]string{ + "sparkoperator.k8s.io/app-name": "foo7", + }, targetPort: intstr.IntOrString{ Type: intstr.Int, IntVal: int32(4041), @@ -311,6 +352,29 @@ func TestCreateSparkUIService(t *testing.T) { }, expectError: false, }, + { + name: "service with custom labels", + app: app8, + expectedService: SparkService{ + serviceName: fmt.Sprintf("%s-ui-svc", app8.GetName()), + serviceType: apiv1.ServiceTypeClusterIP, + servicePortName: defaultPortName, + servicePort: defaultPort, + serviceLabels: map[string]string{ + "sparkoperator.k8s.io/app-name": "foo8", + "key": "value", + }, + targetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: int32(4041), + }, + }, + expectedSelector: map[string]string{ + config.SparkAppNameLabel: "foo8", + config.SparkRoleLabel: config.SparkDriverRole, + }, + expectError: false, + }, { name: "service with bad port configurations", app: app3, diff --git a/test/e2e/basic_test.go b/test/e2e/basic_test.go index 15c72e8e72..db6c703b1a 100644 --- a/test/e2e/basic_test.go +++ b/test/e2e/basic_test.go @@ -18,6 +18,7 @@ package e2e import ( "context" + "log" "strings" "testing" @@ -64,6 +65,7 @@ func TestSubmitSparkPiYaml(t *testing.T) { app, _ := appFramework.GetSparkApplication(framework.SparkApplicationClient, appFramework.SparkTestNamespace, appName) podName := app.Status.DriverInfo.PodName + log.Printf("LABELS: %v", app.ObjectMeta.GetLabels()) rawLogs, err := framework.KubeClient.CoreV1().Pods(appFramework.SparkTestNamespace).GetLogs(podName, &v1.PodLogOptions{}).Do(context.TODO()).Raw() assert.Equal(t, nil, err) 
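
A side note on the feature exercised by the tests above: the sketch below shows, under the v1beta2 types added in this patch, how a SparkApplication declares UI service labels and how they end up on the Service object, mirroring the getServiceLabels/createSparkUIService flow. The application name and label values are illustrative placeholders; this is not part of the patch.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
)

func main() {
	// A SparkApplication that opts in to the new sparkUIOptions.serviceLabels field.
	app := &v1beta2.SparkApplication{
		ObjectMeta: metav1.ObjectMeta{Name: "spark-pi", Namespace: "default"},
		Spec: v1beta2.SparkApplicationSpec{
			SparkUIOptions: &v1beta2.SparkUIConfiguration{
				ServiceLabels: map[string]string{"team": "data-platform"},
			},
		},
	}

	// Mirror what createSparkUIService now does: copy the user-supplied labels
	// onto the UI Service's metadata before the Service is created.
	svc := &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: app.GetName() + "-ui-svc"}}
	if ui := app.Spec.SparkUIOptions; ui != nil && len(ui.ServiceLabels) != 0 {
		labels := map[string]string{}
		for k, v := range ui.ServiceLabels {
			labels[k] = v
		}
		svc.ObjectMeta.Labels = labels
	}

	fmt.Println(svc.ObjectMeta.Labels) // map[team:data-platform]
}
```

Copying into a fresh map, rather than assigning the spec's map directly, keeps later mutations of the Service labels from leaking back into the SparkApplication object.
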
assert.NotEqual(t, -1, strings.Index(string(rawLogs), "Pi is roughly 3")) From 4d1f5c5bc4c01c408872ec256259627238e262cd Mon Sep 17 00:00:00 2001 From: Andrii Chubatiuk Date: Mon, 8 Apr 2024 04:16:27 +0300 Subject: [PATCH 32/87] Cleanup after golang upgrade (#1956) * cleanup after upgrade Signed-off-by: Andrew Chubatiuk * pr comments Signed-off-by: Andrew Chubatiuk * feat: add support for custom service labels (#1952) * feat: add support for custom service labels Signed-off-by: Cian Gallagher * chore: correctly format files Signed-off-by: Cian Gallagher * chore: bump chart version to 1.1.30 Signed-off-by: Cian Gallagher --------- Signed-off-by: Cian Gallagher Signed-off-by: Andrew Chubatiuk --------- Signed-off-by: Andrew Chubatiuk Signed-off-by: Cian Gallagher Co-authored-by: Cian Gallagher --- .github/workflows/main.yaml | 55 +++---- .github/workflows/release.yaml | 139 ++++++++++++------ Dockerfile | 7 +- Dockerfile.rh | 57 ------- Makefile | 6 +- charts/spark-operator-chart/Chart.yaml | 4 +- charts/spark-operator-chart/README.md | 14 +- charts/spark-operator-chart/values.yaml | 2 +- go.mod | 2 +- .../api-docs/api-docs-template/placeholder.go | 2 +- .../v1beta1/zz_generated.deepcopy.go | 1 + .../v1beta2/defaults_test.go | 2 +- .../v1beta2/zz_generated.deepcopy.go | 1 + .../clientset/versioned/fake/register.go | 14 +- .../clientset/versioned/scheme/register.go | 14 +- pkg/controller/sparkapplication/controller.go | 70 ++++----- sparkctl/cmd/create.go | 5 +- sparkctl/cmd/gcs.go | 2 +- sparkctl/cmd/s3.go | 14 +- 19 files changed, 202 insertions(+), 209 deletions(-) delete mode 100644 Dockerfile.rh diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index 9f2afd4f30..9ad6e96a76 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -15,21 +15,10 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout source code - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: fetch-depth: "0" - - name: The API should not change once published - run: | - if ! git diff --quiet origin/master -- pkg/apis/sparkoperator.k8s.io/v1beta1; then - echo "sparkoperator.k8s.io/v1beta1 api has changed" - false - fi - if ! 
git diff --quiet origin/master -- pkg/apis/sparkoperator.k8s.io/v1beta2; then
-            echo "sparkoperator.k8s.io/v1beta2 api has changed"
-            false
-          fi
-
       - name: The API documentation hasn't changed
         run: |
           make build-api-docs
@@ -43,12 +32,12 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout source code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
         with:
           fetch-depth: "0"
 
       - name: Set up Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v5
         with:
           go-version-file: "go.mod"
 
@@ -60,12 +49,12 @@
     runs-on: ubuntu-latest
     steps:
       - name: Checkout source code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
         with:
           fetch-depth: "0"
 
       - name: Set up Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v5
         with:
           go-version-file: "go.mod"
 
@@ -101,14 +90,14 @@
     runs-on: ubuntu-20.04
     steps:
       - name: Checkout source code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
         with:
           fetch-depth: "0"
 
       - name: Install Helm
-        uses: azure/setup-helm@v1
+        uses: azure/setup-helm@v4
         with:
-          version: v3.7.1
+          version: v3.14.3
 
       - name: Produce the helm documentation
         run: |
@@ -119,7 +108,7 @@
           fi
 
       - name: Set up chart-testing
-        uses: helm/chart-testing-action@v2.0.1
+        uses: helm/chart-testing-action@v2.6.1
 
       - name: Print chart-testing version information
         run: ct version
@@ -139,44 +128,44 @@
         run: make detect-crds-drift
 
       - name: setup minikube
-        uses: manusa/actions-setup-minikube@v2.4.2
+        uses: manusa/actions-setup-minikube@v2.10.0
         with:
-          minikube version: "v1.24.0"
-          kubernetes version: "v1.20.8"
+          minikube version: v1.32.0
+          kubernetes version: v1.28.8
           start args: --memory 6g --cpus=2 --addons ingress
           github token: ${{ inputs.github-token }}
 
       - name: Run chart-testing (install)
         run: |
-          docker build -t ghcr.io/googlecloudplatform/spark-operator:local .
-          minikube image load ghcr.io/googlecloudplatform/spark-operator:local
+          docker build -t ghcr.io/kubeflow/spark-operator:local .
+          minikube image load ghcr.io/kubeflow/spark-operator:local
           ct install
 
   integration-test:
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04
     steps:
       - name: Checkout source code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
         with:
           fetch-depth: "0"
 
       - name: Set up Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v5
         with:
           go-version-file: "go.mod"
 
       - name: setup minikube
-        uses: manusa/actions-setup-minikube@v2.4.2
+        uses: manusa/actions-setup-minikube@v2.10.0
         with:
-          minikube version: "v1.24.0"
-          kubernetes version: "v1.20.8"
+          minikube version: v1.32.0
+          kubernetes version: v1.28.8
           start args: --memory 6g --cpus=2 --addons ingress
           github token: ${{ inputs.github-token }}
 
       - name: Build local spark-operator docker image for minikube testing
         run: |
-          docker build -t gcr.io/spark-operator/spark-operator:local .
-          minikube image load gcr.io/spark-operator/spark-operator:local
+          docker build -t ghcr.io/kubeflow/spark-operator:local .
+ minikube image load gcr.io/kubeflow/spark-operator:local # The integration tests are currently broken see: https://github.com/GoogleCloudPlatform/spark-on-k8s-operator/issues/1416 # - name: Run chart-testing (integration test) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 3fd685dc81..2e46f1226f 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -4,73 +4,122 @@ on: push: branches: - master +env: + REGISTRY_IMAGE: ghcr.io/kubeflow/spark-operator jobs: + build-skip-check: + runs-on: ubuntu-latest + outputs: + skip: ${{ steps.skip-check.outputs.skip }} + version: ${{ steps.skip-check.outputs.VERSION_TAG }} + steps: + - name: Check if build should be skipped + id: skip-check + run: | + VERSION_TAG=$(cat charts/spark-operator-chart/Chart.yaml | grep "appVersion: .*" | cut -c13-) + if git rev-parse -q --verify "refs/tags/$VERSION_TAG"; then + echo "Spark-Operator Docker Image Tag $VERSION_TAG already exists!" + echo "skip=true" >> "$GITHUB_OUTPUT" + else + git tag $VERSION_TAG + git push origin $VERSION_TAG + echo "Spark-Operator Docker Image new tag: $VERSION_TAG released" + echo "skip=false" >> "$GITHUB_OUTPUT" + fi + echo "VERSION_TAG=${VERSION_TAG}" >> "$GITHUB_OUTPUT" release: runs-on: ubuntu-latest + needs: + - build-skip-check + if: needs.build-skip-check.outputs.skip == 'false' + strategy: + fail-fast: false + matrix: + platform: + - linux/amd64 + - linux/arm64 steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: - fetch-depth: "0" - + fetch-depth: 1 - name: Configure Git run: | git config user.name "$GITHUB_ACTOR" git config user.email "$GITHUB_ACTOR@users.noreply.github.com" + - name: Set up QEMU + timeout-minutes: 1 + uses: docker/setup-qemu-action@v3 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 - name: Install Helm - uses: azure/setup-helm@v1 + uses: azure/setup-helm@v4 with: - version: v3.7.1 - - # TODO: Maintainer of repository to follow: - # https://github.com/docker/login-action#google-container-registry-gcr to add credentials so - # we can push from github actions - # - name: log in to google container registry - # uses: docker/login-action@v1 - # with: - # registry: gcr.io - # username: ${{ secrets.DOCKER_USERNAME }} - # password: ${{ secrets.DOCKER_PASSWORD }} - + version: v3.14.3 - name: Login to Packages Container registry - uses: docker/login-action@v1 + uses: docker/login-action@v3 with: registry: ghcr.io username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - - name: Build and Push Spark-Operator Docker Image to github container registry + uses: docker/build-push-action@v5 + with: + context: . + platforms: ${{ matrix.platform }} + cache-to: type=gha,mode=max + cache-from: type=gha + push: true + outputs: type=image,name=${{ env.REGISTRY_IMAGE }},push-by-digest=true,name-canonical=true,push=true + - name: Export digest run: | - DOCKER_TAG=$(cat charts/spark-operator-chart/Chart.yaml | grep "appVersion: .*" | cut -c13-) - docker build -t gcr.io/spark-operator/spark-operator:${DOCKER_TAG} . - echo "Ideally, we'd release the docker container at this point, but the maintainer of this repo needs to approve..." - docker tag gcr.io/spark-operator/spark-operator:${DOCKER_TAG} ghcr.io/googlecloudplatform/spark-operator:${DOCKER_TAG} - if ! 
docker pull ghcr.io/googlecloudplatform/spark-operator:${DOCKER_TAG}; then - docker push ghcr.io/googlecloudplatform/spark-operator:${DOCKER_TAG} - else - echo "Spark-Operator Docker Image alredy exists" - fi - + mkdir -p /tmp/digests + digest="${{ steps.build.outputs.digest }}" + touch "/tmp/digests/${digest#sha256:}" + - name: Upload digest + uses: actions/upload-artifact@v4 + with: + name: digests + path: /tmp/digests/* + if-no-files-found: error + retention-days: 1 + publish: + runs-on: ubuntu-latest + needs: + - release + - build-skip-check + if: needs.build-skip-check.outputs.skip == 'false' + steps: + - name: Download digests + uses: actions/download-artifact@v4 + with: + name: digests + path: /tmp/digests + - name: Setup Docker Buildx + uses: docker/setup-buildx-action@v3 + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.REGISTRY_IMAGE }} + tags: preview + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ vars.DOCKER_USER }} + password: ${{ secrets.DOCKER_PASS }} + - name: Create manifest list and push + working-directory: /tmp/digests + run: | + docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \ + $(printf '${{ env.REGISTRY_IMAGE }}@sha256:%s ' *) + - name: Inspect image + run: | + docker buildx imagetools inspect ${{ env.REGISTRY_IMAGE }}:${{ steps.meta.outputs.version }} - name: Release Spark-Operator Helm Chart - uses: helm/chart-releaser-action@v1.1.0 + uses: helm/chart-releaser-action@v1.6.0 env: CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}" CR_RELEASE_NAME_TEMPLATE: "spark-operator-chart-{{ .Version }}" - - - name: Release Spark-Operator Docker Image - run: | - DOCKER_TAG=$(cat charts/spark-operator-chart/Chart.yaml | grep "appVersion: .*" | cut -c13-) - if git rev-parse -q --verify "refs/tags/$DOCKER_TAG"; then - echo "Spark-Operator Docker Image Tag $DOCKER_TAG already exists!" - else - git tag $DOCKER_TAG - git push origin $DOCKER_TAG - echo "Spark-Operator Docker Image new tag: $DOCKER_TAG released" - fi - - - name: Setup tmate session - if: failure() - uses: mxschmitt/action-tmate@v3 - timeout-minutes: 15 diff --git a/Dockerfile b/Dockerfile index 1c97e8a493..1d696970a3 100644 --- a/Dockerfile +++ b/Dockerfile @@ -14,9 +14,9 @@ # limitations under the License. # -ARG SPARK_IMAGE=gcr.io/spark-operator/spark:v3.1.1 +ARG SPARK_IMAGE=spark:3.5.0 -FROM golang:1.19.2-alpine as builder +FROM golang:1.22-alpine as builder WORKDIR /workspace @@ -32,7 +32,8 @@ COPY main.go main.go COPY pkg/ pkg/ # Build -RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o /usr/bin/spark-operator main.go +ARG TARGETARCH +RUN CGO_ENABLED=0 GOOS=linux GOARCH=${TARGETARCH} GO111MODULE=on go build -a -o /usr/bin/spark-operator main.go FROM ${SPARK_IMAGE} USER root diff --git a/Dockerfile.rh b/Dockerfile.rh deleted file mode 100644 index 4fe0110cac..0000000000 --- a/Dockerfile.rh +++ /dev/null @@ -1,57 +0,0 @@ -# syntax=docker/dockerfile:1.0-experimental -# -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# - -# Build an OpenShift image. -# Before running docker build, make sure -# 1. Your Docker version is >= 18.09.3 -# 2. export DOCKER_BUILDKIT=1 - -ARG SPARK_IMAGE=gcr.io/spark-operator/spark:v3.1.1 - -FROM golang:1.14.0-alpine as builder - -WORKDIR /workspace - -# Copy the Go Modules manifests -COPY go.mod go.mod -COPY go.sum go.sum -# Cache deps before building and copying source so that we don't need to re-download as much -# and so that source changes don't invalidate our downloaded layer -RUN go mod download - -# Copy the go source code -COPY main.go main.go -COPY pkg/ pkg/ - -# Build -RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o /usr/bin/spark-operator main.go - -FROM ${SPARK_IMAGE} -COPY --from=builder /usr/bin/spark-operator /usr/bin/ -USER root - -# Comment out the following three lines if you do not have a RedHat subscription. -COPY hack/install_packages.sh / -RUN --mount=target=/opt/spark/credentials,type=secret,id=credentials,required /install_packages.sh -RUN rm /install_packages.sh - -RUN chmod -R u+x /tmp - -COPY hack/gencerts.sh /usr/bin/ -COPY entrypoint.sh /usr/bin/ -USER 185 -ENTRYPOINT ["/usr/bin/entrypoint.sh"] diff --git a/Makefile b/Makefile index c6cd71b2c1..7b21845cdf 100644 --- a/Makefile +++ b/Makefile @@ -6,7 +6,7 @@ SPARK_OPERATOR_GOPATH=/go/src/github.com/GoogleCloudPlatform/spark-on-k8s-operat DEP_VERSION:=`grep DEP_VERSION= Dockerfile | awk -F\" '{print $$2}'` BUILDER=`grep "FROM golang:" Dockerfile | awk '{print $$2}'` UNAME:=`uname | tr '[:upper:]' '[:lower:]'` -REPO=github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg +REPO=github.com/GoogleCloudPlatform/spark-on-k8s-operator all: clean-sparkctl build-sparkctl install-sparkctl @@ -45,7 +45,7 @@ build-api-docs: -out-file /repo/docs/api-docs.md" helm-docs: - docker run --rm --volume "$(pwd):/helm-docs" -u "$(id -u)" jnorwood/helm-docs:latest + docker run --rm --volume "$$(pwd):/helm-docs" -u "$$(id -u)" jnorwood/helm-docs:latest fmt-check: clean @echo "running fmt check"; cd "$(dirname $0)"; \ @@ -62,7 +62,7 @@ detect-crds-drift: clean: @echo "cleaning up caches and output" - go clean -cache -testcache -r -x ./... 2>&1 >/dev/null + go clean -cache -testcache -r -x 2>&1 >/dev/null -rm -rf _output unit-test: clean diff --git a/charts/spark-operator-chart/Chart.yaml b/charts/spark-operator-chart/Chart.yaml index 45fc7de437..25baba297f 100644 --- a/charts/spark-operator-chart/Chart.yaml +++ b/charts/spark-operator-chart/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 name: spark-operator description: A Helm chart for Spark on Kubernetes operator -version: 1.1.30 -appVersion: v1beta2-1.3.8-3.1.1 +version: 1.2.0 +appVersion: v1beta2-1.3.8-3.5.0 keywords: - spark home: https://github.com/GoogleCloudPlatform/spark-on-k8s-operator diff --git a/charts/spark-operator-chart/README.md b/charts/spark-operator-chart/README.md index 004d1248e7..c4be8dd6b4 100644 --- a/charts/spark-operator-chart/README.md +++ b/charts/spark-operator-chart/README.md @@ -83,9 +83,10 @@ All charts linted successfully | batchScheduler.enable | bool | `false` | Enable batch scheduler for spark jobs scheduling. 
If enabled, users can specify batch scheduler name in spark application | | commonLabels | object | `{}` | Common labels to add to the resources | | controllerThreads | int | `10` | Operator concurrency, higher values might increase memory usage | +| envFrom | list | `[]` | Pod environment variable sources | | fullnameOverride | string | `""` | String to override release name | | image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | -| image.repository | string | `"gcr.io/spark-operator/spark-operator"` | Image repository | +| image.repository | string | `"ghcr.io/kubeflow/spark-operator"` | Image repository | | image.tag | string | `""` | if set, override the image tag whose default is the chart appVersion. | | imagePullSecrets | list | `[]` | Image pull secrets | | ingressUrlFormat | string | `""` | Ingress URL format. Requires the UI service to be enabled by setting `uiService.enable` to true. | @@ -109,10 +110,10 @@ All charts linted successfully | podMonitor.labels | object | `{}` | Pod monitor labels | | podMonitor.podMetricsEndpoint | object | `{"interval":"5s","scheme":"http"}` | Prometheus metrics endpoint properties. `metrics.portName` will be used as a port | | podSecurityContext | object | `{}` | Pod security context | +| rbac.annotations | object | `{}` | Optional annotations for rbac | | rbac.create | bool | `false` | **DEPRECATED** use `createRole` and `createClusterRole` | | rbac.createClusterRole | bool | `true` | Create and use RBAC `ClusterRole` resources | | rbac.createRole | bool | `true` | Create and use RBAC `Role` resources | -| rbac.annotations | object | `{}` | Optional annotations for the spark rbac | | replicaCount | int | `1` | Desired number of pods, leaderElection will be enabled if this is greater than 1 | | resourceQuotaEnforcement.enable | bool | `false` | Whether to enable the ResourceQuota enforcement for SparkApplication resources. Requires the webhook to be enabled by setting `webhook.enable` to true. Ref: https://github.com/GoogleCloudPlatform/spark-on-k8s-operator/blob/master/docs/user-guide.md#enabling-resource-quota-enforcement. | | resources | object | `{}` | Pod resource requests and limits Note, that each job submission will spawn a JVM within the Spark Operator Pod using "/usr/local/openjdk-11/bin/java -Xmx128m". Kubernetes may kill these Java processes at will to enforce resource limits. When that happens, you will see the following error: 'failed to run spark-submit for SparkApplication [...]: signal: killed' - when this happens, you may want to increase memory limits. 
| @@ -124,16 +125,19 @@ All charts linted successfully | serviceAccounts.sparkoperator.annotations | object | `{}` | Optional annotations for the operator service account | | serviceAccounts.sparkoperator.create | bool | `true` | Create a service account for the operator | | serviceAccounts.sparkoperator.name | string | `""` | Optional name for the operator service account | +| sidecars | list | `[]` | Sidecar containers | | sparkJobNamespace | string | `""` | Set this if running spark jobs in a different namespace than the operator | | tolerations | list | `[]` | List of node taints to tolerate | | uiService.enable | bool | `true` | Enable UI service creation for Spark application | +| volumeMounts | list | `[]` | | +| volumes | list | `[]` | | | webhook.cleanupAnnotations | object | `{"helm.sh/hook":"pre-delete, pre-upgrade","helm.sh/hook-delete-policy":"hook-succeeded"}` | The annotations applied to the cleanup job, required for helm lifecycle hooks | | webhook.cleanupPodLabels | object | `{}` | The podLabels applied to the pod of the cleanup job | -| webhook.cleanupResources | object | `{}` | Cleanup job Pod resource requests and limits | +| webhook.cleanupResources | object | `{}` | Resources applied to cleanup job | | webhook.enable | bool | `false` | Enable webhook server | | webhook.initAnnotations | object | `{"helm.sh/hook":"pre-install, pre-upgrade","helm.sh/hook-weight":"50"}` | The annotations applied to init job, required to restore certs deleted by the cleanup job during upgrade | | webhook.initPodLabels | object | `{}` | The podLabels applied to the pod of the init job | -| webhook.initResources | object | `{}` | Init job Pod resource requests and limits | +| webhook.initResources | object | `{}` | Resources applied to init job | | webhook.namespaceSelector | string | `""` | The webhook server will only operate on namespaces with this label, specified in the form key1=value1,key2=value2. Empty string (default) will operate on all namespaces | | webhook.port | int | `8080` | Webhook service port | | webhook.timeout | int | `30` | | @@ -142,4 +146,4 @@ All charts linted successfully | Name | Email | Url | | ---- | ------ | --- | -| yuchaoran2011 | yuchaoran2011@gmail.com | | +| yuchaoran2011 | | | diff --git a/charts/spark-operator-chart/values.yaml b/charts/spark-operator-chart/values.yaml index ba20921d31..da71ecd0cf 100644 --- a/charts/spark-operator-chart/values.yaml +++ b/charts/spark-operator-chart/values.yaml @@ -11,7 +11,7 @@ replicaCount: 1 image: # -- Image repository - repository: ghcr.io/googlecloudplatform/spark-operator + repository: ghcr.io/kubeflow/spark-operator # -- Image pull policy pullPolicy: IfNotPresent # -- if set, override the image tag whose default is the chart appVersion. diff --git a/go.mod b/go.mod index b07b1bb18c..4d9d7a2846 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( k8s.io/apiextensions-apiserver v0.29.3 k8s.io/apimachinery v0.29.3 k8s.io/client-go v1.5.2 - k8s.io/kubectl v0.25.3 + k8s.io/kubectl v0.29.3 k8s.io/kubernetes v1.29.3 k8s.io/utils v0.0.0-20240310230437-4693a0247e57 volcano.sh/volcano v1.1.0 diff --git a/hack/api-docs/api-docs-template/placeholder.go b/hack/api-docs/api-docs-template/placeholder.go index 914ab1975e..cc8f1453ac 100644 --- a/hack/api-docs/api-docs-template/placeholder.go +++ b/hack/api-docs/api-docs-template/placeholder.go @@ -1,2 +1,2 @@ // Placeholder file to make Go vendor this directory properly. 
-package template \ No newline at end of file +package template diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta1/zz_generated.deepcopy.go b/pkg/apis/sparkoperator.k8s.io/v1beta1/zz_generated.deepcopy.go index d0f1528159..7d7c09d03c 100644 --- a/pkg/apis/sparkoperator.k8s.io/v1beta1/zz_generated.deepcopy.go +++ b/pkg/apis/sparkoperator.k8s.io/v1beta1/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated // Code generated by k8s code-generator DO NOT EDIT. diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta2/defaults_test.go b/pkg/apis/sparkoperator.k8s.io/v1beta2/defaults_test.go index f852952c1a..6c1be13de6 100644 --- a/pkg/apis/sparkoperator.k8s.io/v1beta2/defaults_test.go +++ b/pkg/apis/sparkoperator.k8s.io/v1beta2/defaults_test.go @@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta2/zz_generated.deepcopy.go b/pkg/apis/sparkoperator.k8s.io/v1beta2/zz_generated.deepcopy.go index 80995147a1..308afff898 100644 --- a/pkg/apis/sparkoperator.k8s.io/v1beta2/zz_generated.deepcopy.go +++ b/pkg/apis/sparkoperator.k8s.io/v1beta2/zz_generated.deepcopy.go @@ -1,3 +1,4 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated // Code generated by k8s code-generator DO NOT EDIT. diff --git a/pkg/client/clientset/versioned/fake/register.go b/pkg/client/clientset/versioned/fake/register.go index d238bb8763..d2ff4ab5ad 100644 --- a/pkg/client/clientset/versioned/fake/register.go +++ b/pkg/client/clientset/versioned/fake/register.go @@ -41,14 +41,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/pkg/client/clientset/versioned/scheme/register.go b/pkg/client/clientset/versioned/scheme/register.go index 9f2aad9c03..cbcdafc8a2 100644 --- a/pkg/client/clientset/versioned/scheme/register.go +++ b/pkg/client/clientset/versioned/scheme/register.go @@ -41,14 +41,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. 
This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/pkg/controller/sparkapplication/controller.go b/pkg/controller/sparkapplication/controller.go index 024882f037..f6f3732978 100644 --- a/pkg/controller/sparkapplication/controller.go +++ b/pkg/controller/sparkapplication/controller.go @@ -483,41 +483,41 @@ func shouldRetry(app *v1beta2.SparkApplication) bool { } // State Machine for SparkApplication: -//+--------------------------------------------------------------------------------------------------------------------+ -//| +---------------------------------------------------------------------------------------------+ | -//| | +----------+ | | -//| | | | | | -//| | | | | | -//| | |Submission| | | -//| | +----> Failed +----+------------------------------------------------------------------+ | | -//| | | | | | | | | -//| | | | | | | | | -//| | | +----^-----+ | +-----------------------------------------+ | | | -//| | | | | | | | | | -//| | | | | | | | | | -//| +-+--+----+ | +-----v--+-+ +----------+ +-----v-----+ +----v--v--+ | -//| | | | | | | | | | | | | -//| | | | | | | | | | | | | -//| | New +---------> Submitted+----------> Running +-----------> Failing +----------> Failed | | -//| | | | | | | | | | | | | -//| | | | | | | | | | | | | -//| | | | | | | | | | | | | -//| +---------+ | +----^-----+ +-----+----+ +-----+-----+ +----------+ | -//| | | | | | -//| | | | | | -//| +------------+ | | +-------------------------------+ | -//| | | | +-----+-----+ | | +-----------+ +----------+ | -//| | | | | Pending | | | | | | | | -//| | | +---+ Rerun <-------+ +---------------->Succeeding +---------->Completed | | -//| |Invalidating| | <-------+ | | | | | -//| | +-------> | | | | | | | -//| | | | | | | | | | | -//| | | +-----------+ | +-----+-----+ +----------+ | -//| +------------+ | | | -//| | | | -//| +-------------------------------+ | -//| | -//+--------------------------------------------------------------------------------------------------------------------+ +// +--------------------------------------------------------------------------------------------------------------------+ +// | +---------------------------------------------------------------------------------------------+ | +// | | +----------+ | | +// | | | | | | +// | | | | | | +// | | |Submission| | | +// | | +----> Failed +----+------------------------------------------------------------------+ | | +// | | | | | | | | | +// | | | | | | | | | +// | | | +----^-----+ | +-----------------------------------------+ | | | +// | | | | | | | | | | +// | | | | | | | | | | +// | +-+--+----+ | +-----v--+-+ +----------+ +-----v-----+ +----v--v--+ | +// | | | | | | | | | | | | | +// | | | | | | | | | | | | | +// | | New +---------> Submitted+----------> Running +-----------> 
Failing +----------> Failed | | +// | | | | | | | | | | | | | +// | | | | | | | | | | | | | +// | | | | | | | | | | | | | +// | +---------+ | +----^-----+ +-----+----+ +-----+-----+ +----------+ | +// | | | | | | +// | | | | | | +// | +------------+ | | +-------------------------------+ | +// | | | | +-----+-----+ | | +-----------+ +----------+ | +// | | | | | Pending | | | | | | | | +// | | | +---+ Rerun <-------+ +---------------->Succeeding +---------->Completed | | +// | |Invalidating| | <-------+ | | | | | +// | | +-------> | | | | | | | +// | | | | | | | | | | | +// | | | +-----------+ | +-----+-----+ +----------+ | +// | +------------+ | | | +// | | | | +// | +-------------------------------+ | +// | | +// +--------------------------------------------------------------------------------------------------------------------+ func (c *Controller) syncSparkApplication(key string) error { namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { diff --git a/sparkctl/cmd/create.go b/sparkctl/cmd/create.go index 9fb8313a9e..e70ca5c31e 100644 --- a/sparkctl/cmd/create.go +++ b/sparkctl/cmd/create.go @@ -28,6 +28,7 @@ import ( "github.com/spf13/cobra" "gocloud.dev/blob" + "gocloud.dev/gcerrors" apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -323,11 +324,11 @@ func (uh uploadHandler) uploadToBucket(uploadPath, localFilePath string) (string uploadFilePath := filepath.Join(uploadPath, fileName) // Check if exists by trying to fetch metadata - reader, err := uh.b.NewRangeReader(uh.ctx, uploadFilePath, 0, 0) + reader, err := uh.b.NewRangeReader(uh.ctx, uploadFilePath, 0, 0, nil) if err == nil { reader.Close() } - if (blob.IsNotExist(err)) || (err == nil && Override) { + if (gcerrors.Code(err) == gcerrors.NotFound) || (err == nil && Override) { fmt.Printf("uploading local file: %s\n", fileName) // Prepare the file for upload. 
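The create.go hunk above tracks two gocloud.dev API changes: `Bucket.NewRangeReader` grew a fifth `*blob.ReaderOptions` argument, and the removed `blob.IsNotExist` helper is replaced by error classification through `gocloud.dev/gcerrors`. A minimal sketch of the resulting existence-check pattern, assuming a `*blob.Bucket` named `b`; the `objectExists` helper name is ours for illustration, not part of the patch:

```go
package blobutil

import (
	"context"

	"gocloud.dev/blob"
	"gocloud.dev/gcerrors"
)

// objectExists probes key by opening a zero-length range reader, so no
// object data is transferred. gcerrors.Code replaces the old
// blob.IsNotExist check for distinguishing "missing" from real errors.
func objectExists(ctx context.Context, b *blob.Bucket, key string) (bool, error) {
	r, err := b.NewRangeReader(ctx, key, 0, 0, nil) // nil: default ReaderOptions
	if err == nil {
		r.Close()
		return true, nil
	}
	if gcerrors.Code(err) == gcerrors.NotFound {
		return false, nil
	}
	return false, err
}
```

The same probe works against any backend sparkctl opens (GCS or S3), which is why the hunk keeps a single code path rather than provider-specific checks.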
diff --git a/sparkctl/cmd/gcs.go b/sparkctl/cmd/gcs.go index e91f87066e..3fa2c35ff1 100644 --- a/sparkctl/cmd/gcs.go +++ b/sparkctl/cmd/gcs.go @@ -68,7 +68,7 @@ func newGCSBlob( return nil, err } - b, err := gcsblob.OpenBucket(ctx, bucket, c) + b, err := gcsblob.OpenBucket(ctx, c, bucket, nil) return &uploadHandler{ blob: blobGCS{endpoint: endpoint, region: region, projectId: string(projectId)}, ctx: ctx, diff --git a/sparkctl/cmd/s3.go b/sparkctl/cmd/s3.go index 8bfe586d47..abc92cc4c9 100644 --- a/sparkctl/cmd/s3.go +++ b/sparkctl/cmd/s3.go @@ -23,6 +23,7 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/types" "gocloud.dev/blob/s3blob" ) @@ -34,8 +35,8 @@ func (blob blobS3) setPublicACL( ctx context.Context, bucket string, filePath string) error { - acl := "public-read" - if _, err := blob.client.PutObjectAcl(&s3.PutObjectAclInput{Bucket: &bucket, Key: &filePath, ACL: &acl}); err != nil { + acl := types.ObjectCannedACLPublicRead + if _, err := blob.client.PutObjectAcl(ctx, &s3.PutObjectAclInput{Bucket: &bucket, Key: &filePath, ACL: acl}); err != nil { return fmt.Errorf("failed to set ACL on S3 object %s: %v", filePath, err) } @@ -62,16 +63,19 @@ func newS3Blob( } return aws.Endpoint{}, &aws.EndpointNotFoundError{} }) - c := config.LoadDefaultConfig( + conf, err := config.LoadDefaultConfig( ctx, config.WithRegion(region), config.WithEndpointResolverWithOptions(endpointResolver), ) + if err != nil { + return nil, err + } client := s3.NewFromConfig(conf, func(o *s3.Options) { o.UsePathStyle = usePathStyle }) - b, err := s3blob.OpenBucketV2(ctx, client, bucket) + b, err := s3blob.OpenBucketV2(ctx, client, bucket, nil) return &uploadHandler{ - blob: blobS3{s: sess}, + blob: blobS3{client: client}, ctx: ctx, b: b, blobUploadBucket: bucket, From 1591668b2fc3ead2b502a4d48b06f8e1bcf0e53c Mon Sep 17 00:00:00 2001 From: Aakcht Date: Mon, 8 Apr 2024 09:20:27 +0300 Subject: [PATCH 33/87] Chart: add patch permissions for spark operator SA to support spark 3.5.0 (#1884) Signed-off-by: aakcht --- charts/spark-operator-chart/Chart.yaml | 2 +- charts/spark-operator-chart/templates/rbac.yaml | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/charts/spark-operator-chart/Chart.yaml b/charts/spark-operator-chart/Chart.yaml index 25baba297f..dca24945b1 100644 --- a/charts/spark-operator-chart/Chart.yaml +++ b/charts/spark-operator-chart/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: spark-operator description: A Helm chart for Spark on Kubernetes operator -version: 1.2.0 +version: 1.2.1 appVersion: v1beta2-1.3.8-3.5.0 keywords: - spark diff --git a/charts/spark-operator-chart/templates/rbac.yaml b/charts/spark-operator-chart/templates/rbac.yaml index f19e5f1cd1..342270f8f7 100644 --- a/charts/spark-operator-chart/templates/rbac.yaml +++ b/charts/spark-operator-chart/templates/rbac.yaml @@ -31,6 +31,7 @@ rules: - get - delete - update + - patch - apiGroups: - extensions - networking.k8s.io From 7e34e7ea1edddbd1b18428b6da9a2d44ef4f2958 Mon Sep 17 00:00:00 2001 From: Zev Isert Date: Mon, 8 Apr 2024 11:01:27 -0700 Subject: [PATCH 34/87] chore: replace GoogleCloudPlatform/spark-on-k8s-operator with kubeflow/spark-operator (#1937) Signed-off-by: Zev Isert --- .github/workflows/main.yaml | 3 +-- Makefile | 6 +++--- README.md | 4 ++-- charts/spark-operator-chart/Chart.yaml | 2 +- charts/spark-operator-chart/README.md | 8 ++++---- charts/spark-operator-chart/README.md.gotmpl | 2 +- 
...ator.k8s.io_scheduledsparkapplications.yaml | 2 +- ...sparkoperator.k8s.io_sparkapplications.yaml | 2 +- charts/spark-operator-chart/values.yaml | 4 ++-- docs/api-docs.md | 4 ++-- docs/developer-guide.md | 8 ++++---- docs/quick-start-guide.md | 4 ++-- docs/user-guide.md | 4 ++-- docs/volcano-integration.md | 2 +- go.mod | 2 +- hack/update-codegen.sh | 2 +- main.go | 16 ++++++++-------- ...ator.k8s.io_scheduledsparkapplications.yaml | 2 +- ...sparkoperator.k8s.io_sparkapplications.yaml | 2 +- .../sparkoperator.k8s.io/v1beta1/register.go | 2 +- .../sparkoperator.k8s.io/v1beta2/register.go | 2 +- pkg/batchscheduler/interface/interface.go | 2 +- pkg/batchscheduler/scheduler_manager.go | 4 ++-- .../volcano/volcano_scheduler.go | 4 ++-- .../volcano/volcano_scheduler_test.go | 2 +- pkg/client/clientset/versioned/clientset.go | 4 ++-- .../versioned/fake/clientset_generated.go | 10 +++++----- .../clientset/versioned/fake/register.go | 4 ++-- .../clientset/versioned/scheme/register.go | 4 ++-- .../fake/fake_scheduledsparkapplication.go | 2 +- .../v1beta1/fake/fake_sparkapplication.go | 2 +- .../fake/fake_sparkoperator.k8s.io_client.go | 2 +- .../v1beta1/scheduledsparkapplication.go | 4 ++-- .../v1beta1/sparkapplication.go | 4 ++-- .../v1beta1/sparkoperator.k8s.io_client.go | 4 ++-- .../fake/fake_scheduledsparkapplication.go | 2 +- .../v1beta2/fake/fake_sparkapplication.go | 2 +- .../fake/fake_sparkoperator.k8s.io_client.go | 2 +- .../v1beta2/scheduledsparkapplication.go | 4 ++-- .../v1beta2/sparkapplication.go | 4 ++-- .../v1beta2/sparkoperator.k8s.io_client.go | 4 ++-- .../informers/externalversions/factory.go | 6 +++--- .../informers/externalversions/generic.go | 4 ++-- .../internalinterfaces/factory_interfaces.go | 2 +- .../sparkoperator.k8s.io/interface.go | 6 +++--- .../sparkoperator.k8s.io/v1beta1/interface.go | 2 +- .../v1beta1/scheduledsparkapplication.go | 8 ++++---- .../v1beta1/sparkapplication.go | 8 ++++---- .../sparkoperator.k8s.io/v1beta2/interface.go | 2 +- .../v1beta2/scheduledsparkapplication.go | 8 ++++---- .../v1beta2/sparkapplication.go | 8 ++++---- .../v1beta1/scheduledsparkapplication.go | 2 +- .../v1beta1/sparkapplication.go | 2 +- .../v1beta2/scheduledsparkapplication.go | 2 +- .../v1beta2/sparkapplication.go | 2 +- pkg/config/config.go | 2 +- pkg/config/config_test.go | 2 +- pkg/config/secret.go | 2 +- pkg/config/secret_test.go | 2 +- .../scheduledsparkapplication/controller.go | 12 ++++++------ .../controller_test.go | 8 ++++---- .../controller_util.go | 2 +- pkg/controller/sparkapplication/controller.go | 18 +++++++++--------- .../sparkapplication/controller_test.go | 10 +++++----- .../sparkapplication/monitoring_config.go | 4 ++-- .../sparkapplication/monitoring_config_test.go | 4 ++-- .../sparkapplication/spark_pod_eventhandler.go | 4 ++-- .../spark_pod_eventhandler_test.go | 2 +- .../sparkapplication/sparkapp_metrics.go | 4 ++-- .../sparkapplication/sparkapp_metrics_test.go | 2 +- .../sparkapplication/sparkapp_util.go | 4 ++-- .../sparkapplication/sparkapp_util_test.go | 2 +- pkg/controller/sparkapplication/sparkui.go | 6 +++--- .../sparkapplication/sparkui_test.go | 6 +++--- pkg/controller/sparkapplication/submission.go | 4 ++-- .../sparkapplication/submission_test.go | 4 ++-- pkg/util/util.go | 4 ++-- pkg/webhook/patch.go | 6 +++--- pkg/webhook/patch_test.go | 4 ++-- pkg/webhook/resourceusage/enforcer.go | 4 ++-- pkg/webhook/resourceusage/handlers.go | 2 +- pkg/webhook/resourceusage/util.go | 4 ++-- pkg/webhook/resourceusage/watcher.go | 2 +- pkg/webhook/webhook.go 
| 14 +++++++------- pkg/webhook/webhook_test.go | 8 ++++---- sparkctl/README.md | 2 +- sparkctl/cmd/client.go | 4 ++-- sparkctl/cmd/create.go | 4 ++-- sparkctl/cmd/create_test.go | 2 +- sparkctl/cmd/delete.go | 2 +- sparkctl/cmd/event.go | 2 +- sparkctl/cmd/forward.go | 2 +- sparkctl/cmd/list.go | 2 +- sparkctl/cmd/log.go | 2 +- sparkctl/cmd/status.go | 4 ++-- sparkctl/main.go | 2 +- test/e2e/basic_test.go | 2 +- test/e2e/framework/framework.go | 2 +- test/e2e/framework/sparkapplication.go | 4 ++-- test/e2e/lifecycle_test.go | 4 ++-- test/e2e/main_test.go | 4 ++-- test/e2e/volume_mount_test.go | 2 +- 102 files changed, 206 insertions(+), 207 deletions(-) diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index 9ad6e96a76..46918dc3f3 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -10,7 +10,6 @@ on: - master jobs: - build-api-docs: runs-on: ubuntu-latest steps: @@ -167,7 +166,7 @@ jobs: docker build -t gcr.io/kubeflow/spark-operator:local . minikube image load gcr.io/kubeflow/spark-operator:local - # The integration tests are currently broken see: https://github.com/GoogleCloudPlatform/spark-on-k8s-operator/issues/1416 + # The integration tests are currently broken see: https://github.com/kubeflow/spark-operator/issues/1416 # - name: Run chart-testing (integration test) # run: make integation-test diff --git a/Makefile b/Makefile index 7b21845cdf..3f2a5206c0 100644 --- a/Makefile +++ b/Makefile @@ -2,11 +2,11 @@ .SILENT: .PHONY: clean-sparkctl -SPARK_OPERATOR_GOPATH=/go/src/github.com/GoogleCloudPlatform/spark-on-k8s-operator +SPARK_OPERATOR_GOPATH=/go/src/github.com/kubeflow/spark-operator DEP_VERSION:=`grep DEP_VERSION= Dockerfile | awk -F\" '{print $$2}'` BUILDER=`grep "FROM golang:" Dockerfile | awk '{print $$2}'` UNAME:=`uname | tr '[:upper:]' '[:lower:]'` -REPO=github.com/GoogleCloudPlatform/spark-on-k8s-operator +REPO=github.com/kubeflow/spark-operator all: clean-sparkctl build-sparkctl install-sparkctl @@ -40,7 +40,7 @@ build-api-docs: docker run -v $$(pwd):/repo/ temp-api-ref-docs \ sh -c "cd /repo/ && /go/gen-crd-api-reference-docs/gen-crd-api-reference-docs \ -config /repo/hack/api-docs/api-docs-config.json \ - -api-dir github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2 \ + -api-dir github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2 \ -template-dir /repo/hack/api-docs/api-docs-template \ -out-file /repo/docs/api-docs.md" diff --git a/README.md b/README.md index 7c9d07c541..f8c5f900bd 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -[![Go Report Card](https://goreportcard.com/badge/github.com/GoogleCloudPlatform/spark-on-k8s-operator)](https://goreportcard.com/report/github.com/GoogleCloudPlatform/spark-on-k8s-operator) +[![Go Report Card](https://goreportcard.com/badge/github.com/kubeflow/spark-operator)](https://goreportcard.com/report/github.com/kubeflow/spark-operator) **This is not an officially supported Google product.** @@ -28,7 +28,7 @@ Customization of Spark pods, e.g., mounting arbitrary volumes and setting pod af The easiest way to install the Kubernetes Operator for Apache Spark is to use the Helm [chart](charts/spark-operator-chart/). 
```bash -$ helm repo add spark-operator https://googlecloudplatform.github.io/spark-on-k8s-operator +$ helm repo add spark-operator https://kubeflow.github.io/spark-operator $ helm install my-release spark-operator/spark-operator --namespace spark-operator --create-namespace ``` diff --git a/charts/spark-operator-chart/Chart.yaml b/charts/spark-operator-chart/Chart.yaml index dca24945b1..7a8ea4364d 100644 --- a/charts/spark-operator-chart/Chart.yaml +++ b/charts/spark-operator-chart/Chart.yaml @@ -5,7 +5,7 @@ version: 1.2.1 appVersion: v1beta2-1.3.8-3.5.0 keywords: - spark -home: https://github.com/GoogleCloudPlatform/spark-on-k8s-operator +home: https://github.com/kubeflow/spark-operator maintainers: - name: yuchaoran2011 email: yuchaoran2011@gmail.com diff --git a/charts/spark-operator-chart/README.md b/charts/spark-operator-chart/README.md index c4be8dd6b4..7e98f381b2 100644 --- a/charts/spark-operator-chart/README.md +++ b/charts/spark-operator-chart/README.md @@ -4,7 +4,7 @@ A Helm chart for Spark on Kubernetes operator ## Introduction -This chart bootstraps a [Kubernetes Operator for Apache Spark](https://github.com/GoogleCloudPlatform/spark-on-k8s-operator) deployment using the [Helm](https://helm.sh) package manager. +This chart bootstraps a [Kubernetes Operator for Apache Spark](https://github.com/kubeflow/spark-operator) deployment using the [Helm](https://helm.sh) package manager. ## Prerequisites @@ -23,7 +23,7 @@ The previous `spark-operator` Helm chart hosted at [helm/charts](https://github. ```shell -$ helm repo add spark-operator https://googlecloudplatform.github.io/spark-on-k8s-operator +$ helm repo add spark-operator https://kubeflow.github.io/spark-operator $ helm install my-release spark-operator/spark-operator ``` @@ -92,7 +92,7 @@ All charts linted successfully | ingressUrlFormat | string | `""` | Ingress URL format. Requires the UI service to be enabled by setting `uiService.enable` to true. | | istio.enabled | bool | `false` | When using `istio`, spark jobs need to run without a sidecar to properly terminate | | labelSelectorFilter | string | `""` | A comma-separated list of key=value, or key labels to filter resources during watch and list based on the specified labels. | -| leaderElection.lockName | string | `"spark-operator-lock"` | Leader election lock name. Ref: https://github.com/GoogleCloudPlatform/spark-on-k8s-operator/blob/master/docs/user-guide.md#enabling-leader-election-for-high-availability. | +| leaderElection.lockName | string | `"spark-operator-lock"` | Leader election lock name. Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-leader-election-for-high-availability. | | leaderElection.lockNamespace | string | `""` | Optionally store the lock in another namespace. Defaults to operator's namespace | | logLevel | int | `2` | Set higher levels for more verbose logging | | metrics.enable | bool | `true` | Enable prometheus metric scraping | @@ -115,7 +115,7 @@ All charts linted successfully | rbac.createClusterRole | bool | `true` | Create and use RBAC `ClusterRole` resources | | rbac.createRole | bool | `true` | Create and use RBAC `Role` resources | | replicaCount | int | `1` | Desired number of pods, leaderElection will be enabled if this is greater than 1 | -| resourceQuotaEnforcement.enable | bool | `false` | Whether to enable the ResourceQuota enforcement for SparkApplication resources. Requires the webhook to be enabled by setting `webhook.enable` to true. 
Ref: https://github.com/GoogleCloudPlatform/spark-on-k8s-operator/blob/master/docs/user-guide.md#enabling-resource-quota-enforcement. | +| resourceQuotaEnforcement.enable | bool | `false` | Whether to enable the ResourceQuota enforcement for SparkApplication resources. Requires the webhook to be enabled by setting `webhook.enable` to true. Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-resource-quota-enforcement. | | resources | object | `{}` | Pod resource requests and limits Note, that each job submission will spawn a JVM within the Spark Operator Pod using "/usr/local/openjdk-11/bin/java -Xmx128m". Kubernetes may kill these Java processes at will to enforce resource limits. When that happens, you will see the following error: 'failed to run spark-submit for SparkApplication [...]: signal: killed' - when this happens, you may want to increase memory limits. | | resyncInterval | int | `30` | Operator resync interval. Note that the operator will respond to events (e.g. create, update) unrelated to this setting | | securityContext | object | `{}` | Operator container security context | diff --git a/charts/spark-operator-chart/README.md.gotmpl b/charts/spark-operator-chart/README.md.gotmpl index b903f0e2b0..070a2a82ad 100644 --- a/charts/spark-operator-chart/README.md.gotmpl +++ b/charts/spark-operator-chart/README.md.gotmpl @@ -23,7 +23,7 @@ The previous `spark-operator` Helm chart hosted at [helm/charts](https://github. ```shell -$ helm repo add spark-operator https://googlecloudplatform.github.io/spark-on-k8s-operator +$ helm repo add spark-operator https://kubeflow.github.io/spark-operator $ helm install my-release spark-operator/spark-operator ``` diff --git a/charts/spark-operator-chart/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml b/charts/spark-operator-chart/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml index 9f04e1dba3..47c7113c5c 100644 --- a/charts/spark-operator-chart/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml +++ b/charts/spark-operator-chart/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml @@ -5,7 +5,7 @@ kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: (unknown) - api-approved.kubernetes.io: https://github.com/GoogleCloudPlatform/spark-on-k8s-operator/pull/1298 + api-approved.kubernetes.io: https://github.com/kubeflow/spark-operator/pull/1298 name: scheduledsparkapplications.sparkoperator.k8s.io spec: group: sparkoperator.k8s.io diff --git a/charts/spark-operator-chart/crds/sparkoperator.k8s.io_sparkapplications.yaml b/charts/spark-operator-chart/crds/sparkoperator.k8s.io_sparkapplications.yaml index fb71683a8b..daadc2c48f 100644 --- a/charts/spark-operator-chart/crds/sparkoperator.k8s.io_sparkapplications.yaml +++ b/charts/spark-operator-chart/crds/sparkoperator.k8s.io_sparkapplications.yaml @@ -5,7 +5,7 @@ kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: (unknown) - api-approved.kubernetes.io: https://github.com/GoogleCloudPlatform/spark-on-k8s-operator/pull/1298 + api-approved.kubernetes.io: https://github.com/kubeflow/spark-operator/pull/1298 name: sparkapplications.sparkoperator.k8s.io spec: group: sparkoperator.k8s.io diff --git a/charts/spark-operator-chart/values.yaml b/charts/spark-operator-chart/values.yaml index da71ecd0cf..349265ca5b 100644 --- a/charts/spark-operator-chart/values.yaml +++ b/charts/spark-operator-chart/values.yaml @@ -190,12 +190,12 @@ batchScheduler: 
resourceQuotaEnforcement: # -- Whether to enable the ResourceQuota enforcement for SparkApplication resources. # Requires the webhook to be enabled by setting `webhook.enable` to true. - # Ref: https://github.com/GoogleCloudPlatform/spark-on-k8s-operator/blob/master/docs/user-guide.md#enabling-resource-quota-enforcement. + # Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-resource-quota-enforcement. enable: false leaderElection: # -- Leader election lock name. - # Ref: https://github.com/GoogleCloudPlatform/spark-on-k8s-operator/blob/master/docs/user-guide.md#enabling-leader-election-for-high-availability. + # Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-leader-election-for-high-availability. lockName: "spark-operator-lock" # -- Optionally store the lock in another namespace. Defaults to operator's namespace lockNamespace: "" diff --git a/docs/api-docs.md b/docs/api-docs.md index fbedbb8fbd..fc9b73195c 100644 --- a/docs/api-docs.md +++ b/docs/api-docs.md @@ -2590,7 +2590,7 @@ ApplicationState executorState
-map[string]github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2.ExecutorState +map[string]github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2.ExecutorState @@ -2814,7 +2814,7 @@ Deprecated. Consider using env instead.

envSecretKeyRefs
-map[string]github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2.NameKey +map[string]github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2.NameKey diff --git a/docs/developer-guide.md b/docs/developer-guide.md index aefcfa7654..a4f6acce27 100644 --- a/docs/developer-guide.md +++ b/docs/developer-guide.md @@ -26,10 +26,10 @@ $ docker build -t -f Dockerfile.rh . If you'd like to build/test the spark-operator locally, follow the instructions below: ```bash -$ mkdir -p $GOPATH/src/github.com/GoogleCloudPlatform -$ cd $GOPATH/src/github.com/GoogleCloudPlatform -$ git clone git@github.com:GoogleCloudPlatform/spark-on-k8s-operator.git -$ cd spark-on-k8s-operator +$ mkdir -p $GOPATH/src/github.com/kubeflow +$ cd $GOPATH/src/github.com/kubeflow +$ git clone git@github.com:kubeflow/spark-operator.git +$ cd spark-operator ``` To update the auto-generated code, run the following command. (This step is only required if the CRD types have been changed): diff --git a/docs/quick-start-guide.md b/docs/quick-start-guide.md index 9c2ec3edb9..4ee06c904f 100644 --- a/docs/quick-start-guide.md +++ b/docs/quick-start-guide.md @@ -25,7 +25,7 @@ For a more detailed guide on how to use, compose, and work with `SparkApplicatio To install the operator, use the Helm [chart](../charts/spark-operator-chart). ```bash -$ helm repo add spark-operator https://googlecloudplatform.github.io/spark-on-k8s-operator +$ helm repo add spark-operator https://kubeflow.github.io/spark-operator $ helm install my-release spark-operator/spark-operator --namespace spark-operator --create-namespace ``` @@ -54,7 +54,7 @@ You can also install `spark-operator` using [kustomize](https://github.com/kuber ``` kubectl apply -k {manifest_directory} ``` -Kustomize default manifest directory is part of the repo [here](https://github.com/GoogleCloudPlatform/spark-on-k8s-operator/tree/master/manifest/spark-operator-with-webhook-install) +Kustomize default manifest directory is part of the repo [here](https://github.com/kubeflow/spark-operator/tree/master/manifest/spark-operator-with-webhook-install) The manifest directory contains primarily the `crds` and `spark-operator-with-webhook.yaml` which holds configurations of spark operator init job, a webhook service and finally a deployment. diff --git a/docs/user-guide.md b/docs/user-guide.md index 4d2e7102de..9d7221b77e 100644 --- a/docs/user-guide.md +++ b/docs/user-guide.md @@ -842,6 +842,6 @@ To customize the operator, you can follow the steps below: 1. Compile Spark distribution with Kubernetes support as per [Spark documentation](https://spark.apache.org/docs/latest/building-spark.html#building-with-kubernetes-support). 2. Create docker images to be used for Spark with [docker-image tool](https://spark.apache.org/docs/latest/running-on-kubernetes.html#docker-images). -3. Create a new operator image based on the above image. You need to modify the `FROM` tag in the [Dockerfile](https://github.com/GoogleCloudPlatform/spark-on-k8s-operator/blob/master/Dockerfile) with your Spark image. +3. Create a new operator image based on the above image. You need to modify the `FROM` tag in the [Dockerfile](https://github.com/kubeflow/spark-operator/blob/master/Dockerfile) with your Spark image. 4. Build and push your operator image built above. -5. 
Deploy the new image by modifying the [/manifest/spark-operator.yaml](https://github.com/GoogleCloudPlatform/spark-on-k8s-operator/blob/master/manifest/spark-operator.yaml) file and specifying your operator image. +5. Deploy the new image by modifying the [/manifest/spark-operator-install/spark-operator.yaml](https://github.com/kubeflow/spark-operator/blob/master/manifest/spark-operator-install/spark-operator.yaml) file and specifying your operator image. diff --git a/docs/volcano-integration.md b/docs/volcano-integration.md index ecb325d3b5..7d67276a94 100644 --- a/docs/volcano-integration.md +++ b/docs/volcano-integration.md @@ -15,7 +15,7 @@ same environment, please refer [Quick Start Guide](https://github.com/volcano-sh With the help of the Helm chart, Kubernetes Operator for Apache Spark with Volcano can be easily installed with the command below: ```bash -$ helm repo add spark-operator https://googlecloudplatform.github.io/spark-on-k8s-operator +$ helm repo add spark-operator https://kubeflow.github.io/spark-operator $ helm install my-release spark-operator/spark-operator --namespace spark-operator --set batchScheduler.enable=true --set webhook.enable=true ``` diff --git a/go.mod b/go.mod index 4d9d7a2846..a6b09386b8 100644 --- a/go.mod +++ b/go.mod @@ -1,4 +1,4 @@ -module github.com/GoogleCloudPlatform/spark-on-k8s-operator +module github.com/kubeflow/spark-operator go 1.22 diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh index ac4af898c8..a74d7b3e64 100755 --- a/hack/update-codegen.sh +++ b/hack/update-codegen.sh @@ -25,7 +25,7 @@ SCRIPT_ROOT=$(dirname ${BASH_SOURCE})/.. # k8s.io/kubernetes. The output-base is needed for the generators to output into the vendor dir # instead of the $GOPATH directly. For normal projects this can be dropped. ${SCRIPT_ROOT}/hack/generate-groups.sh "all" \ - github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis \ + github.com/kubeflow/spark-operator/pkg/client github.com/kubeflow/spark-operator/pkg/apis \ sparkoperator.k8s.io:v1beta1,v1beta2 \ --go-header-file "$(dirname ${BASH_SOURCE})/custom-boilerplate.go.txt" \ --output-base "$(dirname ${BASH_SOURCE})/../../../.." 
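The `go.mod` rename above is the root of every import rewrite that follows: once the module is declared as `github.com/kubeflow/spark-operator`, all in-tree and downstream imports must use that path. A short sketch of what a consumer of the generated clientset looks like after the rename; the kubeconfig path and namespace are placeholders, and the `SparkoperatorV1beta2()` accessor follows the k8s code-generator naming visible in the clientset files below:

```go
package main

import (
	"context"
	"fmt"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"

	// New module path; the old GoogleCloudPlatform path no longer matches go.mod.
	crclientset "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	cs, err := crclientset.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}
	// List SparkApplications in a namespace through the generated v1beta2 client.
	apps, err := cs.SparkoperatorV1beta2().SparkApplications("default").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		log.Fatal(err)
	}
	for _, app := range apps.Items {
		fmt.Println(app.Name)
	}
}
```

Because Go modules encode the path in `go.mod`, builds that still import the `GoogleCloudPlatform` path fail with a module path mismatch once they pick up a post-rename version, which is why the rewrite in the following files has to land in one sweep.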
diff --git a/main.go b/main.go index db4861b2e6..96886a87ee 100644 --- a/main.go +++ b/main.go @@ -40,14 +40,14 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/utils/clock" - "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/batchscheduler" - crclientset "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned" - crinformers "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/informers/externalversions" - operatorConfig "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config" - "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/controller/scheduledsparkapplication" - "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/controller/sparkapplication" - "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/util" - "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/webhook" + "github.com/kubeflow/spark-operator/pkg/batchscheduler" + crclientset "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned" + crinformers "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions" + operatorConfig "github.com/kubeflow/spark-operator/pkg/config" + "github.com/kubeflow/spark-operator/pkg/controller/scheduledsparkapplication" + "github.com/kubeflow/spark-operator/pkg/controller/sparkapplication" + "github.com/kubeflow/spark-operator/pkg/util" + "github.com/kubeflow/spark-operator/pkg/webhook" ) var ( diff --git a/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml b/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml index 9f04e1dba3..47c7113c5c 100644 --- a/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml +++ b/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml @@ -5,7 +5,7 @@ kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: (unknown) - api-approved.kubernetes.io: https://github.com/GoogleCloudPlatform/spark-on-k8s-operator/pull/1298 + api-approved.kubernetes.io: https://github.com/kubeflow/spark-operator/pull/1298 name: scheduledsparkapplications.sparkoperator.k8s.io spec: group: sparkoperator.k8s.io diff --git a/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml b/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml index fb71683a8b..daadc2c48f 100644 --- a/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml +++ b/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml @@ -5,7 +5,7 @@ kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: (unknown) - api-approved.kubernetes.io: https://github.com/GoogleCloudPlatform/spark-on-k8s-operator/pull/1298 + api-approved.kubernetes.io: https://github.com/kubeflow/spark-operator/pull/1298 name: sparkapplications.sparkoperator.k8s.io spec: group: sparkoperator.k8s.io diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta1/register.go b/pkg/apis/sparkoperator.k8s.io/v1beta1/register.go index fbc06a1bc6..0280f01cd5 100644 --- a/pkg/apis/sparkoperator.k8s.io/v1beta1/register.go +++ b/pkg/apis/sparkoperator.k8s.io/v1beta1/register.go @@ -21,7 +21,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io" + "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io" ) const Version = "v1beta1" diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta2/register.go b/pkg/apis/sparkoperator.k8s.io/v1beta2/register.go index f70432e431..20d087b7fc 100644 --- 
a/pkg/apis/sparkoperator.k8s.io/v1beta2/register.go +++ b/pkg/apis/sparkoperator.k8s.io/v1beta2/register.go @@ -21,7 +21,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io" + "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io" ) const Version = "v1beta2" diff --git a/pkg/batchscheduler/interface/interface.go b/pkg/batchscheduler/interface/interface.go index b2072b7a7a..6ed18c8cd7 100644 --- a/pkg/batchscheduler/interface/interface.go +++ b/pkg/batchscheduler/interface/interface.go @@ -17,7 +17,7 @@ limitations under the License. package schedulerinterface import ( - "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" ) type BatchScheduler interface { diff --git a/pkg/batchscheduler/scheduler_manager.go b/pkg/batchscheduler/scheduler_manager.go index fe84954807..41ff744b0e 100644 --- a/pkg/batchscheduler/scheduler_manager.go +++ b/pkg/batchscheduler/scheduler_manager.go @@ -22,8 +22,8 @@ import ( "k8s.io/client-go/rest" - "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/batchscheduler/interface" - "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/batchscheduler/volcano" + "github.com/kubeflow/spark-operator/pkg/batchscheduler/interface" + "github.com/kubeflow/spark-operator/pkg/batchscheduler/volcano" ) type schedulerInitializeFunc func(config *rest.Config) (schedulerinterface.BatchScheduler, error) diff --git a/pkg/batchscheduler/volcano/volcano_scheduler.go b/pkg/batchscheduler/volcano/volcano_scheduler.go index 31179ebf2a..8ef79f200a 100644 --- a/pkg/batchscheduler/volcano/volcano_scheduler.go +++ b/pkg/batchscheduler/volcano/volcano_scheduler.go @@ -30,8 +30,8 @@ import ( "volcano.sh/volcano/pkg/apis/scheduling/v1beta1" volcanoclient "volcano.sh/volcano/pkg/client/clientset/versioned" - "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - schedulerinterface "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/batchscheduler/interface" + "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + schedulerinterface "github.com/kubeflow/spark-operator/pkg/batchscheduler/interface" ) const ( diff --git a/pkg/batchscheduler/volcano/volcano_scheduler_test.go b/pkg/batchscheduler/volcano/volcano_scheduler_test.go index a3d9cf796c..1587ef1065 100644 --- a/pkg/batchscheduler/volcano/volcano_scheduler_test.go +++ b/pkg/batchscheduler/volcano/volcano_scheduler_test.go @@ -22,7 +22,7 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" ) func TestGetDriverResource(t *testing.T) { diff --git a/pkg/client/clientset/versioned/clientset.go b/pkg/client/clientset/versioned/clientset.go index 9d70d02812..81bf0d0206 100644 --- a/pkg/client/clientset/versioned/clientset.go +++ b/pkg/client/clientset/versioned/clientset.go @@ -23,8 +23,8 @@ package versioned import ( "fmt" - sparkoperatorv1beta1 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1" - sparkoperatorv1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2" + sparkoperatorv1beta1 
"github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1" + sparkoperatorv1beta2 "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2" discovery "k8s.io/client-go/discovery" rest "k8s.io/client-go/rest" flowcontrol "k8s.io/client-go/util/flowcontrol" diff --git a/pkg/client/clientset/versioned/fake/clientset_generated.go b/pkg/client/clientset/versioned/fake/clientset_generated.go index 96a86852b4..11da2fb2f2 100644 --- a/pkg/client/clientset/versioned/fake/clientset_generated.go +++ b/pkg/client/clientset/versioned/fake/clientset_generated.go @@ -21,11 +21,11 @@ limitations under the License. package fake import ( - clientset "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned" - sparkoperatorv1beta1 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1" - fakesparkoperatorv1beta1 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/fake" - sparkoperatorv1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2" - fakesparkoperatorv1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake" + clientset "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned" + sparkoperatorv1beta1 "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1" + fakesparkoperatorv1beta1 "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/fake" + sparkoperatorv1beta2 "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2" + fakesparkoperatorv1beta2 "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/discovery" diff --git a/pkg/client/clientset/versioned/fake/register.go b/pkg/client/clientset/versioned/fake/register.go index d2ff4ab5ad..2ba94243f1 100644 --- a/pkg/client/clientset/versioned/fake/register.go +++ b/pkg/client/clientset/versioned/fake/register.go @@ -21,8 +21,8 @@ limitations under the License. package fake import ( - sparkoperatorv1beta1 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" - sparkoperatorv1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + sparkoperatorv1beta1 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + sparkoperatorv1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/pkg/client/clientset/versioned/scheme/register.go b/pkg/client/clientset/versioned/scheme/register.go index cbcdafc8a2..d12cb60d48 100644 --- a/pkg/client/clientset/versioned/scheme/register.go +++ b/pkg/client/clientset/versioned/scheme/register.go @@ -21,8 +21,8 @@ limitations under the License. 
package scheme import ( - sparkoperatorv1beta1 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" - sparkoperatorv1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + sparkoperatorv1beta1 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + sparkoperatorv1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/fake/fake_scheduledsparkapplication.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/fake/fake_scheduledsparkapplication.go index 0df7e9ee41..ac41f935cc 100644 --- a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/fake/fake_scheduledsparkapplication.go +++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/fake/fake_scheduledsparkapplication.go @@ -23,7 +23,7 @@ package fake import ( "context" - v1beta1 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + v1beta1 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/fake/fake_sparkapplication.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/fake/fake_sparkapplication.go index 961f010208..e8772e7ba1 100644 --- a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/fake/fake_sparkapplication.go +++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/fake/fake_sparkapplication.go @@ -23,7 +23,7 @@ package fake import ( "context" - v1beta1 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + v1beta1 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/fake/fake_sparkoperator.k8s.io_client.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/fake/fake_sparkoperator.k8s.io_client.go index 0722c1154c..d125413562 100644 --- a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/fake/fake_sparkoperator.k8s.io_client.go +++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/fake/fake_sparkoperator.k8s.io_client.go @@ -21,7 +21,7 @@ limitations under the License. 
package fake import ( - v1beta1 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1" + v1beta1 "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1" rest "k8s.io/client-go/rest" testing "k8s.io/client-go/testing" ) diff --git a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/scheduledsparkapplication.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/scheduledsparkapplication.go index 1bef3d3842..65336a68eb 100644 --- a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/scheduledsparkapplication.go +++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/scheduledsparkapplication.go @@ -24,8 +24,8 @@ import ( "context" "time" - v1beta1 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" - scheme "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned/scheme" + v1beta1 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + scheme "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" diff --git a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/sparkapplication.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/sparkapplication.go index 090e297b5c..e4308e3093 100644 --- a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/sparkapplication.go +++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/sparkapplication.go @@ -24,8 +24,8 @@ import ( "context" "time" - v1beta1 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" - scheme "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned/scheme" + v1beta1 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + scheme "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" diff --git a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/sparkoperator.k8s.io_client.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/sparkoperator.k8s.io_client.go index 6dd0595280..c347da5be3 100644 --- a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/sparkoperator.k8s.io_client.go +++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/sparkoperator.k8s.io_client.go @@ -21,8 +21,8 @@ limitations under the License. 
 package v1beta1

 import (
-	v1beta1 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1"
-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned/scheme"
+	v1beta1 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta1"
+	"github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/scheme"
 	rest "k8s.io/client-go/rest"
 )
diff --git a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_scheduledsparkapplication.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_scheduledsparkapplication.go
index 121526102d..6d2218ba46 100644
--- a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_scheduledsparkapplication.go
+++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_scheduledsparkapplication.go
@@ -23,7 +23,7 @@ package fake
 import (
 	"context"

-	v1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	v1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	labels "k8s.io/apimachinery/pkg/labels"
 	schema "k8s.io/apimachinery/pkg/runtime/schema"
diff --git a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_sparkapplication.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_sparkapplication.go
index ff4996928b..aa2a994390 100644
--- a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_sparkapplication.go
+++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_sparkapplication.go
@@ -23,7 +23,7 @@ package fake
 import (
 	"context"

-	v1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	v1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	labels "k8s.io/apimachinery/pkg/labels"
 	schema "k8s.io/apimachinery/pkg/runtime/schema"
diff --git a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_sparkoperator.k8s.io_client.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_sparkoperator.k8s.io_client.go
index 7dfc2ed988..e5fb85132a 100644
--- a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_sparkoperator.k8s.io_client.go
+++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_sparkoperator.k8s.io_client.go
@@ -21,7 +21,7 @@ limitations under the License.
 package fake

 import (
-	v1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2"
+	v1beta2 "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2"
 	rest "k8s.io/client-go/rest"
 	testing "k8s.io/client-go/testing"
 )
diff --git a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go
index 6166c2daed..38b0063685 100644
--- a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go
+++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go
@@ -24,8 +24,8 @@ import (
 	"context"
 	"time"

-	v1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-	scheme "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned/scheme"
+	v1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	scheme "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/scheme"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	types "k8s.io/apimachinery/pkg/types"
 	watch "k8s.io/apimachinery/pkg/watch"
diff --git a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/sparkapplication.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/sparkapplication.go
index cf0c0b0ddc..cc541f5dd9 100644
--- a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/sparkapplication.go
+++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/sparkapplication.go
@@ -24,8 +24,8 @@ import (
 	"context"
 	"time"

-	v1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-	scheme "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned/scheme"
+	v1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	scheme "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/scheme"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	types "k8s.io/apimachinery/pkg/types"
 	watch "k8s.io/apimachinery/pkg/watch"
diff --git a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/sparkoperator.k8s.io_client.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/sparkoperator.k8s.io_client.go
index 81cb691855..cb8dc20734 100644
--- a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/sparkoperator.k8s.io_client.go
+++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/sparkoperator.k8s.io_client.go
@@ -21,8 +21,8 @@ limitations under the License.
 package v1beta2

 import (
-	v1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned/scheme"
+	v1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	"github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/scheme"
 	rest "k8s.io/client-go/rest"
 )
diff --git a/pkg/client/informers/externalversions/factory.go b/pkg/client/informers/externalversions/factory.go
index 8710635b9e..7bcf90f634 100644
--- a/pkg/client/informers/externalversions/factory.go
+++ b/pkg/client/informers/externalversions/factory.go
@@ -25,9 +25,9 @@ import (
 	sync "sync"
 	time "time"

-	versioned "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned"
-	internalinterfaces "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/informers/externalversions/internalinterfaces"
-	sparkoperatork8sio "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/informers/externalversions/sparkoperator.k8s.io"
+	versioned "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned"
+	internalinterfaces "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions/internalinterfaces"
+	sparkoperatork8sio "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions/sparkoperator.k8s.io"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
 	schema "k8s.io/apimachinery/pkg/runtime/schema"
diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go
index 4d372af3a4..6992de7df7 100644
--- a/pkg/client/informers/externalversions/generic.go
+++ b/pkg/client/informers/externalversions/generic.go
@@ -23,8 +23,8 @@ package externalversions
 import (
 	"fmt"

-	v1beta1 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1"
-	v1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	v1beta1 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta1"
+	v1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
 	schema "k8s.io/apimachinery/pkg/runtime/schema"
 	cache "k8s.io/client-go/tools/cache"
 )
diff --git a/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go b/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go
index ac753d54db..e6c5d76afa 100644
--- a/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go
+++ b/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go
@@ -23,7 +23,7 @@ package internalinterfaces
 import (
 	time "time"

-	versioned "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned"
+	versioned "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
 	cache "k8s.io/client-go/tools/cache"
diff --git a/pkg/client/informers/externalversions/sparkoperator.k8s.io/interface.go b/pkg/client/informers/externalversions/sparkoperator.k8s.io/interface.go
index c5aefa968e..51d0d2469d 100644
--- a/pkg/client/informers/externalversions/sparkoperator.k8s.io/interface.go
+++ b/pkg/client/informers/externalversions/sparkoperator.k8s.io/interface.go
@@ -21,9 +21,9 @@ limitations under the License.
 package sparkoperator

 import (
-	internalinterfaces "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/informers/externalversions/internalinterfaces"
-	v1beta1 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta1"
-	v1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2"
+	internalinterfaces "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions/internalinterfaces"
+	v1beta1 "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta1"
+	v1beta2 "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2"
 )

 // Interface provides access to each of this group's versions.
diff --git a/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta1/interface.go b/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta1/interface.go
index 047e564a42..af3efd38e6 100644
--- a/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta1/interface.go
+++ b/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta1/interface.go
@@ -21,7 +21,7 @@ limitations under the License.
 package v1beta1

 import (
-	internalinterfaces "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/informers/externalversions/internalinterfaces"
+	internalinterfaces "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions/internalinterfaces"
 )

 // Interface provides access to all the informers in this group version.
diff --git a/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta1/scheduledsparkapplication.go b/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta1/scheduledsparkapplication.go
index 1d900bad93..46de9ba529 100644
--- a/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta1/scheduledsparkapplication.go
+++ b/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta1/scheduledsparkapplication.go
@@ -24,10 +24,10 @@ import (
 	"context"
 	time "time"

-	sparkoperatork8siov1beta1 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1"
-	versioned "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned"
-	internalinterfaces "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/informers/externalversions/internalinterfaces"
-	v1beta1 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta1"
+	sparkoperatork8siov1beta1 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta1"
+	versioned "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned"
+	internalinterfaces "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions/internalinterfaces"
+	v1beta1 "github.com/kubeflow/spark-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta1"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
 	watch "k8s.io/apimachinery/pkg/watch"
diff --git a/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta1/sparkapplication.go b/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta1/sparkapplication.go
index 48068e1c5e..f38734232a 100644
--- a/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta1/sparkapplication.go
+++ b/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta1/sparkapplication.go
@@ -24,10 +24,10 @@ import (
 	"context"
 	time "time"

-	sparkoperatork8siov1beta1 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1"
-	versioned "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned"
-	internalinterfaces "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/informers/externalversions/internalinterfaces"
-	v1beta1 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta1"
+	sparkoperatork8siov1beta1 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta1"
+	versioned "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned"
+	internalinterfaces "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions/internalinterfaces"
+	v1beta1 "github.com/kubeflow/spark-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta1"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
 	watch "k8s.io/apimachinery/pkg/watch"
diff --git a/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2/interface.go b/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2/interface.go
index 78b8f5f6dd..d41ff90fc7 100644
--- a/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2/interface.go
+++ b/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2/interface.go
@@ -21,7 +21,7 @@ limitations under the License.
 package v1beta2

 import (
-	internalinterfaces "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/informers/externalversions/internalinterfaces"
+	internalinterfaces "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions/internalinterfaces"
 )

 // Interface provides access to all the informers in this group version.
diff --git a/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go b/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go
index 8b02b71c3e..ffa1fddd37 100644
--- a/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go
+++ b/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go
@@ -24,10 +24,10 @@ import (
 	"context"
 	time "time"

-	sparkoperatork8siov1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-	versioned "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned"
-	internalinterfaces "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/informers/externalversions/internalinterfaces"
-	v1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta2"
+	sparkoperatork8siov1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	versioned "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned"
+	internalinterfaces "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions/internalinterfaces"
+	v1beta2 "github.com/kubeflow/spark-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta2"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
 	watch "k8s.io/apimachinery/pkg/watch"
diff --git a/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2/sparkapplication.go b/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2/sparkapplication.go
index 1008a97a33..da42c12ec0 100644
--- a/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2/sparkapplication.go
+++ b/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2/sparkapplication.go
@@ -24,10 +24,10 @@ import (
 	"context"
 	time "time"

-	sparkoperatork8siov1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-	versioned "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned"
-	internalinterfaces "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/informers/externalversions/internalinterfaces"
-	v1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta2"
+	sparkoperatork8siov1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	versioned "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned"
+	internalinterfaces "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions/internalinterfaces"
+	v1beta2 "github.com/kubeflow/spark-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta2"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
 	watch "k8s.io/apimachinery/pkg/watch"
diff --git a/pkg/client/listers/sparkoperator.k8s.io/v1beta1/scheduledsparkapplication.go b/pkg/client/listers/sparkoperator.k8s.io/v1beta1/scheduledsparkapplication.go
index 4bcffb8ef8..f3921e8106 100644
--- a/pkg/client/listers/sparkoperator.k8s.io/v1beta1/scheduledsparkapplication.go
+++ b/pkg/client/listers/sparkoperator.k8s.io/v1beta1/scheduledsparkapplication.go
@@ -21,7 +21,7 @@ limitations under the License.
 package v1beta1

 import (
-	v1beta1 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1"
+	v1beta1 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/client-go/tools/cache"
diff --git a/pkg/client/listers/sparkoperator.k8s.io/v1beta1/sparkapplication.go b/pkg/client/listers/sparkoperator.k8s.io/v1beta1/sparkapplication.go
index d19215f1cb..51ceafa4dd 100644
--- a/pkg/client/listers/sparkoperator.k8s.io/v1beta1/sparkapplication.go
+++ b/pkg/client/listers/sparkoperator.k8s.io/v1beta1/sparkapplication.go
@@ -21,7 +21,7 @@ limitations under the License.
 package v1beta1

 import (
-	v1beta1 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta1"
+	v1beta1 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/client-go/tools/cache"
diff --git a/pkg/client/listers/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go b/pkg/client/listers/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go
index 2c37bccad8..f70331d5b0 100644
--- a/pkg/client/listers/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go
+++ b/pkg/client/listers/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go
@@ -21,7 +21,7 @@ limitations under the License.
 package v1beta2

 import (
-	v1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	v1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
 	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/client-go/tools/cache"
diff --git a/pkg/client/listers/sparkoperator.k8s.io/v1beta2/sparkapplication.go b/pkg/client/listers/sparkoperator.k8s.io/v1beta2/sparkapplication.go
index 3e11815673..4818a3cf77 100644
--- a/pkg/client/listers/sparkoperator.k8s.io/v1beta2/sparkapplication.go
+++ b/pkg/client/listers/sparkoperator.k8s.io/v1beta2/sparkapplication.go
@@ -21,7 +21,7 @@ limitations under the License.
 package v1beta2

 import (
-	v1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	v1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
 	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/client-go/tools/cache"
diff --git a/pkg/config/config.go b/pkg/config/config.go
index 89415597ef..18a708c6db 100644
--- a/pkg/config/config.go
+++ b/pkg/config/config.go
@@ -19,7 +19,7 @@ package config
 import (
 	"fmt"

-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	"github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
 )

 // GetDriverAnnotationOption returns a spark-submit option for a driver annotation of the given key and value.
diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go
index d6f60af0b3..485c1cb282 100644
--- a/pkg/config/config_test.go
+++ b/pkg/config/config_test.go
@@ -22,7 +22,7 @@ import (

 	"github.com/stretchr/testify/assert"

-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	"github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
 )

 func TestGetDriverEnvVarConfOptions(t *testing.T) {
diff --git a/pkg/config/secret.go b/pkg/config/secret.go
index b76b682c07..1a2c7fa492 100644
--- a/pkg/config/secret.go
+++ b/pkg/config/secret.go
@@ -20,7 +20,7 @@ import (
 	"fmt"
 	"path/filepath"

-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	"github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
 )

 // GetDriverSecretConfOptions returns a list of spark-submit options for mounting driver secrets.
diff --git a/pkg/config/secret_test.go b/pkg/config/secret_test.go
index 6157ecb5c1..fcd0ea9922 100644
--- a/pkg/config/secret_test.go
+++ b/pkg/config/secret_test.go
@@ -23,7 +23,7 @@ import (

 	"github.com/stretchr/testify/assert"

-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	"github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
 )

 func TestGetDriverSecretConfOptions(t *testing.T) {
diff --git a/pkg/controller/scheduledsparkapplication/controller.go b/pkg/controller/scheduledsparkapplication/controller.go
index 820f8741eb..643518aa01 100644
--- a/pkg/controller/scheduledsparkapplication/controller.go
+++ b/pkg/controller/scheduledsparkapplication/controller.go
@@ -38,12 +38,12 @@ import (
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/utils/clock"

-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-	crdclientset "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned"
-	crdscheme "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned/scheme"
-	crdinformers "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/informers/externalversions"
-	crdlisters "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta2"
-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config"
+	"github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	crdclientset "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned"
+	crdscheme "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/scheme"
+	crdinformers "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions"
+	crdlisters "github.com/kubeflow/spark-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta2"
+	"github.com/kubeflow/spark-operator/pkg/config"
 )

 var (
diff --git a/pkg/controller/scheduledsparkapplication/controller_test.go b/pkg/controller/scheduledsparkapplication/controller_test.go
index 37722b69aa..9ef610113d 100644
--- a/pkg/controller/scheduledsparkapplication/controller_test.go
+++ b/pkg/controller/scheduledsparkapplication/controller_test.go
@@ -31,10 +31,10 @@ import (
 	"k8s.io/client-go/tools/cache"
 	clocktesting "k8s.io/utils/clock/testing"

-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-	crdclientfake "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned/fake"
-	crdinformers "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/informers/externalversions"
-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config"
+	"github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	crdclientfake "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/fake"
+	crdinformers "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions"
+	"github.com/kubeflow/spark-operator/pkg/config"
 )

 func TestSyncScheduledSparkApplication_Allow(t *testing.T) {
diff --git a/pkg/controller/scheduledsparkapplication/controller_util.go b/pkg/controller/scheduledsparkapplication/controller_util.go
index 563d3181d6..8cb33ab749 100644
--- a/pkg/controller/scheduledsparkapplication/controller_util.go
+++ b/pkg/controller/scheduledsparkapplication/controller_util.go
@@ -17,7 +17,7 @@ limitations under the License.
 package scheduledsparkapplication

 import (
-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	"github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
 )

 type sparkApps []*v1beta2.SparkApplication
diff --git a/pkg/controller/sparkapplication/controller.go b/pkg/controller/sparkapplication/controller.go
index f6f3732978..5d43a72b0a 100644
--- a/pkg/controller/sparkapplication/controller.go
+++ b/pkg/controller/sparkapplication/controller.go
@@ -42,15 +42,15 @@ import (
 	"k8s.io/client-go/util/retry"
 	"k8s.io/client-go/util/workqueue"

-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/batchscheduler"
-	schedulerinterface "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/batchscheduler/interface"
-	crdclientset "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned"
-	crdscheme "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned/scheme"
-	crdinformers "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/informers/externalversions"
-	crdlisters "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta2"
-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config"
-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/util"
+	"github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	"github.com/kubeflow/spark-operator/pkg/batchscheduler"
+	schedulerinterface "github.com/kubeflow/spark-operator/pkg/batchscheduler/interface"
+	crdclientset "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned"
+	crdscheme "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/scheme"
+	crdinformers "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions"
+	crdlisters "github.com/kubeflow/spark-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta2"
+	"github.com/kubeflow/spark-operator/pkg/config"
+	"github.com/kubeflow/spark-operator/pkg/util"
 )

 const (
diff --git a/pkg/controller/sparkapplication/controller_test.go b/pkg/controller/sparkapplication/controller_test.go
index 1dcb8e549f..44f9003dbf 100644
--- a/pkg/controller/sparkapplication/controller_test.go
+++ b/pkg/controller/sparkapplication/controller_test.go
@@ -37,11 +37,11 @@ import (
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/record"

-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-	crdclientfake "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned/fake"
-	crdinformers "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/informers/externalversions"
-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config"
-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/util"
+	"github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	crdclientfake "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/fake"
+	crdinformers "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions"
+	"github.com/kubeflow/spark-operator/pkg/config"
+	"github.com/kubeflow/spark-operator/pkg/util"
 )

 func newFakeController(app *v1beta2.SparkApplication, pods ...*apiv1.Pod) (*Controller, *record.FakeRecorder) {
diff --git a/pkg/controller/sparkapplication/monitoring_config.go b/pkg/controller/sparkapplication/monitoring_config.go
index c856fd6365..ea88326b16 100644
--- a/pkg/controller/sparkapplication/monitoring_config.go
+++ b/pkg/controller/sparkapplication/monitoring_config.go
@@ -27,8 +27,8 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/util/retry"

-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config"
+	"github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	"github.com/kubeflow/spark-operator/pkg/config"
 )

 const (
diff --git a/pkg/controller/sparkapplication/monitoring_config_test.go b/pkg/controller/sparkapplication/monitoring_config_test.go
index c8e317f4a8..3eb20b8f91 100644
--- a/pkg/controller/sparkapplication/monitoring_config_test.go
+++ b/pkg/controller/sparkapplication/monitoring_config_test.go
@@ -24,8 +24,8 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes/fake"

-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config"
+	"github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	"github.com/kubeflow/spark-operator/pkg/config"
 )

 func TestConfigPrometheusMonitoring(t *testing.T) {
diff --git a/pkg/controller/sparkapplication/spark_pod_eventhandler.go b/pkg/controller/sparkapplication/spark_pod_eventhandler.go
index 6978de6f05..8ebb398cbb 100644
--- a/pkg/controller/sparkapplication/spark_pod_eventhandler.go
+++ b/pkg/controller/sparkapplication/spark_pod_eventhandler.go
@@ -22,8 +22,8 @@ import (
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/client-go/tools/cache"

-	crdlisters "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta2"
-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config"
+	crdlisters "github.com/kubeflow/spark-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta2"
+	"github.com/kubeflow/spark-operator/pkg/config"
 )

 // sparkPodEventHandler monitors Spark executor pods and update the SparkApplication objects accordingly.
diff --git a/pkg/controller/sparkapplication/spark_pod_eventhandler_test.go b/pkg/controller/sparkapplication/spark_pod_eventhandler_test.go
index 34cd636020..2fa8360221 100644
--- a/pkg/controller/sparkapplication/spark_pod_eventhandler_test.go
+++ b/pkg/controller/sparkapplication/spark_pod_eventhandler_test.go
@@ -26,7 +26,7 @@ import (
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/util/workqueue"

-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config"
+	"github.com/kubeflow/spark-operator/pkg/config"
 )

 func TestOnPodAdded(t *testing.T) {
diff --git a/pkg/controller/sparkapplication/sparkapp_metrics.go b/pkg/controller/sparkapplication/sparkapp_metrics.go
index 023b01733a..1dfb309b1c 100644
--- a/pkg/controller/sparkapplication/sparkapp_metrics.go
+++ b/pkg/controller/sparkapplication/sparkapp_metrics.go
@@ -22,8 +22,8 @@ import (
 	"github.com/golang/glog"
 	"github.com/prometheus/client_golang/prometheus"

-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/util"
+	"github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	"github.com/kubeflow/spark-operator/pkg/util"
 )

 type sparkAppMetrics struct {
diff --git a/pkg/controller/sparkapplication/sparkapp_metrics_test.go b/pkg/controller/sparkapplication/sparkapp_metrics_test.go
index e2a6fd761e..a860d7f411 100644
--- a/pkg/controller/sparkapplication/sparkapp_metrics_test.go
+++ b/pkg/controller/sparkapplication/sparkapp_metrics_test.go
@@ -17,7 +17,7 @@ limitations under the License.
 package sparkapplication

 import (
-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/util"
+	"github.com/kubeflow/spark-operator/pkg/util"
 	"net/http"
 	"sync"
 	"testing"
diff --git a/pkg/controller/sparkapplication/sparkapp_util.go b/pkg/controller/sparkapplication/sparkapp_util.go
index 30cce2c1c7..ff321625cc 100644
--- a/pkg/controller/sparkapplication/sparkapp_util.go
+++ b/pkg/controller/sparkapplication/sparkapp_util.go
@@ -23,8 +23,8 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/kubernetes/pkg/apis/policy"

-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config"
+	"github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	"github.com/kubeflow/spark-operator/pkg/config"
 	apiv1 "k8s.io/api/core/v1"
 	networkingv1 "k8s.io/api/networking/v1"
 )
diff --git a/pkg/controller/sparkapplication/sparkapp_util_test.go b/pkg/controller/sparkapplication/sparkapp_util_test.go
index 744abe4504..c1605656ed 100644
--- a/pkg/controller/sparkapplication/sparkapp_util_test.go
+++ b/pkg/controller/sparkapplication/sparkapp_util_test.go
@@ -19,7 +19,7 @@ package sparkapplication
 import (
 	"testing"

-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	"github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
 )

 var expectedStatusString = `{
diff --git a/pkg/controller/sparkapplication/sparkui.go b/pkg/controller/sparkapplication/sparkui.go
index 533c3e953d..5ac64062e6 100644
--- a/pkg/controller/sparkapplication/sparkui.go
+++ b/pkg/controller/sparkapplication/sparkui.go
@@ -32,9 +32,9 @@ import (
 	"k8s.io/apimachinery/pkg/util/intstr"
 	clientset "k8s.io/client-go/kubernetes"

-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config"
-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/util"
+	"github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	"github.com/kubeflow/spark-operator/pkg/config"
+	"github.com/kubeflow/spark-operator/pkg/util"
 )

 const (
diff --git a/pkg/controller/sparkapplication/sparkui_test.go b/pkg/controller/sparkapplication/sparkui_test.go
index 80d8e1075e..6122c88108 100644
--- a/pkg/controller/sparkapplication/sparkui_test.go
+++ b/pkg/controller/sparkapplication/sparkui_test.go
@@ -29,9 +29,9 @@ import (
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/client-go/kubernetes/fake"

-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config"
-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/util"
+	"github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	"github.com/kubeflow/spark-operator/pkg/config"
+	"github.com/kubeflow/spark-operator/pkg/util"
 )

 func TestCreateSparkUIService(t *testing.T) {
diff --git a/pkg/controller/sparkapplication/submission.go b/pkg/controller/sparkapplication/submission.go
index 98bb882e05..9c7d8c8cac 100644
--- a/pkg/controller/sparkapplication/submission.go
+++ b/pkg/controller/sparkapplication/submission.go
@@ -29,8 +29,8 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/pkg/apis/policy"

-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config"
+	"github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	"github.com/kubeflow/spark-operator/pkg/config"
 )

 const (
diff --git a/pkg/controller/sparkapplication/submission_test.go b/pkg/controller/sparkapplication/submission_test.go
index 20e247a01b..3c34950c64 100644
--- a/pkg/controller/sparkapplication/submission_test.go
+++ b/pkg/controller/sparkapplication/submission_test.go
@@ -31,8 +31,8 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config"
+	"github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	"github.com/kubeflow/spark-operator/pkg/config"
 )

 const (
diff --git a/pkg/util/util.go b/pkg/util/util.go
index d556d045e4..d39e2b19bd 100644
--- a/pkg/util/util.go
+++ b/pkg/util/util.go
@@ -24,8 +24,8 @@ import (
 	apiv1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config"
+	"github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	"github.com/kubeflow/spark-operator/pkg/config"
 )

 // NewHash32 returns a 32-bit hash computed from the given byte slice.
diff --git a/pkg/webhook/patch.go b/pkg/webhook/patch.go
index bc9a748172..a7c20a8168 100644
--- a/pkg/webhook/patch.go
+++ b/pkg/webhook/patch.go
@@ -26,9 +26,9 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config"
-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/util"
+	"github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	"github.com/kubeflow/spark-operator/pkg/config"
+	"github.com/kubeflow/spark-operator/pkg/util"
 )

 const (
diff --git a/pkg/webhook/patch_test.go b/pkg/webhook/patch_test.go
index 4b66b62a7e..99f821f37c 100644
--- a/pkg/webhook/patch_test.go
+++ b/pkg/webhook/patch_test.go
@@ -28,8 +28,8 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config"
+	"github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	"github.com/kubeflow/spark-operator/pkg/config"
 )

 func TestPatchSparkPod_OwnerReference(t *testing.T) {
diff --git a/pkg/webhook/resourceusage/enforcer.go b/pkg/webhook/resourceusage/enforcer.go
index b4aece7956..987895bc5a 100644
--- a/pkg/webhook/resourceusage/enforcer.go
+++ b/pkg/webhook/resourceusage/enforcer.go
@@ -2,8 +2,8 @@ package resourceusage

 import (
 	"fmt"
-	so "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-	crdinformers "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/informers/externalversions"
+	so "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	crdinformers "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions"
 	"github.com/golang/glog"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/labels"
diff --git a/pkg/webhook/resourceusage/handlers.go b/pkg/webhook/resourceusage/handlers.go
index a515a98d82..c4d86fd765 100644
--- a/pkg/webhook/resourceusage/handlers.go
+++ b/pkg/webhook/resourceusage/handlers.go
@@ -1,7 +1,7 @@
 package resourceusage

 import (
-	so "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	so "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
 	"github.com/golang/glog"

 	corev1 "k8s.io/api/core/v1"
diff --git a/pkg/webhook/resourceusage/util.go b/pkg/webhook/resourceusage/util.go
index 37ee4e27e4..d256f3a733 100644
--- a/pkg/webhook/resourceusage/util.go
+++ b/pkg/webhook/resourceusage/util.go
@@ -2,8 +2,8 @@ package resourceusage

 import (
 	"fmt"
-	so "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config"
+	so "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	"github.com/kubeflow/spark-operator/pkg/config"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
diff --git a/pkg/webhook/resourceusage/watcher.go b/pkg/webhook/resourceusage/watcher.go
index ba0089287f..49395bf11a 100644
--- a/pkg/webhook/resourceusage/watcher.go
+++ b/pkg/webhook/resourceusage/watcher.go
@@ -4,7 +4,7 @@ import (
 	"fmt"
 	"sync"

-	crdinformers "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/informers/externalversions"
+	crdinformers "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions"
 	"github.com/golang/glog"

 	"k8s.io/apimachinery/pkg/api/resource"
diff --git a/pkg/webhook/webhook.go b/pkg/webhook/webhook.go
index 38a9b2479c..661912f29d 100644
--- a/pkg/webhook/webhook.go
+++ b/pkg/webhook/webhook.go
@@ -37,13 +37,13 @@ import (
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes"

-	crdapi "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io"
-	crdv1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-	crinformers "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/informers/externalversions"
-	crdlisters "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta2"
-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config"
-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/util"
-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/webhook/resourceusage"
+	crdapi "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io"
+	crdv1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	crinformers "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions"
+	crdlisters "github.com/kubeflow/spark-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta2"
+	"github.com/kubeflow/spark-operator/pkg/config"
+	"github.com/kubeflow/spark-operator/pkg/util"
+	"github.com/kubeflow/spark-operator/pkg/webhook/resourceusage"
 )

 const (
diff --git a/pkg/webhook/webhook_test.go b/pkg/webhook/webhook_test.go
index 06e2ebefb4..63e7b46f17 100644
--- a/pkg/webhook/webhook_test.go
+++ b/pkg/webhook/webhook_test.go
@@ -30,10 +30,10 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"

-	spov1beta2 "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-	crdclientfake "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned/fake"
-	crdinformers "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/informers/externalversions"
-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config"
+	spov1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	crdclientfake "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/fake"
+	crdinformers "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions"
+	"github.com/kubeflow/spark-operator/pkg/config"
 )

 func TestMutatePod(t *testing.T) {
diff --git a/sparkctl/README.md b/sparkctl/README.md
index d94e4d072c..70bd03535e 100644
--- a/sparkctl/README.md
+++ b/sparkctl/README.md
@@ -2,7 +2,7 @@

 `sparkctl` is a command-line tool of the Spark Operator for creating, listing, checking status of, getting logs of, and deleting `SparkApplication`s. It can also do port forwarding from a local port to the Spark web UI port for accessing the Spark web UI on the driver. Each function is implemented as a sub-command of `sparkctl`.

-To build `sparkctl`, make sure you followed build steps [here](https://github.com/GoogleCloudPlatform/spark-on-k8s-operator/blob/master/docs/developer-guide.md#build-the-operator) and have all the dependencies, then run the following command from within `sparkctl/`:
+To build `sparkctl`, make sure you followed build steps [here](https://github.com/kubeflow/spark-operator/blob/master/docs/developer-guide.md#build-the-operator) and have all the dependencies, then run the following command from within `sparkctl/`:

 ```bash
 $ go build -o sparkctl
diff --git a/sparkctl/cmd/client.go b/sparkctl/cmd/client.go
index 9db135ecb8..b280045045 100644
--- a/sparkctl/cmd/client.go
+++ b/sparkctl/cmd/client.go
@@ -25,8 +25,8 @@ import (
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"

-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-	crdclientset "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned"
+	"github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	crdclientset "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned"
 )

 func buildConfig(kubeConfig string) (*rest.Config, error) {
diff --git a/sparkctl/cmd/create.go b/sparkctl/cmd/create.go
index e70ca5c31e..1809b3d8ff 100644
--- a/sparkctl/cmd/create.go
+++ b/sparkctl/cmd/create.go
@@ -36,8 +36,8 @@ import (
 	"k8s.io/apimachinery/pkg/util/yaml"
 	clientset "k8s.io/client-go/kubernetes"

-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-	crdclientset "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned"
+	"github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	crdclientset "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned"
 )

 const bufferSize = 1024
diff --git a/sparkctl/cmd/create_test.go b/sparkctl/cmd/create_test.go
index 7ff730286a..e319ddfb2e 100644
--- a/sparkctl/cmd/create_test.go
+++ b/sparkctl/cmd/create_test.go
@@ -22,7 +22,7 @@ import (

 	"github.com/stretchr/testify/assert"

-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	"github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
 )

 func TestIsLocalFile(t *testing.T) {
diff --git a/sparkctl/cmd/delete.go b/sparkctl/cmd/delete.go
index 293fc345a2..d6366c7472 100644
--- a/sparkctl/cmd/delete.go
+++ b/sparkctl/cmd/delete.go
@@ -25,7 +25,7 @@ import (

 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

-	crdclientset "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned"
+	crdclientset "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned"
 )

 var deleteCmd = &cobra.Command{
diff --git a/sparkctl/cmd/event.go b/sparkctl/cmd/event.go
index c0c2a5a81c..5553c9c276 100644
--- a/sparkctl/cmd/event.go
+++ b/sparkctl/cmd/event.go
@@ -33,7 +33,7 @@ import (
 	clientWatch "k8s.io/client-go/tools/watch"
 	"k8s.io/kubernetes/pkg/util/interrupt"

-	crdclientset "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned"
+	crdclientset "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned"
 )

 var FollowEvents bool
diff --git a/sparkctl/cmd/forward.go b/sparkctl/cmd/forward.go
index c4a59e979c..dbaeb9c673 100644
--- a/sparkctl/cmd/forward.go
+++ b/sparkctl/cmd/forward.go
@@ -34,7 +34,7 @@ import (
 	"k8s.io/client-go/tools/portforward"
 	"k8s.io/client-go/transport/spdy"

-	crdclientset "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned"
+	crdclientset "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned"
 )

 var LocalPort int32
diff --git a/sparkctl/cmd/list.go b/sparkctl/cmd/list.go
index 463f75e479..0ecbe16bb7 100644
--- a/sparkctl/cmd/list.go
+++ b/sparkctl/cmd/list.go
@@ -26,7 +26,7 @@ import (

 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

-	crdclientset "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned"
+	crdclientset "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned"
 )

 var listCmd = &cobra.Command{
diff --git a/sparkctl/cmd/log.go b/sparkctl/cmd/log.go
index f7ce655fb4..764c21484c 100644
--- a/sparkctl/cmd/log.go
+++ b/sparkctl/cmd/log.go
@@ -29,7 +29,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientset "k8s.io/client-go/kubernetes"

-	crdclientset "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned"
+	crdclientset "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned"
 )

 var ExecutorId int32
diff --git a/sparkctl/cmd/status.go b/sparkctl/cmd/status.go
index 6fc77d177f..8502e72b03 100644
--- a/sparkctl/cmd/status.go
+++ b/sparkctl/cmd/status.go
@@ -23,8 +23,8 @@ import (
 	"github.com/olekukonko/tablewriter"
 	"github.com/spf13/cobra"

-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-	crdclientset "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned"
+	"github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	crdclientset "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned"
 )

 var statusCmd = &cobra.Command{
diff --git a/sparkctl/main.go b/sparkctl/main.go
index 63a103533f..80c89a81b6 100644
--- a/sparkctl/main.go
+++ b/sparkctl/main.go
@@ -19,7 +19,7 @@ package main
 import (
 	_ "k8s.io/client-go/plugin/pkg/client/auth"

-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/sparkctl/cmd"
+	"github.com/kubeflow/spark-operator/sparkctl/cmd"
 )

 func main() {
diff --git a/test/e2e/basic_test.go b/test/e2e/basic_test.go
index db6c703b1a..f6e2edf216 100644
--- a/test/e2e/basic_test.go
+++ b/test/e2e/basic_test.go
@@ -27,7 +27,7 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/util/wait"

-	appFramework "github.com/GoogleCloudPlatform/spark-on-k8s-operator/test/e2e/framework"
+	appFramework "github.com/kubeflow/spark-operator/test/e2e/framework"
 )

 func TestSubmitSparkPiYaml(t *testing.T) {
diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go
index 9480ee970b..a3d7c17a78 100644
--- a/test/e2e/framework/framework.go
+++ b/test/e2e/framework/framework.go
@@ -21,7 +21,7 @@ import (
 	"fmt"
 	"time"

-	crdclientset "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned"
+	crdclientset "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned"
 	"k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
diff --git a/test/e2e/framework/sparkapplication.go b/test/e2e/framework/sparkapplication.go
index 27c0f8264c..b9adab0eb4 100644
--- a/test/e2e/framework/sparkapplication.go
+++ b/test/e2e/framework/sparkapplication.go
@@ -25,8 +25,8 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/yaml"

-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-	crdclientset "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/client/clientset/versioned"
+	"github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	crdclientset "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned"
 )

 func MakeSparkApplicationFromYaml(pathToYaml string) (*v1beta2.SparkApplication, error) {
diff --git a/test/e2e/lifecycle_test.go b/test/e2e/lifecycle_test.go
index be73fe728f..95b93a76b4 100644
--- a/test/e2e/lifecycle_test.go
+++ b/test/e2e/lifecycle_test.go
@@ -27,8 +27,8 @@ import (
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/util/wait"

-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-	appFramework "github.com/GoogleCloudPlatform/spark-on-k8s-operator/test/e2e/framework"
+	"github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	appFramework "github.com/kubeflow/spark-operator/test/e2e/framework"
 )

 func TestLifeCycleManagement(t *testing.T) {
diff --git a/test/e2e/main_test.go b/test/e2e/main_test.go
index 12b08b7c2d..07b0a19ee2 100644
--- a/test/e2e/main_test.go
+++ b/test/e2e/main_test.go
@@ -18,14 +18,14 @@ package e2e

 import (
 	"flag"
-	"github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
+	"github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
 	"github.com/stretchr/testify/assert"
 	"log"
 	"os"
 	"testing"
 	"time"

-	operatorFramework "github.com/GoogleCloudPlatform/spark-on-k8s-operator/test/e2e/framework"
+	operatorFramework "github.com/kubeflow/spark-operator/test/e2e/framework"
 )

 var framework *operatorFramework.Framework
diff --git a/test/e2e/volume_mount_test.go b/test/e2e/volume_mount_test.go
index 5b8aea4343..2bb78a5012 100644
--- a/test/e2e/volume_mount_test.go
+++ b/test/e2e/volume_mount_test.go
@@ -29,7 +29,7 @@ import (
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/kubectl/pkg/describe"

-	appFramework "github.com/GoogleCloudPlatform/spark-on-k8s-operator/test/e2e/framework"
+	appFramework "github.com/kubeflow/spark-operator/test/e2e/framework"
 )

 type describeClient struct {

From 835281d4d3110d265687a9d5b7504a261538af10 Mon Sep 17 00:00:00 2001
From: Andrii Chubatiuk
Date: Thu, 11 Apr 2024 07:46:47 +0300
Subject: [PATCH 35/87] support multiple namespaces (#1955)

* support multiple namespaces

Signed-off-by: Andrew Chubatiuk

* bump helm chart version

Signed-off-by: Andrew Chubatiuk

---------

Signed-off-by: Andrew Chubatiuk
---
 .github/workflows/main.yaml             | 13 +++++++++++-
 .github/workflows/release.yaml          | 21 +++++++++++++------
 charts/spark-operator-chart/Chart.yaml  |  4 ++--
 charts/spark-operator-chart/README.md   |  2 +-
 .../templates/deployment.yaml           |  7 +++++-
 .../templates/spark-rbac.yaml           | 18 +++++++++-------
 .../templates/spark-serviceaccount.yaml | 13 +++++++-----
 charts/spark-operator-chart/values.yaml |  4 ++--
 8 files changed, 56 insertions(+), 26 deletions(-)

diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml
index 46918dc3f3..105fc8b095 100644
--- a/.github/workflows/main.yaml
+++ b/.github/workflows/main.yaml
@@ -16,7 +16,18 @@ jobs:
     - name: Checkout source code
       uses: actions/checkout@v4
       with:
-        fetch-depth: "0"
+        fetch-depth: '0'
+
+    - name: The API should not change once published
+      run: |
+        if ! git diff --quiet origin/master -- pkg/apis/sparkoperator.k8s.io/v1beta1; then
+          echo "sparkoperator.k8s.io/v1beta1 api has changed"
+          false
+        fi
+        if ! git diff --quiet origin/master -- pkg/apis/sparkoperator.k8s.io/v1beta2; then
+          echo "sparkoperator.k8s.io/v1beta2 api has changed"
+          false
+        fi

     - name: The API documentation hasn't changed
       run: |
diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index 2e46f1226f..6d6f190686 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -14,6 +14,10 @@ jobs:
       skip: ${{ steps.skip-check.outputs.skip }}
       version: ${{ steps.skip-check.outputs.VERSION_TAG }}
     steps:
+      - name: Checkout source code
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: '0'
      - name: Check if build should be skipped
        id: skip-check
        run: |
@@ -48,6 +52,9 @@
        run: |
          git config user.name "$GITHUB_ACTOR"
          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
+         platform=${{ matrix.platform }}
+         echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
+         echo "SCOPE=${platform//\//-}" >> $GITHUB_ENV

      - name: Set up QEMU
        timeout-minutes: 1
@@ -69,8 +76,8 @@
        with:
          context: .
          platforms: ${{ matrix.platform }}
-         cache-to: type=gha,mode=max
-         cache-from: type=gha
+         cache-to: type=gha,mode=max,scope=${{ env.SCOPE }}
+         cache-from: type=gha,scope=${{ env.SCOPE }}
          push: true
          outputs: type=image,name=${{ env.REGISTRY_IMAGE }},push-by-digest=true,name-canonical=true,push=true
      - name: Export digest
@@ -81,7 +88,7 @@
      - name: Upload digest
        uses: actions/upload-artifact@v4
        with:
-         name: digests
+         name: digests-${{ env.PLATFORM_PAIR }}
         path: /tmp/digests/*
         if-no-files-found: error
         retention-days: 1
@@ -95,8 +102,9 @@
      - name: Download digests
        uses: actions/download-artifact@v4
        with:
-         name: digests
+         pattern: digests-*
         path: /tmp/digests
+         merge-multiple: true
      - name: Setup Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Docker meta
@@ -108,8 +116,9 @@
      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
-         username: ${{ vars.DOCKER_USER }}
-         password: ${{ secrets.DOCKER_PASS }}
+         registry: ghcr.io
+         username: ${{ github.actor }}
+         password: ${{ secrets.GITHUB_TOKEN }}
      - name: Create manifest list and push
        working-directory: /tmp/digests
        run: |
diff --git a/charts/spark-operator-chart/Chart.yaml b/charts/spark-operator-chart/Chart.yaml
index 7a8ea4364d..e15408bff1 100644
--- a/charts/spark-operator-chart/Chart.yaml
+++ b/charts/spark-operator-chart/Chart.yaml
@@ -1,8 +1,8 @@
 apiVersion: v2
 name: spark-operator
 description: A Helm chart for Spark on Kubernetes operator
-version: 1.2.1
-appVersion: v1beta2-1.3.8-3.5.0
+version: 1.2.2
+appVersion: v1beta2-1.4.0-3.5.0
 keywords:
 - spark
 home: https://github.com/kubeflow/spark-operator
diff --git a/charts/spark-operator-chart/README.md b/charts/spark-operator-chart/README.md
index 7e98f381b2..9dd82c676b 100644
--- a/charts/spark-operator-chart/README.md
+++ b/charts/spark-operator-chart/README.md
@@ -126,7 +126,7 @@ All charts linted successfully
 | serviceAccounts.sparkoperator.create | bool | `true` | Create a service account for the operator |
 | serviceAccounts.sparkoperator.name | string | `""` | Optional name for the operator service account |
 | sidecars | list | `[]` | Sidecar containers |
-| sparkJobNamespace | string | `""` | Set this if running spark jobs in a different namespace than the operator |
+| sparkJobNamespaces | list | `[]` | List of namespaces where to run spark jobs, operator namespace is included only when list of namespaces is empty |
 | tolerations | list | `[]` | List of node taints to tolerate |
 | uiService.enable | bool | `true` | Enable UI service creation for Spark application |
 | volumeMounts | list | `[]` |  |
diff --git a/charts/spark-operator-chart/templates/deployment.yaml b/charts/spark-operator-chart/templates/deployment.yaml
index 9deacae43f..9d99028b34 100644
--- a/charts/spark-operator-chart/templates/deployment.yaml
+++ b/charts/spark-operator-chart/templates/deployment.yaml
@@ -3,7 +3,8 @@
 # In the post-install hook, the token corresponding to the operator service account
 # is used to authenticate with the Kubernetes API server to install the secret bundle.
-
+{{- $jobNamespaces := .Values.sparkJobNamespaces | default (list .Release.Namespace) }}
+---
 apiVersion: apps/v1
 kind: Deployment
 metadata:
@@ -59,7 +60,9 @@ spec:
         args:
         - -v={{ .Values.logLevel }}
        - -logtostderr
-        - -namespace={{ .Values.sparkJobNamespace }}
+        {{- if le (len $jobNamespaces) 1 }}
+        - -namespace={{ index $jobNamespaces 0 }}
+        {{- end }}
        - -enable-ui-service={{ .Values.uiService.enable}}
        - -ingress-url-format={{ .Values.ingressUrlFormat }}
        - -controller-threads={{ .Values.controllerThreads }}
diff --git a/charts/spark-operator-chart/templates/spark-rbac.yaml b/charts/spark-operator-chart/templates/spark-rbac.yaml
index 1ac7a92e5e..2b645538b0 100644
--- a/charts/spark-operator-chart/templates/spark-rbac.yaml
+++ b/charts/spark-operator-chart/templates/spark-rbac.yaml
@@ -1,11 +1,14 @@
 {{- if or .Values.rbac.create .Values.rbac.createRole }}
+{{- $jobNamespaces := .Values.sparkJobNamespaces | default (list .Release.Namespace) }}
+{{- range $jobNamespace := $jobNamespaces }}
+---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: Role
 metadata:
   name: spark-role
-  namespace: {{ default .Release.Namespace .Values.sparkJobNamespace }}
+  namespace: {{ $jobNamespace }}
   labels:
-    {{- include "spark-operator.labels" . | nindent 4 }}
+    {{- include "spark-operator.labels" $ | nindent 4 }}
 rules:
 - apiGroups:
   - ""
@@ -32,20 +35,21 @@ rules:
   verbs:
   - "*"
 ---
-
 apiVersion: rbac.authorization.k8s.io/v1
 kind: RoleBinding
 metadata:
   name: spark
-  namespace: {{ default .Release.Namespace .Values.sparkJobNamespace }}
+  namespace: {{ $jobNamespace }}
   labels:
-    {{- include "spark-operator.labels" . | nindent 4 }}
+    {{- include "spark-operator.labels" $ | nindent 4 }}
 subjects:
 - kind: ServiceAccount
-  name: {{ include "spark.serviceAccountName" . }}
-  namespace: {{ default .Release.Namespace .Values.sparkJobNamespace }}
+  name: {{ include "spark.serviceAccountName" $ }}
+  namespace: {{ $jobNamespace }}
 roleRef:
   kind: Role
   name: spark-role
+  namespace: {{ $jobNamespace }}
   apiGroup: rbac.authorization.k8s.io
 {{- end }}
+{{- end }}
diff --git a/charts/spark-operator-chart/templates/spark-serviceaccount.yaml b/charts/spark-operator-chart/templates/spark-serviceaccount.yaml
index cb8330bfcf..547aee59b9 100644
--- a/charts/spark-operator-chart/templates/spark-serviceaccount.yaml
+++ b/charts/spark-operator-chart/templates/spark-serviceaccount.yaml
@@ -1,13 +1,16 @@
 {{- if .Values.serviceAccounts.spark.create }}
+{{- range $sparkJobNamespace := .Values.sparkJobNamespaces | default (list .Release.Namespace) }}
+---
 apiVersion: v1
 kind: ServiceAccount
 metadata:
-  name: {{ include "spark.serviceAccountName" . }}
-  namespace: {{ default .Release.Namespace .Values.sparkJobNamespace }}
-{{- with .Values.serviceAccounts.spark.annotations }}
+  name: {{ include "spark.serviceAccountName" $ }}
+  namespace: {{ $sparkJobNamespace }}
+{{- with $.Values.serviceAccounts.spark.annotations }}
   annotations:
-{{ toYaml . | indent 4 }}
+{{ toYaml $ | indent 4 }}
 {{- end }}
   labels:
-    {{- include "spark-operator.labels" . | nindent 4 }}
+    {{- include "spark-operator.labels" $ | nindent 4 }}
+{{- end }}
 {{- end }}
diff --git a/charts/spark-operator-chart/values.yaml b/charts/spark-operator-chart/values.yaml
index 349265ca5b..24a2f72702 100644
--- a/charts/spark-operator-chart/values.yaml
+++ b/charts/spark-operator-chart/values.yaml
@@ -52,8 +52,8 @@ serviceAccounts:
   # -- Optional annotations for the operator service account
   annotations: {}

-# -- Set this if running spark jobs in a different namespace than the operator
-sparkJobNamespace: ""
+# -- List of namespaces where to run spark jobs, operator namespace is included only when list of namespaces is empty
+sparkJobNamespaces: []

 # -- Operator concurrency, higher values might increase memory usage
 controllerThreads: 10

From 4fd6a54b3cf71dce0de00f52af2fe25b76ea45ad Mon Sep 17 00:00:00 2001
From: Andrii Chubatiuk
Date: Thu, 11 Apr 2024 19:52:47 +0300
Subject: [PATCH 36/87] added id for a build job to fix digests artifact
 creation (#1963)

* added id for a build job to fix digests artifact creation

Signed-off-by: Andrew Chubatiuk

* configure user for helm publish

Signed-off-by: Andrew Chubatiuk

---------

Signed-off-by: Andrew Chubatiuk
---
 .github/workflows/release.yaml | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index 6d6f190686..10ac15547c 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -72,6 +72,7 @@ jobs:
      - name: Build and Push Spark-Operator Docker Image to github container registry
+        id: build
        uses: docker/build-push-action@v5
        with:
          context: .
@@ -127,6 +128,10 @@
      - name: Inspect image
        run: |
          docker buildx imagetools inspect ${{ env.REGISTRY_IMAGE }}:${{ steps.meta.outputs.version }}
+      - name: Configure Git
+        run: |
+          git config user.name "$GITHUB_ACTOR"
+          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
      - name: Release Spark-Operator Helm Chart
        uses: helm/chart-releaser-action@v1.6.0
        env:

From 59badbfed9970ffee60c9fe4fd14e5359c56bd32 Mon Sep 17 00:00:00 2001
From: Zev Isert
Date: Thu, 11 Apr 2024 10:45:47 -0700
Subject: [PATCH 37/87] fix: add containerPort declaration for webhook in helm
 chart (#1961)

* fix: add containerPort declaration for webhook in helm chart

Signed-off-by: Zev Isert

* docs: update helm chart readme

Signed-off-by: Zev Isert

* fix: copied helm value should be for webhook

Signed-off-by: Zev Isert

* build: bump helm chart to 1.2.3

Signed-off-by: Zev Isert

* style: undo unrelated editor autoformatting

Signed-off-by: Zev Isert

---------

Signed-off-by: Zev Isert
Co-authored-by: Mason Legere
---
 charts/spark-operator-chart/Chart.yaml                  | 2 +-
 charts/spark-operator-chart/README.md                   | 1 +
 charts/spark-operator-chart/templates/deployment.yaml   | 8 +++++++-
 .../spark-operator-chart/templates/webhook-service.yaml | 2 +-
 charts/spark-operator-chart/values.yaml                 | 2 ++
 5 files changed, 12 insertions(+), 3 deletions(-)

diff --git a/charts/spark-operator-chart/Chart.yaml b/charts/spark-operator-chart/Chart.yaml
index e15408bff1..d4562c68de 100644
--- a/charts/spark-operator-chart/Chart.yaml
+++ b/charts/spark-operator-chart/Chart.yaml
@@ -1,7 +1,7 @@
 apiVersion: v2
 name: spark-operator
 description: A Helm chart for Spark on Kubernetes operator
-version: 1.2.2
+version: 1.2.3
 appVersion: v1beta2-1.4.0-3.5.0
 keywords:
 - spark
diff --git a/charts/spark-operator-chart/README.md b/charts/spark-operator-chart/README.md
index 9dd82c676b..1542c3ee2f 100644
---
a/charts/spark-operator-chart/README.md +++ b/charts/spark-operator-chart/README.md @@ -140,6 +140,7 @@ All charts linted successfully | webhook.initResources | object | `{}` | Resources applied to init job | | webhook.namespaceSelector | string | `""` | The webhook server will only operate on namespaces with this label, specified in the form key1=value1,key2=value2. Empty string (default) will operate on all namespaces | | webhook.port | int | `8080` | Webhook service port | +| webhook.portName | string | `"webhook"` | Webhook container port name and service target port name | | webhook.timeout | int | `30` | | ## Maintainers diff --git a/charts/spark-operator-chart/templates/deployment.yaml b/charts/spark-operator-chart/templates/deployment.yaml index 9d99028b34..f9ea202702 100644 --- a/charts/spark-operator-chart/templates/deployment.yaml +++ b/charts/spark-operator-chart/templates/deployment.yaml @@ -52,10 +52,16 @@ spec: {{- toYaml .Values.envFrom | nindent 10 }} securityContext: {{- toYaml .Values.securityContext | nindent 10 }} - {{- if .Values.metrics.enable }} + {{- if or .Values.metrics.enable .Values.webhook.enable }} ports: + {{ if .Values.metrics.enable }} - name: {{ .Values.metrics.portName | quote }} containerPort: {{ .Values.metrics.port }} + {{ end }} + {{ if .Values.webhook.enable }} + - name: {{ .Values.webhook.portName | quote }} + containerPort: {{ .Values.webhook.port }} + {{ end }} {{ end }} args: - -v={{ .Values.logLevel }} diff --git a/charts/spark-operator-chart/templates/webhook-service.yaml b/charts/spark-operator-chart/templates/webhook-service.yaml index babe7f4f6c..a26375db67 100644 --- a/charts/spark-operator-chart/templates/webhook-service.yaml +++ b/charts/spark-operator-chart/templates/webhook-service.yaml @@ -8,7 +8,7 @@ metadata: spec: ports: - port: 443 - targetPort: {{ .Values.webhook.port }} + targetPort: {{ .Values.webhook.portName | quote }} name: webhook selector: {{- include "spark-operator.selectorLabels" . | nindent 4 }} diff --git a/charts/spark-operator-chart/values.yaml b/charts/spark-operator-chart/values.yaml index 24a2f72702..de1aee6d32 100644 --- a/charts/spark-operator-chart/values.yaml +++ b/charts/spark-operator-chart/values.yaml @@ -96,6 +96,8 @@ webhook: enable: false # -- Webhook service port port: 8080 + # -- Webhook container port name and service target port name + portName: webhook # -- The webhook server will only operate on namespaces with this label, specified in the form key1=value1,key2=value2. 
# Empty string (default) will operate on all namespaces namespaceSelector: "" From 179ad8b1cfecaf294609abe1e0e2050cae53bdd0 Mon Sep 17 00:00:00 2001 From: Yi Chen Date: Sat, 13 Apr 2024 11:18:48 +0800 Subject: [PATCH 38/87] Update helm chart README (#1958) Signed-off-by: Yi Chen --- .pre-commit-config.yaml | 9 +++ charts/spark-operator-chart/README.md | 68 +++++++++---------- charts/spark-operator-chart/README.md.gotmpl | 70 ++++++++++---------- 3 files changed, 79 insertions(+), 68 deletions(-) create mode 100644 .pre-commit-config.yaml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000000..0385ecba56 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,9 @@ +repos: + - repo: https://github.com/norwoodj/helm-docs + rev: "v1.13.1" + hooks: + - id: helm-docs + args: + # Make the tool search for charts only under the `charts` directory + - --chart-search-root=charts + - --template-files=README.md.gotmpl diff --git a/charts/spark-operator-chart/README.md b/charts/spark-operator-chart/README.md index 1542c3ee2f..6a1ffae49c 100644 --- a/charts/spark-operator-chart/README.md +++ b/charts/spark-operator-chart/README.md @@ -1,7 +1,11 @@ # spark-operator +![Version: 1.2.1](https://img.shields.io/badge/Version-1.2.1-informational?style=flat-square) ![AppVersion: v1beta2-1.3.8-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.3.8--3.5.0-informational?style=flat-square) + A Helm chart for Spark on Kubernetes operator +**Homepage:** + ## Introduction This chart bootstraps a [Kubernetes Operator for Apache Spark](https://github.com/kubeflow/spark-operator) deployment using the [Helm](https://helm.sh) package manager. @@ -19,62 +23,58 @@ The previous `spark-operator` Helm chart hosted at [helm/charts](https://github. - Previous versions of the Helm chart have not been migrated, and the version has been set to `1.0.0` at the onset. If you are looking for old versions of the chart, it's best to run `helm pull incubator/sparkoperator --version ` until you are ready to move to this repository's version. - Several configuration properties have been changed, carefully review the [values](#values) section below to make sure you're aligned with the new values. -## Installing the chart +## Usage -```shell +### Add Helm Repo -$ helm repo add spark-operator https://kubeflow.github.io/spark-operator +```shell +helm repo add spark-operator https://kubeflow.github.io/spark-operator -$ helm install my-release spark-operator/spark-operator +helm repo update ``` -This will create a release of `spark-operator` in the default namespace. To install in a different one: +See [helm repo](https://helm.sh/docs/helm/helm_repo) for command documentation. + +### Install the chart ```shell -$ helm install -n spark my-release spark-operator/spark-operator +helm install [RELEASE_NAME] spark-operator/spark-operator ``` -Note that `helm` will fail to install if the namespace doesn't exist. Either create the namespace beforehand or pass the `--create-namespace` flag to the `helm install` command. +For example, if you want to create a release with name `spark-operator` in the `default` namespace: -## Uninstalling the chart +```shell +helm install spark-operator spark-operator/spark-operator +``` -To uninstall `my-release`: +Note that `helm` will fail to install if the namespace doesn't exist. Either create the namespace beforehand or pass the `--create-namespace` flag to the `helm install` command. 
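Creating the namespace beforehand is a single `kubectl` call (an illustrative sketch, not part of the chart docs; the namespace name matches the install example that follows):

```shell
# Create the target namespace up front instead of passing --create-namespace:
kubectl create namespace spark-operator
```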
```shell -$ helm uninstall my-release +helm install spark-operator spark-operator/spark-operator \ + --namespace spark-operator \ + --create-namespace ``` -The command removes all the Kubernetes components associated with the chart and deletes the release, except for the `crds`, those will have to be removed manually. +See [helm install](https://helm.sh/docs/helm/helm_install) for command documentation. -## Test the chart +### Upgrade the chart -Install [chart-testing cli](https://github.com/helm/chart-testing#installation) +```shell +helm upgrade [RELEASE_NAME] spark-operator/spark-operator [flags] +``` -In Mac OS, you can just: +See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade) for command documentation. -```bash -pip install yamale -pip install yamllint -brew install chart-testing -``` +### Uninstall the chart -Run ct lint and Verify `All charts linted successfully` - -```bash -Chart version ok. -Validating /Users/chethanuk/Work/Github/Personal/spark-on-k8s-operator-1/charts/spark-operator-chart/Chart.yaml... -Validation success! 👍 -Validating maintainers... -==> Linting charts/spark-operator-chart -[INFO] Chart.yaml: icon is recommended - -1 chart(s) linted, 0 chart(s) failed ------------------------------------------------------------------------------------------------------------------------- - ✔︎ spark-operator => (version: "1.1.0", path: "charts/spark-operator-chart") ------------------------------------------------------------------------------------------------------------------------- -All charts linted successfully +```shell +helm uninstall [RELEASE_NAME] ``` +This removes all the Kubernetes resources associated with the chart and deletes the release, except for the `crds`, those will have to be removed manually. + +See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall) for command documentation. + ## Values | Key | Type | Default | Description | diff --git a/charts/spark-operator-chart/README.md.gotmpl b/charts/spark-operator-chart/README.md.gotmpl index 070a2a82ad..a20ed517ee 100644 --- a/charts/spark-operator-chart/README.md.gotmpl +++ b/charts/spark-operator-chart/README.md.gotmpl @@ -1,7 +1,13 @@ {{ template "chart.header" . }} +{{ template "chart.deprecationWarning" . }} + +{{ template "chart.badgesSection" . }} + {{ template "chart.description" . }} +{{ template "chart.homepageLine" . }} + ## Introduction This chart bootstraps a [Kubernetes Operator for Apache Spark]({{template "chart.homepage" . }}) deployment using the [Helm](https://helm.sh) package manager. @@ -19,61 +25,57 @@ The previous `spark-operator` Helm chart hosted at [helm/charts](https://github. - Previous versions of the Helm chart have not been migrated, and the version has been set to `1.0.0` at the onset. If you are looking for old versions of the chart, it's best to run `helm pull incubator/sparkoperator --version ` until you are ready to move to this repository's version. - Several configuration properties have been changed, carefully review the [values](#values) section below to make sure you're aligned with the new values. -## Installing the chart +## Usage -```shell +### Add Helm Repo -$ helm repo add spark-operator https://kubeflow.github.io/spark-operator +```shell +helm repo add spark-operator https://kubeflow.github.io/spark-operator -$ helm install my-release spark-operator/spark-operator +helm repo update ``` -This will create a release of `spark-operator` in the default namespace. 
To install in a different one: +See [helm repo](https://helm.sh/docs/helm/helm_repo) for command documentation. + +### Install the chart ```shell -$ helm install -n spark my-release spark-operator/spark-operator +helm install [RELEASE_NAME] spark-operator/spark-operator ``` -Note that `helm` will fail to install if the namespace doesn't exist. Either create the namespace beforehand or pass the `--create-namespace` flag to the `helm install` command. +For example, if you want to create a release with name `spark-operator` in the `default` namespace: -## Uninstalling the chart +```shell +helm install spark-operator spark-operator/spark-operator +``` -To uninstall `my-release`: +Note that `helm` will fail to install if the namespace doesn't exist. Either create the namespace beforehand or pass the `--create-namespace` flag to the `helm install` command. ```shell -$ helm uninstall my-release +helm install spark-operator spark-operator/spark-operator \ + --namespace spark-operator \ + --create-namespace ``` -The command removes all the Kubernetes components associated with the chart and deletes the release, except for the `crds`, those will have to be removed manually. +See [helm install](https://helm.sh/docs/helm/helm_install) for command documentation. -## Test the chart +### Upgrade the chart + +```shell +helm upgrade [RELEASE_NAME] spark-operator/spark-operator [flags] +``` -Install [chart-testing cli](https://github.com/helm/chart-testing#installation) +See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade) for command documentation. -In Mac OS, you can just: +### Uninstall the chart -```bash -pip install yamale -pip install yamllint -brew install chart-testing +```shell +helm uninstall [RELEASE_NAME] ``` -Run ct lint and Verify `All charts linted successfully` - -```bash -Chart version ok. -Validating /Users/chethanuk/Work/Github/Personal/spark-on-k8s-operator-1/charts/spark-operator-chart/Chart.yaml... -Validation success! 👍 -Validating maintainers... -==> Linting charts/spark-operator-chart -[INFO] Chart.yaml: icon is recommended - -1 chart(s) linted, 0 chart(s) failed ------------------------------------------------------------------------------------------------------------------------- - ✔︎ spark-operator => (version: "1.1.0", path: "charts/spark-operator-chart") ------------------------------------------------------------------------------------------------------------------------- -All charts linted successfully -``` +This removes all the Kubernetes resources associated with the chart and deletes the release, except for the `crds`, those will have to be removed manually. + +See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall) for command documentation. {{ template "chart.valuesSection" . 
}} From 76a7947816d6bcc61c706212ab24d1c782d24add Mon Sep 17 00:00:00 2001 From: Andrii Chubatiuk Date: Sat, 13 Apr 2024 06:19:48 +0300 Subject: [PATCH 39/87] publish chart independently, incremented both chart and image versions to trigger build of both (#1964) * publish chart independently, incremented both chart and image versions to trigger build of both Signed-off-by: Andrew Chubatiuk * bump chart version Signed-off-by: Andrew Chubatiuk --------- Signed-off-by: Andrew Chubatiuk --- .github/workflows/release.yaml | 15 ++++++++++++--- charts/spark-operator-chart/Chart.yaml | 4 ++-- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 10ac15547c..dee4e62436 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -17,7 +17,7 @@ jobs: - name: Checkout source code uses: actions/checkout@v4 with: - fetch-depth: '0' + fetch-depth: 0 - name: Check if build should be skipped id: skip-check run: | @@ -55,7 +55,6 @@ jobs: platform=${{ matrix.platform }} echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV echo "SCOPE=${platform//\//-}" >> $GITHUB_ENV - - name: Set up QEMU timeout-minutes: 1 uses: docker/setup-qemu-action@v3 @@ -93,7 +92,7 @@ jobs: path: /tmp/digests/* if-no-files-found: error retention-days: 1 - publish: + publish-image: runs-on: ubuntu-latest needs: - release @@ -128,6 +127,16 @@ jobs: - name: Inspect image run: | docker buildx imagetools inspect ${{ env.REGISTRY_IMAGE }}:${{ steps.meta.outputs.version }} + publish-chart: + runs-on: ubuntu-latest + if: needs.publish-image.result == 'success' || needs.publish-image.result == 'skipped' + needs: + - publish-image + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 - name: Configure Git run: | git config user.name "$GITHUB_ACTOR" diff --git a/charts/spark-operator-chart/Chart.yaml b/charts/spark-operator-chart/Chart.yaml index d4562c68de..659ad44f7e 100644 --- a/charts/spark-operator-chart/Chart.yaml +++ b/charts/spark-operator-chart/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 name: spark-operator description: A Helm chart for Spark on Kubernetes operator -version: 1.2.3 -appVersion: v1beta2-1.4.0-3.5.0 +version: 1.2.4 +appVersion: v1beta2-1.4.1-3.5.0 keywords: - spark home: https://github.com/kubeflow/spark-operator From 39b79d733b95d6ffe39a5ab50a65f92d03b62cc8 Mon Sep 17 00:00:00 2001 From: Andrii Chubatiuk Date: Sun, 14 Apr 2024 08:59:49 +0300 Subject: [PATCH 40/87] fixed docker image tag and updated chart docs (#1969) Signed-off-by: Andrew Chubatiuk --- .github/workflows/release.yaml | 2 +- charts/spark-operator-chart/Chart.yaml | 4 ++-- charts/spark-operator-chart/README.md | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index dee4e62436..bbc8dabd0b 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -112,7 +112,7 @@ jobs: uses: docker/metadata-action@v5 with: images: ${{ env.REGISTRY_IMAGE }} - tags: preview + tags: ${{ needs.build-skip-check.outputs.version }} - name: Login to Docker Hub uses: docker/login-action@v3 with: diff --git a/charts/spark-operator-chart/Chart.yaml b/charts/spark-operator-chart/Chart.yaml index 659ad44f7e..0aeb82f869 100644 --- a/charts/spark-operator-chart/Chart.yaml +++ b/charts/spark-operator-chart/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 name: spark-operator description: A Helm chart for Spark on Kubernetes operator -version: 1.2.4 -appVersion: 
v1beta2-1.4.1-3.5.0 +version: 1.2.5 +appVersion: v1beta2-1.4.2-3.5.0 keywords: - spark home: https://github.com/kubeflow/spark-operator diff --git a/charts/spark-operator-chart/README.md b/charts/spark-operator-chart/README.md index 6a1ffae49c..d1382b5206 100644 --- a/charts/spark-operator-chart/README.md +++ b/charts/spark-operator-chart/README.md @@ -1,6 +1,6 @@ # spark-operator -![Version: 1.2.1](https://img.shields.io/badge/Version-1.2.1-informational?style=flat-square) ![AppVersion: v1beta2-1.3.8-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.3.8--3.5.0-informational?style=flat-square) +![Version: 1.2.5](https://img.shields.io/badge/Version-1.2.5-informational?style=flat-square) ![AppVersion: v1beta2-1.4.2-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.4.2--3.5.0-informational?style=flat-square) A Helm chart for Spark on Kubernetes operator From 69e984135217146f321f46db9ab0fd1ee3340fcb Mon Sep 17 00:00:00 2001 From: Vara Bonthu Date: Sat, 13 Apr 2024 23:24:49 -0700 Subject: [PATCH 41/87] feat: Doc updates, Issue and PR templates are added (#1970) * Issue templates are added to the repo Signed-off-by: Vara Bonthu * removed Google CLA requirement Signed-off-by: Vara Bonthu * Updated ghcr.io registry references in the workflow Signed-off-by: Vara Bonthu * Added Pull request template Signed-off-by: Vara Bonthu * Updated Main README.md with Kubeflow header and new Slack channel link Signed-off-by: Vara Bonthu * Removed the License header and it will be replaced with Kubeflow guidelines Signed-off-by: Vara Bonthu * Revert "Removed the License header and it will be replaced with Kubeflow guidelines" This reverts commit b892f5c7fa0398cff8b85f961bd292313ef47953. Signed-off-by: Vara Bonthu * Readme line revert for gcp docs Signed-off-by: Vara Bonthu * pre-commit run -a updates Signed-off-by: Vara Bonthu * fixed the helm lint issue by upgrading the Helm chart version Signed-off-by: Vara Bonthu * fixed docker image tag and updated chart docs (#1969) Signed-off-by: Andrew Chubatiuk Signed-off-by: Vara Bonthu * rebase from master Signed-off-by: Vara Bonthu --------- Signed-off-by: Vara Bonthu Signed-off-by: Andrew Chubatiuk Co-authored-by: Andrii Chubatiuk --- .github/ISSUE_TEMPLATE/bug_report.md | 46 +++++++++++++++++++++++ .github/ISSUE_TEMPLATE/feature_request.md | 32 ++++++++++++++++ .github/ISSUE_TEMPLATE/question.md | 20 ++++++++++ .github/PULL_REQUEST_TEMPLATE.md | 38 +++++++++++++++++++ .github/workflows/main.yaml | 10 ++--- CONTRIBUTING.md | 12 ------ README.md | 46 +++++++++++------------ 7 files changed, 163 insertions(+), 41 deletions(-) create mode 100644 .github/ISSUE_TEMPLATE/bug_report.md create mode 100644 .github/ISSUE_TEMPLATE/feature_request.md create mode 100644 .github/ISSUE_TEMPLATE/question.md create mode 100644 .github/PULL_REQUEST_TEMPLATE.md diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000000..fb28d21ff9 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,46 @@ +--- +name: Bug report +about: Create a report to help us improve +title: '[BUG] Brief description of the issue' +labels: bug +--- + +## Description +Please provide a clear and concise description of the issue you are encountering, and a reproduction of your configuration. + +If your request is for a new feature, please use the `Feature request` template. + +- [ ] ✋ I have searched the open/closed issues and my issue is not listed. 
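As an aside on the release workflow changes in the preceding patches: each platform job pushes the image by digest only, and the publish job merges the collected digests into a single tagged manifest list. A rough sketch of the equivalent manual commands (the image name and tag are assumptions inferred from the GHCR login and the chart `appVersion` in this series; the digests are placeholders):

```shell
# Per-platform job: push an untagged image, identified only by its digest.
docker buildx build --platform linux/arm64 \
  --output type=image,name=ghcr.io/kubeflow/spark-operator,push-by-digest=true,name-canonical=true,push=true .

# Publish job: stitch the collected digests into one multi-arch tag.
docker buildx imagetools create -t ghcr.io/kubeflow/spark-operator:v1beta2-1.4.2-3.5.0 \
  ghcr.io/kubeflow/spark-operator@sha256:<amd64-digest> \
  ghcr.io/kubeflow/spark-operator@sha256:<arm64-digest>
```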
+ +## Reproduction Code [Required] + + + +Steps to reproduce the behavior: + + +## Expected behavior + + + +## Actual behavior + + + +### Terminal Output Screenshot(s) + + + + +## Environment & Versions + +- Spark Operator App version: +- Helm Chart Version: +- Kubernetes Version: +- Apache Spark version: + +## Additional context + + + + diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000000..1992633792 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,32 @@ +--- +name: Feature request +about: Suggest an idea for this project +title: '[FEATURE] Brief description of the feature' +labels: enhancement +--- + + + +### Community Note + +* Please vote on this issue by adding a 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/) to the original issue to help the community and maintainers prioritize this request +* Please do not leave "+1" or other comments that do not add relevant new information or questions, they generate extra noise for issue followers and do not help prioritize the request +* If you are interested in working on this issue or have submitted a pull request, please leave a comment + + + +#### What is the outcome that you are trying to reach? + + + +#### Describe the solution you would like + + + +#### Describe alternatives you have considered + + + +#### Additional context + + diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md new file mode 100644 index 0000000000..647c8bc90d --- /dev/null +++ b/.github/ISSUE_TEMPLATE/question.md @@ -0,0 +1,20 @@ +--- +name: Question +about: I have a Question +title: '[QUESTION] Brief description of the Question' +labels: question +--- + +- [ ] ✋ I have searched the open/closed issues and my issue is not listed. + +#### Please describe your question here + + + +#### Provide a link to the example/module related to the question + + + +#### Additional context + + diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000000..0a09adb741 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,38 @@ +### 🛑 Important: +Please open an issue to discuss significant work before you start. We appreciate your contributions and don't want your efforts to go to waste! + +For guidelines on how to contribute, please review the [CONTRIBUTING.md](CONTRIBUTING.md) document. + +## Purpose of this PR +Provide a clear and concise description of the changes. Explain the motivation behind these changes and link to relevant issues or discussions. + +**Proposed changes:** +- +- +- + +## Change Category +Indicate the type of change by marking the applicable boxes: + +- [ ] Bugfix (non-breaking change which fixes an issue) +- [ ] Feature (non-breaking change which adds functionality) +- [ ] Breaking change (fix or feature that could affect existing functionality) +- [ ] Documentation update + +### Rationale + + + + +## Checklist +Before submitting your PR, please review the following: + +- [ ] I have conducted a self-review of my own code. +- [ ] I have updated documentation accordingly. +- [ ] I have added tests that prove my changes are effective or that my feature works. +- [ ] Existing unit tests pass locally with my changes. 
+ +### Additional Notes + + + diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index 105fc8b095..30f8435ce6 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -79,7 +79,7 @@ jobs: - name: Build Spark-Operator Docker Image run: | - docker build -t gcr.io/spark-operator/spark-operator:latest . + docker build -t ghcr.io/kubeflow/spark-operator:latest . - name: Check changes in resources used in docker file run: | @@ -89,7 +89,7 @@ jobs: if ! git diff --quiet origin/master -- $resource; then ## And the appVersion hasn't been updated if ! git diff origin/master -- charts/spark-operator-chart/Chart.yaml | grep +appVersion; then - echo "resource used in gcr.io/spark-operator/spark-operator has changed in $resource, need to update the appVersion in charts/spark-operator-chart/Chart.yaml" + echo "resource used in ghcr.io/kubeflow/spark-operator has changed in $resource, need to update the appVersion in charts/spark-operator-chart/Chart.yaml" git diff origin/master -- $resource; echo "failing the build... " && false fi @@ -174,12 +174,12 @@ jobs: - name: Build local spark-operator docker image for minikube testing run: | - docker build -t gcr.io/kubeflow/spark-operator:local . - minikube image load gcr.io/kubeflow/spark-operator:local + docker build -t ghcr.io/kubeflow/spark-operator:local . + minikube image load ghcr.io/kubeflow/spark-operator:local # The integration tests are currently broken see: https://github.com/kubeflow/spark-operator/issues/1416 # - name: Run chart-testing (integration test) - # run: make integation-test + # run: make integration-test - name: Setup tmate session if: failure() diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c980350f8f..c725819882 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -3,18 +3,6 @@ We'd love to accept your patches and contributions to this project. There are just a few small guidelines you need to follow. -## Contributor License Agreement - -Contributions to this project must be accompanied by a Contributor License -Agreement. You (or your employer) retain the copyright to your contribution; -this simply gives us permission to use and redistribute your contributions as -part of the project. Head over to to see -your current agreements on file or to sign a new one. - -You generally only need to submit a CLA once, so if you've already submitted one -(even if it was for a different project), you probably don't need to do it -again. - ## Code reviews All submissions, including submissions by project members, require review. We diff --git a/README.md b/README.md index f8c5f900bd..2e0edd3be6 100644 --- a/README.md +++ b/README.md @@ -1,11 +1,24 @@ +# Kubeflow Spark Operator [![Go Report Card](https://goreportcard.com/badge/github.com/kubeflow/spark-operator)](https://goreportcard.com/report/github.com/kubeflow/spark-operator) -**This is not an officially supported Google product.** +## Overview +The Kubernetes Operator for Apache Spark aims to make specifying and running [Spark](https://github.com/apache/spark) applications as easy and idiomatic as running other workloads on Kubernetes. It uses +[Kubernetes custom resources](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) +for specifying, running, and surfacing status of Spark applications. For a complete reference of the custom resource definitions, please refer to the [API Definition](docs/api-docs.md). For details on its design, please refer to the [design doc](docs/design.md). 
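To make the custom-resource model concrete, a minimal `SparkApplication` might look like the sketch below (adapted from the project's `spark-pi` example; the image tag, jar path, and service account name are assumptions, not taken from this patch):

```yaml
apiVersion: sparkoperator.k8s.io/v1beta2
kind: SparkApplication
metadata:
  name: spark-pi
  namespace: default
spec:
  type: Scala
  mode: cluster
  image: spark:3.5.0                  # assumed image; any compatible Spark image works
  mainClass: org.apache.spark.examples.SparkPi
  mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.5.0.jar
  sparkVersion: "3.5.0"
  restartPolicy:
    type: Never
  driver:
    cores: 1
    memory: 512m
    serviceAccount: spark-operator-spark   # assumed name, as created by the chart
  executor:
    instances: 1
    cores: 1
    memory: 512m
```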
It requires Spark 2.3 and above that supports Kubernetes as a native scheduler backend. -## Community +The Kubernetes Operator for Apache Spark currently supports the following list of features: -* Join our [Slack](https://kubernetes.slack.com/messages/CALBDHMTL) channel on [Kubernetes on Slack](https://slack.k8s.io/). -* Check out [who is using the Kubernetes Operator for Apache Spark](docs/who-is-using.md). +* Supports Spark 2.3 and up. +* Enables declarative application specification and management of applications through custom resources. +* Automatically runs `spark-submit` on behalf of users for each `SparkApplication` eligible for submission. +* Provides native [cron](https://en.wikipedia.org/wiki/Cron) support for running scheduled applications. +* Supports customization of Spark pods beyond what Spark natively is able to do through the mutating admission webhook, e.g., mounting ConfigMaps and volumes, and setting pod affinity/anti-affinity. +* Supports automatic application re-submission for updated `SparkApplication` objects with updated specification. +* Supports automatic application restart with a configurable restart policy. +* Supports automatic retries of failed submissions with optional linear back-off. +* Supports mounting local Hadoop configuration as a Kubernetes ConfigMap automatically via `sparkctl`. +* Supports automatically staging local application dependencies to Google Cloud Storage (GCS) via `sparkctl`. +* Supports collecting and exporting application-level metrics and driver/executor metrics to Prometheus. ## Project Status @@ -72,26 +85,11 @@ If you are running the Kubernetes Operator for Apache Spark on Google Kubernetes For more information, check the [Design](docs/design.md), [API Specification](docs/api-docs.md) and detailed [User Guide](docs/user-guide.md). -## Overview - -The Kubernetes Operator for Apache Spark aims to make specifying and running [Spark](https://github.com/apache/spark) applications as easy and idiomatic as running other workloads on Kubernetes. It uses -[Kubernetes custom resources](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) -for specifying, running, and surfacing status of Spark applications. For a complete reference of the custom resource definitions, please refer to the [API Definition](docs/api-docs.md). For details on its design, please refer to the [design doc](docs/design.md). It requires Spark 2.3 and above that supports Kubernetes as a native scheduler backend. - -The Kubernetes Operator for Apache Spark currently supports the following list of features: - -* Supports Spark 2.3 and up. -* Enables declarative application specification and management of applications through custom resources. -* Automatically runs `spark-submit` on behalf of users for each `SparkApplication` eligible for submission. -* Provides native [cron](https://en.wikipedia.org/wiki/Cron) support for running scheduled applications. -* Supports customization of Spark pods beyond what Spark natively is able to do through the mutating admission webhook, e.g., mounting ConfigMaps and volumes, and setting pod affinity/anti-affinity. -* Supports automatic application re-submission for updated `SparkApplication` objects with updated specification. -* Supports automatic application restart with a configurable restart policy. -* Supports automatic retries of failed submissions with optional linear back-off. -* Supports mounting local Hadoop configuration as a Kubernetes ConfigMap automatically via `sparkctl`. 
-* Supports automatically staging local application dependencies to Google Cloud Storage (GCS) via `sparkctl`. -* Supports collecting and exporting application-level metrics and driver/executor metrics to Prometheus. - ## Contributing Please check [CONTRIBUTING.md](CONTRIBUTING.md) and the [Developer Guide](docs/developer-guide.md) out. + +## Community + +* Join our [Kubeflow Slack Channel](https://kubeflow.slack.com/archives/C06627U3XU3) +* Check out [who is using the Kubernetes Operator for Apache Spark](docs/who-is-using.md). From 6ded3acd1c5058c1d737e40d6135b5606dc68e43 Mon Sep 17 00:00:00 2001 From: Yi Chen Date: Tue, 16 Apr 2024 04:32:52 +0800 Subject: [PATCH 42/87] Add some helm chart unit tests and fix spark service account render failure when extra annotations are specified (#1967) * Add helm unit tests Signed-off-by: Yi Chen * Fix: failed to render spark service account when extra annotations are specified Signed-off-by: Yi Chen * Update developer guide Signed-off-by: Yi Chen * Bump helm chart version Signed-off-by: Yi Chen --------- Signed-off-by: Yi Chen --- Makefile | 6 + charts/spark-operator-chart/.helmignore | 39 ++- charts/spark-operator-chart/Chart.yaml | 2 +- charts/spark-operator-chart/README.md | 2 +- .../templates/deployment.yaml | 4 +- .../spark-operator-chart/templates/rbac.yaml | 8 +- .../templates/spark-serviceaccount.yaml | 10 +- .../tests/deployment_test.yaml | 317 ++++++++++++++++++ .../spark-operator-chart/tests/rbac_test.yaml | 90 +++++ .../tests/serviceaccount_test.yaml | 54 +++ .../tests/spark-rbac_test.yaml | 113 +++++++ .../tests/spark-serviceaccount_test.yaml | 112 +++++++ .../tests/webhook-service_test.yaml | 33 ++ docs/developer-guide.md | 132 +++++++- 14 files changed, 894 insertions(+), 28 deletions(-) create mode 100644 charts/spark-operator-chart/tests/deployment_test.yaml create mode 100644 charts/spark-operator-chart/tests/rbac_test.yaml create mode 100644 charts/spark-operator-chart/tests/serviceaccount_test.yaml create mode 100644 charts/spark-operator-chart/tests/spark-rbac_test.yaml create mode 100644 charts/spark-operator-chart/tests/spark-serviceaccount_test.yaml create mode 100644 charts/spark-operator-chart/tests/webhook-service_test.yaml diff --git a/Makefile b/Makefile index 3f2a5206c0..b26d51c23b 100644 --- a/Makefile +++ b/Makefile @@ -44,6 +44,12 @@ build-api-docs: -template-dir /repo/hack/api-docs/api-docs-template \ -out-file /repo/docs/api-docs.md" +helm-unittest: + helm unittest charts/spark-operator-chart --strict + +helm-lint: + docker run --rm --workdir /workspace --volume $(PWD):/workspace quay.io/helmpack/chart-testing:latest ct lint + helm-docs: docker run --rm --volume "$$(pwd):/helm-docs" -u "$(id -u)" jnorwood/helm-docs:latest diff --git a/charts/spark-operator-chart/.helmignore b/charts/spark-operator-chart/.helmignore index f996f51be7..4fbbbf5df1 100644 --- a/charts/spark-operator-chart/.helmignore +++ b/charts/spark-operator-chart/.helmignore @@ -1 +1,38 @@ -ci/ \ No newline at end of file +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
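The new `helm-unittest` and `helm-lint` Makefile targets above can also be run by hand; a sketch (the plugin install URL is the upstream helm-unittest repository, an assumption since the patch does not pin it):

```shell
# One-time: install the helm-unittest plugin the Makefile target relies on.
helm plugin install https://github.com/helm-unittest/helm-unittest.git

# Run the chart unit tests added in this patch:
helm unittest charts/spark-operator-chart --strict

# Lint the chart the same way the Makefile does, via the chart-testing image:
docker run --rm --workdir /workspace --volume "$(pwd):/workspace" \
  quay.io/helmpack/chart-testing:latest ct lint
```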
+ +ci/ + +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ + +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ + +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ + +# MacOS +.DS_Store + +# helm-unittest +./tests +.debug +__snapshot__ + +# helm-docs +README.md.gotmpl diff --git a/charts/spark-operator-chart/Chart.yaml b/charts/spark-operator-chart/Chart.yaml index 0aeb82f869..f1005f46e2 100644 --- a/charts/spark-operator-chart/Chart.yaml +++ b/charts/spark-operator-chart/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: spark-operator description: A Helm chart for Spark on Kubernetes operator -version: 1.2.5 +version: 1.2.6 appVersion: v1beta2-1.4.2-3.5.0 keywords: - spark diff --git a/charts/spark-operator-chart/README.md b/charts/spark-operator-chart/README.md index d1382b5206..d3a230be0c 100644 --- a/charts/spark-operator-chart/README.md +++ b/charts/spark-operator-chart/README.md @@ -1,6 +1,6 @@ # spark-operator -![Version: 1.2.5](https://img.shields.io/badge/Version-1.2.5-informational?style=flat-square) ![AppVersion: v1beta2-1.4.2-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.4.2--3.5.0-informational?style=flat-square) +![Version: 1.2.6](https://img.shields.io/badge/Version-1.2.6-informational?style=flat-square) ![AppVersion: v1beta2-1.4.2-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.4.2--3.5.0-informational?style=flat-square) A Helm chart for Spark on Kubernetes operator diff --git a/charts/spark-operator-chart/templates/deployment.yaml b/charts/spark-operator-chart/templates/deployment.yaml index f9ea202702..699fb61018 100644 --- a/charts/spark-operator-chart/templates/deployment.yaml +++ b/charts/spark-operator-chart/templates/deployment.yaml @@ -97,8 +97,10 @@ spec: - -leader-election-lock-namespace={{ default .Release.Namespace .Values.leaderElection.lockNamespace }} - -leader-election-lock-name={{ .Values.leaderElection.lockName }} {{- end }} + {{- with .Values.resources }} resources: - {{- toYaml .Values.resources | nindent 10 }} + {{- toYaml . | nindent 10 }} + {{- end }} {{- if or .Values.webhook.enable (ne (len .Values.volumeMounts) 0 ) }} volumeMounts: {{- end }} diff --git a/charts/spark-operator-chart/templates/rbac.yaml b/charts/spark-operator-chart/templates/rbac.yaml index 342270f8f7..0f3ad19525 100644 --- a/charts/spark-operator-chart/templates/rbac.yaml +++ b/charts/spark-operator-chart/templates/rbac.yaml @@ -1,4 +1,4 @@ -{{- if or .Values.rbac.create .Values.rbac.createClusterRole }} +{{- if or .Values.rbac.create .Values.rbac.createClusterRole -}} apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -7,9 +7,9 @@ metadata: "helm.sh/hook": pre-install, pre-upgrade "helm.sh/hook-delete-policy": hook-failed, before-hook-creation "helm.sh/hook-weight": "-10" -{{- with .Values.rbac.annotations }} -{{ toYaml . | indent 4 }} -{{- end }} + {{- with .Values.rbac.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} labels: {{- include "spark-operator.labels" . 
| nindent 4 }} rules: diff --git a/charts/spark-operator-chart/templates/spark-serviceaccount.yaml b/charts/spark-operator-chart/templates/spark-serviceaccount.yaml index 547aee59b9..25c00ccfab 100644 --- a/charts/spark-operator-chart/templates/spark-serviceaccount.yaml +++ b/charts/spark-operator-chart/templates/spark-serviceaccount.yaml @@ -1,4 +1,4 @@ -{{- if .Values.serviceAccounts.spark.create }} +{{- if .Values.serviceAccounts.spark.create -}} {{- range $sparkJobNamespace := .Values.sparkJobNamespaces | default (list .Release.Namespace) }} --- apiVersion: v1 @@ -6,11 +6,11 @@ kind: ServiceAccount metadata: name: {{ include "spark.serviceAccountName" $ }} namespace: {{ $sparkJobNamespace }} -{{- with $.Values.serviceAccounts.spark.annotations }} - annotations: -{{ toYaml $ | indent 4 }} -{{- end }} labels: {{- include "spark-operator.labels" $ | nindent 4 }} + {{- with $.Values.serviceAccounts.spark.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} {{- end }} {{- end }} diff --git a/charts/spark-operator-chart/tests/deployment_test.yaml b/charts/spark-operator-chart/tests/deployment_test.yaml new file mode 100644 index 0000000000..247266bb7e --- /dev/null +++ b/charts/spark-operator-chart/tests/deployment_test.yaml @@ -0,0 +1,317 @@ +suite: Test spark operator deployment + +templates: + - deployment.yaml + +release: + name: spark-operator + +tests: + - it: Should add pod annotations if podAnnotations is set + set: + podAnnotations: + key1: value1 + key2: value2 + asserts: + - equal: + path: spec.template.metadata.annotations.key1 + value: value1 + - equal: + path: spec.template.metadata.annotations.key2 + value: value2 + + - it: Should add prometheus annotations if metrics.enable is true + set: + metrics: + enable: true + port: 10254 + endpoint: /metrics + asserts: + - equal: + path: spec.template.metadata.annotations["prometheus.io/scrape"] + value: "true" + - equal: + path: spec.template.metadata.annotations["prometheus.io/port"] + value: "10254" + - equal: + path: spec.template.metadata.annotations["prometheus.io/path"] + value: /metrics + + - it: Should add secrets if imagePullSecrets is set + set: + imagePullSecrets: + - name: test-secret1 + - name: test-secret2 + asserts: + - equal: + path: spec.template.spec.imagePullSecrets[0].name + value: test-secret1 + - equal: + path: spec.template.spec.imagePullSecrets[1].name + value: test-secret2 + + - it: Should add pod securityContext if podSecurityContext is set + set: + podSecurityContext: + runAsUser: 1000 + runAsGroup: 2000 + fsGroup: 3000 + asserts: + - equal: + path: spec.template.spec.securityContext.runAsUser + value: 1000 + - equal: + path: spec.template.spec.securityContext.runAsGroup + value: 2000 + - equal: + path: spec.template.spec.securityContext.fsGroup + value: 3000 + + - it: Should use the specified image repository if image.repository and image.tag is set + set: + image: + repository: test-repository + tag: test-tag + asserts: + - equal: + path: spec.template.spec.containers[0].image + value: test-repository:test-tag + + - it: Should use the specified image pull policy if image.pullPolicy is set + set: + image: + pullPolicy: Always + asserts: + - equal: + path: spec.template.spec.containers[0].imagePullPolicy + value: Always + + - it: Should add container securityContext if securityContext is set + set: + securityContext: + runAsUser: 1000 + runAsGroup: 2000 + fsGroup: 3000 + asserts: + - equal: + path: spec.template.spec.containers[0].securityContext.runAsUser + value: 1000 + - equal: + path: 
spec.template.spec.containers[0].securityContext.runAsGroup + value: 2000 + - equal: + path: spec.template.spec.containers[0].securityContext.fsGroup + value: 3000 + + - it: Should add metric ports if metrics.enable is true + set: + metrics: + enable: true + port: 10254 + portName: metrics + asserts: + - contains: + path: spec.template.spec.containers[0].ports + content: + name: metrics + containerPort: 10254 + count: 1 + + - it: Should add webhook ports if webhook.enable is true + set: + webhook: + enable: true + port: 8080 + portName: webhook + asserts: + - contains: + path: spec.template.spec.containers[0].ports + content: + name: webhook + containerPort: 8080 + count: 1 + + - it: Should add resources if resources is set + set: + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" + asserts: + - equal: + path: spec.template.spec.containers[0].resources + value: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" + + - it: Should add webhook certs volume if webhook.enable is true + set: + webhook: + enable: true + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: webhook-certs + secret: + secretName: spark-operator-webhook-certs + count: 1 + + - it: Should add webhook certs volume mounts if webhook.enable is true + set: + webhook: + enable: true + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: webhook-certs + mountPath: /etc/webhook-certs + count: 1 + + - it: Should add sidecars if sidecars is set + set: + sidecars: + - name: sidecar1 + image: sidecar-image1 + - name: sidecar2 + image: sidecar-image2 + asserts: + - contains: + path: spec.template.spec.containers + content: + name: sidecar1 + image: sidecar-image1 + count: 1 + - contains: + path: spec.template.spec.containers + content: + name: sidecar2 + image: sidecar-image2 + count: 1 + + - it: Should add volumes if volumes is set + set: + volumes: + - name: volume1 + emptyDir: {} + - name: volume2 + emptyDir: {} + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: volume1 + emptyDir: {} + count: 1 + - contains: + path: spec.template.spec.volumes + content: + name: volume2 + emptyDir: {} + count: 1 + + - it: Should add volume mounts if volumeMounts is set + set: + volumeMounts: + - name: volume1 + mountPath: /volume1 + - name: volume2 + mountPath: /volume2 + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: volume1 + mountPath: /volume1 + count: 1 + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: volume2 + mountPath: /volume2 + count: 1 + + - it: Should add nodeSelector if nodeSelector is set + set: + nodeSelector: + key1: value1 + key2: value2 + asserts: + - equal: + path: spec.template.spec.nodeSelector.key1 + value: value1 + - equal: + path: spec.template.spec.nodeSelector.key2 + value: value2 + + - it: Should add affinity if affinity is set + set: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: topology.kubernetes.io/zone + operator: In + values: + - antarctica-east1 + - antarctica-west1 + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + preference: + matchExpressions: + - key: another-node-label-key + operator: In + values: + - another-node-label-value + asserts: + - equal: + path: spec.template.spec.affinity + value: + nodeAffinity: + 
requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: topology.kubernetes.io/zone + operator: In + values: + - antarctica-east1 + - antarctica-west1 + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + preference: + matchExpressions: + - key: another-node-label-key + operator: In + values: + - another-node-label-value + + - it: Should add tolerations if tolerations is set + set: + tolerations: + - key: key1 + operator: Equal + value: value1 + effect: NoSchedule + - key: key2 + operator: Exists + effect: NoSchedule + asserts: + - equal: + path: spec.template.spec.tolerations + value: + - key: key1 + operator: Equal + value: value1 + effect: NoSchedule + - key: key2 + operator: Exists + effect: NoSchedule diff --git a/charts/spark-operator-chart/tests/rbac_test.yaml b/charts/spark-operator-chart/tests/rbac_test.yaml new file mode 100644 index 0000000000..f411c4def1 --- /dev/null +++ b/charts/spark-operator-chart/tests/rbac_test.yaml @@ -0,0 +1,90 @@ +suite: Test spark operator rbac + +templates: + - rbac.yaml + +release: + name: spark-operator + +tests: + - it: Should not render spark operator rbac resources if rbac.create is false and rbac.createClusterRole is false + set: + rbac: + create: false + createClusterRole: false + asserts: + - hasDocuments: + count: 0 + + - it: Should render spark operator cluster role if rbac.create is true + set: + rbac: + create: true + documentIndex: 0 + asserts: + - containsDocument: + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + name: spark-operator + + - it: Should render spark operator cluster role if rbac.createClusterRole is true + set: + rbac: + createClusterRole: true + documentIndex: 0 + asserts: + - containsDocument: + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + name: spark-operator + + - it: Should render spark operator cluster role binding if rbac.create is true + set: + rbac: + create: true + documentIndex: 1 + asserts: + - containsDocument: + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + name: spark-operator + + - it: Should render spark operator cluster role binding correctly if rbac.createClusterRole is true + set: + rbac: + createClusterRole: true + release: + documentIndex: 1 + asserts: + - containsDocument: + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + name: spark-operator + - contains: + path: subjects + content: + kind: ServiceAccount + name: spark-operator + namespace: NAMESPACE + count: 1 + - equal: + path: roleRef + value: + kind: ClusterRole + name: spark-operator + apiGroup: rbac.authorization.k8s.io + + - it: Should add extra annotations to spark operator cluster role if rbac.annotations is set + set: + rbac: + annotations: + key1: value1 + key2: value2 + documentIndex: 0 + asserts: + - equal: + path: metadata.annotations.key1 + value: value1 + - equal: + path: metadata.annotations.key2 + value: value2 diff --git a/charts/spark-operator-chart/tests/serviceaccount_test.yaml b/charts/spark-operator-chart/tests/serviceaccount_test.yaml new file mode 100644 index 0000000000..a9a1e39c60 --- /dev/null +++ b/charts/spark-operator-chart/tests/serviceaccount_test.yaml @@ -0,0 +1,54 @@ +suite: Test spark operator service account + +templates: + - serviceaccount.yaml + +release: + name: spark-operator + +tests: + - it: Should not render service account if serviceAccounts.sparkoperator.create is false + set: + serviceAccounts: + sparkoperator: + create: false + asserts: + - hasDocuments: + count: 0 + 
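As an aside on these tests: the `documentIndex` assertions select among the `---`-separated documents that a single template renders. The rendered output can be inspected directly with stock Helm flags (a sketch; `ns1`/`ns2` are arbitrary example namespaces):

```shell
# Render only the spark RBAC template for two job namespaces; the resulting
# Role/RoleBinding documents are what documentIndex 0..3 refer to.
helm template spark-operator charts/spark-operator-chart \
  --set 'sparkJobNamespaces={ns1,ns2}' \
  --show-only templates/spark-rbac.yaml
```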
+ - it: Should render service account if serviceAccounts.sparkoperator.create is true + set: + serviceAccounts: + sparkoperator: + create: true + asserts: + - containsDocument: + apiVersion: v1 + kind: ServiceAccount + name: spark-operator + + - it: Should use the specified service account name if serviceAccounts.sparkoperator.name is set + set: + serviceAccounts: + sparkoperator: + name: custom-service-account + asserts: + - containsDocument: + apiVersion: v1 + kind: ServiceAccount + name: custom-service-account + + - it: Should add extra annotations if serviceAccounts.sparkoperator.annotations is set + set: + serviceAccounts: + sparkoperator: + annotations: + key1: value1 + key2: value2 + asserts: + - equal: + path: metadata.annotations.key1 + value: value1 + - equal: + path: metadata.annotations.key2 + value: value2 diff --git a/charts/spark-operator-chart/tests/spark-rbac_test.yaml b/charts/spark-operator-chart/tests/spark-rbac_test.yaml new file mode 100644 index 0000000000..1a31c7152e --- /dev/null +++ b/charts/spark-operator-chart/tests/spark-rbac_test.yaml @@ -0,0 +1,113 @@ +suite: Test spark rbac + +templates: + - spark-rbac.yaml + +release: + name: spark-operator + +tests: + - it: Should not render spark rbac resources if rbac.create is false and rbac.createRole is false + set: + rbac: + create: false + createRole: false + asserts: + - hasDocuments: + count: 0 + + - it: Should render spark role if rbac.create is true + set: + rbac: + create: true + documentIndex: 0 + asserts: + - containsDocument: + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + name: spark-role + + - it: Should render spark role if rbac.createRole is true + set: + rbac: + createRole: true + documentIndex: 0 + asserts: + - containsDocument: + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + name: spark-role + + - it: Should render spark role binding if rbac.create is true + set: + rbac: + create: true + documentIndex: 1 + asserts: + - containsDocument: + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + name: spark + + - it: Should render spark role binding if rbac.createRole is true + set: + rbac: + createRole: true + documentIndex: 1 + asserts: + - containsDocument: + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + name: spark + + - it: Should render multiple spark roles if sparkJobNamespaces is set + set: + sparkJobNamespaces: + - ns1 + - ns2 + documentIndex: 0 + asserts: + - containsDocument: + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + name: spark-role + namespace: ns1 + + - it: Should render multiple spark role bindings if sparkJobNamespaces is set + set: + sparkJobNamespaces: + - ns1 + - ns2 + documentIndex: 1 + asserts: + - containsDocument: + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + name: spark + namespace: ns1 + + - it: Should render multiple spark roles if sparkJobNamespaces is set + set: + sparkJobNamespaces: + - ns1 + - ns2 + documentIndex: 2 + asserts: + - containsDocument: + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + name: spark-role + namespace: ns2 + + - it: Should render multiple spark role bindings if sparkJobNamespaces is set + set: + sparkJobNamespaces: + - ns1 + - ns2 + documentIndex: 3 + asserts: + - containsDocument: + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + name: spark + namespace: ns2 diff --git a/charts/spark-operator-chart/tests/spark-serviceaccount_test.yaml b/charts/spark-operator-chart/tests/spark-serviceaccount_test.yaml new file mode 100644 index 
0000000000..f7140f84fb --- /dev/null +++ b/charts/spark-operator-chart/tests/spark-serviceaccount_test.yaml @@ -0,0 +1,112 @@ +suite: Test spark service account + +templates: + - spark-serviceaccount.yaml + +release: + name: spark-operator + +tests: + - it: Should not render service account if serviceAccounts.spark.create is false + set: + serviceAccounts: + spark: + create: false + asserts: + - hasDocuments: + count: 0 + + - it: Should render service account if serviceAccounts.spark.create is true + set: + serviceAccounts: + spark: + create: true + asserts: + - containsDocument: + apiVersion: v1 + kind: ServiceAccount + name: spark-operator-spark + + - it: Should use the specified service account name if serviceAccounts.spark.name is set + set: + serviceAccounts: + spark: + name: spark + asserts: + - containsDocument: + apiVersion: v1 + kind: ServiceAccount + name: spark + + - it: Should add extra annotations if serviceAccounts.spark.annotations is set + set: + serviceAccounts: + spark: + annotations: + key1: value1 + key2: value2 + asserts: + - equal: + path: metadata.annotations.key1 + value: value1 + - equal: + path: metadata.annotations.key2 + value: value2 + + - it: Should create multiple service accounts if sparkJobNamespaces is set + set: + serviceAccounts: + spark: + name: spark + sparkJobNamespaces: + - ns1 + - ns2 + - ns3 + documentIndex: 0 + asserts: + - hasDocuments: + count: 3 + - containsDocument: + apiVersion: v1 + kind: ServiceAccount + name: spark + namespace: ns1 + + + - it: Should create multiple service accounts if sparkJobNamespaces is set + set: + serviceAccounts: + spark: + name: spark + sparkJobNamespaces: + - ns1 + - ns2 + - ns3 + documentIndex: 1 + asserts: + - hasDocuments: + count: 3 + - containsDocument: + apiVersion: v1 + kind: ServiceAccount + name: spark + namespace: ns2 + + - it: Should create multiple service accounts if sparkJobNamespaces is set + set: + serviceAccounts: + spark: + name: spark + sparkJobNamespaces: + - ns1 + - ns2 + - ns3 + documentIndex: 2 + asserts: + - hasDocuments: + count: 3 + - containsDocument: + apiVersion: v1 + kind: ServiceAccount + name: spark + namespace: ns3 diff --git a/charts/spark-operator-chart/tests/webhook-service_test.yaml b/charts/spark-operator-chart/tests/webhook-service_test.yaml new file mode 100644 index 0000000000..4b57acdf62 --- /dev/null +++ b/charts/spark-operator-chart/tests/webhook-service_test.yaml @@ -0,0 +1,33 @@ +suite: Test spark operator webhook service + +templates: + - webhook-service.yaml + +release: + name: spark-operator + +tests: + - it: Should not render the webhook service if webhook.enable is false + set: + webhook: + enable: false + asserts: + - hasDocuments: + count: 0 + + - it: Should render the webhook service correctly if webhook.enable is true + set: + webhook: + enable: true + portName: webhook + asserts: + - containsDocument: + apiVersion: v1 + kind: Service + name: spark-operator-webhook + - equal: + path: spec.ports[0] + value: + port: 443 + targetPort: webhook + name: webhook diff --git a/docs/developer-guide.md b/docs/developer-guide.md index a4f6acce27..846762ea2a 100644 --- a/docs/developer-guide.md +++ b/docs/developer-guide.md @@ -1,5 +1,28 @@ # Developer Guide +## Configure Git Pre-Commit Hooks + +Git hooks are useful for identifying simple issues before submission to code review. We run hooks on every commit to automatically generate helm chart `README.md` file from `README.md.gotmpl` file. 
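+
+The hooks themselves are declared in the repository's `.pre-commit-config.yaml`. As a rough sketch of how the [helm-docs](https://github.com/norwoodj/helm-docs) hook is typically wired in (the `rev` pin below is hypothetical; the checked-in config file is authoritative):
+
+```yaml
+repos:
+  - repo: https://github.com/norwoodj/helm-docs
+    # Hypothetical pin; use whatever version the repository actually tracks.
+    rev: v1.11.0
+    hooks:
+      - id: helm-docs
+        args:
+          # Regenerate charts/*/README.md from the adjacent README.md.gotmpl.
+          - --chart-search-root=charts
+```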
+Before you can run git hooks, you need to have the pre-commit package manager installed as follows:
+
+```shell
+# Using pip
+pip install pre-commit
+
+# Using conda
+conda install -c conda-forge pre-commit
+
+# Using Homebrew
+brew install pre-commit
+```
+
+To set up the pre-commit hooks, run the following commands:
+
+```shell
+pre-commit install
+
+pre-commit install-hooks
+```
+
 ## Build the Operator
 
 In case you want to build the operator from the source code, e.g., to test a fix or a feature you write, you can do so following the instructions below.
@@ -7,60 +30,60 @@ In case you want to build the operator from the source code, e.g., to test a fix
 
 The easiest way to build the operator without worrying about its dependencies is to just build an image using the [Dockerfile](../Dockerfile).
 
 ```bash
-$ docker build -t <image-tag> .
+docker build -t <image-tag> .
 ```
 
-The operator image is built upon a base Spark image that defaults to `gcr.io/spark-operator/spark:v3.1.1`. If you want to use your own Spark image (e.g., an image with a different version of Spark or some custom dependencies), specify the argument `SPARK_IMAGE` as the following example shows:
+The operator image is built upon a base Spark image that defaults to `spark:3.5.0`. If you want to use your own Spark image (e.g., an image with a different version of Spark or some custom dependencies), specify the argument `SPARK_IMAGE` as the following example shows:
 
 ```bash
-$ docker build --build-arg SPARK_IMAGE=<spark-image> -t <image-tag> .
+docker build --build-arg SPARK_IMAGE=<spark-image> -t <image-tag> .
 ```
 
 If you want to use the operator on OpenShift clusters, first make sure you have Docker version 18.09.3 or above, then build your operator image using the [OpenShift-specific Dockerfile](../Dockerfile.rh).
 
 ```bash
-$ export DOCKER_BUILDKIT=1
-$ docker build -t <image-tag> -f Dockerfile.rh .
+export DOCKER_BUILDKIT=1
+docker build -t <image-tag> -f Dockerfile.rh .
 ```
 
 If you'd like to build/test the spark-operator locally, follow the instructions below:
 
 ```bash
-$ mkdir -p $GOPATH/src/github.com/kubeflow
-$ cd $GOPATH/src/github.com/kubeflow
-$ git clone git@github.com:kubeflow/spark-operator.git
-$ cd spark-operator
+mkdir -p $GOPATH/src/github.com/kubeflow
+cd $GOPATH/src/github.com/kubeflow
+git clone git@github.com:kubeflow/spark-operator.git
+cd spark-operator
 ```
 
 To update the auto-generated code, run the following command. (This step is only required if the CRD types have been changed):
 
 ```bash
-$ hack/update-codegen.sh
+hack/update-codegen.sh
 ```
 
 To update the auto-generated CRD definitions, run the following command. After doing so, you must update the list of required fields under each `ports` field to add the `protocol` field to the list. Skipping this step will make the CRDs incompatible with Kubernetes v1.18+.
 ```bash
-$ GO111MODULE=off go get -u sigs.k8s.io/controller-tools/cmd/controller-gen
-$ controller-gen crd:trivialVersions=true,maxDescLen=0,crdVersions=v1beta1 paths="./pkg/apis/sparkoperator.k8s.io/v1beta2" output:crd:artifacts:config=./manifest/crds/
+GO111MODULE=off go get -u sigs.k8s.io/controller-tools/cmd/controller-gen
+controller-gen crd:trivialVersions=true,maxDescLen=0,crdVersions=v1beta1 paths="./pkg/apis/sparkoperator.k8s.io/v1beta2" output:crd:artifacts:config=./manifest/crds/
 ```
 
 You can verify the current auto-generated code is up to date with:
 
 ```bash
-$ hack/verify-codegen.sh
+hack/verify-codegen.sh
 ```
 
 To build the operator, run the following command:
 
 ```bash
-$ GOOS=linux go build -o spark-operator
+GOOS=linux go build -o spark-operator
 ```
 
 To run unit tests, run the following command:
 
 ```bash
-$ go test ./...
+go test ./...
 ```
 
 ## Build the API Specification Doc
@@ -72,3 +95,82 @@ make build-api-docs
 ```
 
 Running the above command will update the file `docs/api-docs.md`.
+
+## Develop with the Helm Chart
+
+### Run helm chart lint
+
+```shell
+$ make helm-lint
+Linting charts...
+
+------------------------------------------------------------------------------------------------------------------------
+ Charts to be processed:
+------------------------------------------------------------------------------------------------------------------------
+ spark-operator => (version: "1.2.4", path: "charts/spark-operator-chart")
+------------------------------------------------------------------------------------------------------------------------
+
+Linting chart "spark-operator => (version: \"1.2.4\", path: \"charts/spark-operator-chart\")"
+Checking chart "spark-operator => (version: \"1.2.4\", path: \"charts/spark-operator-chart\")" for a version bump...
+Old chart version: 1.2.1
+New chart version: 1.2.4
+Chart version ok.
+Validating /Users/user/go/src/github.com/kubeflow/spark-operator/charts/spark-operator-chart/Chart.yaml...
+Validation success! 👍
+Validating maintainers...
+
+Linting chart with values file "charts/spark-operator-chart/ci/ci-values.yaml"...
+
+==> Linting charts/spark-operator-chart
+[INFO] Chart.yaml: icon is recommended
+
+1 chart(s) linted, 0 chart(s) failed
+
+------------------------------------------------------------------------------------------------------------------------
+ ✔︎ spark-operator => (version: "1.2.4", path: "charts/spark-operator-chart")
+------------------------------------------------------------------------------------------------------------------------
+All charts linted successfully
+```
+
+### Run helm chart unit tests
+
+First, you need to install the helm chart unit test plugin as follows:
+
+```shell
+helm plugin install https://github.com/helm-unittest/helm-unittest.git
+```
+
+For more information about how to write helm chart unit tests, please refer to [helm-unittest](https://github.com/helm-unittest/helm-unittest).
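+
+As a quick illustration of the format, a suite is a YAML file under the chart's `tests/` directory that renders a template and asserts on the output. The sketch below is hypothetical (the `deployment.yaml` template and `replicaCount` value are examples only), but it mirrors the shape of the suites added in this change:
+
+```yaml
+suite: Test example deployment
+
+templates:
+  # Template to render, relative to the chart's templates/ directory.
+  - deployment.yaml
+
+release:
+  name: spark-operator
+
+tests:
+  - it: Should set the requested number of replicas
+    set:
+      # Hypothetical value; assumes the chart exposes replicaCount.
+      replicaCount: 2
+    asserts:
+      - equal:
+          path: spec.replicas
+          value: 2
+```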
+ +Then, run `make helm-unittest` to run the helm chart unit tests: + +```shell +$ make helm-unittest + +### Chart [ spark-operator ] charts/spark-operator-chart + + PASS Test spark operator deployment charts/spark-operator-chart/tests/deployment_test.yaml + PASS Test spark operator rbac charts/spark-operator-chart/tests/rbac_test.yaml + PASS Test spark operator service account charts/spark-operator-chart/tests/serviceaccount_test.yaml + PASS Test spark rbac charts/spark-operator-chart/tests/spark-rbac_test.yaml + PASS Test spark service account charts/spark-operator-chart/tests/spark-serviceaccount_test.yaml + PASS Test spark operator webhook service charts/spark-operator-chart/tests/webhook-service_test.yaml + +Charts: 1 passed, 1 total +Test Suites: 6 passed, 6 total +Tests: 46 passed, 46 total +Snapshot: 0 passed, 0 total +Time: 107.861083ms +``` + +### Build the Helm Docs + +The Helm chart `README.md` file is generated by [helm-docs](https://github.com/norwoodj/helm-docs) tool. If you want to update the Helm docs, remember to modify `README.md.gotmpl` rather than `README.md`, then run `make helm-docs` to generate the `README.md` file: + +```shell +$ make helm-docs +INFO[2024-04-14T07:29:26Z] Found Chart directories [charts/spark-operator-chart] +INFO[2024-04-14T07:29:26Z] Generating README Documentation for chart charts/spark-operator-chart +``` + +Note that if git pre-commit hooks are set up, `helm-docs` will automatically run before committing any changes. If there are any changes to the `README.md` file, the commit process will be aborted. From 362b812355feb2860fcf9a65a9f3d6cb0811a555 Mon Sep 17 00:00:00 2001 From: Ajay Kemparaj Date: Tue, 16 Apr 2024 22:31:52 +0530 Subject: [PATCH 43/87] chore: remove k8s.io/kubernetes replaces and adapt to v1.29.3 apis (#1968) Signed-off-by: ajayk --- go.mod | 11 +- go.sum | 301 ++++++++++++------ .../sparkapplication/sparkapp_util.go | 2 +- pkg/controller/sparkapplication/submission.go | 3 +- 4 files changed, 209 insertions(+), 108 deletions(-) diff --git a/go.mod b/go.mod index a6b09386b8..c5a4ffb5a2 100644 --- a/go.mod +++ b/go.mod @@ -66,7 +66,7 @@ require ( github.com/emicklei/go-restful/v3 v3.12.0 // indirect github.com/fatih/camelcase v1.0.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/go-errors/errors v1.0.1 // indirect + github.com/go-errors/errors v1.4.2 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect @@ -106,14 +106,14 @@ require ( github.com/prometheus/procfs v0.13.0 // indirect github.com/rivo/uniseg v0.4.7 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/xlab/treeprint v1.1.0 // indirect + github.com/xlab/treeprint v1.2.0 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.50.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0 // indirect go.opentelemetry.io/otel v1.25.0 // indirect go.opentelemetry.io/otel/metric v1.25.0 // indirect go.opentelemetry.io/otel/trace v1.25.0 // indirect - go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect + go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect golang.org/x/crypto v0.22.0 // indirect golang.org/x/oauth2 v0.19.0 // indirect golang.org/x/sys v0.19.0 // indirect @@ -133,8 +133,8 @@ require ( k8s.io/klog/v2 v2.120.1 // indirect k8s.io/kube-openapi v0.0.0-20240403164606-bc84c2ddaf99 // indirect sigs.k8s.io/json 
v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/kustomize/api v0.12.1 // indirect - sigs.k8s.io/kustomize/kyaml v0.13.9 // indirect + sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect + sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) @@ -158,7 +158,6 @@ replace ( k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.25.3 k8s.io/kubectl => k8s.io/kubectl v0.25.3 k8s.io/kubelet => k8s.io/kubelet v0.25.3 - k8s.io/kubernetes => k8s.io/kubernetes v1.19.6 k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.25.3 k8s.io/metrics => k8s.io/metrics v0.25.3 k8s.io/node-api => k8s.io/node-api v0.25.3 diff --git a/go.sum b/go.sum index fc7758ecd8..4de117db63 100644 --- a/go.sum +++ b/go.sum @@ -8,7 +8,6 @@ cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxK cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= @@ -606,31 +605,26 @@ cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcP dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= -github.com/Azure/azure-sdk-for-go v43.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v35.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v55.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= github.com/Azure/go-autorest/autorest v0.11.27/go.mod h1:7l8ybrIdUmGqZMTD0sRtAr8NvbHjfofbf8RSP2q7w7U= github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= 
github.com/Azure/go-autorest/autorest/adal v0.9.20/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk= github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw= github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= @@ -645,21 +639,25 @@ github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUM github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20200415212048-7901bc822317/go.mod h1:DF8FZRxMHMGv/vP2lQP6h+dYzzjpuRn24VeRiYn3qjQ= +github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20190822182118-27a4ced34534/go.mod h1:iroGtC8B3tQiqtds1l+mgk/BBOrxbqjH+eUfFQYRc14= github.com/GoogleCloudPlatform/k8s-cloud-provider v1.18.1-0.20220218231025-f11817397a1b/go.mod h1:FNj4KYEAAHfYu68kRYolGoxkaJn+6mdEsaM12VTwuI0= github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod h1:3VYc5hodBMJ5+l/7J4xAyMeuM2PNuepvHlGs8yilUCA= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= -github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= -github.com/Microsoft/hcsshim v0.8.10-0.20200715222032-5eafd1556990/go.mod h1:ay/0dTb7NsG8QMDfsRfLHgZo/6xAJShLe1+ePPflihk= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/hcsshim v0.0.0-20190417211021-672e52e9209d/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/OpenPeeDeeP/depguard v1.0.0/go.mod h1:7/4sitnI9YlQgTLLk734QlzXT8DuHVnAyztLplQjk+o= +github.com/OpenPeeDeeP/depguard v1.0.1/go.mod 
h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Rican7/retry v0.1.0/go.mod h1:FgOROf8P5bebcC1DS0PdOQiqGUridaZvikzUmkFW6gg= +github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= @@ -672,6 +670,7 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20220418222510-f25a4f6275ed/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= @@ -686,7 +685,6 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/auth0/go-jwt-middleware v0.0.0-20170425171159-5493cabe49f7/go.mod h1:LWMyo4iOLWXHGdBki7NIht1kHru/0wM179h+d3g8ATM= -github.com/aws/aws-sdk-go v1.6.10/go.mod h1:ZRmQr0FajVIyZ4ZzBYKG5P3ZqPz9IHG41ZoMu1ADI3k= github.com/aws/aws-sdk-go v1.28.2/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.38.49/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.51.16 h1:vnWKK8KjbftEkuPX8bRj3WHsLy1uhotn0eXptpvrxJI= @@ -729,6 +727,11 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 h1:cwIxeBttqPN3qkaAjcEcsh8NYr8n github.com/aws/aws-sdk-go-v2/service/sts v1.28.6/go.mod h1:FZf1/nKNEkHdGGJP/cI2MoIMquumuRK6ol3QQJNDxmw= github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q= github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= +github.com/bazelbuild/bazel-gazelle v0.18.2/go.mod h1:D0ehMSbS+vesFsLGiD6JXu3mVEzOlfUl8wNnq+x/9p0= +github.com/bazelbuild/bazel-gazelle v0.19.1-0.20191105222053-70208cbdc798/go.mod h1:rPwzNHUqEzngx1iVBfO/2X2npKaT3tqPqqHW6rVsn/A= +github.com/bazelbuild/buildtools v0.0.0-20190731111112-f720930ceb60/go.mod h1:5JP0TXzWDHXv8qvxRC4InIazwdyDseBDbzESUMKk1yU= +github.com/bazelbuild/buildtools 
v0.0.0-20190917191645-69366ca98f89/go.mod h1:5JP0TXzWDHXv8qvxRC4InIazwdyDseBDbzESUMKk1yU= +github.com/bazelbuild/rules_go v0.0.0-20190719190356-6dae44dc5cab/go.mod h1:MC23Dc/wkXEyk3Wpq6lCqz0ZAYOZDw2DR5y3N1q2i7M= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -738,12 +741,12 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bifurcation/mint v0.0.0-20180715133206-93c51c6ce115/go.mod h1:zVt7zX3K/aDCk9Tj+VM7YymsX66ERvzCJzw8rFCX2JU= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/caddyserver/caddy v1.0.3/go.mod h1:G+ouvOY32gENkJC+jhgl62TyhvqEsFaDiZ4uw0RzP1E= github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= @@ -752,6 +755,7 @@ github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +github.com/cespare/prettybench v0.0.0-20150116022406-03b8cfe5406c/go.mod h1:Xe6ZsFhtM8HrDku0pxJ3/Lr51rwykrzgFwpmTzleatY= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -759,15 +763,12 @@ github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= -github.com/checkpoint-restore/go-criu/v4 v4.0.2/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= +github.com/checkpoint-restore/go-criu v0.0.0-20181120144056-17b0214f6c48/go.mod h1:TrMrLQfeENAPYPRsJuq3jsqdlRh3lvi6trTZJG8+tho= github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= 
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= -github.com/cilium/ebpf v0.0.0-20200507155900-a9f01edf17e3/go.mod h1:XT+cAw5wfvsodedcijoh1l9cf7v1x9FlFB/3VmF/O8s= -github.com/cilium/ebpf v0.0.0-20200601085316-9f1617e5c574/go.mod h1:XT+cAw5wfvsodedcijoh1l9cf7v1x9FlFB/3VmF/O8s= -github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= +github.com/cilium/ebpf v0.0.0-20191025125908-95b36a581eed/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313/go.mod h1:P1wt9Z3DP8O6W3rvwCt0REIlshg1InHImaLW0t3ObY0= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -788,20 +789,11 @@ github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoC github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= github.com/codegangsta/negroni v1.0.0/go.mod h1:v0y3T5G7Y1UlFfyxFn/QLRU4a2EuNau2iZY63YTKWo0= github.com/container-storage-interface/spec v1.2.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4= -github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= -github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= -github.com/containerd/console v1.0.0/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= -github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= -github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= -github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= -github.com/containerd/ttrpc v1.0.0/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= -github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= -github.com/containerd/typeurl v1.0.0/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= -github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/coredns/corefile-migration v1.0.10/go.mod h1:RMy/mXdeDlYwzt0vdMEJvT2hGJ2I86/eO0UdXmH9XNI= +github.com/containerd/console v0.0.0-20170925154832-84eeaae905fa/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/containerd v1.0.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/typeurl v0.0.0-20190228175220-2a93cfde8c20/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= +github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/coredns/corefile-migration v1.0.6/go.mod 
h1:OFwBp/Wc9dJt5cAZzHWMNhK1r5L0p0jDwIBc6j8NC8E= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -810,14 +802,13 @@ github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHo github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= -github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= @@ -832,11 +823,10 @@ github.com/daviddengcn/go-colortext v1.0.0/go.mod h1:zDqEI5NVUop5QPpVJUxE9UO10hR github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= -github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v1.4.2-0.20200309214505-aa6a9891b09c/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod 
h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= @@ -865,7 +855,7 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7 github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw= -github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= @@ -873,6 +863,7 @@ github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= +github.com/fatih/color v1.6.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= @@ -889,12 +880,15 @@ github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSy github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/go-acme/lego v2.5.0+incompatible/go.mod h1:yzMNe9CasVUhkquNvti5nAtPmG94USbYxYrZfTkIn0M= github.com/go-bindata/go-bindata v3.1.1+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo= -github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= +github.com/go-critic/go-critic v0.3.5-0.20190526074819-1df300866540/go.mod h1:+sE8vrLDS2M0pZkBk0wy6+nLdKexVDrl/jBqQOTDThA= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= @@ -903,12 +897,12 @@ github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmn github.com/go-gl/glfw 
v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-ini/ini v1.9.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk= +github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTDqfpXvXAN0sXM= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= @@ -922,6 +916,7 @@ github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ4 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= +github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= @@ -979,8 +974,24 @@ github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhO github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= +github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= +github.com/go-toolsmith/astequal v0.0.0-20180903214952-dcb477bfacd6/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= +github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= +github.com/go-toolsmith/astfmt v0.0.0-20180903215011-8f8ee99c3086/go.mod h1:mP93XdblcopXwlyN4X4uodxXQhldPGZbcEJIimQHrkg= +github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= +github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU= +github.com/go-toolsmith/astp v0.0.0-20180903215135-0af7e3c24f30/go.mod h1:SV2ur98SGypH1UjcPpCatrV5hPazG6+IfNHbkDXBRrk= +github.com/go-toolsmith/astp 
v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI= +github.com/go-toolsmith/pkgload v0.0.0-20181119091011-e9e65178eee8/go.mod h1:WoMrjiy4zvdS+Bg6z9jZH82QXwkcgCBX6nOfnmdaHks= +github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc= +github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= +github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= -github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus v0.0.0-20181101234600-2ff6f7ffd60f/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -1004,6 +1015,7 @@ github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.0.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -1013,6 +1025,7 @@ github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -1035,6 +1048,23 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= +github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= +github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6/go.mod h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0= +github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= +github.com/golangci/go-tools 
v0.0.0-20190318055746-e32c54105b7c/go.mod h1:unzUULGw35sjyOYjUt0jMTXqHlZPpPc6e+xfO4cd6mM= +github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3/go.mod h1:JXrF4TWy4tXYn62/9x8Wm/K/dm06p8tCKwFRDPZG/1o= +github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU= +github.com/golangci/gofmt v0.0.0-20181222123516-0b8337e80d98/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= +github.com/golangci/golangci-lint v1.18.0/go.mod h1:kaqo8l0OZKYPtjNmG4z4HrWLgcYNIJ9B9q3LWri9uLg= +github.com/golangci/gosec v0.0.0-20190211064107-66fb7fc33547/go.mod h1:0qUabqiIQgfmlAmulqxyiGkkyF6/tOGSnY2cnPVwrzU= +github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc/go.mod h1:e5tpTHCfVze+7EpLEozzMB3eafxo2KT5veNg1k6byQU= +github.com/golangci/lint-1 v0.0.0-20190420132249-ee948d087217/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= +github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= +github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= +github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21/go.mod h1:tf5+bzsHdTM0bsB7+8mt0GUMvjCgwLpTapNZHU8AajI= +github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4= +github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAONeMXrxql8uvOKuAZSu8aM5RUGv+1C6IJaEho= github.com/golangplus/bytes v1.0.0/go.mod h1:AdRaCFwmc/00ZzELMWb01soso6W1R/++O1XL80yAn+A= github.com/golangplus/fmt v1.0.0/go.mod h1:zpM0OfbMCjPtd2qkTD/jX2MgiFCqklhSUFyDW44gVQE= @@ -1043,7 +1073,7 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/cadvisor v0.37.3/go.mod h1:BalYQhwl2UV8lpB3oFssiaW8Uj6sqfFDxw5nEs9sBgU= +github.com/google/cadvisor v0.35.0/go.mod h1:1nql6U13uTHaLYB8rLS5x9IJc2qT6Xd/Tr1sTX6NE48= github.com/google/cel-go v0.12.5/go.mod h1:Jk7ljRzLBhkmiAwBoUxB1sZSCVBAzkqPF25olK/iRDw= github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= @@ -1068,6 +1098,8 @@ github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/go-replayers/grpcreplay v1.1.0 h1:S5+I3zYyZ+GQz68OfbURDdt/+cSMqCK1wrvNx7WBzTE= github.com/google/go-replayers/grpcreplay v1.1.0/go.mod h1:qzAvJ8/wi57zq7gWqaE6AwLM6miiXUQwP1S+I9icmhk= github.com/google/go-replayers/httpreplay v1.2.0 h1:VM1wEyyjaoU53BwrOnaf9VhAyQQEEioJvFYxYcLRKzk= @@ -1098,6 +1130,7 @@ github.com/google/pprof 
v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= @@ -1133,17 +1166,20 @@ github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57Q github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= github.com/googleapis/gax-go/v2 v2.12.3 h1:5/zPPDvw8Q1SuXjrqrZslrqT7dL/uJT2CQii/cLCKqA= github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4= -github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= +github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc h1:f8eY6cV/x1x+HLjOp4r72s/31/V2aTUtg5oKRRPf8/Q= github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= @@ -1172,6 +1208,7 @@ github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod 
h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v0.0.0-20180404174102-ef8a98b0bbce/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= @@ -1189,9 +1226,8 @@ github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+h github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/ishidawataru/sctp v0.0.0-20190723014705-7c296d48a2b5/go.mod h1:DM4VvS+hD/kDi1U1QsX2fnZowwBhqD0Dk3bRPKF/Oc8= +github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a/go.mod h1:wK6yTYYcgjHE1Z1QtXACPDjcFJyBskHEdagmnq3vsP8= -github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= @@ -1204,6 +1240,7 @@ github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFF github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -1220,13 +1257,16 @@ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:C github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v0.0.0-20161130080628-0de1eaf82fa3/go.mod h1:jxZFDH7ILpTPQTk+E2s+z4CUas9lVNjIuKR4c5/zKgM= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= +github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid/v2 v2.0.9/go.mod 
h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= @@ -1237,6 +1277,7 @@ github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NB github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -1246,6 +1287,7 @@ github.com/libopenstorage/openstorage v1.0.0/go.mod h1:Sp1sIObHjat1BeXhfMqLZ14wn github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= +github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= github.com/lpabon/godbc v0.1.1/go.mod h1:Jo9QV0cf3U6jZABgiJ2skINAXb9j8m51r07g4KI92ZA= github.com/lucas-clemente/aes12 v0.0.0-20171027163421-cd47fb39b79f/go.mod h1:JpH9J1c9oX6otFSgdUHwUBUizmKlrMjxWnIAjff4m04= github.com/lucas-clemente/quic-clients v0.1.0/go.mod h1:y5xVIEoObKqULIKivu+gD/LU90pL73bTdtQjPBvtCBk= @@ -1253,6 +1295,7 @@ github.com/lucas-clemente/quic-go v0.10.2/go.mod h1:hvaRS9IHjFLMq76puFJeWNfmn+H7 github.com/lucas-clemente/quic-go-certificates v0.0.0-20160823095156-d2f86524cced/go.mod h1:NCcRLrOTZbzhZvixZLlERbJtDtYsmMw8Jc4vS8Z0g58= github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/magiconair/properties v1.7.6/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -1268,6 +1311,7 @@ github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwm github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.16/go.mod 
h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= @@ -1275,9 +1319,11 @@ github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mesos/mesos-go v0.0.9/go.mod h1:kPYCMQ9gsOXVAle1OsoY4I1+9kPu8GHkf88aV59fDr4= github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2/go.mod h1:g4cOPxcjV0oFq3qwpjSA30LReKD8AoIfwAY9VvG35NY= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.3/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= @@ -1285,21 +1331,21 @@ github.com/miekg/dns v1.1.4/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nr github.com/mindprince/gonvml v0.0.0-20190828220739-9ebdce4bb989/go.mod h1:2eu9pRWp8mo84xCg6KswZ+USQHjwgRhNp06sozOdsTY= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= -github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= +github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod 
h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= -github.com/moby/sys/mountinfo v0.1.3/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o= github.com/moby/sys/mountinfo v0.6.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -1313,8 +1359,8 @@ github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb/go.mod h1:TaXosZuwd github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mozilla/tls-observatory v0.0.0-20180409132520-8791a200eb40/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0= -github.com/mrunalp/fileutils v0.0.0-20200520151820-abd8a0e76976/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= @@ -1324,6 +1370,8 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= github.com/naoina/toml v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= +github.com/nbutton23/zxcvbn-go v0.0.0-20160627004424-a22cb81b2ecd/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= +github.com/nbutton23/zxcvbn-go v0.0.0-20171102151520-eafdab6b0663/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= @@ -1341,31 +1389,31 @@ github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= -github.com/onsi/ginkgo/v2 v2.1.6 h1:Fx2POJZfKRQcM1pH49qSZiYeu319wji004qX+GDovrU= github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= 
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q= github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= -github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= +github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc90.0.20200616040943-82d2fa4eb069/go.mod h1:3Sm6Dt7OT8z88EbdQqqcRN2oCT54jbi72tT/HqgflT8= -github.com/opencontainers/runc v1.0.0-rc91.0.20200707015106-819fcc687efb/go.mod h1:ZuXhqlr4EiRYgDrBDNfSbE4+n9JX4+V107NwAmF7sZA= -github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g= -github.com/opencontainers/selinux v1.5.2/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g= +github.com/opencontainers/runc v1.0.0-rc10/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runtime-spec v1.0.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/selinux v1.3.1-0.20190929122143-5215b1806f52/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.1.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= @@ -1384,6 +1432,7 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= +github.com/pquerna/ffjson v0.0.0-20180717144149-af8b230fcd20/go.mod 
h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -1407,7 +1456,6 @@ github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9 github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.52.2 h1:LW8Vk7BccEdONfrJBDffQGRtpSzi5CQaRZGtboOO2ck= github.com/prometheus/common v0.52.2/go.mod h1:lrWtQx+iDfn2mbH5GUzlH9TSHyfZpHkSiG1W7y3sF2Q= -github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -1417,6 +1465,7 @@ github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/prometheus/procfs v0.13.0 h1:GqzLlQyfsPbaEHaQkO7tbDlriv/4o5Hudv6OXHGKX7o= github.com/prometheus/procfs v0.13.0/go.mod h1:cd4PFCR54QLnGKPaKGA6l+cfuNXtht43ZKY6tow0Y1g= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= github.com/quobyte/api v0.1.2/go.mod h1:jL7lIHrmqQ7yh05OJ+eEEdHr0u/kmT1Ff9iHd+4H6VI= github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= @@ -1429,6 +1478,7 @@ github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfm github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc= +github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= @@ -1442,12 +1492,19 @@ github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod 
h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shirou/gopsutil v0.0.0-20180427012116-c95755e4bcd7/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= @@ -1458,20 +1515,24 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.0/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.2/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/jwalterweatherman v0.0.0-20180109140146-7c0cea34c8ec/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0/go.mod 
h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -1479,8 +1540,8 @@ github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.0.2/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/storageos/go-api v0.0.0-20180912212459-343b3eff91fc/go.mod h1:ZrLn+e0ZuF3Y65PNF6dIwbJPZqfmtCXxFm9ckv0agOY= @@ -1505,28 +1566,33 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/thecodeteam/goscaleio v0.1.0/go.mod h1:68sdkZAsK8bvEwBlbQnlLS+xU+hvLYM/iQ8KXej1AwM= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/timakin/bodyclose v0.0.0-20190721030226-87058b9bfcec/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ultraware/funlen v0.0.1/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= +github.com/ultraware/funlen v0.0.2/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s= +github.com/valyala/quicktemplate v1.1.1/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4= +github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= -github.com/vishvananda/netlink v1.1.0/go.mod 
h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= -github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= -github.com/vishvananda/netns v0.0.0-20200520041808-52d707b772fe/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/vishvananda/netlink v1.0.0/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= +github.com/vishvananda/netns v0.0.0-20171111001504-be1fbeda1936/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xlab/treeprint v1.1.0 h1:G/1DjNkPpfZCFt9CSh6b5/nY4VimlbHF3Rh4obvtzDk= github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= +github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= +github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -1540,7 +1606,7 @@ github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaD go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= -go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/v2 v2.305.4/go.mod h1:Ud+VUwIi9/uQHOMA+4ekToJ12lTxlv0zB/+DHwTGEbU= @@ -1585,8 +1651,9 @@ go.opentelemetry.io/otel/trace v1.25.0 h1:tqukZGLwQYRIFtSQM2u2+yfMVTgGVeqRLPUYx1 go.opentelemetry.io/otel/trace v1.25.0/go.mod h1:hCCs70XM/ljO+BeQkyFnbK28SBIJ/Emuha+ccrCRT7I= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= -go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 h1:+FNtrFTmVw0YZGpBGX56XDee331t6JAXeK2bcyhLOOc= go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= +go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY= +go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.7.0/go.mod 
h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -1597,8 +1664,11 @@ go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9i go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= gocloud.dev v0.37.0 h1:XF1rN6R0qZI/9DYjN16Uy0durAmSlf58DHOcb28GPro= gocloud.dev v0.37.0/go.mod h1:7/O4kqdInCNsc6LqgmuFnS0GRew4XNNYWpA44yQnwco= +golang.org/x/build v0.0.0-20190927031335-2835ba2e683f/go.mod h1:fYw7AShPAhGMdXqA9gRadk/CcMsvLlClpE5oBwnS3dM= +golang.org/x/crypto v0.0.0-20180426230345-b49d69b5da94/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1606,14 +1676,15 @@ golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190424203555-c05e17bb3b2d/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= @@ -1688,11 +1759,15 @@ golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net 
v0.0.0-20170915142106-8351a756f30f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181102091132-c10e9556a7bc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1704,8 +1779,8 @@ golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190328230028-74de082e2cca/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190502183928-7f726cade0ab/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1770,6 +1845,7 @@ golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1799,6 +1875,7 @@ golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= golang.org/x/oauth2 v0.19.0 h1:9+E/EZBCbTLNrbN35fHv/a/d/mOBatymz1zbtQrXpIg= golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc8= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync 
v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1819,6 +1896,8 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20171026204733-164713f0dfce/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1827,6 +1906,7 @@ golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190122071731-054c452bb702/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1836,37 +1916,32 @@ golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1878,7 +1953,6 @@ golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1926,6 +2000,7 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1941,6 +2016,7 @@ golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= @@ -1950,7 +2026,9 @@ golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= +golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.0.0-20170915090833-1cbadb444a80/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ -1980,21 +2058,29 @@ golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20170915040203-e531a2a1c15f/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools 
v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181117154741-2ddaf7f79a09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190121143147-24cd39ecf745/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190122202912-9c309ee22fab/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190521203540-521d6ed310dd/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -2003,7 +2089,9 @@ golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190909030654-5b82db07426d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -2030,7 +2118,6 @@ golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= @@ -2058,6 +2145,8 @@ golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= +golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= +golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2080,14 +2169,13 @@ gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZ gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.6.1-0.20190607001116-5213b8090861/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.1-0.20200106000736-b8fc810ca6b5/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.1/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= @@ -2296,7 +2384,6 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= 
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= @@ -2357,6 +2444,7 @@ google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -2368,6 +2456,7 @@ gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= @@ -2392,13 +2481,17 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.1.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/gotestsum v0.3.5/go.mod h1:Mnf3e5FUzXbkCfynWBGOwLssY7gTQgCHObK9tMpAriY= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.2/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= @@ -2422,12 +2515,13 @@ k8s.io/component-helpers v0.25.3/go.mod h1:yu9zgPm9pf5jpmUzOZA9PMHY16Eu8ymt8AnSL k8s.io/controller-manager v0.25.3/go.mod h1:lWiZbjFw3joeiIVpscVfKywrAde4GE8Z84i5MIpEQMw= k8s.io/cri-api v0.25.3/go.mod 
h1:riC/P0yOGUf2K1735wW+CXs1aY2ctBgePtnnoFLd0dU= k8s.io/csi-translation-lib v0.25.3/go.mod h1:hrosK8ufTX5fz1CJO79EfPPkuLZWvaxEb4tovbcv/AU= -k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/heapster v1.2.0-beta.1/go.mod h1:h1uhptVXMwC8xtZBYsPXKVi8fpdlYkTs6k949KozGrM= -k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= @@ -2436,7 +2530,7 @@ k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-aggregator v0.25.3/go.mod h1:w87nqmzJMf7S73FRYcnexqfYW0AFiLJiCkvVCwM3feE= k8s.io/kube-controller-manager v0.25.3/go.mod h1:InfGO/O9vIPxpbgd0gUK22xVDsaGnJAUsATzwKk6BNg= -k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= +k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20220401212409-b28bf2818661/go.mod h1:daOouuuwd9JXpv1L7Y34iV3yf6nxzipkKMWWlqlvK9M= k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU= k8s.io/kube-openapi v0.0.0-20240403164606-bc84c2ddaf99 h1:w6nThEmGo9zcL+xH1Tu6pjxJ3K1jXFW+V0u4peqN8ks= @@ -2446,15 +2540,16 @@ k8s.io/kube-scheduler v0.25.3/go.mod h1:0EKmWTnwNaHnmWwan4bABGQm4XyYpc146XyFWX4e k8s.io/kubectl v0.25.3 h1:HnWJziEtmsm4JaJiKT33kG0kadx68MXxUE8UEbXnN4U= k8s.io/kubectl v0.25.3/go.mod h1:glU7PiVj/R6Ud4A9FJdTcJjyzOtCJyc0eO7Mrbh3jlI= k8s.io/kubelet v0.25.3/go.mod h1:YopVc6vLhveZb22I7AzcoWPap+t3/KJKqRZDa2MZmyE= -k8s.io/kubernetes v1.19.6 h1:Dc8KsWmZRuq8LD4Tn3v7ui6l3aym3JjcIdoff2lUU1I= -k8s.io/kubernetes v1.19.6/go.mod h1:gKuOq0UXDNqdLqmNMmMqFebiI7OUIcvh7XYngH4Oy2s= +k8s.io/kubernetes v1.18.3/go.mod h1:Efg82S+Ti02A/Mww53bxroc7IgzX2bgPsf6hT8gAs3M= +k8s.io/kubernetes v1.29.3 h1:EuOAKN4zpiP+kBx/0e9yS5iBkPSyLml19juOqZxBtDw= +k8s.io/kubernetes v1.29.3/go.mod h1:CP+Z+S9haxyB7J+nV6ywYry4dqlphArPXjcc0CsBVXc= k8s.io/legacy-cloud-providers v0.25.3/go.mod h1:0l3ulE+R3UXrVSfevmLvKSqJluRX/ABedGLGfpYf9t0= k8s.io/metrics v0.25.3/go.mod h1:5j5FKJb8RHsb3Q2PLsD/p1mLiA1fTrl+a62Les+KDhc= k8s.io/mount-utils v0.25.3/go.mod h1:odpFnGwJfFjN3SRnjfGS0902ubcj/W6hDOrNDmSSINo= +k8s.io/repo-infra v0.0.1-alpha.1/go.mod h1:wO1t9WaB99V80ljbeENTnayuEEwNZt7gECYh/CEyOJ8= k8s.io/sample-apiserver v0.25.3/go.mod h1:olYnTnro/u7rnn7dlKEceKb9ivx05tfIubBKefSPeVw= -k8s.io/system-validators v1.1.2/go.mod h1:bPldcLgkIUK22ALflnsXk8pvkTEndYdNuaHH6gRrl0Q= -k8s.io/utils v0.0.0-20200414100711-2df71ebbae66/go.mod 
h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/system-validators v1.0.4/go.mod h1:HgSgTg4NAGNoYYjKsUyk52gdNi2PVDswQ9Iyn66R7NI= +k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= @@ -2499,6 +2594,9 @@ modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= +mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= +mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= +mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34/go.mod h1:H6SUd1XjIs+qQCyskXg5OFSrilMRUkD8ePJpHKDPaeY= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= @@ -2508,13 +2606,15 @@ sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h6 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= -sigs.k8s.io/kustomize/api v0.12.1 h1:7YM7gW3kYBwtKvoY216ZzY+8hM+lV53LUayghNRJ0vM= sigs.k8s.io/kustomize/api v0.12.1/go.mod h1:y3JUhimkZkR6sbLNwfJHxvo1TCLwuwm14sCYnkH6S1s= +sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0= +sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3/go.mod h1:9n16EZKMhXBNSiUC5kSdFQJkdH3zbxS/JoO619G1VAY= sigs.k8s.io/kustomize/cmd/config v0.10.9/go.mod h1:T0s850zPV3wKfBALA0dyeP/K74jlJcoP8Pr9ZWwE3MQ= sigs.k8s.io/kustomize/kustomize/v4 v4.5.7/go.mod h1:VSNKEH9D9d9bLiWEGbS6Xbg/Ih0tgQalmPvntzRxZ/Q= -sigs.k8s.io/kustomize/kyaml v0.13.9 h1:Qz53EAaFFANyNgyOEJbT/yoIHygK40/ZcvU3rgry2Tk= sigs.k8s.io/kustomize/kyaml v0.13.9/go.mod h1:QsRbD0/KcU+wdk0/L0fIp2KLnohkVzs6fQ85/nOXac4= -sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 h1:W6cLQc5pnqM7vh3b7HvGNfXrJ/xL6BDMS0v1V/HHg5U= +sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3/go.mod h1:JWP1Fj0VWGHyw3YUPjXSQnRnrwezrZSrApfX5S0nIag= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= @@ -2523,5 +2623,6 @@ sigs.k8s.io/yaml v1.1.0/go.mod 
h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= volcano.sh/volcano v1.1.0 h1:jxLaQEMpvToYrSEOTwBU7R5Vg+tsASdbAdmbqoZY2DY= volcano.sh/volcano v1.1.0/go.mod h1:zddAnaLKfnKMYkyMbdhlb8J3HwGeqvryeLl1tux/G4M= diff --git a/pkg/controller/sparkapplication/sparkapp_util.go b/pkg/controller/sparkapplication/sparkapp_util.go index ff321625cc..97329e5e8f 100644 --- a/pkg/controller/sparkapplication/sparkapp_util.go +++ b/pkg/controller/sparkapplication/sparkapp_util.go @@ -21,7 +21,7 @@ import ( "fmt" v1 "k8s.io/api/core/v1" - "k8s.io/kubernetes/pkg/apis/policy" + policy "k8s.io/api/policy/v1beta1" "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" "github.com/kubeflow/spark-operator/pkg/config" diff --git a/pkg/controller/sparkapplication/submission.go b/pkg/controller/sparkapplication/submission.go index 9c7d8c8cac..733c1746ac 100644 --- a/pkg/controller/sparkapplication/submission.go +++ b/pkg/controller/sparkapplication/submission.go @@ -26,8 +26,9 @@ import ( "github.com/golang/glog" v1 "k8s.io/api/core/v1" + policy "k8s.io/api/policy/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/apis/policy" "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" "github.com/kubeflow/spark-operator/pkg/config" From e9dbdfb9719b528c79308659d149a611a8a9438e Mon Sep 17 00:00:00 2001 From: Andrii Chubatiuk Date: Tue, 16 Apr 2024 23:22:52 +0300 Subject: [PATCH 44/87] fix: upgraded k8s deps (#1983) Signed-off-by: Andrew Chubatiuk --- Makefile | 2 +- charts/spark-operator-chart/Chart.yaml | 4 +- charts/spark-operator-chart/README.md | 2 +- go.mod | 66 +- go.sum | 2221 +---------------- hack/api-docs/Dockerfile | 9 +- main.go | 26 +- .../volcano/volcano_scheduler.go | 4 +- .../scheduledsparkapplication/controller.go | 2 +- .../sparkapplication/sparkapp_util.go | 16 - pkg/controller/sparkapplication/submission.go | 13 +- pkg/webhook/resourceusage/enforcer.go | 2 +- 12 files changed, 84 insertions(+), 2283 deletions(-) diff --git a/Makefile b/Makefile index b26d51c23b..3e3b3f0af5 100644 --- a/Makefile +++ b/Makefile @@ -38,7 +38,7 @@ install-sparkctl: | sparkctl/sparkctl-darwin-amd64 sparkctl/sparkctl-linux-amd64 build-api-docs: docker build -t temp-api-ref-docs hack/api-docs docker run -v $$(pwd):/repo/ temp-api-ref-docs \ - sh -c "cd /repo/ && /go/gen-crd-api-reference-docs/gen-crd-api-reference-docs \ + sh -c "cd /repo/ && /go/bin/gen-crd-api-reference-docs \ -config /repo/hack/api-docs/api-docs-config.json \ -api-dir github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2 \ -template-dir /repo/hack/api-docs/api-docs-template \ diff --git a/charts/spark-operator-chart/Chart.yaml b/charts/spark-operator-chart/Chart.yaml index f1005f46e2..d6688363c8 100644 --- a/charts/spark-operator-chart/Chart.yaml +++ b/charts/spark-operator-chart/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 name: spark-operator description: A Helm chart for Spark on Kubernetes operator -version: 1.2.6 -appVersion: v1beta2-1.4.2-3.5.0 +version: 1.2.7 +appVersion: v1beta2-1.4.3-3.5.0 keywords: - spark home: https://github.com/kubeflow/spark-operator diff --git a/charts/spark-operator-chart/README.md 
b/charts/spark-operator-chart/README.md index d3a230be0c..b166205081 100644 --- a/charts/spark-operator-chart/README.md +++ b/charts/spark-operator-chart/README.md @@ -1,6 +1,6 @@ # spark-operator -![Version: 1.2.6](https://img.shields.io/badge/Version-1.2.6-informational?style=flat-square) ![AppVersion: v1beta2-1.4.2-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.4.2--3.5.0-informational?style=flat-square) +![Version: 1.2.7](https://img.shields.io/badge/Version-1.2.7-informational?style=flat-square) ![AppVersion: v1beta2-1.4.3-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.4.3--3.5.0-informational?style=flat-square) A Helm chart for Spark on Kubernetes operator diff --git a/go.mod b/go.mod index c5a4ffb5a2..f44232d712 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.19.0 github.com/prometheus/client_model v0.6.1 - github.com/robfig/cron v1.2.0 + github.com/robfig/cron/v3 v3.0.1 github.com/spf13/cobra v1.8.0 github.com/stretchr/testify v1.9.0 gocloud.dev v0.37.0 @@ -28,7 +28,7 @@ require ( k8s.io/kubectl v0.29.3 k8s.io/kubernetes v1.29.3 k8s.io/utils v0.0.0-20240310230437-4693a0247e57 - volcano.sh/volcano v1.1.0 + volcano.sh/apis v1.8.2 ) require ( @@ -36,12 +36,7 @@ require ( cloud.google.com/go/compute v1.25.1 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect cloud.google.com/go/iam v1.1.7 // indirect - github.com/Azure/go-autorest v14.2.0+incompatible // indirect - github.com/Azure/go-autorest/autorest v0.11.29 // indirect - github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect - github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect - github.com/Azure/go-autorest/logger v0.2.1 // indirect - github.com/Azure/go-autorest/tracing v0.6.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/aws/aws-sdk-go v1.51.16 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.17.11 // indirect @@ -62,7 +57,6 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/elazarl/goproxy v0.0.0-20200421181703-e76ad31c14f6 // indirect github.com/emicklei/go-restful/v3 v3.12.0 // indirect github.com/fatih/camelcase v1.0.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -73,11 +67,9 @@ require ( github.com/go-openapi/jsonreference v0.21.0 // indirect github.com/go-openapi/swag v0.23.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.0.1 // indirect - github.com/google/gnostic v0.7.0 // indirect github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect @@ -96,10 +88,12 @@ require ( github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-runewidth v0.0.15 // indirect github.com/moby/spdystream v0.2.0 // indirect + github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect 
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/common v0.52.2 // indirect @@ -129,7 +123,7 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/cli-runtime v0.25.3 // indirect + k8s.io/cli-runtime v0.29.3 // indirect k8s.io/klog/v2 v2.120.1 // indirect k8s.io/kube-openapi v0.0.0-20240403164606-bc84c2ddaf99 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect @@ -140,28 +134,28 @@ require ( ) replace ( - k8s.io/api => k8s.io/api v0.25.3 - k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.25.3 - k8s.io/apimachinery => k8s.io/apimachinery v0.25.3 - k8s.io/apiserver => k8s.io/apiserver v0.25.3 - k8s.io/cli-runtime => k8s.io/cli-runtime v0.25.3 - k8s.io/client-go => k8s.io/client-go v0.25.3 - k8s.io/cloud-provider => k8s.io/cloud-provider v0.25.3 - k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.25.3 - k8s.io/code-generator => k8s.io/code-generator v0.25.3 - k8s.io/component-base => k8s.io/component-base v0.25.3 - k8s.io/cri-api => k8s.io/cri-api v0.25.3 - k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.25.3 - k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.25.3 - k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.25.3 - k8s.io/kube-proxy => k8s.io/kube-proxy v0.25.3 - k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.25.3 - k8s.io/kubectl => k8s.io/kubectl v0.25.3 - k8s.io/kubelet => k8s.io/kubelet v0.25.3 - k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.25.3 - k8s.io/metrics => k8s.io/metrics v0.25.3 - k8s.io/node-api => k8s.io/node-api v0.25.3 - k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.25.3 - k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.25.3 - k8s.io/sample-controller => k8s.io/sample-controller v0.25.3 + k8s.io/api => k8s.io/api v0.29.3 + k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.29.3 + k8s.io/apimachinery => k8s.io/apimachinery v0.29.3 + k8s.io/apiserver => k8s.io/apiserver v0.29.3 + k8s.io/cli-runtime => k8s.io/cli-runtime v0.29.3 + k8s.io/client-go => k8s.io/client-go v0.29.3 + k8s.io/cloud-provider => k8s.io/cloud-provider v0.29.3 + k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.29.3 + k8s.io/code-generator => k8s.io/code-generator v0.29.3 + k8s.io/component-base => k8s.io/component-base v0.29.3 + k8s.io/cri-api => k8s.io/cri-api v0.29.3 + k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.29.3 + k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.29.3 + k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.29.3 + k8s.io/kube-proxy => k8s.io/kube-proxy v0.29.3 + k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.29.3 + k8s.io/kubectl => k8s.io/kubectl v0.29.3 + k8s.io/kubelet => k8s.io/kubelet v0.29.3 + k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.29.3 + k8s.io/metrics => k8s.io/metrics v0.29.3 + k8s.io/node-api => k8s.io/node-api v0.29.3 + k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.29.3 + k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.29.3 + k8s.io/sample-controller => k8s.io/sample-controller v0.29.3 ) diff --git a/go.sum b/go.sum index 4de117db63..774d575a5c 100644 --- a/go.sum +++ b/go.sum @@ -1,692 +1,19 @@ -bitbucket.org/bertimus9/systemstat 
v0.0.0-20180207000608-0eeff89b0690/go.mod h1:Ulb78X89vxKYgdL24HMTiXYHlyHEvruOj1ZPlqeNEZM= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= -cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= -cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= -cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= -cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= -cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= -cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= -cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= -cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= cloud.google.com/go v0.112.2 h1:ZaGT6LiG7dBzi6zNOvVZwacaXlmf3lRqnC4DQzqyRQw= cloud.google.com/go v0.112.2/go.mod h1:iEqjp//KquGIJV/m+Pk3xecgKNhV+ry+vVTsy4TbDms= -cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= 
-cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= -cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= -cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= -cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= -cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM= -cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= -cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= -cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= -cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= -cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ= -cloud.google.com/go/aiplatform v1.36.1/go.mod h1:WTm12vJRPARNvJ+v6P52RDHCNe4AhvjcIZ/9/RRHy/k= -cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= -cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= -cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= -cloud.google.com/go/analytics v0.17.0/go.mod h1:WXFa3WSym4IZ+JiKmavYdJwGG/CvpqiqczmL59bTD9M= -cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE= -cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= -cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= -cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= -cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= -cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= -cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= -cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= -cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY= -cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM= -cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= -cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU= -cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI= -cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8= -cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= -cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= -cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84= -cloud.google.com/go/appengine v1.7.0/go.mod h1:eZqpbHFCqRGa2aCdope7eC0SWLV1j0neb/QnMJVWx6A= -cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= -cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= -cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= -cloud.google.com/go/area120 v0.7.0/go.mod h1:a3+8EUD1SX5RUcCs3MY5YasiO1z6yLiNLRiFrykbynY= 
-cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= -cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= -cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= -cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= -cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= -cloud.google.com/go/artifactregistry v1.11.1/go.mod h1:lLYghw+Itq9SONbCa1YWBoWs1nOucMH0pwXN1rOBZFI= -cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ= -cloud.google.com/go/artifactregistry v1.12.0/go.mod h1:o6P3MIvtzTOnmvGagO9v/rOjjA0HmhJ+/6KAXrmYDCI= -cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= -cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= -cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= -cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= -cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= -cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= -cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo= -cloud.google.com/go/asset v1.12.0/go.mod h1:h9/sFOa4eDIyKmH6QMpm4eUK3pDojWnUhTgJlk762Hg= -cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= -cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= -cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= -cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= -cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= -cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= -cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= -cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= -cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= -cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= -cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= -cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= -cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= -cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= -cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= -cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= -cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= -cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= -cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= -cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= -cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM= -cloud.google.com/go/beyondcorp 
v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= -cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= -cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= -cloud.google.com/go/bigquery v1.47.0/go.mod h1:sA9XOgy0A8vQK9+MWhEQTY6Tix87M/ZurWFIxmF9I/E= -cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= -cloud.google.com/go/bigquery v1.49.0/go.mod h1:Sv8hMmTFFYBlt/ftw2uN6dFdQPzBlREY9yBh7Oy7/4Q= -cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= -cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= -cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= -cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= -cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= -cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss= -cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= -cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= -cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= -cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= -cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= -cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= -cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= -cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= -cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= -cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= -cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= -cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE= -cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= -cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= -cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= -cloud.google.com/go/cloudbuild v1.6.0/go.mod h1:UIbc/w9QCbH12xX+ezUsgblrWv+Cv4Tw83GiSMHOn9M= -cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg= -cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= -cloud.google.com/go/clouddms v1.3.0/go.mod 
h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= -cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= -cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= -cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= -cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= -cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= -cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= -cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y= -cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= -cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= -cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= -cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= -cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= -cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= -cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= -cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= -cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= -cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= -cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= -cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= -cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= -cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= -cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= cloud.google.com/go/compute v1.25.1 h1:ZRpHJedLtTpKgr3RV1Fx23NuaAEN1Zfx9hw1u4aJdjU= cloud.google.com/go/compute v1.25.1/go.mod h1:oopOIR53ly6viBYxaDhBfJwzUAxf1zE//uf3IB011ls= -cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= -cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= -cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= -cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= -cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= -cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= -cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= -cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4= -cloud.google.com/go/container v1.14.0/go.mod h1:3AoJMPhHfLDxLvrlVWaK57IXzaPnLaZq63WX59aQBfM= -cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= 
-cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= -cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= -cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI= -cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= -cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= -cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= -cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= -cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= -cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= -cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M= -cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0= -cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= -cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= -cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= -cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= -cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= -cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= -cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= -cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA= -cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= -cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= -cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= -cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= -cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= -cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= -cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= -cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= -cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= -cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ= -cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= -cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= -cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= -cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= -cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= -cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= -cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod 
h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= -cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= -cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= -cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= -cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= -cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= -cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs= -cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= -cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= -cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= -cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI= -cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= -cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= -cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= -cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= -cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= -cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= -cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM= -cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4= -cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= -cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= -cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= -cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= -cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= -cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= -cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= -cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= -cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM= -cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= -cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= -cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= -cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= -cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= -cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= -cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc= -cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= -cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= -cloud.google.com/go/essentialcontacts 
v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= -cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= -cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= -cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= -cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= -cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw= -cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= -cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= -cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= -cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs= -cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= -cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= -cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= -cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= -cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= -cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw= -cloud.google.com/go/functions v1.12.0/go.mod h1:AXWGrF3e2C/5ehvwYo/GH6O5s09tOPksiKhz+hH8WkA= -cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= -cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= -cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= -cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= -cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= -cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= -cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= -cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= -cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= -cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= -cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= -cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= -cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= -cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= -cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E= -cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= -cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= -cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= -cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= -cloud.google.com/go/grafeas v0.2.0/go.mod 
h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= -cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= -cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= -cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= -cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= -cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= -cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= -cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= -cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= -cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= -cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= -cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= -cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= cloud.google.com/go/iam v1.1.7 h1:z4VHOhwKLF/+UYXAJDFwGtNF0b6gjsW1Pk9Ml0U/IoM= cloud.google.com/go/iam v1.1.7/go.mod h1:J4PMPg8TtyurAUvSmPj8FF3EDgY1SPRZxcUGrn7WXGA= -cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= -cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= -cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= -cloud.google.com/go/iap v1.7.0/go.mod h1:beqQx56T9O1G1yNPph+spKpNibDlYIiIixiqsQXxLIo= -cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= -cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= -cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= -cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= -cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= -cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= -cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o= -cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= -cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= -cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= -cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= -cloud.google.com/go/kms v1.8.0/go.mod h1:4xFEhYFqvW+4VMELtZyxomGSYtSQKzM178ylFW4jMAg= -cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= -cloud.google.com/go/kms v1.10.0/go.mod h1:ng3KTUtQQU9bPX3+QGLsflZIHlkbn8amFAMY63m8d24= -cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= -cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= -cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= -cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= -cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= -cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= -cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= -cloud.google.com/go/lifesciences v0.6.0/go.mod 
h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= -cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= -cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= -cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= -cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= -cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= -cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= -cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= -cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= -cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= -cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= -cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw= -cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= -cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= -cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= -cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= -cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= -cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= -cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= -cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= -cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= -cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= -cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= -cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= -cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= -cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= -cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= -cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= -cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= -cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= -cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= -cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= -cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= -cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= -cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E= -cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= -cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= -cloud.google.com/go/networkmanagement 
v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= -cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= -cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= -cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= -cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k= -cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= -cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= -cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= -cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= -cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= -cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE= -cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= -cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= -cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= -cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= -cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= -cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= -cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= -cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= -cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= -cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= -cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= -cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= -cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= -cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= -cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= -cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= -cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= -cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= -cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= -cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= -cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= -cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= -cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= -cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= -cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= -cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw= -cloud.google.com/go/policytroubleshooter 
v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= -cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= -cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= -cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg= -cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= -cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= -cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8= -cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= -cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= -cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k= -cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= -cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= -cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= -cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= -cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= -cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= -cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= -cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA= -cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= -cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= -cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= -cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= -cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= -cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= -cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= -cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= -cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= -cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= -cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= -cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= -cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= -cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= 
-cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA=
-cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0=
-cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots=
-cloud.google.com/go/resourcemanager v1.6.0/go.mod h1:YcpXGRs8fDzcUl1Xw8uOVmI8JEadvhRIkoXXUNVYcVo=
-cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI=
-cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU=
-cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg=
-cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA=
-cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4=
-cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY=
-cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc=
-cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y=
-cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14=
-cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do=
-cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo=
-cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM=
-cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg=
-cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s=
-cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI=
-cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk=
-cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44=
-cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc=
-cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc=
-cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA=
-cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4=
-cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4=
-cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU=
-cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4=
-cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0=
-cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU=
-cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q=
-cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA=
-cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8=
-cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0=
-cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU=
-cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc=
-cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk=
-cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk=
-cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0=
-cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag=
-cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU=
-cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s=
-cloud.google.com/go/servicecontrol v1.10.0/go.mod h1:pQvyvSRh7YzUF2efw7H87V92mxU8FnFDawMClGCNuAA=
-cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc=
-cloud.google.com/go/servicecontrol v1.11.1/go.mod h1:aSnNNlwEFBY+PWGQ2DoM0JJ/QUXqV5/ZD9DOLB7SnUk=
-cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs=
-cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg=
-cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4=
-cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U=
-cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY=
-cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s=
-cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco=
-cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo=
-cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc=
-cloud.google.com/go/servicemanagement v1.8.0/go.mod h1:MSS2TDlIEQD/fzsSGfCdJItQveu9NXnUniTrq/L8LK4=
-cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E=
-cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU=
-cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec=
-cloud.google.com/go/serviceusage v1.6.0/go.mod h1:R5wwQcbOWsyuOfbP9tGdAnCAc6B9DRwPG1xtWMDeuPA=
-cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4=
-cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw=
-cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A=
-cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos=
-cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk=
-cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M=
-cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM=
-cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ=
-cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0=
-cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco=
-cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0=
-cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI=
-cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
-cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
-cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
-cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
-cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
-cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
-cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc=
-cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s=
-cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y=
-cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4=
 cloud.google.com/go/storage v1.40.0 h1:VEpDQV5CJxFmJ6ueWNsKxcr1QAYOXEgxDa+sBbJahPw=
 cloud.google.com/go/storage v1.40.0/go.mod h1:Rrj7/hKlG87BLqDJYtwR0fbPld8uJPbQ2ucUMY7Ir0g=
-cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w=
-cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I=
-cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4=
-cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw=
-cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw=
-cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g=
-cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM=
-cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA=
-cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c=
-cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8=
-cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4=
-cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc=
-cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ=
-cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg=
-cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM=
-cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28=
-cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y=
-cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA=
-cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk=
-cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs=
-cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg=
-cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0=
-cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos=
-cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos=
-cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk=
-cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw=
-cloud.google.com/go/video v1.12.0/go.mod h1:MLQew95eTuaNDEGriQdcYn0dTwf9oWiA4uYebxM5kdg=
-cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk=
-cloud.google.com/go/video v1.14.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ=
-cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ=
-cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU=
-cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4=
-cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M=
-cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU=
-cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU=
-cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0=
-cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo=
-cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo=
-cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY=
-cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E=
-cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY=
-cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0=
-cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE=
-cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g=
-cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc=
-cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY=
-cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208=
-cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8=
-cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY=
-cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w=
-cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8=
-cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes=
-cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE=
-cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg=
-cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc=
-cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A=
-cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg=
-cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo=
-cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ=
-cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng=
-cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0=
-cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M=
-cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M=
-cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA=
-cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw=
-dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8=
-git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc=
-github.com/Azure/azure-sdk-for-go v35.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/azure-sdk-for-go v55.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
 github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
-github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
-github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
-github.com/Azure/go-autorest/autorest v0.11.27/go.mod h1:7l8ybrIdUmGqZMTD0sRtAr8NvbHjfofbf8RSP2q7w7U=
-github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw=
-github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs=
-github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
-github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
-github.com/Azure/go-autorest/autorest/adal v0.9.20/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
-github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk=
-github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8=
-github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c=
-github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
-github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
-github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
-github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
-github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
-github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
-github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw=
-github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU=
-github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc=
-github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
-github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8=
-github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
-github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
-github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
-github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
-github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
-github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20190822182118-27a4ced34534/go.mod h1:iroGtC8B3tQiqtds1l+mgk/BBOrxbqjH+eUfFQYRc14=
-github.com/GoogleCloudPlatform/k8s-cloud-provider v1.18.1-0.20220218231025-f11817397a1b/go.mod h1:FNj4KYEAAHfYu68kRYolGoxkaJn+6mdEsaM12VTwuI0=
-github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod h1:3VYc5hodBMJ5+l/7J4xAyMeuM2PNuepvHlGs8yilUCA=
-github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk=
-github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
-github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
-github.com/Microsoft/hcsshim v0.0.0-20190417211021-672e52e9209d/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
-github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
-github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/OpenPeeDeeP/depguard v1.0.0/go.mod h1:7/4sitnI9YlQgTLLk734QlzXT8DuHVnAyztLplQjk+o=
-github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM=
-github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
-github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
-github.com/Rican7/retry v0.1.0/go.mod h1:FgOROf8P5bebcC1DS0PdOQiqGUridaZvikzUmkFW6gg=
-github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
-github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
-github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY=
-github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk=
-github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
-github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
-github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
-github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
-github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
-github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
-github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20220418222510-f25a4f6275ed/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
-github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0=
-github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI=
-github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU=
-github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
-github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
-github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
-github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
-github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
-github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
-github.com/auth0/go-jwt-middleware v0.0.0-20170425171159-5493cabe49f7/go.mod h1:LWMyo4iOLWXHGdBki7NIht1kHru/0wM179h+d3g8ATM=
-github.com/aws/aws-sdk-go v1.28.2/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go v1.38.49/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
 github.com/aws/aws-sdk-go v1.51.16 h1:vnWKK8KjbftEkuPX8bRj3WHsLy1uhotn0eXptpvrxJI=
 github.com/aws/aws-sdk-go v1.51.16/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
 github.com/aws/aws-sdk-go-v2 v1.26.1 h1:5554eUqIYVWpU0YmeeYZ0wU64H2VLBs8TlhRB2L+EkA=
@@ -727,1288 +54,281 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 h1:cwIxeBttqPN3qkaAjcEcsh8NYr8n
 github.com/aws/aws-sdk-go-v2/service/sts v1.28.6/go.mod h1:FZf1/nKNEkHdGGJP/cI2MoIMquumuRK6ol3QQJNDxmw=
 github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q=
 github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
-github.com/bazelbuild/bazel-gazelle v0.18.2/go.mod h1:D0ehMSbS+vesFsLGiD6JXu3mVEzOlfUl8wNnq+x/9p0=
-github.com/bazelbuild/bazel-gazelle v0.19.1-0.20191105222053-70208cbdc798/go.mod h1:rPwzNHUqEzngx1iVBfO/2X2npKaT3tqPqqHW6rVsn/A=
-github.com/bazelbuild/buildtools v0.0.0-20190731111112-f720930ceb60/go.mod h1:5JP0TXzWDHXv8qvxRC4InIazwdyDseBDbzESUMKk1yU=
-github.com/bazelbuild/buildtools v0.0.0-20190917191645-69366ca98f89/go.mod h1:5JP0TXzWDHXv8qvxRC4InIazwdyDseBDbzESUMKk1yU=
-github.com/bazelbuild/rules_go v0.0.0-20190719190356-6dae44dc5cab/go.mod h1:MC23Dc/wkXEyk3Wpq6lCqz0ZAYOZDw2DR5y3N1q2i7M=
-github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
-github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/bifurcation/mint v0.0.0-20180715133206-93c51c6ce115/go.mod h1:zVt7zX3K/aDCk9Tj+VM7YymsX66ERvzCJzw8rFCX2JU=
-github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
-github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
-github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
-github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
-github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
-github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
-github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
-github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
-github.com/caddyserver/caddy v1.0.3/go.mod h1:G+ouvOY32gENkJC+jhgl62TyhvqEsFaDiZ4uw0RzP1E=
-github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
-github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
-github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
-github.com/cespare/prettybench v0.0.0-20150116022406-03b8cfe5406c/go.mod h1:Xe6ZsFhtM8HrDku0pxJ3/Lr51rwykrzgFwpmTzleatY=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
 github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA=
-github.com/checkpoint-restore/go-criu v0.0.0-20181120144056-17b0214f6c48/go.mod h1:TrMrLQfeENAPYPRsJuq3jsqdlRh3lvi6trTZJG8+tho=
-github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/cilium/ebpf v0.0.0-20191025125908-95b36a581eed/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313/go.mod h1:P1wt9Z3DP8O6W3rvwCt0REIlshg1InHImaLW0t3ObY0=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
-github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
-github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
-github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
-github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
-github.com/codegangsta/negroni v1.0.0/go.mod h1:v0y3T5G7Y1UlFfyxFn/QLRU4a2EuNau2iZY63YTKWo0=
-github.com/container-storage-interface/spec v1.2.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4=
-github.com/containerd/console v0.0.0-20170925154832-84eeaae905fa/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
-github.com/containerd/containerd v1.0.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/typeurl v0.0.0-20190228175220-2a93cfde8c20/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
-github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
-github.com/coredns/corefile-migration v1.0.6/go.mod h1:OFwBp/Wc9dJt5cAZzHWMNhK1r5L0p0jDwIBc6j8NC8E=
-github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
-github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
-github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
-github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
-github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
-github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
+github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
+github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/daviddengcn/go-colortext v1.0.0/go.mod h1:zDqEI5NVUop5QPpVJUxE9UO10hRnmkD5G4Pmri9+m4c=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
-github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
-github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
-github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
-github.com/elazarl/goproxy v0.0.0-20200421181703-e76ad31c14f6 h1:GhNw/V+7mWhxPyD/n7STfwp/MJJ+Z/sa6wmzYXr96Ls=
-github.com/elazarl/goproxy v0.0.0-20200421181703-e76ad31c14f6/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM=
-github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8=
-github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
 github.com/emicklei/go-restful/v3 v3.12.0 h1:y2DdzBAURM29NFF94q6RaY4vjIH1rtwDapwQtU84iWk=
 github.com/emicklei/go-restful/v3 v3.12.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
-github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
-github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
-github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo=
-github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w=
-github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw=
-github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls=
 github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
 github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8=
 github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
-github.com/fatih/color v1.6.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
-github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0=
-github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
-github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
-github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
-github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
-github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg=
-github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
-github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
-github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
-github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
-github.com/go-acme/lego v2.5.0+incompatible/go.mod h1:yzMNe9CasVUhkquNvti5nAtPmG94USbYxYrZfTkIn0M=
-github.com/go-bindata/go-bindata v3.1.1+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo=
-github.com/go-critic/go-critic v0.3.5-0.20190526074819-1df300866540/go.mod h1:+sE8vrLDS2M0pZkBk0wy6+nLdKexVDrl/jBqQOTDThA=
-github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
 github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
 github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
-github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g=
-github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks=
-github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
-github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
-github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY=
-github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
-github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U=
-github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk=
-github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTDqfpXvXAN0sXM=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
-github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
-github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
-github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
 github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4=
-github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
-github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
-github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
-github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
-github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk=
-github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU=
-github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
-github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
-github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
-github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
-github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
-github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
-github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
-github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
 github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
 github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
-github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
-github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
-github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
-github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
-github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
-github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
 github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
 github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
-github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
-github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
-github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
-github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs=
-github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk=
-github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA=
-github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64=
-github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4=
-github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
-github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
-github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
-github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY=
-github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
-github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
-github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
-github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY=
-github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU=
-github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
-github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
-github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
-github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
 github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
 github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
-github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
-github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
-github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
-github.com/go-ozzo/ozzo-validation v3.5.0+incompatible/go.mod h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU=
-github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M=
-github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
 github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
 github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
-github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4=
-github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ=
-github.com/go-toolsmith/astequal v0.0.0-20180903214952-dcb477bfacd6/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY=
-github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY=
-github.com/go-toolsmith/astfmt v0.0.0-20180903215011-8f8ee99c3086/go.mod h1:mP93XdblcopXwlyN4X4uodxXQhldPGZbcEJIimQHrkg=
-github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw=
-github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU=
-github.com/go-toolsmith/astp v0.0.0-20180903215135-0af7e3c24f30/go.mod h1:SV2ur98SGypH1UjcPpCatrV5hPazG6+IfNHbkDXBRrk=
-github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI=
-github.com/go-toolsmith/pkgload v0.0.0-20181119091011-e9e65178eee8/go.mod h1:WoMrjiy4zvdS+Bg6z9jZH82QXwkcgCBX6nOfnmdaHks=
-github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc=
-github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8=
-github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU=
-github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
-github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
-github.com/godbus/dbus v0.0.0-20181101234600-2ff6f7ffd60f/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
-github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
-github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
-github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
-github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
-github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
 github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4=
 github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
-github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/mock v1.0.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
-github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
-github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
-github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
 github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
 github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
 github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
 github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
 github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
 github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
 github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
-github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4=
-github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk=
-github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6/go.mod h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0=
-github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8=
-github.com/golangci/go-tools v0.0.0-20190318055746-e32c54105b7c/go.mod h1:unzUULGw35sjyOYjUt0jMTXqHlZPpPc6e+xfO4cd6mM=
-github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3/go.mod h1:JXrF4TWy4tXYn62/9x8Wm/K/dm06p8tCKwFRDPZG/1o=
-github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU=
-github.com/golangci/gofmt v0.0.0-20181222123516-0b8337e80d98/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU=
-github.com/golangci/golangci-lint v1.18.0/go.mod h1:kaqo8l0OZKYPtjNmG4z4HrWLgcYNIJ9B9q3LWri9uLg=
-github.com/golangci/gosec v0.0.0-20190211064107-66fb7fc33547/go.mod h1:0qUabqiIQgfmlAmulqxyiGkkyF6/tOGSnY2cnPVwrzU=
-github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc/go.mod h1:e5tpTHCfVze+7EpLEozzMB3eafxo2KT5veNg1k6byQU=
-github.com/golangci/lint-1 v0.0.0-20190420132249-ee948d087217/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg=
-github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o=
-github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA=
-github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21/go.mod h1:tf5+bzsHdTM0bsB7+8mt0GUMvjCgwLpTapNZHU8AajI=
-github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4=
-github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ=
-github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAONeMXrxql8uvOKuAZSu8aM5RUGv+1C6IJaEho=
-github.com/golangplus/bytes v1.0.0/go.mod h1:AdRaCFwmc/00ZzELMWb01soso6W1R/++O1XL80yAn+A=
-github.com/golangplus/fmt v1.0.0/go.mod h1:zpM0OfbMCjPtd2qkTD/jX2MgiFCqklhSUFyDW44gVQE=
-github.com/golangplus/testing v1.0.0/go.mod h1:ZDreixUV3YzhoVraIDyOzHrr76p6NUh6k/pPg/Q3gYA=
-github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
 github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
-github.com/google/cadvisor v0.35.0/go.mod h1:1nql6U13uTHaLYB8rLS5x9IJc2qT6Xd/Tr1sTX6NE48=
-github.com/google/cel-go v0.12.5/go.mod h1:Jk7ljRzLBhkmiAwBoUxB1sZSCVBAzkqPF25olK/iRDw=
-github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
-github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
-github.com/google/gnostic v0.7.0 h1:d7EpuFp8vVdML+y0JJJYiKeOLjKTdH/GvVkLOBWqJpw=
-github.com/google/gnostic v0.7.0/go.mod h1:IAcUyMl6vtC95f60EZ8oXyqTsOersP6HbwjeG7EyDPM=
 github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 h1:0VpGH+cDhbDtdcweoyCVsF3fhN8kejK6rFe/2FFX2nU=
 github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49/go.mod h1:BkkQ4L1KS1xMt2aWSPStnn55ChGC0DPOn2FQYj+f25M=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
-github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
-github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
 github.com/google/go-replayers/grpcreplay v1.1.0 h1:S5+I3zYyZ+GQz68OfbURDdt/+cSMqCK1wrvNx7WBzTE=
 github.com/google/go-replayers/grpcreplay v1.1.0/go.mod h1:qzAvJ8/wi57zq7gWqaE6AwLM6miiXUQwP1S+I9icmhk=
 github.com/google/go-replayers/httpreplay v1.2.0 h1:VM1wEyyjaoU53BwrOnaf9VhAyQQEEioJvFYxYcLRKzk=
 github.com/google/go-replayers/httpreplay v1.2.0/go.mod h1:WahEFFZZ7a1P4VM1qEeHy+tME4bwyqPcwWbNlUI1Mcg=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
 github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
-github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
 github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw=
 github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
-github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
 github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
 github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
 github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
-github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/wire v0.6.0 h1:HBkoIh4BdSxoyo9PveV8giw7ZsaBOvzWKfcg/6MrVwI=
 github.com/google/wire v0.6.0/go.mod h1:F4QhpQ9EDIdJ1Mbop/NZBRB+5yrR6qg3BnctaoUk6NA=
-github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
-github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
-github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg=
-github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
-github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
 github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
 github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
-github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
-github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
-github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
-github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
-github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
-github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo=
-github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY=
-github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8=
-github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI=
 github.com/googleapis/gax-go/v2 v2.12.3 h1:5/zPPDvw8Q1SuXjrqrZslrqT7dL/uJT2CQii/cLCKqA=
 github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4=
-github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
-github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
-github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
-github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
-github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
-github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
-github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
 github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
-github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
-github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
 github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc h1:f8eY6cV/x1x+HLjOp4r72s/31/V2aTUtg5oKRRPf8/Q=
 github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w=
-github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
-github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
-github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
-github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
-github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
-github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
-github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
-github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
-github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/hcl v0.0.0-20180404174102-ef8a98b0bbce/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w=
-github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
-github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
-github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
-github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
-github.com/heketi/heketi v9.0.1-0.20190917153846-c2e2a4ab7ab9+incompatible/go.mod h1:bB9ly3RchcQqsQ9CpyaQwvva7RS5ytVoSoholZQON6o=
-github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6/go.mod h1:xGMAM8JLi7UkZt1i4FQeQy0R2T8GLUwQhOP5M1gBhy4=
-github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
-github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
 github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
 github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
-github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
 github.com/inconshreveable/mousetrap v1.1.0
h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= -github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a/go.mod h1:wK6yTYYcgjHE1Z1QtXACPDjcFJyBskHEdagmnq3vsP8= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/karrick/godirwalk v1.7.5/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= -github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v0.0.0-20161130080628-0de1eaf82fa3/go.mod h1:jxZFDH7ILpTPQTk+E2s+z4CUas9lVNjIuKR4c5/zKgM= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= 
-github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= -github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= -github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= -github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= -github.com/libopenstorage/openstorage v1.0.0/go.mod h1:Sp1sIObHjat1BeXhfMqLZ14wnOzEhNx2YQedreMcUyc= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= -github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= -github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= -github.com/lpabon/godbc v0.1.1/go.mod h1:Jo9QV0cf3U6jZABgiJ2skINAXb9j8m51r07g4KI92ZA= -github.com/lucas-clemente/aes12 v0.0.0-20171027163421-cd47fb39b79f/go.mod h1:JpH9J1c9oX6otFSgdUHwUBUizmKlrMjxWnIAjff4m04= -github.com/lucas-clemente/quic-clients v0.1.0/go.mod h1:y5xVIEoObKqULIKivu+gD/LU90pL73bTdtQjPBvtCBk= -github.com/lucas-clemente/quic-go v0.10.2/go.mod h1:hvaRS9IHjFLMq76puFJeWNfmn+H70QZ/CXoxqw9bzao= -github.com/lucas-clemente/quic-go-certificates v0.0.0-20160823095156-d2f86524cced/go.mod h1:NCcRLrOTZbzhZvixZLlERbJtDtYsmMw8Jc4vS8Z0g58= -github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= -github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= -github.com/magiconair/properties v1.7.6/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.0/go.mod 
h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= -github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mesos/mesos-go v0.0.9/go.mod h1:kPYCMQ9gsOXVAle1OsoY4I1+9kPu8GHkf88aV59fDr4= -github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2/go.mod h1:g4cOPxcjV0oFq3qwpjSA30LReKD8AoIfwAY9VvG35NY= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.3/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.4/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/mindprince/gonvml v0.0.0-20190828220739-9ebdce4bb989/go.mod h1:2eu9pRWp8mo84xCg6KswZ+USQHjwgRhNp06sozOdsTY= -github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= -github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= -github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= -github.com/mitchellh/cli v1.0.0/go.mod 
h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= -github.com/moby/sys/mountinfo v0.6.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= -github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= +github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA= +github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= -github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/mozilla/tls-observatory v0.0.0-20180409132520-8791a200eb40/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= -github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod 
h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mvdan/xurls v1.1.0/go.mod h1:tQlNn3BED8bE/15hnSL2HLkDeLWpNPAwtw7wkEq44oU= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= -github.com/naoina/toml v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= -github.com/nbutton23/zxcvbn-go v0.0.0-20160627004424-a22cb81b2ecd/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= -github.com/nbutton23/zxcvbn-go v0.0.0-20171102151520-eafdab6b0663/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= -github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= -github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk= github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= -github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.17.0/go.mod 
h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= -github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v1.0.0-rc10/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runtime-spec v1.0.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/selinux v1.3.1-0.20190929122143-5215b1806f52/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml v1.1.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= -github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= -github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= -github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= -github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= -github.com/pquerna/ffjson v0.0.0-20180717144149-af8b230fcd20/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod 
h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.52.2 h1:LW8Vk7BccEdONfrJBDffQGRtpSzi5CQaRZGtboOO2ck= github.com/prometheus/common v0.52.2/go.mod h1:lrWtQx+iDfn2mbH5GUzlH9TSHyfZpHkSiG1W7y3sF2Q= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.13.0 h1:GqzLlQyfsPbaEHaQkO7tbDlriv/4o5Hudv6OXHGKX7o= github.com/prometheus/procfs v0.13.0/go.mod h1:cd4PFCR54QLnGKPaKGA6l+cfuNXtht43ZKY6tow0Y1g= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= -github.com/quobyte/api v0.1.2/go.mod h1:jL7lIHrmqQ7yh05OJ+eEEdHr0u/kmT1Ff9iHd+4H6VI= -github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= -github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= 
-github.com/robfig/cron v1.1.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= -github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ= -github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc= -github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= -github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= -github.com/russross/blackfriday v0.0.0-20170610170232-067529f716f4/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= -github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/shirou/gopsutil v0.0.0-20180427012116-c95755e4bcd7/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= -github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= -github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= -github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= 
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= -github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.0/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= -github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.2/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= -github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= -github.com/spf13/jwalterweatherman v0.0.0-20180109140146-7c0cea34c8ec/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.0.2/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= 
-github.com/storageos/go-api v0.0.0-20180912212459-343b3eff91fc/go.mod h1:ZrLn+e0ZuF3Y65PNF6dIwbJPZqfmtCXxFm9ckv0agOY= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= -github.com/thecodeteam/goscaleio v0.1.0/go.mod h1:68sdkZAsK8bvEwBlbQnlLS+xU+hvLYM/iQ8KXej1AwM= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/timakin/bodyclose v0.0.0-20190721030226-87058b9bfcec/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ultraware/funlen v0.0.1/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= -github.com/ultraware/funlen v0.0.2/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= -github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s= -github.com/valyala/quicktemplate v1.1.1/go.mod 
h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4= -github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= -github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= -github.com/vishvananda/netlink v1.0.0/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= -github.com/vishvananda/netns v0.0.0-20171111001504-be1fbeda1936/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= -github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= -github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= -go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= -go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/v2 v2.305.4/go.mod h1:Ud+VUwIi9/uQHOMA+4ekToJ12lTxlv0zB/+DHwTGEbU= -go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY= -go.etcd.io/etcd/pkg/v3 v3.5.4/go.mod h1:OI+TtO+Aa3nhQSppMbwE4ld3uF1/fqqwbpfndbbrEe0= -go.etcd.io/etcd/raft/v3 v3.5.4/go.mod h1:SCuunjYvZFC0fBX0vxMSPjuZmpcSk+XaAcMrD6Do03w= -go.etcd.io/etcd/server/v3 v3.5.4/go.mod h1:S5/YTU15KxymM5l3T6b09sNOHPXqGYIZStpuuGbb65c= -go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod 
h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.50.0 h1:zvpPXY7RfYAGSdYQLjp6zxdJNSYD/+FFoCTQN9IPxBs= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.50.0/go.mod h1:BMn8NB1vsxTljvuorms2hyOs8IBuuBEq0pl7ltOfy30= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0 h1:cEPbyTSEHlQR89XVlyo78gqluF8Y3oMeBkXGWzQsfXY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0/go.mod h1:DKdbWcT4GH1D0Y3Sqt/PFXt2naRKDWtU+eE6oLdFNA8= -go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= go.opentelemetry.io/otel v1.25.0 h1:gldB5FfhRl7OJQbUHt/8s0a7cE8fbsPAtdpRaApKy4k= go.opentelemetry.io/otel v1.25.0/go.mod h1:Wa2ds5NOXEMkCmUou1WA7ZBfLTHWIsp034OVD7AO+Vg= -go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= -go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= go.opentelemetry.io/otel/metric v1.25.0 h1:LUKbS7ArpFL/I2jJHdJcqMGxkRdxpPHE0VU/D4NuEwA= go.opentelemetry.io/otel/metric v1.25.0/go.mod h1:rkDLUSd2lC5lq2dFNrX9LGAbINP5B7WBkC78RXCpH5s= -go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= -go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= go.opentelemetry.io/otel/sdk v1.22.0 h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw= go.opentelemetry.io/otel/sdk v1.22.0/go.mod h1:iu7luyVGYovrRpe2fmj3CVKouQNdTOkxtLzPvPz1DOc= -go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= -go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= -go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= go.opentelemetry.io/otel/trace v1.25.0 h1:tqukZGLwQYRIFtSQM2u2+yfMVTgGVeqRLPUYx1Dq6RM= go.opentelemetry.io/otel/trace v1.25.0/go.mod h1:hCCs70XM/ljO+BeQkyFnbK28SBIJ/Emuha+ccrCRT7I= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= -go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY= go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= 
-go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= -go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= gocloud.dev v0.37.0 h1:XF1rN6R0qZI/9DYjN16Uy0durAmSlf58DHOcb28GPro= gocloud.dev v0.37.0/go.mod h1:7/O4kqdInCNsc6LqgmuFnS0GRew4XNNYWpA44yQnwco= -golang.org/x/build v0.0.0-20190927031335-2835ba2e683f/go.mod h1:fYw7AShPAhGMdXqA9gRadk/CcMsvLlClpE5oBwnS3dM= -golang.org/x/crypto v0.0.0-20180426230345-b49d69b5da94/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190424203555-c05e17bb3b2d/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto 
v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= -golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= -golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= 
-golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
-golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
-golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
-golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
-golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
-golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
-golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
-golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
-golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20170915142106-8351a756f30f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181102091132-c10e9556a7bc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190328230028-74de082e2cca/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190502183928-7f726cade0ab/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
-golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
-golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
-golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
-golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
-golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
 golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
 golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
 golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
 golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
 golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
 golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
-golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
-golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
-golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
-golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
-golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
-golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
-golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
-golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
-golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
-golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I=
-golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw=
 golang.org/x/oauth2 v0.19.0 h1:9+E/EZBCbTLNrbN35fHv/a/d/mOBatymz1zbtQrXpIg=
 golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc8=
-golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
 golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
 golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
-golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20171026204733-164713f0dfce/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190122071731-054c452bb702/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
@@ -2017,131 +337,31 @@ golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
-golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
 golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
-golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
 golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
 golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
 golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
 golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q=
 golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
-golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.0.0-20170915090833-1cbadb444a80/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
-golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
 golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
 golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
 golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
 golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
 golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
-golang.org/x/tools v0.0.0-20170915040203-e531a2a1c15f/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181117154741-2ddaf7f79a09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190121143147-24cd39ecf745/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190122202912-9c309ee22fab/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190521203540-521d6ed310dd/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
 golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190909030654-5b82db07426d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
-golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
-golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
-golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
-golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
 golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
 golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
 golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
 golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
@@ -2151,280 +371,28 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
-golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
-golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
 golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU=
 golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
-gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
-gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0=
-gonum.org/v1/gonum v0.6.2/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU=
-gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0=
-gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0=
-gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA=
-gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
-gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ=
-gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
-gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY=
-gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo=
-google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/api v0.6.1-0.20190607001116-5213b8090861/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4=
-google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
-google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
-google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
-google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
-google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
-google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
-google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
-google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
-google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
-google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
-google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
-google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
-google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
-google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
-google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
-google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
-google.golang.org/api v0.60.0/go.mod h1:d7rl65NZAkEQ90JFzqBjcRq1TVeG5ZoGV3sSpEnnVb4=
-google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
-google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
-google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g=
-google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA=
-google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8=
-google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs=
-google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
-google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
-google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw=
-google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg=
-google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o=
-google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g=
-google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
-google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
-google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI=
-google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
-google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
-google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
-google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08=
-google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70=
-google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo=
-google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0=
-google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
-google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
-google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
-google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI=
-google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0=
-google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg=
 google.golang.org/api v0.172.0 h1:/1OcMZGPmW1rX2LCu2CmGUD1KXK1+pfzxotxyRUCCdk=
 google.golang.org/api v0.172.0/go.mod h1:+fJZq6QXWfa9pXhnIzsjx4yI22d4aI9ZpLb58gvXjis=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
-google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
-google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
-google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
-google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
-google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
-google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
-google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
-google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
-google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
-google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
-google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
-google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
-google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
-google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
-google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211021150943-2b146023228c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
-google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
-google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
-google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
-google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=
-google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
-google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
-google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
-google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
-google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
-google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
-google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
-google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
-google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
-google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
-google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
-google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
-google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
-google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
-google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
-google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE=
-google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc=
-google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
-google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
-google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
-google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
-google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
-google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
-google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
-google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
-google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
-google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
-google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw=
-google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI=
-google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI=
-google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U=
-google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= -google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= -google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= -google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= -google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= -google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= -google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230112194545-e10362b5ecf9/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230123190316-2c411cf9d197/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= -google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= -google.golang.org/genproto v0.0.0-20230223222841-637eb2293923/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= -google.golang.org/genproto v0.0.0-20230303212802-e74f57abe488/go.mod h1:TvhZT5f700eVlTNwND1xoEZQeWTB2RY/65kplwl/bFA= -google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= -google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= -google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= -google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= -google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= 
-google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= -google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda h1:wu/KJm9KJwpfHWhkkZGohVC6KRrc1oJNr4jwtQMOQXw= google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda/go.mod h1:g2LLCvCeCSir/JJSWosk19BR4NVxGqHUC6rxIRsd7Aw= -google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= -google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= google.golang.org/genproto/googleapis/api v0.0.0-20240401170217-c3f982113cda h1:b6F6WIV4xHHD0FA4oIyzU6mHWg2WI2X1RBehwa5QN38= google.golang.org/genproto/googleapis/api v0.0.0-20240401170217-c3f982113cda/go.mod h1:AHcE/gZH76Bk/ROZhQphlRoWo5xKDEtz3eVEO1LfA8c= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1:LI5DOvAxUPMv/50agcLLoo+AdWc1irS9Rzz4vPuD1V4= google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod 
h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= -google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= -google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= google.golang.org/grpc v1.63.0 h1:WjKe+dnvABXyPJMD7KDNLxtoGk5tgk+YFWN6cBWjZE8= google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -2433,196 +401,51 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod 
h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/mcuadros/go-syslog.v2 v2.2.1/go.mod h1:l5LPIyOOyIdQquNg+oU6Z3524YwrcqEm0aKH+5zpt2U= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.1.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -gotest.tools/gotestsum v0.3.5/go.mod 
h1:Mnf3e5FUzXbkCfynWBGOwLssY7gTQgCHObK9tMpAriY= -gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= -gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= -grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.2/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= -k8s.io/api v0.25.3 h1:Q1v5UFfYe87vi5H7NU0p4RXC26PPMT8KOpr1TLQbCMQ= -k8s.io/api v0.25.3/go.mod h1:o42gKscFrEVjHdQnyRenACrMtbuJsVdP+WVjqejfzmI= -k8s.io/apiextensions-apiserver v0.25.3 h1:bfI4KS31w2f9WM1KLGwnwuVlW3RSRPuIsfNF/3HzR0k= -k8s.io/apiextensions-apiserver v0.25.3/go.mod h1:ZJqwpCkxIx9itilmZek7JgfUAM0dnTsA48I4krPqRmo= -k8s.io/apimachinery v0.25.3 h1:7o9ium4uyUOM76t6aunP0nZuex7gDf8VGwkR5RcJnQc= -k8s.io/apimachinery v0.25.3/go.mod h1:jaF9C/iPNM1FuLl7Zuy5b9v+n35HGSh6AQ4HYRkCqwo= -k8s.io/apiserver v0.25.3/go.mod h1:9bT47iM2fzRuhICJpM/RcQR9sqDDfZ7Yw60h0p3JW08= -k8s.io/cli-runtime v0.25.3 h1:Zs7P7l7db/5J+KDePOVtDlArAa9pZXaDinGWGZl0aM8= -k8s.io/cli-runtime v0.25.3/go.mod h1:InHHsjkyW5hQsILJGpGjeruiDZT/R0OkROQgD6GzxO4= -k8s.io/client-go v0.25.3 h1:oB4Dyl8d6UbfDHD8Bv8evKylzs3BXzzufLiO27xuPs0= -k8s.io/client-go v0.25.3/go.mod h1:t39LPczAIMwycjcXkVc+CB+PZV69jQuNx4um5ORDjQA= -k8s.io/cloud-provider v0.25.3/go.mod h1:P7TjzjbkqW3C0NAT1bNEZrZRifNNBVhrTb+iHRjfFz0= -k8s.io/cluster-bootstrap v0.25.3/go.mod h1:C5NZX+WE7v/hEyUfMj2sjQfKHsOVAYLrSFLtPspVljM= -k8s.io/code-generator v0.25.3/go.mod h1:9F5fuVZOMWRme7MYj2YT3L9ropPWPokd9VRhVyD3+0w= -k8s.io/component-base v0.25.3/go.mod h1:WYoS8L+IlTZgU7rhAl5Ctpw0WdMxDfCC5dkxcEFa/TI= -k8s.io/component-helpers v0.25.3/go.mod h1:yu9zgPm9pf5jpmUzOZA9PMHY16Eu8ymt8AnSL0Xwbgw= -k8s.io/controller-manager v0.25.3/go.mod h1:lWiZbjFw3joeiIVpscVfKywrAde4GE8Z84i5MIpEQMw= -k8s.io/cri-api v0.25.3/go.mod h1:riC/P0yOGUf2K1735wW+CXs1aY2ctBgePtnnoFLd0dU= -k8s.io/csi-translation-lib v0.25.3/go.mod h1:hrosK8ufTX5fz1CJO79EfPPkuLZWvaxEb4tovbcv/AU= -k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/heapster v1.2.0-beta.1/go.mod h1:h1uhptVXMwC8xtZBYsPXKVi8fpdlYkTs6k949KozGrM= -k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/klog/v2 
v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw= +k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80= +k8s.io/apiextensions-apiserver v0.29.3 h1:9HF+EtZaVpFjStakF4yVufnXGPRppWFEQ87qnO91YeI= +k8s.io/apiextensions-apiserver v0.29.3/go.mod h1:po0XiY5scnpJfFizNGo6puNU6Fq6D70UJY2Cb2KwAVc= +k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU= +k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU= +k8s.io/cli-runtime v0.29.3 h1:r68rephmmytoywkw2MyJ+CxjpasJDQY7AGc3XY2iv1k= +k8s.io/cli-runtime v0.29.3/go.mod h1:aqVUsk86/RhaGJwDhHXH0jcdqBrgdF3bZWk4Z9D4mkM= +k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg= +k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0= k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-aggregator v0.25.3/go.mod h1:w87nqmzJMf7S73FRYcnexqfYW0AFiLJiCkvVCwM3feE= -k8s.io/kube-controller-manager v0.25.3/go.mod h1:InfGO/O9vIPxpbgd0gUK22xVDsaGnJAUsATzwKk6BNg= -k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= -k8s.io/kube-openapi v0.0.0-20220401212409-b28bf2818661/go.mod h1:daOouuuwd9JXpv1L7Y34iV3yf6nxzipkKMWWlqlvK9M= -k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU= k8s.io/kube-openapi v0.0.0-20240403164606-bc84c2ddaf99 h1:w6nThEmGo9zcL+xH1Tu6pjxJ3K1jXFW+V0u4peqN8ks= k8s.io/kube-openapi v0.0.0-20240403164606-bc84c2ddaf99/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= -k8s.io/kube-proxy v0.25.3/go.mod h1:A/aOKVIY+tivIHk/i6hEF6IyLSDHKGooLnedg4dBJa8= -k8s.io/kube-scheduler v0.25.3/go.mod h1:0EKmWTnwNaHnmWwan4bABGQm4XyYpc146XyFWX4ey5E= -k8s.io/kubectl v0.25.3 h1:HnWJziEtmsm4JaJiKT33kG0kadx68MXxUE8UEbXnN4U= -k8s.io/kubectl v0.25.3/go.mod h1:glU7PiVj/R6Ud4A9FJdTcJjyzOtCJyc0eO7Mrbh3jlI= -k8s.io/kubelet v0.25.3/go.mod h1:YopVc6vLhveZb22I7AzcoWPap+t3/KJKqRZDa2MZmyE= -k8s.io/kubernetes v1.18.3/go.mod h1:Efg82S+Ti02A/Mww53bxroc7IgzX2bgPsf6hT8gAs3M= +k8s.io/kubectl v0.29.3 h1:RuwyyIU42MAISRIePaa8Q7A3U74Q9P4MoJbDFz9o3us= +k8s.io/kubectl v0.29.3/go.mod h1:yCxfY1dbwgVdEt2zkJ6d5NNLOhhWgTyrqACIoFhpdd4= k8s.io/kubernetes v1.29.3 h1:EuOAKN4zpiP+kBx/0e9yS5iBkPSyLml19juOqZxBtDw= k8s.io/kubernetes v1.29.3/go.mod h1:CP+Z+S9haxyB7J+nV6ywYry4dqlphArPXjcc0CsBVXc= -k8s.io/legacy-cloud-providers v0.25.3/go.mod h1:0l3ulE+R3UXrVSfevmLvKSqJluRX/ABedGLGfpYf9t0= -k8s.io/metrics v0.25.3/go.mod h1:5j5FKJb8RHsb3Q2PLsD/p1mLiA1fTrl+a62Les+KDhc= -k8s.io/mount-utils v0.25.3/go.mod h1:odpFnGwJfFjN3SRnjfGS0902ubcj/W6hDOrNDmSSINo= -k8s.io/repo-infra v0.0.1-alpha.1/go.mod h1:wO1t9WaB99V80ljbeENTnayuEEwNZt7gECYh/CEyOJ8= -k8s.io/sample-apiserver v0.25.3/go.mod h1:olYnTnro/u7rnn7dlKEceKb9ivx05tfIubBKefSPeVw= -k8s.io/system-validators v1.0.4/go.mod h1:HgSgTg4NAGNoYYjKsUyk52gdNi2PVDswQ9Iyn66R7NI= -k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils 
v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20240310230437-4693a0247e57 h1:gbqbevonBh57eILzModw6mrkbwM0gQBEuevE/AaBsHY= k8s.io/utils v0.0.0-20240310230437-4693a0247e57/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= -lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= -modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= -modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= -modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= -modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= -modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= -modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= -modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= -modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= -modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= -modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= -modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= -modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= -modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= -modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= -modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A= -modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU= -modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= -modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= -modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= -modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s= -modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= -modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= -modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= -modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= -modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= -modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= -modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= -modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= -modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= -modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= -modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= -modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= -modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= -modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= -modernc.org/z v1.5.1/go.mod 
h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= -mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= -mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= -mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34/go.mod h1:H6SUd1XjIs+qQCyskXg5OFSrilMRUkD8ePJpHKDPaeY= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.33/go.mod h1:soWkSNf2tZC7aMibXEqVhCd73GOY5fJikn8qbdzemB0= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= -sigs.k8s.io/kustomize/api v0.12.1/go.mod h1:y3JUhimkZkR6sbLNwfJHxvo1TCLwuwm14sCYnkH6S1s= sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0= sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3/go.mod h1:9n16EZKMhXBNSiUC5kSdFQJkdH3zbxS/JoO619G1VAY= -sigs.k8s.io/kustomize/cmd/config v0.10.9/go.mod h1:T0s850zPV3wKfBALA0dyeP/K74jlJcoP8Pr9ZWwE3MQ= -sigs.k8s.io/kustomize/kustomize/v4 v4.5.7/go.mod h1:VSNKEH9D9d9bLiWEGbS6Xbg/Ih0tgQalmPvntzRxZ/Q= -sigs.k8s.io/kustomize/kyaml v0.13.9/go.mod h1:QsRbD0/KcU+wdk0/L0fIp2KLnohkVzs6fQ85/nOXac4= sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 h1:W6cLQc5pnqM7vh3b7HvGNfXrJ/xL6BDMS0v1V/HHg5U= sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3/go.mod h1:JWP1Fj0VWGHyw3YUPjXSQnRnrwezrZSrApfX5S0nIag= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= -sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= -volcano.sh/volcano v1.1.0 h1:jxLaQEMpvToYrSEOTwBU7R5Vg+tsASdbAdmbqoZY2DY= -volcano.sh/volcano v1.1.0/go.mod h1:zddAnaLKfnKMYkyMbdhlb8J3HwGeqvryeLl1tux/G4M= +volcano.sh/apis v1.8.2 h1:MJ1EXpdQeKG+XEhb/I3liWgMFzkgW3qCcj6qdhTuvfA= +volcano.sh/apis v1.8.2/go.mod h1:h+xbUpkjfRaHjktAi8h+7JNnNahjwhRSgpN9FUUwNXQ= diff --git a/hack/api-docs/Dockerfile b/hack/api-docs/Dockerfile index 68ab360a08..15de4a02fd 100644 --- a/hack/api-docs/Dockerfile +++ b/hack/api-docs/Dockerfile @@ -14,10 +14,5 @@ # limitations under the License. 
# -FROM golang:1.15.2-alpine - -RUN apk add git -RUN git clone https://github.com/ahmetb/gen-crd-api-reference-docs.git && \ - cd gen-crd-api-reference-docs && \ - git checkout ccf856504caaeac38151b57a950d3f8a7942b9db && \ - go build \ No newline at end of file +FROM golang:1.22.2-alpine +RUN go install github.com/ahmetb/gen-crd-api-reference-docs@latest diff --git a/main.go b/main.go index 96886a87ee..58f6505b74 100644 --- a/main.go +++ b/main.go @@ -32,6 +32,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/informers" clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" @@ -102,20 +103,25 @@ func main() { startCh := make(chan struct{}, 1) if *enableLeaderElection { + podName := os.Getenv("POD_NAME") hostname, err := os.Hostname() if err != nil { glog.Fatal(err) } - resourceLock, err := resourcelock.New(resourcelock.ConfigMapsLeasesResourceLock, - *leaderElectionLockNamespace, - *leaderElectionLockName, - kubeClient.CoreV1(), - kubeClient.CoordinationV1(), - resourcelock.ResourceLockConfig{ - Identity: hostname, - // TODO: This is a workaround for a nil dereference in client-go. This line can be removed when that dependency is updated. - EventRecorder: &record.FakeRecorder{}, - }) + broadcaster := record.NewBroadcaster() + source := apiv1.EventSource{Component: "spark-operator-leader-elector", Host: hostname} + recorder := broadcaster.NewRecorder(scheme.Scheme, source) + resourceLock := &resourcelock.LeaseLock{ + LeaseMeta: metav1.ObjectMeta{ + Namespace: *leaderElectionLockNamespace, + Name: *leaderElectionLockName, + }, + Client: kubeClient.CoordinationV1(), + LockConfig: resourcelock.ResourceLockConfig{ + Identity: podName, + EventRecorder: recorder, + }, + } if err != nil { glog.Fatal(err) } diff --git a/pkg/batchscheduler/volcano/volcano_scheduler.go b/pkg/batchscheduler/volcano/volcano_scheduler.go index 8ef79f200a..a232784c7c 100644 --- a/pkg/batchscheduler/volcano/volcano_scheduler.go +++ b/pkg/batchscheduler/volcano/volcano_scheduler.go @@ -27,8 +27,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/rest" - "volcano.sh/volcano/pkg/apis/scheduling/v1beta1" - volcanoclient "volcano.sh/volcano/pkg/client/clientset/versioned" + "volcano.sh/apis/pkg/apis/scheduling/v1beta1" + volcanoclient "volcano.sh/apis/pkg/client/clientset/versioned" "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" schedulerinterface "github.com/kubeflow/spark-operator/pkg/batchscheduler/interface" diff --git a/pkg/controller/scheduledsparkapplication/controller.go b/pkg/controller/scheduledsparkapplication/controller.go index 643518aa01..056ba9d562 100644 --- a/pkg/controller/scheduledsparkapplication/controller.go +++ b/pkg/controller/scheduledsparkapplication/controller.go @@ -24,7 +24,7 @@ import ( "time" "github.com/golang/glog" - "github.com/robfig/cron" + "github.com/robfig/cron/v3" apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/pkg/controller/sparkapplication/sparkapp_util.go b/pkg/controller/sparkapplication/sparkapp_util.go index 97329e5e8f..3d2a58f07c 100644 --- a/pkg/controller/sparkapplication/sparkapp_util.go +++ b/pkg/controller/sparkapplication/sparkapp_util.go @@ -20,9 +20,6 @@ import ( "encoding/json" "fmt" - v1 "k8s.io/api/core/v1" - policy "k8s.io/api/policy/v1beta1" - 
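// NOTE (sketch, not part of the patch): the LeaseLock built in the main.go hunk
// above is what client-go's leader-election runner consumes. A minimal sketch of
// how such a lock is typically driven -- durations and callback bodies here are
// illustrative assumptions, not necessarily the operator's actual values:
//
//	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
//		Lock:          resourceLock,
//		LeaseDuration: 15 * time.Second,
//		RenewDeadline: 10 * time.Second,
//		RetryPeriod:   2 * time.Second,
//		Callbacks: leaderelection.LeaderCallbacks{
//			OnStartedLeading: func(ctx context.Context) { startCh <- struct{}{} },
//			OnStoppedLeading: func() { os.Exit(0) },
//		},
//	})
//
// Also worth noting: the pre-existing `if err != nil { glog.Fatal(err) }` kept
// below the LeaseLock literal now only re-checks the earlier os.Hostname error,
// so it is redundant (though harmless) after this change.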
"github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" "github.com/kubeflow/spark-operator/pkg/config" apiv1 "k8s.io/api/core/v1" @@ -212,19 +209,6 @@ func driverStateToApplicationState(driverState v1beta2.DriverState) v1beta2.Appl } } -func getVolumeFSType(v v1.Volume) (policy.FSType, error) { - switch { - case v.HostPath != nil: - return policy.HostPath, nil - case v.EmptyDir != nil: - return policy.EmptyDir, nil - case v.PersistentVolumeClaim != nil: - return policy.PersistentVolumeClaim, nil - } - - return "", fmt.Errorf("unknown volume type for volume: %#v", v) -} - func printStatus(status *v1beta2.SparkApplicationStatus) (string, error) { marshalled, err := json.MarshalIndent(status, "", " ") if err != nil { diff --git a/pkg/controller/sparkapplication/submission.go b/pkg/controller/sparkapplication/submission.go index 733c1746ac..d916e7d8be 100644 --- a/pkg/controller/sparkapplication/submission.go +++ b/pkg/controller/sparkapplication/submission.go @@ -26,7 +26,6 @@ import ( "github.com/golang/glog" v1 "k8s.io/api/core/v1" - policy "k8s.io/api/policy/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -510,16 +509,16 @@ func buildLocalVolumeOptions(prefix string, volume v1.Volume, volumeMount v1.Vol var options []string switch { case volume.HostPath != nil: - options = append(options, fmt.Sprintf(VolumeMountPathTemplate, string(policy.HostPath), volume.Name, volumeMount.MountPath)) - options = append(options, fmt.Sprintf(VolumeMountOptionTemplate, string(policy.HostPath), volume.Name, "path", volume.HostPath.Path)) + options = append(options, fmt.Sprintf(VolumeMountPathTemplate, "hostPath", volume.Name, volumeMount.MountPath)) + options = append(options, fmt.Sprintf(VolumeMountOptionTemplate, "hostPath", volume.Name, "path", volume.HostPath.Path)) if volume.HostPath.Type != nil { - options = append(options, fmt.Sprintf(VolumeMountOptionTemplate, string(policy.HostPath), volume.Name, "type", *volume.HostPath.Type)) + options = append(options, fmt.Sprintf(VolumeMountOptionTemplate, "hostPath", volume.Name, "type", *volume.HostPath.Type)) } case volume.EmptyDir != nil: - options = append(options, fmt.Sprintf(VolumeMountPathTemplate, string(policy.EmptyDir), volume.Name, volumeMount.MountPath)) + options = append(options, fmt.Sprintf(VolumeMountPathTemplate, "emptyDir", volume.Name, volumeMount.MountPath)) case volume.PersistentVolumeClaim != nil: - options = append(options, fmt.Sprintf(VolumeMountPathTemplate, string(policy.PersistentVolumeClaim), volume.Name, volumeMount.MountPath)) - options = append(options, fmt.Sprintf(VolumeMountOptionTemplate, string(policy.PersistentVolumeClaim), volume.Name, "claimName", volume.PersistentVolumeClaim.ClaimName)) + options = append(options, fmt.Sprintf(VolumeMountPathTemplate, "persistentVolumeClaim", volume.Name, volumeMount.MountPath)) + options = append(options, fmt.Sprintf(VolumeMountOptionTemplate, "persistentVolumeClaim", volume.Name, "claimName", volume.PersistentVolumeClaim.ClaimName)) } return options diff --git a/pkg/webhook/resourceusage/enforcer.go b/pkg/webhook/resourceusage/enforcer.go index 987895bc5a..87e9bbce06 100644 --- a/pkg/webhook/resourceusage/enforcer.go +++ b/pkg/webhook/resourceusage/enforcer.go @@ -2,9 +2,9 @@ package resourceusage import ( "fmt" + "github.com/golang/glog" so "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" crdinformers "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions" - "github.com/golang/glog" corev1 "k8s.io/api/core/v1" 
"k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/informers" From 1d3e38570673c42f788541b2246003f39c7a8f73 Mon Sep 17 00:00:00 2001 From: Andrii Chubatiuk Date: Wed, 17 Apr 2024 00:07:23 +0300 Subject: [PATCH 45/87] fix: fixed serviceaccount annotations (#1972) * fixed serviceaccount annotations Signed-off-by: Andrew Chubatiuk * bump chart version Signed-off-by: Andrew Chubatiuk * docs fix Signed-off-by: Andrew Chubatiuk --------- Signed-off-by: Andrew Chubatiuk --- charts/spark-operator-chart/Chart.yaml | 2 +- charts/spark-operator-chart/README.md | 2 +- charts/spark-operator-chart/templates/deployment.yaml | 10 +++++----- .../templates/spark-serviceaccount.yaml | 8 +++----- charts/spark-operator-chart/tests/deployment_test.yaml | 9 +++++++++ charts/spark-operator-chart/values.yaml | 1 + 6 files changed, 20 insertions(+), 12 deletions(-) diff --git a/charts/spark-operator-chart/Chart.yaml b/charts/spark-operator-chart/Chart.yaml index d6688363c8..b6f4c959e5 100644 --- a/charts/spark-operator-chart/Chart.yaml +++ b/charts/spark-operator-chart/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: spark-operator description: A Helm chart for Spark on Kubernetes operator -version: 1.2.7 +version: 1.2.8 appVersion: v1beta2-1.4.3-3.5.0 keywords: - spark diff --git a/charts/spark-operator-chart/README.md b/charts/spark-operator-chart/README.md index b166205081..9c3b266c8f 100644 --- a/charts/spark-operator-chart/README.md +++ b/charts/spark-operator-chart/README.md @@ -1,6 +1,6 @@ # spark-operator -![Version: 1.2.7](https://img.shields.io/badge/Version-1.2.7-informational?style=flat-square) ![AppVersion: v1beta2-1.4.3-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.4.3--3.5.0-informational?style=flat-square) +![Version: 1.2.8](https://img.shields.io/badge/Version-1.2.8-informational?style=flat-square) ![AppVersion: v1beta2-1.4.3-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.4.3--3.5.0-informational?style=flat-square) A Helm chart for Spark on Kubernetes operator diff --git a/charts/spark-operator-chart/templates/deployment.yaml b/charts/spark-operator-chart/templates/deployment.yaml index 699fb61018..48128acc18 100644 --- a/charts/spark-operator-chart/templates/deployment.yaml +++ b/charts/spark-operator-chart/templates/deployment.yaml @@ -54,15 +54,15 @@ spec: {{- toYaml .Values.securityContext | nindent 10 }} {{- if or .Values.metrics.enable .Values.webhook.enable }} ports: - {{ if .Values.metrics.enable }} + {{ if .Values.metrics.enable -}} - name: {{ .Values.metrics.portName | quote }} containerPort: {{ .Values.metrics.port }} - {{ end }} - {{ if .Values.webhook.enable }} + {{- end }} + {{ if .Values.webhook.enable -}} - name: {{ .Values.webhook.portName | quote }} containerPort: {{ .Values.webhook.port }} - {{ end }} - {{ end }} + {{- end }} + {{ end -}} args: - -v={{ .Values.logLevel }} - -logtostderr diff --git a/charts/spark-operator-chart/templates/spark-serviceaccount.yaml b/charts/spark-operator-chart/templates/spark-serviceaccount.yaml index 25c00ccfab..af8e8d7f9d 100644 --- a/charts/spark-operator-chart/templates/spark-serviceaccount.yaml +++ b/charts/spark-operator-chart/templates/spark-serviceaccount.yaml @@ -1,4 +1,4 @@ -{{- if .Values.serviceAccounts.spark.create -}} +{{- if .Values.serviceAccounts.spark.create }} {{- range $sparkJobNamespace := .Values.sparkJobNamespaces | default (list .Release.Namespace) }} --- apiVersion: v1 @@ -6,11 +6,9 @@ kind: ServiceAccount metadata: name: {{ include "spark.serviceAccountName" $ }} namespace: {{ $sparkJobNamespace }} - 
labels: - {{- include "spark-operator.labels" $ | nindent 4 }} {{- with $.Values.serviceAccounts.spark.annotations }} - annotations: - {{- toYaml . | nindent 4 }} + annotations: {{ toYaml . | nindent 4 }} {{- end }} + labels: {{ include "spark-operator.labels" $ | nindent 4 }} {{- end }} {{- end }} diff --git a/charts/spark-operator-chart/tests/deployment_test.yaml b/charts/spark-operator-chart/tests/deployment_test.yaml index 247266bb7e..34393bd33e 100644 --- a/charts/spark-operator-chart/tests/deployment_test.yaml +++ b/charts/spark-operator-chart/tests/deployment_test.yaml @@ -7,6 +7,15 @@ release: name: spark-operator tests: + - it: Should contain namespace arg when sparkJobNamespaces is equal to 1 + set: + sparkJobNamespaces: + - ns1 + asserts: + - contains: + path: spec.template.spec.containers[0].args + content: -namespace=ns1 + - it: Should add pod annotations if podAnnotations is set set: podAnnotations: diff --git a/charts/spark-operator-chart/values.yaml b/charts/spark-operator-chart/values.yaml index de1aee6d32..2232fd852c 100644 --- a/charts/spark-operator-chart/values.yaml +++ b/charts/spark-operator-chart/values.yaml @@ -54,6 +54,7 @@ serviceAccounts: # -- List of namespaces where to run spark jobs, operator namespace is included only when list of namespaces is empty sparkJobNamespaces: [] +# - ns1 # -- Operator concurrency, higher values might increase memory usage controllerThreads: 10 From dede19052e2c450de39292481a08605335178db3 Mon Sep 17 00:00:00 2001 From: Andrey Velichkevich Date: Wed, 17 Apr 2024 01:10:26 +0100 Subject: [PATCH 46/87] Use Kubeflow Docker Hub for Spark Operator Image (#1974) * Use Kubeflow Docker Hub for Spark Operator Image Signed-off-by: Andrey Velichkevich * Bump Helm Chart version Signed-off-by: Andrey Velichkevich --------- Signed-off-by: Andrey Velichkevich --- .github/workflows/main.yaml | 14 +++++++------- .github/workflows/release.yaml | 12 ++++++------ charts/spark-operator-chart/Chart.yaml | 2 +- charts/spark-operator-chart/README.md | 4 ++-- charts/spark-operator-chart/values.yaml | 2 +- 5 files changed, 17 insertions(+), 17 deletions(-) diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index 30f8435ce6..bb24682bd8 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -16,7 +16,7 @@ jobs: - name: Checkout source code uses: actions/checkout@v4 with: - fetch-depth: '0' + fetch-depth: "0" - name: The API should not change once published run: | @@ -79,7 +79,7 @@ jobs: - name: Build Spark-Operator Docker Image run: | - docker build -t ghcr.io/kubeflow/spark-operator:latest . + docker build -t docker.io/kubeflow/spark-operator:latest . - name: Check changes in resources used in docker file run: | @@ -89,7 +89,7 @@ jobs: if ! git diff --quiet origin/master -- $resource; then ## And the appVersion hasn't been updated if ! git diff origin/master -- charts/spark-operator-chart/Chart.yaml | grep +appVersion; then - echo "resource used in ghcr.io/kubeflow/spark-operator has changed in $resource, need to update the appVersion in charts/spark-operator-chart/Chart.yaml" + echo "resource used in docker.io/kubeflow/spark-operator has changed in $resource, need to update the appVersion in charts/spark-operator-chart/Chart.yaml" git diff origin/master -- $resource; echo "failing the build... " && false fi @@ -147,8 +147,8 @@ jobs: - name: Run chart-testing (install) run: | - docker build -t ghcr.io/kubeflow/spark-operator:local . 
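          # NOTE (editorial, not part of the patch): the build-then-load sequence
          # here exists so that `ct install` (helm/chart-testing) exercises the
          # chart against the image that was just built instead of pulling a
          # published tag; `minikube image load` places that tag in the cluster's
          # local image cache. The hunk only swaps the registry prefix
          # (ghcr.io -> docker.io); the flow itself is unchanged.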
- minikube image load ghcr.io/kubeflow/spark-operator:local + docker build -t docker.io/kubeflow/spark-operator:local . + minikube image load docker.io/kubeflow/spark-operator:local ct install integration-test: @@ -174,8 +174,8 @@ jobs: - name: Build local spark-operator docker image for minikube testing run: | - docker build -t ghcr.io/kubeflow/spark-operator:local . - minikube image load ghcr.io/kubeflow/spark-operator:local + docker build -t docker.io/kubeflow/spark-operator:local . + minikube image load docker.io/kubeflow/spark-operator:local # The integration tests are currently broken see: https://github.com/kubeflow/spark-operator/issues/1416 # - name: Run chart-testing (integration test) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index bbc8dabd0b..196ebfdb55 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -5,7 +5,7 @@ on: branches: - master env: - REGISTRY_IMAGE: ghcr.io/kubeflow/spark-operator + REGISTRY_IMAGE: docker.io/kubeflow/spark-operator jobs: build-skip-check: @@ -67,10 +67,10 @@ jobs: - name: Login to Packages Container registry uses: docker/login-action@v3 with: - registry: ghcr.io + registry: docker.io username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - - name: Build and Push Spark-Operator Docker Image to github container registry + - name: Build and Push Spark-Operator Docker Image to Docker Hub id: build uses: docker/build-push-action@v5 with: @@ -116,9 +116,9 @@ jobs: - name: Login to Docker Hub uses: docker/login-action@v3 with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} + registry: docker.io + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Create manifest list and push working-directory: /tmp/digests run: | diff --git a/charts/spark-operator-chart/Chart.yaml b/charts/spark-operator-chart/Chart.yaml index b6f4c959e5..c4a2b7fe1b 100644 --- a/charts/spark-operator-chart/Chart.yaml +++ b/charts/spark-operator-chart/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: spark-operator description: A Helm chart for Spark on Kubernetes operator -version: 1.2.8 +version: 1.2.9 appVersion: v1beta2-1.4.3-3.5.0 keywords: - spark diff --git a/charts/spark-operator-chart/README.md b/charts/spark-operator-chart/README.md index 9c3b266c8f..f8009ecae6 100644 --- a/charts/spark-operator-chart/README.md +++ b/charts/spark-operator-chart/README.md @@ -1,6 +1,6 @@ # spark-operator -![Version: 1.2.8](https://img.shields.io/badge/Version-1.2.8-informational?style=flat-square) ![AppVersion: v1beta2-1.4.3-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.4.3--3.5.0-informational?style=flat-square) +![Version: 1.2.9](https://img.shields.io/badge/Version-1.2.9-informational?style=flat-square) ![AppVersion: v1beta2-1.4.3-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.4.3--3.5.0-informational?style=flat-square) A Helm chart for Spark on Kubernetes operator @@ -86,7 +86,7 @@ See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall) for command docum | envFrom | list | `[]` | Pod environment variable sources | | fullnameOverride | string | `""` | String to override release name | | image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | -| image.repository | string | `"ghcr.io/kubeflow/spark-operator"` | Image repository | +| image.repository | string | `"docker.io/kubeflow/spark-operator"` | Image repository | | image.tag | string | `""` | if set, override the image tag whose default 
is the chart appVersion. | | imagePullSecrets | list | `[]` | Image pull secrets | | ingressUrlFormat | string | `""` | Ingress URL format. Requires the UI service to be enabled by setting `uiService.enable` to true. | diff --git a/charts/spark-operator-chart/values.yaml b/charts/spark-operator-chart/values.yaml index 2232fd852c..3ea5eb8684 100644 --- a/charts/spark-operator-chart/values.yaml +++ b/charts/spark-operator-chart/values.yaml @@ -11,7 +11,7 @@ replicaCount: 1 image: # -- Image repository - repository: ghcr.io/kubeflow/spark-operator + repository: docker.io/kubeflow/spark-operator # -- Image pull policy pullPolicy: IfNotPresent # -- if set, override the image tag whose default is the chart appVersion. From c01e91da63662a18eaa81871b83fb27ada20dea2 Mon Sep 17 00:00:00 2001 From: Aran Shavit Date: Wed, 17 Apr 2024 22:49:27 +0300 Subject: [PATCH 47/87] fix spark-rbac (#1986) * remove non-existent field Signed-off-by: Aran Shavit * bump version Signed-off-by: Aran Shavit * README Signed-off-by: Aran Shavit --------- Signed-off-by: Aran Shavit --- charts/spark-operator-chart/Chart.yaml | 2 +- charts/spark-operator-chart/README.md | 2 +- charts/spark-operator-chart/templates/spark-rbac.yaml | 1 - 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/charts/spark-operator-chart/Chart.yaml b/charts/spark-operator-chart/Chart.yaml index c4a2b7fe1b..4c36a89b6b 100644 --- a/charts/spark-operator-chart/Chart.yaml +++ b/charts/spark-operator-chart/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: spark-operator description: A Helm chart for Spark on Kubernetes operator -version: 1.2.9 +version: 1.2.10 appVersion: v1beta2-1.4.3-3.5.0 keywords: - spark diff --git a/charts/spark-operator-chart/README.md b/charts/spark-operator-chart/README.md index f8009ecae6..67ad7be816 100644 --- a/charts/spark-operator-chart/README.md +++ b/charts/spark-operator-chart/README.md @@ -1,6 +1,6 @@ # spark-operator -![Version: 1.2.9](https://img.shields.io/badge/Version-1.2.9-informational?style=flat-square) ![AppVersion: v1beta2-1.4.3-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.4.3--3.5.0-informational?style=flat-square) +![Version: 1.2.10](https://img.shields.io/badge/Version-1.2.10-informational?style=flat-square) ![AppVersion: v1beta2-1.4.3-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.4.3--3.5.0-informational?style=flat-square) A Helm chart for Spark on Kubernetes operator diff --git a/charts/spark-operator-chart/templates/spark-rbac.yaml b/charts/spark-operator-chart/templates/spark-rbac.yaml index 2b645538b0..429992541b 100644 --- a/charts/spark-operator-chart/templates/spark-rbac.yaml +++ b/charts/spark-operator-chart/templates/spark-rbac.yaml @@ -49,7 +49,6 @@ subjects: roleRef: kind: Role name: spark-role - namespace: {{ $jobNamespace }} apiGroup: rbac.authorization.k8s.io {{- end }} {{- end }} From 5e9c0b40133d61e5ba254a68039fa90bbe28a6f3 Mon Sep 17 00:00:00 2001 From: Yi Chen Date: Thu, 18 Apr 2024 05:07:30 +0800 Subject: [PATCH 48/87] Update spark operator permissions for CRD (#1973) Signed-off-by: Yi Chen --- charts/spark-operator-chart/Chart.yaml | 2 +- charts/spark-operator-chart/README.md | 2 +- charts/spark-operator-chart/templates/rbac.yaml | 3 --- 3 files changed, 2 insertions(+), 5 deletions(-) diff --git a/charts/spark-operator-chart/Chart.yaml b/charts/spark-operator-chart/Chart.yaml index 4c36a89b6b..63a0c11b1c 100644 --- a/charts/spark-operator-chart/Chart.yaml +++ b/charts/spark-operator-chart/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: spark-operator 
description: A Helm chart for Spark on Kubernetes operator -version: 1.2.10 +version: 1.2.11 appVersion: v1beta2-1.4.3-3.5.0 keywords: - spark diff --git a/charts/spark-operator-chart/README.md b/charts/spark-operator-chart/README.md index 67ad7be816..ef56687415 100644 --- a/charts/spark-operator-chart/README.md +++ b/charts/spark-operator-chart/README.md @@ -1,6 +1,6 @@ # spark-operator -![Version: 1.2.10](https://img.shields.io/badge/Version-1.2.10-informational?style=flat-square) ![AppVersion: v1beta2-1.4.3-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.4.3--3.5.0-informational?style=flat-square) +![Version: 1.2.11](https://img.shields.io/badge/Version-1.2.11-informational?style=flat-square) ![AppVersion: v1beta2-1.4.3-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.4.3--3.5.0-informational?style=flat-square) A Helm chart for Spark on Kubernetes operator diff --git a/charts/spark-operator-chart/templates/rbac.yaml b/charts/spark-operator-chart/templates/rbac.yaml index 0f3ad19525..3e9b227137 100644 --- a/charts/spark-operator-chart/templates/rbac.yaml +++ b/charts/spark-operator-chart/templates/rbac.yaml @@ -68,10 +68,7 @@ rules: resources: - customresourcedefinitions verbs: - - create - get - - update - - delete - apiGroups: - admissionregistration.k8s.io resources: From f6c9d64cd63be13a3c0c93edb577c764be94b126 Mon Sep 17 00:00:00 2001 From: Duc Nguyen Date: Fri, 19 Apr 2024 11:17:47 +0700 Subject: [PATCH 49/87] chore: Add Timo to user list (#1615) --- docs/who-is-using.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/who-is-using.md b/docs/who-is-using.md index ca953f8135..18a0caa12c 100644 --- a/docs/who-is-using.md +++ b/docs/who-is-using.md @@ -39,6 +39,7 @@ | [DiDi](https://www.didiglobal.com) | @Run-Lin | Evaluation | Data Infrastructure | | [DeepCure](https://www.deepcure.ai) | @mschroering | Production | Spark / ML | | [C2FO](https://www.c2fo.com/) | @vanhoale | Production | Data Platform / Data Infrastructure | +| [Timo](https://timo.vn) | @vanducng | Production | Data Platform | | [Kognita](https://kognita.com.br/) | @andreclaudino | Production | MLOps, Data Platform / Data Infrastructure, ML/AI | | [Molex](https://www.molex.com/) | @AshishPushpSingh | Evaluation/Production | Data Platform | | [Qualytics](https://www.qualytics.co/) | @josecsotomorales | Production | Data Quality Platform | From 4d4f1c5f8c3aed60efc7815d2627b83ab59b09c6 Mon Sep 17 00:00:00 2001 From: Vara Bonthu Date: Thu, 18 Apr 2024 21:31:47 -0700 Subject: [PATCH 50/87] fix: Update Github workflow to publish Helm charts on chart changes, irrespective of image change (#1992) * Update workflow to publish Helm charts on chart changes, irrespective of image updates Signed-off-by: Vara Bonthu * fixed the chart name with prefix Signed-off-by: Vara Bonthu --------- Signed-off-by: Vara Bonthu --- .github/workflows/release.yaml | 53 ++++++++++++++++++++++++---------- 1 file changed, 37 insertions(+), 16 deletions(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 196ebfdb55..a5c5dbfa1f 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -11,8 +11,10 @@ jobs: build-skip-check: runs-on: ubuntu-latest outputs: - skip: ${{ steps.skip-check.outputs.skip }} - version: ${{ steps.skip-check.outputs.VERSION_TAG }} + image_changed: ${{ steps.skip-check.outputs.image_changed }} + chart_changed: ${{ steps.skip-check.outputs.chart_changed }} + app_version_tag: ${{ steps.skip-check.outputs.app_version_tag }} + chart_version_tag: ${{ 
steps.skip-check.outputs.chart_version_tag }} steps: - name: Checkout source code uses: actions/checkout@v4 @@ -21,22 +23,36 @@ jobs: - name: Check if build should be skipped id: skip-check run: | - VERSION_TAG=$(cat charts/spark-operator-chart/Chart.yaml | grep "appVersion: .*" | cut -c13-) - if git rev-parse -q --verify "refs/tags/$VERSION_TAG"; then - echo "Spark-Operator Docker Image Tag $VERSION_TAG already exists!" - echo "skip=true" >> "$GITHUB_OUTPUT" - else - git tag $VERSION_TAG - git push origin $VERSION_TAG - echo "Spark-Operator Docker Image new tag: $VERSION_TAG released" - echo "skip=false" >> "$GITHUB_OUTPUT" + app_version_tag=$(cat charts/spark-operator-chart/Chart.yaml | grep "appVersion: .*" | cut -c13-) + chart_version_tag=$(cat charts/spark-operator-chart/Chart.yaml | grep "version: .*" | cut -c10-) + + # Initialize flags + image_changed=false + chart_changed=false + + if ! git rev-parse -q --verify "refs/tags/$app_version_tag"; then + image_changed=true + git tag $app_version_tag + git push origin $app_version_tag + echo "Spark-Operator Docker Image new tag: $app_version_tag released" fi - echo "VERSION_TAG=${VERSION_TAG}" >> "$GITHUB_OUTPUT" + + if ! git rev-parse -q --verify "refs/tags/spark-operator-chart-$chart_version_tag"; then + chart_changed=true + git tag spark-operator-chart-$chart_version_tag + git push origin spark-operator-chart-$chart_version_tag + echo "Spark-Operator Helm Chart new tag: spark-operator-chart-$chart_version_tag released" + fi + + echo "image_changed=${image_changed}" >> "$GITHUB_OUTPUT" + echo "chart_changed=${chart_changed}" >> "$GITHUB_OUTPUT" + echo "app_version_tag=${app_version_tag}" >> "$GITHUB_OUTPUT" + echo "chart_version_tag=${chart_version_tag}" >> "$GITHUB_OUTPUT" release: runs-on: ubuntu-latest needs: - build-skip-check - if: needs.build-skip-check.outputs.skip == 'false' + if: needs.build-skip-check.outputs.image_changed == 'true' strategy: fail-fast: false matrix: @@ -97,7 +113,7 @@ jobs: needs: - release - build-skip-check - if: needs.build-skip-check.outputs.skip == 'false' + if: needs.build-skip-check.outputs.image_changed == 'true' steps: - name: Download digests uses: actions/download-artifact@v4 @@ -112,7 +128,7 @@ jobs: uses: docker/metadata-action@v5 with: images: ${{ env.REGISTRY_IMAGE }} - tags: ${{ needs.build-skip-check.outputs.version }} + tags: ${{ needs.build-skip-check.outputs.app_version_tag }} - name: Login to Docker Hub uses: docker/login-action@v3 with: @@ -129,14 +145,19 @@ jobs: docker buildx imagetools inspect ${{ env.REGISTRY_IMAGE }}:${{ steps.meta.outputs.version }} publish-chart: runs-on: ubuntu-latest - if: needs.publish-image.result == 'success' || needs.publish-image.result == 'skipped' + if: needs.build-skip-check.outputs.chart_changed == 'true' needs: - publish-image + - build-skip-check steps: - name: Checkout uses: actions/checkout@v4 with: fetch-depth: 0 + - name: Install Helm + uses: azure/setup-helm@v4 + with: + version: v3.14.3 - name: Configure Git run: | git config user.name "$GITHUB_ACTOR" From 4f26fe7b58c200cb20b993b289f4b251e0bec189 Mon Sep 17 00:00:00 2001 From: Vara Bonthu Date: Fri, 19 Apr 2024 00:06:47 -0700 Subject: [PATCH 51/87] fix: Removed `publish-image` dependency on publishing the helm chart (#1995) Signed-off-by: Vara Bonthu --- .github/workflows/release.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index a5c5dbfa1f..04e2e39c09 100644 --- a/.github/workflows/release.yaml +++ 
b/.github/workflows/release.yaml @@ -147,7 +147,6 @@ jobs: runs-on: ubuntu-latest if: needs.build-skip-check.outputs.chart_changed == 'true' needs: - - publish-image - build-skip-check steps: - name: Checkout From ccb3ceb54b6f09de8c67d96484fc911d122dcee3 Mon Sep 17 00:00:00 2001 From: Jacob Salway Date: Sat, 20 Apr 2024 08:10:20 +1000 Subject: [PATCH 52/87] Add emptyDir sizeLimit support for local dirs (#1993) * Add emptyDir sizeLimit support Signed-off-by: Jacob Salway * Bump appVersion and add sizeLimit example Signed-off-by: Jacob Salway --------- Signed-off-by: Jacob Salway --- charts/spark-operator-chart/Chart.yaml | 4 +- charts/spark-operator-chart/README.md | 2 +- docs/user-guide.md | 3 +- pkg/controller/sparkapplication/submission.go | 3 + .../sparkapplication/submission_test.go | 56 +++++++++++++++++++ 5 files changed, 64 insertions(+), 4 deletions(-) diff --git a/charts/spark-operator-chart/Chart.yaml b/charts/spark-operator-chart/Chart.yaml index 63a0c11b1c..fec08b3ae5 100644 --- a/charts/spark-operator-chart/Chart.yaml +++ b/charts/spark-operator-chart/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 name: spark-operator description: A Helm chart for Spark on Kubernetes operator -version: 1.2.11 -appVersion: v1beta2-1.4.3-3.5.0 +version: 1.2.12 +appVersion: v1beta2-1.4.4-3.5.0 keywords: - spark home: https://github.com/kubeflow/spark-operator diff --git a/charts/spark-operator-chart/README.md b/charts/spark-operator-chart/README.md index ef56687415..4999ae0024 100644 --- a/charts/spark-operator-chart/README.md +++ b/charts/spark-operator-chart/README.md @@ -1,6 +1,6 @@ # spark-operator -![Version: 1.2.11](https://img.shields.io/badge/Version-1.2.11-informational?style=flat-square) ![AppVersion: v1beta2-1.4.3-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.4.3--3.5.0-informational?style=flat-square) +![Version: 1.2.12](https://img.shields.io/badge/Version-1.2.12-informational?style=flat-square) ![AppVersion: v1beta2-1.4.4-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.4.4--3.5.0-informational?style=flat-square) A Helm chart for Spark on Kubernetes operator diff --git a/docs/user-guide.md b/docs/user-guide.md index 9d7221b77e..60354843bd 100644 --- a/docs/user-guide.md +++ b/docs/user-guide.md @@ -360,7 +360,8 @@ spec: persistentVolumeClaim: claimName: my-pvc - name: spark-work - emptyDir: {} + emptyDir: + sizeLimit: 5Gi driver: volumeMounts: - name: spark-work diff --git a/pkg/controller/sparkapplication/submission.go b/pkg/controller/sparkapplication/submission.go index d916e7d8be..a935267ca1 100644 --- a/pkg/controller/sparkapplication/submission.go +++ b/pkg/controller/sparkapplication/submission.go @@ -516,6 +516,9 @@ func buildLocalVolumeOptions(prefix string, volume v1.Volume, volumeMount v1.Vol } case volume.EmptyDir != nil: options = append(options, fmt.Sprintf(VolumeMountPathTemplate, "emptyDir", volume.Name, volumeMount.MountPath)) + if volume.EmptyDir.SizeLimit != nil { + options = append(options, fmt.Sprintf(VolumeMountOptionTemplate, "emptyDir", volume.Name, "sizeLimit", volume.EmptyDir.SizeLimit.String())) + } case volume.PersistentVolumeClaim != nil: options = append(options, fmt.Sprintf(VolumeMountPathTemplate, "persistentVolumeClaim", volume.Name, volumeMount.MountPath)) options = append(options, fmt.Sprintf(VolumeMountOptionTemplate, "persistentVolumeClaim", volume.Name, "claimName", volume.PersistentVolumeClaim.ClaimName)) diff --git a/pkg/controller/sparkapplication/submission_test.go b/pkg/controller/sparkapplication/submission_test.go index 
3c34950c64..5c2c08123b 100644 --- a/pkg/controller/sparkapplication/submission_test.go +++ b/pkg/controller/sparkapplication/submission_test.go @@ -29,6 +29,7 @@ import ( "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" @@ -366,6 +367,61 @@ func TestAddLocalDir_Driver_Executor(t *testing.T) { assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "executor", "hostPath", volumes[0].Name, "path", volumes[0].HostPath.Path), localDirOptions[3]) } +func TestAddEmptyDir_Driver_Executor_WithSizeLimit(t *testing.T) { + sizeLimit := resource.MustParse("5Gi") + volumes := []corev1.Volume{ + { + Name: "spark-local-dir-1", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{ + SizeLimit: &sizeLimit, + }, + }, + }, + } + + volumeMounts := []corev1.VolumeMount{ + { + Name: "spark-local-dir-1", + MountPath: "/tmp/mnt-1", + }, + } + + app := &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "spark-test", + UID: "spark-test-1", + }, + Spec: v1beta2.SparkApplicationSpec{ + Volumes: volumes, + Driver: v1beta2.DriverSpec{ + SparkPodSpec: v1beta2.SparkPodSpec{ + VolumeMounts: volumeMounts, + }, + }, + Executor: v1beta2.ExecutorSpec{ + SparkPodSpec: v1beta2.SparkPodSpec{ + VolumeMounts: volumeMounts, + }, + }, + }, + } + + localDirOptions, err := addLocalDirConfOptions(app) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, 0, len(app.Spec.Volumes)) + assert.Equal(t, 0, len(app.Spec.Driver.VolumeMounts)) + assert.Equal(t, 0, len(app.Spec.Executor.VolumeMounts)) + assert.Equal(t, 4, len(localDirOptions)) + assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "driver", "emptyDir", volumes[0].Name, volumeMounts[0].MountPath), localDirOptions[0]) + assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "driver", "emptyDir", volumes[0].Name, "sizeLimit", volumes[0].EmptyDir.SizeLimit.String()), localDirOptions[1]) + assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "executor", "emptyDir", volumes[0].Name, volumeMounts[0].MountPath), localDirOptions[2]) + assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "executor", "emptyDir", volumes[0].Name, "sizeLimit", volumes[0].EmptyDir.SizeLimit.String()), localDirOptions[3]) +} + func TestPopulateLabels_Driver_Executor(t *testing.T) { const ( AppLabelKey = "app-label-key" From bf2eccefefa08381616d8df22c958b938fd3e78a Mon Sep 17 00:00:00 2001 From: Andrey Velichkevich Date: Wed, 24 Apr 2024 15:49:19 +0100 Subject: [PATCH 53/87] Fix Docker Hub Credentials in CI (#2003) Signed-off-by: Andrey Velichkevich --- .github/workflows/release.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 04e2e39c09..417ffb267b 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -25,11 +25,11 @@ jobs: run: | app_version_tag=$(cat charts/spark-operator-chart/Chart.yaml | grep "appVersion: .*" | cut -c13-) chart_version_tag=$(cat charts/spark-operator-chart/Chart.yaml | grep "version: .*" | cut -c10-) - + # Initialize flags image_changed=false chart_changed=false - + if ! 
git rev-parse -q --verify "refs/tags/$app_version_tag"; then image_changed=true git tag $app_version_tag @@ -84,8 +84,8 @@ jobs: uses: docker/login-action@v3 with: registry: docker.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Build and Push Spark-Operator Docker Image to Docker Hub id: build uses: docker/build-push-action@v5 From 8fc405822d78119b66613612c105b82dfb592c71 Mon Sep 17 00:00:00 2001 From: t3mi Date: Wed, 24 Apr 2024 19:41:19 +0300 Subject: [PATCH 54/87] fix(chart): remove operator namespace default for job namespaces value (#1989) Signed-off-by: t3mi --- charts/spark-operator-chart/Chart.yaml | 2 +- charts/spark-operator-chart/README.md | 4 ++-- charts/spark-operator-chart/templates/deployment.yaml | 4 ++-- charts/spark-operator-chart/templates/spark-rbac.yaml | 2 +- charts/spark-operator-chart/values.yaml | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/charts/spark-operator-chart/Chart.yaml b/charts/spark-operator-chart/Chart.yaml index fec08b3ae5..b26c28afbd 100644 --- a/charts/spark-operator-chart/Chart.yaml +++ b/charts/spark-operator-chart/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: spark-operator description: A Helm chart for Spark on Kubernetes operator -version: 1.2.12 +version: 1.2.13 appVersion: v1beta2-1.4.4-3.5.0 keywords: - spark diff --git a/charts/spark-operator-chart/README.md b/charts/spark-operator-chart/README.md index 4999ae0024..1c8b8d2020 100644 --- a/charts/spark-operator-chart/README.md +++ b/charts/spark-operator-chart/README.md @@ -1,6 +1,6 @@ # spark-operator -![Version: 1.2.12](https://img.shields.io/badge/Version-1.2.12-informational?style=flat-square) ![AppVersion: v1beta2-1.4.4-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.4.4--3.5.0-informational?style=flat-square) +![Version: 1.2.13](https://img.shields.io/badge/Version-1.2.13-informational?style=flat-square) ![AppVersion: v1beta2-1.4.4-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.4.4--3.5.0-informational?style=flat-square) A Helm chart for Spark on Kubernetes operator @@ -126,7 +126,7 @@ See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall) for command docum | serviceAccounts.sparkoperator.create | bool | `true` | Create a service account for the operator | | serviceAccounts.sparkoperator.name | string | `""` | Optional name for the operator service account | | sidecars | list | `[]` | Sidecar containers | -| sparkJobNamespaces | list | `[]` | List of namespaces where to run spark jobs, operator namespace is included only when list of namespaces is empty | +| sparkJobNamespaces | list | `[]` | List of namespaces where to run spark jobs | | tolerations | list | `[]` | List of node taints to tolerate | | uiService.enable | bool | `true` | Enable UI service creation for Spark application | | volumeMounts | list | `[]` | | diff --git a/charts/spark-operator-chart/templates/deployment.yaml b/charts/spark-operator-chart/templates/deployment.yaml index 48128acc18..4f1f552e59 100644 --- a/charts/spark-operator-chart/templates/deployment.yaml +++ b/charts/spark-operator-chart/templates/deployment.yaml @@ -3,7 +3,7 @@ # In the post-install hook, the token corresponding to the operator service account # is used to authenticate with the Kubernetes API server to install the secret bundle. 
-{{- $jobNamespaces := .Values.sparkJobNamespaces | default (list .Release.Namespace) }} +{{- $jobNamespaces := .Values.sparkJobNamespaces | default list }} --- apiVersion: apps/v1 kind: Deployment @@ -66,7 +66,7 @@ spec: args: - -v={{ .Values.logLevel }} - -logtostderr - {{- if le (len $jobNamespaces) 1 }} + {{- if eq (len $jobNamespaces) 1 }} - -namespace={{ index $jobNamespaces 0 }} {{- end }} - -enable-ui-service={{ .Values.uiService.enable}} diff --git a/charts/spark-operator-chart/templates/spark-rbac.yaml b/charts/spark-operator-chart/templates/spark-rbac.yaml index 429992541b..acdeaa5cb5 100644 --- a/charts/spark-operator-chart/templates/spark-rbac.yaml +++ b/charts/spark-operator-chart/templates/spark-rbac.yaml @@ -1,5 +1,5 @@ {{- if or .Values.rbac.create .Values.rbac.createRole }} -{{- $jobNamespaces := .Values.sparkJobNamespaces | default (list .Release.Namespace) }} +{{- $jobNamespaces := .Values.sparkJobNamespaces | default list }} {{- range $jobNamespace := $jobNamespaces }} --- apiVersion: rbac.authorization.k8s.io/v1 diff --git a/charts/spark-operator-chart/values.yaml b/charts/spark-operator-chart/values.yaml index 3ea5eb8684..175a44cf89 100644 --- a/charts/spark-operator-chart/values.yaml +++ b/charts/spark-operator-chart/values.yaml @@ -52,7 +52,7 @@ serviceAccounts: # -- Optional annotations for the operator service account annotations: {} -# -- List of namespaces where to run spark jobs, operator namespace is included only when list of namespaces is empty +# -- List of namespaces where to run spark jobs sparkJobNamespaces: [] # - ns1 From 333ee0bcb220862ec76df607e091d72c165c2d13 Mon Sep 17 00:00:00 2001 From: Cian Gallagher Date: Fri, 26 Apr 2024 01:44:46 +0100 Subject: [PATCH 55/87] feat: add support for service labels on driver-svc (#1985) * feat: add support for service labels on driver-svc Signed-off-by: Cian Gallagher * docs: update helm docs Signed-off-by: Cian Gallagher * fix: undo changes to api-docs Signed-off-by: Cian Gallagher * docs: update api-docs Signed-off-by: Cian Gallagher * fix: update appVersion Signed-off-by: Cian Gallagher * ci: remove step to check api change Signed-off-by: Cian Gallagher * docs: update helm-docs Signed-off-by: Cian Gallagher * docs: update helm-docs Signed-off-by: Cian Gallagher * fix: update app version Signed-off-by: Cian Gallagher * docs: update helm docs Signed-off-by: Cian Gallagher --------- Signed-off-by: Cian Gallagher --- .github/workflows/main.yaml | 11 ---------- charts/spark-operator-chart/Chart.yaml | 4 ++-- charts/spark-operator-chart/README.md | 2 +- ...tor.k8s.io_scheduledsparkapplications.yaml | 4 ++++ ...parkoperator.k8s.io_sparkapplications.yaml | 4 ++++ docs/api-docs.md | 13 ++++++++++++ ...tor.k8s.io_scheduledsparkapplications.yaml | 4 ++++ ...parkoperator.k8s.io_sparkapplications.yaml | 4 ++++ .../sparkoperator.k8s.io/v1beta2/types.go | 6 +++++- pkg/config/constants.go | 2 ++ pkg/controller/sparkapplication/submission.go | 12 +++++++---- .../sparkapplication/submission_test.go | 21 +++++++++++-------- test.sh | 15 +++++++++++++ 13 files changed, 74 insertions(+), 28 deletions(-) create mode 100755 test.sh diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index bb24682bd8..8212230131 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -18,17 +18,6 @@ jobs: with: fetch-depth: "0" - - name: The API should not change once published - run: | - if ! 
git diff --quiet origin/master -- pkg/apis/sparkoperator.k8s.io/v1beta1; then - echo "sparkoperator.k8s.io/v1beta1 api has changed" - false - fi - if ! git diff --quiet origin/master -- pkg/apis/sparkoperator.k8s.io/v1beta2; then - echo "sparkoperator.k8s.io/v1beta2 api has changed" - false - fi - - name: The API documentation hasn't changed run: | make build-api-docs diff --git a/charts/spark-operator-chart/Chart.yaml b/charts/spark-operator-chart/Chart.yaml index b26c28afbd..87e814fe36 100644 --- a/charts/spark-operator-chart/Chart.yaml +++ b/charts/spark-operator-chart/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 name: spark-operator description: A Helm chart for Spark on Kubernetes operator -version: 1.2.13 -appVersion: v1beta2-1.4.4-3.5.0 +version: 1.2.14 +appVersion: v1beta2-1.4.5-3.5.0 keywords: - spark home: https://github.com/kubeflow/spark-operator diff --git a/charts/spark-operator-chart/README.md b/charts/spark-operator-chart/README.md index 1c8b8d2020..4fb0e064bb 100644 --- a/charts/spark-operator-chart/README.md +++ b/charts/spark-operator-chart/README.md @@ -1,6 +1,6 @@ # spark-operator -![Version: 1.2.13](https://img.shields.io/badge/Version-1.2.13-informational?style=flat-square) ![AppVersion: v1beta2-1.4.4-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.4.4--3.5.0-informational?style=flat-square) +![Version: 1.2.14](https://img.shields.io/badge/Version-1.2.14-informational?style=flat-square) ![AppVersion: v1beta2-1.4.5-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.4.5--3.5.0-informational?style=flat-square) A Helm chart for Spark on Kubernetes operator diff --git a/charts/spark-operator-chart/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml b/charts/spark-operator-chart/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml index 47c7113c5c..78ffc98a42 100644 --- a/charts/spark-operator-chart/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml +++ b/charts/spark-operator-chart/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml @@ -1336,6 +1336,10 @@ spec: additionalProperties: type: string type: object + serviceLabels: + additionalProperties: + type: string + type: object shareProcessNamespace: type: boolean sidecars: diff --git a/charts/spark-operator-chart/crds/sparkoperator.k8s.io_sparkapplications.yaml b/charts/spark-operator-chart/crds/sparkoperator.k8s.io_sparkapplications.yaml index daadc2c48f..a77b8f0e4a 100644 --- a/charts/spark-operator-chart/crds/sparkoperator.k8s.io_sparkapplications.yaml +++ b/charts/spark-operator-chart/crds/sparkoperator.k8s.io_sparkapplications.yaml @@ -1322,6 +1322,10 @@ spec: additionalProperties: type: string type: object + serviceLabels: + additionalProperties: + type: string + type: object shareProcessNamespace: type: boolean sidecars: diff --git a/docs/api-docs.md b/docs/api-docs.md index fc9b73195c..4deb0e76f0 100644 --- a/docs/api-docs.md +++ b/docs/api-docs.md @@ -1105,6 +1105,19 @@ executors to connect to the driver.
</p>
</td>
</tr>
+<tr>
+<td>
+<code>serviceLabels</code><br/>
+<em>
+map[string]string
+</em>
+</td>
+<td>
+<em>(Optional)</em>
+<p>ServiceLabels defines the labels to be added to the Kubernetes headless service used by
+executors to connect to the driver.</p>
+</td>
+</tr>
<tr>
<td>
<code>ports</code><br/>
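[Editor's note: the serviceLabels field documented above is translated into spark-submit configuration flags by the addDriverConfOptions change later in this patch, using the SparkDriverServiceLabelKeyPrefix constant it introduces. A minimal Go sketch of that mapping follows; the helper name and sample labels are illustrative, not part of the patch.]

```go
package main

import "fmt"

// driverServiceLabelFlags mirrors how addDriverConfOptions is expected to turn
// Spec.Driver.ServiceLabels into spark-submit --conf flags. The prefix matches
// the SparkDriverServiceLabelKeyPrefix constant added by this commit; the
// function itself is a hypothetical stand-in for demonstration.
func driverServiceLabelFlags(serviceLabels map[string]string) []string {
	const prefix = "spark.kubernetes.driver.service.label."
	flags := make([]string, 0, len(serviceLabels))
	for key, value := range serviceLabels {
		flags = append(flags, fmt.Sprintf("--conf %s%s=%s", prefix, key, value))
	}
	return flags
}

func main() {
	// Illustrative labels only; any key/value pairs from the CRD would work.
	for _, f := range driverServiceLabelFlags(map[string]string{"team": "data-platform"}) {
		fmt.Println(f) // --conf spark.kubernetes.driver.service.label.team=data-platform
	}
}
```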
diff --git a/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml b/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml index 47c7113c5c..78ffc98a42 100644 --- a/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml +++ b/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml @@ -1336,6 +1336,10 @@ spec: additionalProperties: type: string type: object + serviceLabels: + additionalProperties: + type: string + type: object shareProcessNamespace: type: boolean sidecars: diff --git a/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml b/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml index daadc2c48f..a77b8f0e4a 100644 --- a/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml +++ b/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml @@ -1322,6 +1322,10 @@ spec: additionalProperties: type: string type: object + serviceLabels: + additionalProperties: + type: string + type: object shareProcessNamespace: type: boolean sidecars: diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go b/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go index 616c2fc681..4224de4a6c 100644 --- a/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go +++ b/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go @@ -563,6 +563,10 @@ type DriverSpec struct { // executors to connect to the driver. // +optional ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"` + // ServiceLabels defines the labels to be added to the Kubernetes headless service used by + // executors to connect to the driver. + // +optional + ServiceLabels map[string]string `json:"serviceLabels,omitempty"` // Ports settings for the pods, following the Kubernetes specifications. // +optional Ports []Port `json:"ports,omitempty"` @@ -659,7 +663,7 @@ type MonitoringSpec struct { // If not specified, the content in spark-docker/conf/metrics.properties will be used. MetricsProperties *string `json:"metricsProperties,omitempty"` // MetricsPropertiesFile is the container local path of file metrics.properties for configuring - //the Spark metric system. If not specified, value /etc/metrics/conf/metrics.properties will be used. + // the Spark metric system. If not specified, value /etc/metrics/conf/metrics.properties will be used. // +optional MetricsPropertiesFile *string `json:"metricsPropertiesFile,omitempty"` // Prometheus is for configuring the Prometheus JMX exporter. diff --git a/pkg/config/constants.go b/pkg/config/constants.go index 20439b2723..b65f3a38c0 100644 --- a/pkg/config/constants.go +++ b/pkg/config/constants.go @@ -150,6 +150,8 @@ const ( SparkDriverKubernetesMaster = "spark.kubernetes.driver.master" // SparkDriverServiceAnnotationKeyPrefix is the key prefix of annotations to be added to the driver service. SparkDriverServiceAnnotationKeyPrefix = "spark.kubernetes.driver.service.annotation." + // SparkDriverServiceLabelKeyPrefix is the key prefix of annotations to be added to the driver service. + SparkDriverServiceLabelKeyPrefix = "spark.kubernetes.driver.service.label." // SparkDynamicAllocationEnabled is the Spark configuration key for specifying if dynamic // allocation is enabled or not. 
SparkDynamicAllocationEnabled = "spark.dynamicAllocation.enabled" diff --git a/pkg/controller/sparkapplication/submission.go b/pkg/controller/sparkapplication/submission.go index a935267ca1..2f3fe1dd7b 100644 --- a/pkg/controller/sparkapplication/submission.go +++ b/pkg/controller/sparkapplication/submission.go @@ -26,7 +26,6 @@ import ( "github.com/golang/glog" v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" @@ -59,7 +58,7 @@ func runSparkSubmit(submission *submission) (bool, error) { if !present { glog.Error("SPARK_HOME is not specified") } - var command = filepath.Join(sparkHome, "/bin/spark-submit") + command := filepath.Join(sparkHome, "/bin/spark-submit") cmd := execCommand(command, submission.args...) glog.V(2).Infof("spark-submit arguments: %v", cmd.Args) @@ -301,7 +300,7 @@ func addDriverConfOptions(app *v1beta2.SparkApplication, submissionID string) ([ fmt.Sprintf("%s=%s", config.SparkDriverKubernetesMaster, *app.Spec.Driver.KubernetesMaster)) } - //Populate SparkApplication Labels to Driver + // Populate SparkApplication Labels to Driver driverLabels := make(map[string]string) for key, value := range app.Labels { driverLabels[key] = value @@ -330,6 +329,11 @@ func addDriverConfOptions(app *v1beta2.SparkApplication, submissionID string) ([ fmt.Sprintf("%s%s=%s", config.SparkDriverServiceAnnotationKeyPrefix, key, value)) } + for key, value := range app.Spec.Driver.ServiceLabels { + driverConfOptions = append(driverConfOptions, + fmt.Sprintf("%s%s=%s", config.SparkDriverServiceLabelKeyPrefix, key, value)) + } + driverConfOptions = append(driverConfOptions, config.GetDriverSecretConfOptions(app)...) driverConfOptions = append(driverConfOptions, config.GetDriverEnvVarConfOptions(app)...) 
@@ -388,7 +392,7 @@ func addExecutorConfOptions(app *v1beta2.SparkApplication, submissionID string) fmt.Sprintf("%s=%t", config.SparkExecutorDeleteOnTermination, *app.Spec.Executor.DeleteOnTermination)) } - //Populate SparkApplication Labels to Executors + // Populate SparkApplication Labels to Executors executorLabels := make(map[string]string) for key, value := range app.Labels { executorLabels[key] = value diff --git a/pkg/controller/sparkapplication/submission_test.go b/pkg/controller/sparkapplication/submission_test.go index 5c2c08123b..16c6a17161 100644 --- a/pkg/controller/sparkapplication/submission_test.go +++ b/pkg/controller/sparkapplication/submission_test.go @@ -25,9 +25,7 @@ import ( "testing" "github.com/google/uuid" - "github.com/stretchr/testify/assert" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -41,6 +39,7 @@ const ( VolumeMountOptionPathTemplate = "spark.kubernetes.%s.volumes.%s.%s.options.%s=%s" SparkDriverLabelAnnotationTemplate = "spark.kubernetes.driver.label.sparkoperator.k8s.io/%s=%s" SparkDriverLabelTemplate = "spark.kubernetes.driver.label.%s=%s" + SparkDriverServiceLabelTemplate = "spark.kubernetes.driver.service.label.%s=%s" SparkExecutorLabelAnnotationTemplate = "spark.kubernetes.executor.label.sparkoperator.k8s.io/%s=%s" SparkExecutorLabelTemplate = "spark.kubernetes.executor.label.%s=%s" ) @@ -424,12 +423,14 @@ func TestAddEmptyDir_Driver_Executor_WithSizeLimit(t *testing.T) { func TestPopulateLabels_Driver_Executor(t *testing.T) { const ( - AppLabelKey = "app-label-key" - AppLabelValue = "app-label-value" - DriverLabelKey = "driver-label-key" - DriverLabelValue = "driver-label-key" - ExecutorLabelKey = "executor-label-key" - ExecutorLabelValue = "executor-label-key" + AppLabelKey = "app-label-key" + AppLabelValue = "app-label-value" + DriverLabelKey = "driver-label-key" + DriverLabelValue = "driver-label-key" + DriverServiceLabelKey = "driver-svc-label-key" + DriverServiceLabelValue = "driver-svc-label-value" + ExecutorLabelKey = "executor-label-key" + ExecutorLabelValue = "executor-label-key" ) app := &v1beta2.SparkApplication{ @@ -440,6 +441,7 @@ func TestPopulateLabels_Driver_Executor(t *testing.T) { }, Spec: v1beta2.SparkApplicationSpec{ Driver: v1beta2.DriverSpec{ + ServiceLabels: map[string]string{DriverServiceLabelKey: DriverServiceLabelValue}, SparkPodSpec: v1beta2.SparkPodSpec{ Labels: map[string]string{DriverLabelKey: DriverLabelValue}, }, @@ -457,7 +459,7 @@ func TestPopulateLabels_Driver_Executor(t *testing.T) { if err != nil { t.Fatal(err) } - assert.Equal(t, 5, len(driverOptions)) + assert.Equal(t, 6, len(driverOptions)) sort.Strings(driverOptions) expectedDriverLabels := []string{ fmt.Sprintf(SparkDriverLabelAnnotationTemplate, "launched-by-spark-operator", strconv.FormatBool(true)), @@ -465,6 +467,7 @@ func TestPopulateLabels_Driver_Executor(t *testing.T) { fmt.Sprintf(SparkDriverLabelAnnotationTemplate, "submission-id", submissionID), fmt.Sprintf(SparkDriverLabelTemplate, AppLabelKey, AppLabelValue), fmt.Sprintf(SparkDriverLabelTemplate, DriverLabelKey, DriverLabelValue), + fmt.Sprintf(SparkDriverServiceLabelTemplate, DriverServiceLabelKey, DriverServiceLabelValue), } sort.Strings(expectedDriverLabels) diff --git a/test.sh b/test.sh new file mode 100755 index 0000000000..87ffb6e000 --- /dev/null +++ b/test.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +DOCKERFILE_RESOURCES=$(cat Dockerfile | grep -o "COPY [a-zA-Z0-9].*? 
" | cut -c6-) + +for resource in $DOCKERFILE_RESOURCES; do + # If the resource is different + if ! git diff --quiet origin/master -- $resource; then + ## And the appVersion hasn't been updated + if ! git diff origin/master -- charts/spark-operator-chart/Chart.yaml | grep +appVersion; then + echo "resource used in docker.io/kubeflow/spark-operator has changed in $resource, need to update the appVersion in charts/spark-operator-chart/Chart.yaml" + git diff origin/master -- $resource; + echo "failing the build... " && false + fi + fi +done From d87c6855ffb1a2b4e38804dd14ba328a9db3ce7c Mon Sep 17 00:00:00 2001 From: Peter McClonski Date: Tue, 7 May 2024 11:10:43 -0400 Subject: [PATCH 56/87] Fix examples (#2010) Signed-off-by: Peter Jablonski --- Makefile | 2 +- charts/spark-operator-chart/Chart.yaml | 4 +-- charts/spark-operator-chart/README.md | 4 +-- .../templates/spark-rbac.yaml | 15 ---------- .../tests/spark-rbac_test.yaml | 28 ++++++++++++++++--- charts/spark-operator-chart/values.yaml | 3 +- examples/spark-pi-configmap.yaml | 12 ++++---- examples/spark-pi-custom-resource.yaml | 12 ++++---- examples/spark-pi-prometheus.yaml | 2 +- examples/spark-pi-schedule.yaml | 12 ++++---- examples/spark-pi.yaml | 12 ++++---- examples/spark-py-pi.yaml | 10 +++---- 12 files changed, 61 insertions(+), 55 deletions(-) diff --git a/Makefile b/Makefile index 3e3b3f0af5..d8947d5634 100644 --- a/Makefile +++ b/Makefile @@ -48,7 +48,7 @@ helm-unittest: helm unittest charts/spark-operator-chart --strict helm-lint: - docker run --rm --workdir /workspace --volume $(PWD):/workspace quay.io/helmpack/chart-testing:latest ct lint + docker run --rm --workdir /workspace --volume "$$(pwd):/workspace" quay.io/helmpack/chart-testing:latest ct lint helm-docs: docker run --rm --volume "$$(pwd):/helm-docs" -u "$(id -u)" jnorwood/helm-docs:latest diff --git a/charts/spark-operator-chart/Chart.yaml b/charts/spark-operator-chart/Chart.yaml index 87e814fe36..8917650412 100644 --- a/charts/spark-operator-chart/Chart.yaml +++ b/charts/spark-operator-chart/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 name: spark-operator description: A Helm chart for Spark on Kubernetes operator -version: 1.2.14 -appVersion: v1beta2-1.4.5-3.5.0 +version: 1.2.15 +appVersion: v1beta2-1.4.6-3.5.0 keywords: - spark home: https://github.com/kubeflow/spark-operator diff --git a/charts/spark-operator-chart/README.md b/charts/spark-operator-chart/README.md index 4fb0e064bb..a8f41dc76c 100644 --- a/charts/spark-operator-chart/README.md +++ b/charts/spark-operator-chart/README.md @@ -1,6 +1,6 @@ # spark-operator -![Version: 1.2.14](https://img.shields.io/badge/Version-1.2.14-informational?style=flat-square) ![AppVersion: v1beta2-1.4.5-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.4.5--3.5.0-informational?style=flat-square) +![Version: 1.2.15](https://img.shields.io/badge/Version-1.2.15-informational?style=flat-square) ![AppVersion: v1beta2-1.4.6-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.4.6--3.5.0-informational?style=flat-square) A Helm chart for Spark on Kubernetes operator @@ -126,7 +126,7 @@ See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall) for command docum | serviceAccounts.sparkoperator.create | bool | `true` | Create a service account for the operator | | serviceAccounts.sparkoperator.name | string | `""` | Optional name for the operator service account | | sidecars | list | `[]` | Sidecar containers | -| sparkJobNamespaces | list | `[]` | List of namespaces where to run spark jobs | +| sparkJobNamespaces | list 
| `[""]` | List of namespaces where to run spark jobs | | tolerations | list | `[]` | List of node taints to tolerate | | uiService.enable | bool | `true` | Enable UI service creation for Spark application | | volumeMounts | list | `[]` | | diff --git a/charts/spark-operator-chart/templates/spark-rbac.yaml b/charts/spark-operator-chart/templates/spark-rbac.yaml index acdeaa5cb5..bbf9da6201 100644 --- a/charts/spark-operator-chart/templates/spark-rbac.yaml +++ b/charts/spark-operator-chart/templates/spark-rbac.yaml @@ -14,23 +14,8 @@ rules: - "" resources: - pods - verbs: - - "*" -- apiGroups: - - "" - resources: - services - verbs: - - "*" -- apiGroups: - - "" - resources: - configmaps - verbs: - - "*" -- apiGroups: - - "" - resources: - persistentvolumeclaims verbs: - "*" diff --git a/charts/spark-operator-chart/tests/spark-rbac_test.yaml b/charts/spark-operator-chart/tests/spark-rbac_test.yaml index 1a31c7152e..6d194fa3a6 100644 --- a/charts/spark-operator-chart/tests/spark-rbac_test.yaml +++ b/charts/spark-operator-chart/tests/spark-rbac_test.yaml @@ -60,7 +60,27 @@ tests: kind: RoleBinding name: spark - - it: Should render multiple spark roles if sparkJobNamespaces is set + - it: Should create a single spark role with namespace "" by default + documentIndex: 0 + asserts: + - containsDocument: + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + name: spark-role + namespace: "" + + - it: Should create a single spark role binding with namespace "" by default + values: + - ../values.yaml + documentIndex: 1 + asserts: + - containsDocument: + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + name: spark + namespace: "" + + - it: Should render multiple spark roles if sparkJobNamespaces is set with multiple values set: sparkJobNamespaces: - ns1 @@ -73,7 +93,7 @@ tests: name: spark-role namespace: ns1 - - it: Should render multiple spark role bindings if sparkJobNamespaces is set + - it: Should render multiple spark role bindings if sparkJobNamespaces is set with multiple values set: sparkJobNamespaces: - ns1 @@ -86,7 +106,7 @@ tests: name: spark namespace: ns1 - - it: Should render multiple spark roles if sparkJobNamespaces is set + - it: Should render multiple spark roles if sparkJobNamespaces is set with multiple values set: sparkJobNamespaces: - ns1 @@ -99,7 +119,7 @@ tests: name: spark-role namespace: ns2 - - it: Should render multiple spark role bindings if sparkJobNamespaces is set + - it: Should render multiple spark role bindings if sparkJobNamespaces is set with multiple values set: sparkJobNamespaces: - ns1 diff --git a/charts/spark-operator-chart/values.yaml b/charts/spark-operator-chart/values.yaml index 175a44cf89..2cbd088b33 100644 --- a/charts/spark-operator-chart/values.yaml +++ b/charts/spark-operator-chart/values.yaml @@ -53,7 +53,8 @@ serviceAccounts: annotations: {} # -- List of namespaces where to run spark jobs -sparkJobNamespaces: [] +sparkJobNamespaces: + - "" # - ns1 # -- Operator concurrency, higher values might increase memory usage diff --git a/examples/spark-pi-configmap.yaml b/examples/spark-pi-configmap.yaml index ce0c7cf14e..a6a5dc023b 100644 --- a/examples/spark-pi-configmap.yaml +++ b/examples/spark-pi-configmap.yaml @@ -21,11 +21,11 @@ metadata: spec: type: Scala mode: cluster - image: "gcr.io/spark-operator/spark:v3.1.1" + image: "spark:3.5.0" imagePullPolicy: Always mainClass: org.apache.spark.examples.SparkPi - mainApplicationFile: "local:///opt/spark/examples/jars/spark-examples_2.12-3.1.1.jar" - sparkVersion: "3.1.1" + 
mainApplicationFile: "local:///opt/spark/examples/jars/spark-examples_2.12-3.5.0.jar" + sparkVersion: "3.5.0" restartPolicy: type: Never volumes: @@ -37,8 +37,8 @@ spec: coreLimit: "1200m" memory: "512m" labels: - version: 3.1.1 - serviceAccount: spark + version: 3.5.0 + serviceAccount: spark-operator-spark volumeMounts: - name: config-vol mountPath: /opt/spark/mycm @@ -47,7 +47,7 @@ spec: instances: 1 memory: "512m" labels: - version: 3.1.1 + version: 3.5.0 volumeMounts: - name: config-vol mountPath: /opt/spark/mycm diff --git a/examples/spark-pi-custom-resource.yaml b/examples/spark-pi-custom-resource.yaml index bae7cc467c..1e70098d2e 100644 --- a/examples/spark-pi-custom-resource.yaml +++ b/examples/spark-pi-custom-resource.yaml @@ -21,11 +21,11 @@ metadata: spec: type: Scala mode: cluster - image: "gcr.io/spark-operator/spark:v3.1.1" + image: "spark:3.5.0" imagePullPolicy: Always mainClass: org.apache.spark.examples.SparkPi - mainApplicationFile: "local:///opt/spark/examples/jars/spark-examples_2.12-3.1.1.jar" - sparkVersion: "3.1.1" + mainApplicationFile: "local:///opt/spark/examples/jars/spark-examples_2.12-3.5.0.jar" + sparkVersion: "3.5.0" restartPolicy: type: Never volumes: @@ -38,8 +38,8 @@ spec: coreLimit: "1200m" memory: "512m" labels: - version: 3.1.1 - serviceAccount: spark + version: 3.5.0 + serviceAccount: spark-operator-spark volumeMounts: - name: "test-volume" mountPath: "/tmp" @@ -48,7 +48,7 @@ spec: instances: 1 memory: "512m" labels: - version: 3.1.1 + version: 3.5.0 volumeMounts: - name: "test-volume" mountPath: "/tmp" diff --git a/examples/spark-pi-prometheus.yaml b/examples/spark-pi-prometheus.yaml index 98298c1847..b47de1db60 100644 --- a/examples/spark-pi-prometheus.yaml +++ b/examples/spark-pi-prometheus.yaml @@ -37,7 +37,7 @@ spec: memory: "512m" labels: version: 3.1.1 - serviceAccount: spark + serviceAccount: spark-operator-spark executor: cores: 1 instances: 1 diff --git a/examples/spark-pi-schedule.yaml b/examples/spark-pi-schedule.yaml index 4af6b736c7..576a77361d 100644 --- a/examples/spark-pi-schedule.yaml +++ b/examples/spark-pi-schedule.yaml @@ -25,11 +25,11 @@ spec: template: type: Scala mode: cluster - image: "gcr.io/spark-operator/spark:v3.1.1" + image: "spark:3.5.0" imagePullPolicy: Always mainClass: org.apache.spark.examples.SparkPi - mainApplicationFile: "local:///opt/spark/examples/jars/spark-examples_2.12-3.1.1.jar" - sparkVersion: "3.1.1" + mainApplicationFile: "local:///opt/spark/examples/jars/spark-examples_2.12-3.5.0.jar" + sparkVersion: "3.5.0" restartPolicy: type: Never driver: @@ -37,11 +37,11 @@ spec: coreLimit: "1200m" memory: "512m" labels: - version: 3.1.1 - serviceAccount: spark + version: 3.5.0 + serviceAccount: spark-operator-spark executor: cores: 1 instances: 1 memory: "512m" labels: - version: 3.1.1 + version: 3.5.0 diff --git a/examples/spark-pi.yaml b/examples/spark-pi.yaml index 986fe505ff..41d48645e6 100644 --- a/examples/spark-pi.yaml +++ b/examples/spark-pi.yaml @@ -21,11 +21,11 @@ metadata: spec: type: Scala mode: cluster - image: "gcr.io/spark-operator/spark:v3.1.1" + image: "spark:3.5.0" imagePullPolicy: Always mainClass: org.apache.spark.examples.SparkPi - mainApplicationFile: "local:///opt/spark/examples/jars/spark-examples_2.12-3.1.1.jar" - sparkVersion: "3.1.1" + mainApplicationFile: "local:///opt/spark/examples/jars/spark-examples_2.12-3.5.0.jar" + sparkVersion: "3.5.0" sparkUIOptions: serviceLabels: test-label/v1: 'true' @@ -41,8 +41,8 @@ spec: coreLimit: "1200m" memory: "512m" labels: - version: 3.1.1 - 
serviceAccount: spark + version: 3.5.0 + serviceAccount: spark-operator-spark volumeMounts: - name: "test-volume" mountPath: "/tmp" @@ -51,7 +51,7 @@ spec: instances: 1 memory: "512m" labels: - version: 3.1.1 + version: 3.5.0 volumeMounts: - name: "test-volume" mountPath: "/tmp" diff --git a/examples/spark-py-pi.yaml b/examples/spark-py-pi.yaml index 20e81a1fab..cb7f21d11a 100644 --- a/examples/spark-py-pi.yaml +++ b/examples/spark-py-pi.yaml @@ -25,10 +25,10 @@ spec: type: Python pythonVersion: "3" mode: cluster - image: "gcr.io/spark-operator/spark-py:v3.1.1" + image: "spark:3.5.0" imagePullPolicy: Always mainApplicationFile: local:///opt/spark/examples/src/main/python/pi.py - sparkVersion: "3.1.1" + sparkVersion: "3.5.0" restartPolicy: type: OnFailure onFailureRetries: 3 @@ -40,11 +40,11 @@ spec: coreLimit: "1200m" memory: "512m" labels: - version: 3.1.1 - serviceAccount: spark + version: 3.5.0 + serviceAccount: spark-operator-spark executor: cores: 1 instances: 1 memory: "512m" labels: - version: 3.1.1 + version: 3.5.0 From a1efb79a4f716b7807b2ab51083afd8179499517 Mon Sep 17 00:00:00 2001 From: Matthew Rossi Date: Tue, 7 May 2024 17:58:43 +0200 Subject: [PATCH 57/87] Update Spark Job Namespace docs (#2000) Signed-off-by: Matthew Rossi --- README.md | 2 +- docs/quick-start-guide.md | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 2e0edd3be6..ab81ee6857 100644 --- a/README.md +++ b/README.md @@ -49,7 +49,7 @@ $ helm install my-release spark-operator/spark-operator --namespace spark-operat This will install the Kubernetes Operator for Apache Spark into the namespace `spark-operator`. The operator by default watches and handles `SparkApplication`s in every namespaces. If you would like to limit the operator to watch and handle `SparkApplication`s in a single namespace, e.g., `default` instead, add the following option to the `helm install` command: ``` ---set sparkJobNamespace=default +--set "sparkJobNamespaces={default}" ``` For configuration options available in the Helm chart, please refer to the chart's [README](charts/spark-operator-chart/README.md). diff --git a/docs/quick-start-guide.md b/docs/quick-start-guide.md index 4ee06c904f..e7045c0820 100644 --- a/docs/quick-start-guide.md +++ b/docs/quick-start-guide.md @@ -126,10 +126,10 @@ To run the Spark Pi example, run the following command: $ kubectl apply -f examples/spark-pi.yaml ``` -Note that `spark-pi.yaml` configures the driver pod to use the `spark` service account to communicate with the Kubernetes API server. You might need to replace it with the appropriate service account before submitting the job. If you installed the operator using the Helm chart and overrode `sparkJobNamespace`, the service account name ends with `-spark` and starts with the Helm release name. For example, if you would like to run your Spark jobs to run in a namespace called `test-ns`, first make sure it already exists, and then install the chart with the command: +Note that `spark-pi.yaml` configures the driver pod to use the `spark` service account to communicate with the Kubernetes API server. You might need to replace it with the appropriate service account before submitting the job. If you installed the operator using the Helm chart and overrode `sparkJobNamespaces`, the service account name ends with `-spark` and starts with the Helm release name. 
For example, if you would like to run your Spark jobs to run in a namespace called `test-ns`, first make sure it already exists, and then install the chart with the command: ```bash -$ helm install my-release spark-operator/spark-operator --namespace spark-operator --set sparkJobNamespace=test-ns +$ helm install my-release spark-operator/spark-operator --namespace spark-operator --set "sparkJobNamespaces={test-ns}" ``` Then the chart will set up a service account for your Spark jobs to use in that namespace. @@ -234,13 +234,13 @@ $ helm upgrade --set image.repository=org/image --set i Refer to the Helm [documentation](https://helm.sh/docs/helm/helm_upgrade/) for more details on `helm upgrade`. -## About the Spark Job Namespace +## About Spark Job Namespaces -The Spark Job Namespace value defines the namespace(s) where `SparkApplications` can be deployed. The Helm chart value for the Spark Job Namespace is `sparkJobNamespace`, and its default value is `""`, as defined in the Helm chart's [README](../charts/spark-operator-chart/README.md). Note that in the [Kubernetes apimachinery](https://github.com/kubernetes/kubernetes/tree/master/staging/src/k8s.io/apimachinery) project, the constants `NamespaceAll` and `NamespaceNone` are both defined as the empty string. In this case, the empty string represents `NamespaceAll`. When set to `""`, the Spark Operator supports deploying `SparkApplications` to all namespaces. The Helm chart will create a service account in the namespace where the spark-operator is deployed. In order to successfully deploy `SparkApplications`, you will need to ensure the driver pod's service account meets the criteria described in the [service accounts for driver pods](#about-the-service-account-for-driver-pods) section. +The Spark Job Namespaces value defines the namespaces where `SparkApplications` can be deployed. The Helm chart value for the Spark Job Namespaces is `sparkJobNamespaces`, and its default value is `[]`. As defined in the Helm chart's [README](../charts/spark-operator-chart/README.md), when the list of namespaces is empty the Helm chart will create a service account in the namespace where the spark-operator is deployed. -if you installed the operator using the Helm chart and overrode the `sparkJobNamespace` to some other, pre-existing namespace, the Helm chart will create the necessary service account and RBAC in the specified namespace. +If you installed the operator using the Helm chart and overrode the `sparkJobNamespaces` to some other, pre-existing namespace, the Helm chart will create the necessary service account and RBAC in the specified namespace. -The Spark Operator uses the Spark Job Namespace to identify and filter relevant events for the `SparkApplication` CRD. If you specify a namespace for Spark Jobs, and then submit a SparkApplication resource to another namespace, the Spark Operator will filter out the event, and the resource will not get deployed. If you don't specify a namespace, the Spark Operator will see `SparkApplication` events for all namespaces, and will deploy them to the namespace requested in the create call. +The Spark Operator uses the Spark Job Namespace to identify and filter relevant events for the `SparkApplication` CRD. If you specify a namespace for Spark Jobs, and then submit a SparkApplication resource to another namespace, the Spark Operator will filter out the event, and the resource will not get deployed. 
If you don't specify a namespace, the Spark Operator will see only `SparkApplication` events for the Spark Operator namespace. ## About the Service Account for Driver Pods @@ -347,5 +347,5 @@ If you are deploying the operator on a GKE cluster with the [Private cluster](ht To install the operator with a custom port, pass the appropriate flag during `helm install`: ```bash -$ helm install my-release spark-operator/spark-operator --namespace spark-operator --set sparkJobNamespace=spark --set webhook.enable=true --set webhook.port=443 +$ helm install my-release spark-operator/spark-operator --namespace spark-operator --set "sparkJobNamespaces={spark}" --set webhook.enable=true --set webhook.port=443 ``` From 1d123caa97d17ff1a29069b537e9ffc45151a60d Mon Sep 17 00:00:00 2001 From: Andrej Kyselica Date: Thu, 9 May 2024 17:52:45 -0400 Subject: [PATCH 58/87] Remove outdated PySpark experimental warning in example (#2014) Signed-off-by: Andrej Kyselica --- examples/spark-py-pi.yaml | 3 --- 1 file changed, 3 deletions(-) diff --git a/examples/spark-py-pi.yaml b/examples/spark-py-pi.yaml index cb7f21d11a..11a193cfd0 100644 --- a/examples/spark-py-pi.yaml +++ b/examples/spark-py-pi.yaml @@ -12,9 +12,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -# -# Support for Python is experimental, and requires building SNAPSHOT image of Apache Spark, -# with `imagePullPolicy` set to Always apiVersion: "sparkoperator.k8s.io/v1beta2" kind: SparkApplication From 153537e82d09cbb1b706aebfd094400ef51e0ce0 Mon Sep 17 00:00:00 2001 From: Peter McClonski Date: Fri, 17 May 2024 18:24:45 -0400 Subject: [PATCH 59/87] Fixes a bug with dynamic allocation forcing the executor count to be 1 even when minExecutors is set to 0 (#1979) Signed-off-by: Peter McClonski --- .../sparkoperator.k8s.io/v1beta2/defaults.go | 15 +++++++---- .../v1beta2/defaults_test.go | 25 +++++++++++++++++++ 2 files changed, 35 insertions(+), 5 deletions(-) diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta2/defaults.go b/pkg/apis/sparkoperator.k8s.io/v1beta2/defaults.go index f722a36cfd..e46f4012df 100644 --- a/pkg/apis/sparkoperator.k8s.io/v1beta2/defaults.go +++ b/pkg/apis/sparkoperator.k8s.io/v1beta2/defaults.go @@ -16,6 +16,8 @@ limitations under the License. package v1beta2 +import "strconv" + // SetSparkApplicationDefaults sets default values for certain fields of a SparkApplication. 
func SetSparkApplicationDefaults(app *SparkApplication) { if app == nil { @@ -44,7 +46,7 @@ func SetSparkApplicationDefaults(app *SparkApplication) { } setDriverSpecDefaults(&app.Spec.Driver, app.Spec.SparkConf) - setExecutorSpecDefaults(&app.Spec.Executor, app.Spec.SparkConf) + setExecutorSpecDefaults(&app.Spec.Executor, app.Spec.SparkConf, app.Spec.DynamicAllocation) } func setDriverSpecDefaults(spec *DriverSpec, sparkConf map[string]string) { @@ -59,7 +61,7 @@ func setDriverSpecDefaults(spec *DriverSpec, sparkConf map[string]string) { } } -func setExecutorSpecDefaults(spec *ExecutorSpec, sparkConf map[string]string) { +func setExecutorSpecDefaults(spec *ExecutorSpec, sparkConf map[string]string, allocSpec *DynamicAllocation) { if _, exists := sparkConf["spark.executor.cores"]; !exists && spec.Cores == nil { spec.Cores = new(int32) *spec.Cores = 1 @@ -68,8 +70,11 @@ func setExecutorSpecDefaults(spec *ExecutorSpec, sparkConf map[string]string) { spec.Memory = new(string) *spec.Memory = "1g" } - if _, exists := sparkConf["spark.executor.instances"]; !exists && spec.Instances == nil { - spec.Instances = new(int32) - *spec.Instances = 1 + var dynalloc, _ = sparkConf["spark.dynamicallocation.enabled"] + if dynamic, _ := strconv.ParseBool(dynalloc); !dynamic && (allocSpec == nil || !allocSpec.Enabled) { + if _, exists := sparkConf["spark.executor.instances"]; !exists && spec.Instances == nil { + spec.Instances = new(int32) + *spec.Instances = 1 + } } } diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta2/defaults_test.go b/pkg/apis/sparkoperator.k8s.io/v1beta2/defaults_test.go index 6c1be13de6..624374ee16 100644 --- a/pkg/apis/sparkoperator.k8s.io/v1beta2/defaults_test.go +++ b/pkg/apis/sparkoperator.k8s.io/v1beta2/defaults_test.go @@ -199,4 +199,29 @@ func TestSetSparkApplicationDefaultsExecutorSpecDefaults(t *testing.T) { assert.Nil(t, app.Spec.Executor.Memory) assert.Nil(t, app.Spec.Executor.Instances) + //Case3: Dynamic allocation is enabled with minExecutors = 0 + var minExecs = int32(0) + app = &SparkApplication{ + Spec: SparkApplicationSpec{ + DynamicAllocation: &DynamicAllocation{ + Enabled: true, + MinExecutors: &minExecs, + }, + }, + } + + SetSparkApplicationDefaults(app) + assert.Nil(t, app.Spec.Executor.Instances) + + //Case4: Dynamic allocation is enabled via SparkConf + app = &SparkApplication{ + Spec: SparkApplicationSpec{ + SparkConf: map[string]string{ + "spark.dynamicallocation.enabled": "true", + }, + }, + } + + SetSparkApplicationDefaults(app) + assert.Nil(t, app.Spec.Executor.Instances) } From b723367d796cca0bafe646806e9814aa14dbe8ab Mon Sep 17 00:00:00 2001 From: Bo Yang <14280154+hiboyang@users.noreply.github.com> Date: Mon, 20 May 2024 12:42:48 -0700 Subject: [PATCH 60/87] Support exposing extra TCP ports in Spark Driver via K8s Ingress (#1998) * Add driverIngressOptions in SparkApplication CRD Signed-off-by: Bo (AIML) Yang * Update chart version to 1.3.0 Signed-off-by: Bo (AIML) Yang * Update helm chart README Signed-off-by: Bo (AIML) Yang * Fix make detect-crds-drift Signed-off-by: Bo (AIML) Yang * Update api-docs.md Signed-off-by: Bo (AIML) Yang --------- Signed-off-by: Bo (AIML) Yang --- charts/spark-operator-chart/Chart.yaml | 4 +- charts/spark-operator-chart/README.md | 2 +- ...tor.k8s.io_scheduledsparkapplications.yaml | 37 + ...parkoperator.k8s.io_sparkapplications.yaml | 37 + docs/api-docs.md | 144 ++++ ...tor.k8s.io_scheduledsparkapplications.yaml | 37 + ...parkoperator.k8s.io_sparkapplications.yaml | 37 + .../sparkoperator.k8s.io/v1beta2/types.go | 29 + 
.../v1beta2/zz_generated.deepcopy.go | 60 ++ pkg/controller/sparkapplication/controller.go | 45 +- .../sparkapplication/driveringress.go | 370 +++++++++ .../sparkapplication/driveringress_test.go | 730 ++++++++++++++++++ pkg/controller/sparkapplication/sparkui.go | 261 +------ .../sparkapplication/sparkui_test.go | 2 +- 14 files changed, 1535 insertions(+), 260 deletions(-) create mode 100644 pkg/controller/sparkapplication/driveringress.go create mode 100644 pkg/controller/sparkapplication/driveringress_test.go diff --git a/charts/spark-operator-chart/Chart.yaml b/charts/spark-operator-chart/Chart.yaml index 8917650412..ea9fd34809 100644 --- a/charts/spark-operator-chart/Chart.yaml +++ b/charts/spark-operator-chart/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 name: spark-operator description: A Helm chart for Spark on Kubernetes operator -version: 1.2.15 -appVersion: v1beta2-1.4.6-3.5.0 +version: 1.3.0 +appVersion: v1beta2-1.4.2-3.5.0 keywords: - spark home: https://github.com/kubeflow/spark-operator diff --git a/charts/spark-operator-chart/README.md b/charts/spark-operator-chart/README.md index a8f41dc76c..9dff10b3f6 100644 --- a/charts/spark-operator-chart/README.md +++ b/charts/spark-operator-chart/README.md @@ -1,6 +1,6 @@ # spark-operator -![Version: 1.2.15](https://img.shields.io/badge/Version-1.2.15-informational?style=flat-square) ![AppVersion: v1beta2-1.4.6-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.4.6--3.5.0-informational?style=flat-square) +![Version: 1.3.0](https://img.shields.io/badge/Version-1.3.0-informational?style=flat-square) ![AppVersion: v1beta2-1.4.2-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.4.2--3.5.0-informational?style=flat-square) A Helm chart for Spark on Kubernetes operator diff --git a/charts/spark-operator-chart/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml b/charts/spark-operator-chart/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml index 78ffc98a42..fe4ba87fee 100644 --- a/charts/spark-operator-chart/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml +++ b/charts/spark-operator-chart/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml @@ -3779,6 +3779,43 @@ spec: serviceType: type: string type: object + driverIngressOptions: + items: + properties: + serviceAnnotations: + additionalProperties: + type: string + type: object + serviceLabels: + additionalProperties: + type: string + type: object + ingressURLFormat: + type: string + ingressAnnotations: + additionalProperties: + type: string + type: object + ingressTLS: + items: + properties: + hosts: + items: + type: string + type: array + secretName: + type: string + type: object + type: array + servicePort: + format: int32 + type: integer + servicePortName: + type: string + serviceType: + type: string + type: object + type: array sparkVersion: type: string timeToLiveSeconds: diff --git a/charts/spark-operator-chart/crds/sparkoperator.k8s.io_sparkapplications.yaml b/charts/spark-operator-chart/crds/sparkoperator.k8s.io_sparkapplications.yaml index a77b8f0e4a..927424177d 100644 --- a/charts/spark-operator-chart/crds/sparkoperator.k8s.io_sparkapplications.yaml +++ b/charts/spark-operator-chart/crds/sparkoperator.k8s.io_sparkapplications.yaml @@ -3767,6 +3767,43 @@ spec: serviceType: type: string type: object + driverIngressOptions: + items: + properties: + serviceAnnotations: + additionalProperties: + type: string + type: object + serviceLabels: + additionalProperties: + type: string + type: object + ingressURLFormat: + type: string + 
ingressAnnotations: + additionalProperties: + type: string + type: object + ingressTLS: + items: + properties: + hosts: + items: + type: string + type: array + secretName: + type: string + type: object + type: array + servicePort: + format: int32 + type: integer + servicePortName: + type: string + serviceType: + type: string + type: object + type: array sparkVersion: type: string timeToLiveSeconds: diff --git a/docs/api-docs.md b/docs/api-docs.md index 4deb0e76f0..2455515276 100644 --- a/docs/api-docs.md +++ b/docs/api-docs.md @@ -601,6 +601,20 @@ SparkUIConfiguration +driverIngressOptions
+<br/>
+<em>
+<a href="#sparkoperator.k8s.io/v1beta2.DriverIngressConfiguration">
+[]DriverIngressConfiguration
+</a>
+</em>
+</td>
+<td>
+<em>(Optional)</em>
+<p>DriverIngressOptions allows configuring the Service and the Ingress to expose ports inside Spark Driver</p>
+</td>
+</tr>
<tr>
<td>
<code>dynamicAllocation</code><br/>
@@ -991,6 +1005,122 @@ string
+<h3 id="sparkoperator.k8s.io/v1beta2.DriverIngressConfiguration">DriverIngressConfiguration
+</h3>
+<p>
+(<em>Appears on:</em>
+<a href="#sparkoperator.k8s.io/v1beta2.SparkApplicationSpec">SparkApplicationSpec</a>)
+</p>
+<p>DriverIngressConfiguration is for driver ingress specific configuration parameters.</p>
+<table>
+<thead>
+<tr>
+<th>Field</th>
+<th>Description</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td><code>servicePort</code><br/><em>int32</em></td>
+<td><p>ServicePort allows configuring the port at service level that might be different from the targetPort.</p></td>
+</tr>
+<tr>
+<td><code>servicePortName</code><br/><em>string</em></td>
+<td><p>ServicePortName allows configuring the name of the service port.
+This may be useful for sidecar proxies like Envoy injected by Istio which require specific port names to treat traffic as proper HTTP.</p></td>
+</tr>
+<tr>
+<td><code>serviceType</code><br/><em>Kubernetes core/v1.ServiceType</em></td>
+<td><em>(Optional)</em>
+<p>ServiceType allows configuring the type of the service. Defaults to ClusterIP.</p></td>
+</tr>
+<tr>
+<td><code>serviceAnnotations</code><br/><em>map[string]string</em></td>
+<td><em>(Optional)</em>
+<p>ServiceAnnotations is a map of key,value pairs of annotations that might be added to the service object.</p></td>
+</tr>
+<tr>
+<td><code>serviceLabels</code><br/><em>map[string]string</em></td>
+<td><em>(Optional)</em>
+<p>ServiceLabels is a map of key,value pairs of labels that might be added to the service object.</p></td>
+</tr>
+<tr>
+<td><code>ingressURLFormat</code><br/><em>string</em></td>
+<td><p>IngressURLFormat is the URL for the ingress.</p></td>
+</tr>
+<tr>
+<td><code>ingressAnnotations</code><br/><em>map[string]string</em></td>
+<td><em>(Optional)</em>
+<p>IngressAnnotations is a map of key,value pairs of annotations that might be added to the ingress object, i.e. specify nginx as ingress.class.</p></td>
+</tr>
+<tr>
+<td><code>ingressTLS</code><br/><em>[]Kubernetes networking/v1.IngressTLS</em></td>
+<td><em>(Optional)</em>
+<p>IngressTLS is useful if we need to declare SSL certificates to the ingress object.</p></td>
+</tr>
+</tbody>
+</table>
<h3>DriverSpec</h3>
@@ -2493,6 +2623,20 @@ SparkUIConfiguration
+<tr>
+<td>
+<code>driverIngressOptions</code><br/>
+<em>
+<a href="#sparkoperator.k8s.io/v1beta2.DriverIngressConfiguration">
+[]DriverIngressConfiguration
+</a>
+</em>
+</td>
+<td>
+<em>(Optional)</em>
+<p>DriverIngressOptions allows configuring the Service and the Ingress to expose ports inside Spark Driver</p>
+</td>
+</tr>
<tr>
<td>
<code>dynamicAllocation</code><br/>
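[Editor's note: the ingressURLFormat field documented above is consumed by the getDriverIngressURL helper added later in this patch, which expands the {{ $appName }} and {{ $appNamespace }} placeholders with regexes and defaults the scheme to http:// when none is given. A self-contained sketch of that behavior follows, assuming only the Go standard library; the function name and example hostname are illustrative, not the patch's code.]

```go
package main

import (
	"fmt"
	"net/url"
	"regexp"
)

// Placeholder patterns equivalent to the ingressAppNameURLRegex and
// ingressAppNamespaceURLRegex variables introduced by this patch.
var (
	appNameRe      = regexp.MustCompile(`\{\{\s*\$appName\s*\}\}`)
	appNamespaceRe = regexp.MustCompile(`\{\{\s*\$appNamespace\s*\}\}`)
)

// renderIngressURL substitutes the placeholders and, when the result has no
// scheme, re-parses with http:// prepended so url.Parse yields a usable
// host/path split — mirroring what getDriverIngressURL does.
func renderIngressURL(format, name, namespace string) (*url.URL, error) {
	raw := appNamespaceRe.ReplaceAllString(appNameRe.ReplaceAllString(format, name), namespace)
	u, err := url.Parse(raw)
	if err != nil {
		return nil, err
	}
	if u.Scheme == "" {
		return url.Parse("http://" + raw)
	}
	return u, nil
}

func main() {
	// Hypothetical ingress URL format; any DNS-safe template would work.
	u, err := renderIngressURL("{{ $appName }}.{{ $appNamespace }}.example.com/app", "spark-pi", "default")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Host, u.Path) // spark-pi.default.example.com /app
}
```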
diff --git a/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml b/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml index 78ffc98a42..fe4ba87fee 100644 --- a/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml +++ b/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml @@ -3779,6 +3779,43 @@ spec: serviceType: type: string type: object + driverIngressOptions: + items: + properties: + serviceAnnotations: + additionalProperties: + type: string + type: object + serviceLabels: + additionalProperties: + type: string + type: object + ingressURLFormat: + type: string + ingressAnnotations: + additionalProperties: + type: string + type: object + ingressTLS: + items: + properties: + hosts: + items: + type: string + type: array + secretName: + type: string + type: object + type: array + servicePort: + format: int32 + type: integer + servicePortName: + type: string + serviceType: + type: string + type: object + type: array sparkVersion: type: string timeToLiveSeconds: diff --git a/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml b/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml index a77b8f0e4a..927424177d 100644 --- a/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml +++ b/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml @@ -3767,6 +3767,43 @@ spec: serviceType: type: string type: object + driverIngressOptions: + items: + properties: + serviceAnnotations: + additionalProperties: + type: string + type: object + serviceLabels: + additionalProperties: + type: string + type: object + ingressURLFormat: + type: string + ingressAnnotations: + additionalProperties: + type: string + type: object + ingressTLS: + items: + properties: + hosts: + items: + type: string + type: array + secretName: + type: string + type: object + type: array + servicePort: + format: int32 + type: integer + servicePortName: + type: string + serviceType: + type: string + type: object + type: array sparkVersion: type: string timeToLiveSeconds: diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go b/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go index 4224de4a6c..ca009e739e 100644 --- a/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go +++ b/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go @@ -280,6 +280,9 @@ type SparkApplicationSpec struct { // SparkUIOptions allows configuring the Service and the Ingress to expose the sparkUI // +optional SparkUIOptions *SparkUIConfiguration `json:"sparkUIOptions,omitempty"` + // DriverIngressOptions allows configuring the Service and the Ingress to expose ports inside Spark Driver + // +optional + DriverIngressOptions []DriverIngressConfiguration `json:"driverIngressOptions,omitempty"` // DynamicAllocation configures dynamic allocation that becomes available for the Kubernetes // scheduler backend since Spark 3.0. // +optional @@ -328,6 +331,32 @@ type SparkUIConfiguration struct { IngressTLS []networkingv1.IngressTLS `json:"ingressTLS,omitempty"` } +// DriverIngressConfiguration is for driver ingress specific configuration parameters. +type DriverIngressConfiguration struct { + // ServicePort allows configuring the port at service level that might be different from the targetPort. + ServicePort *int32 `json:"servicePort"` + // ServicePortName allows configuring the name of the service port. + // This may be useful for sidecar proxies like Envoy injected by Istio which require specific ports names to treat traffic as proper HTTP. 
+ ServicePortName *string `json:"servicePortName"` + // ServiceType allows configuring the type of the service. Defaults to ClusterIP. + // +optional + ServiceType *apiv1.ServiceType `json:"serviceType"` + // ServiceAnnotations is a map of key,value pairs of annotations that might be added to the service object. + // +optional + ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"` + // ServiceLables is a map of key,value pairs of labels that might be added to the service object. + // +optional + ServiceLabels map[string]string `json:"serviceLabels,omitempty"` + // IngressURLFormat is the URL for the ingress. + IngressURLFormat string `json:"ingressURLFormat,omitempty"` + // IngressAnnotations is a map of key,value pairs of annotations that might be added to the ingress object. i.e. specify nginx as ingress.class + // +optional + IngressAnnotations map[string]string `json:"ingressAnnotations,omitempty"` + // TlsHosts is useful If we need to declare SSL certificates to the ingress object + // +optional + IngressTLS []networkingv1.IngressTLS `json:"ingressTLS,omitempty"` +} + // ApplicationStateType represents the type of the current state of an application. type ApplicationStateType string diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta2/zz_generated.deepcopy.go b/pkg/apis/sparkoperator.k8s.io/v1beta2/zz_generated.deepcopy.go index 308afff898..0b15feb0a0 100644 --- a/pkg/apis/sparkoperator.k8s.io/v1beta2/zz_generated.deepcopy.go +++ b/pkg/apis/sparkoperator.k8s.io/v1beta2/zz_generated.deepcopy.go @@ -762,6 +762,13 @@ func (in *SparkApplicationSpec) DeepCopyInto(out *SparkApplicationSpec) { *out = new(SparkUIConfiguration) (*in).DeepCopyInto(*out) } + if in.DriverIngressOptions != nil { + in, out := &in.DriverIngressOptions, &out.DriverIngressOptions + *out = make([]DriverIngressConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.DynamicAllocation != nil { in, out := &in.DynamicAllocation, &out.DynamicAllocation *out = new(DynamicAllocation) @@ -1043,3 +1050,56 @@ func (in *SparkUIConfiguration) DeepCopy() *SparkUIConfiguration { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DriverIngressConfiguration) DeepCopyInto(out *DriverIngressConfiguration) { + *out = *in + if in.ServicePort != nil { + in, out := &in.ServicePort, &out.ServicePort + *out = new(int32) + **out = **in + } + if in.ServicePortName != nil { + in, out := &in.ServicePortName, &out.ServicePortName + *out = new(string) + **out = **in + } + if in.ServiceType != nil { + in, out := &in.ServiceType, &out.ServiceType + *out = new(v1.ServiceType) + **out = **in + } + if in.ServiceAnnotations != nil { + in, out := &in.ServiceAnnotations, &out.ServiceAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.IngressURLFormat = in.IngressURLFormat + if in.IngressAnnotations != nil { + in, out := &in.IngressAnnotations, &out.IngressAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.IngressTLS != nil { + in, out := &in.IngressTLS, &out.IngressTLS + *out = make([]networkingv1.IngressTLS, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverIngressConfiguration. 
+func (in *DriverIngressConfiguration) DeepCopy() *DriverIngressConfiguration { + if in == nil { + return nil + } + out := new(DriverIngressConfiguration) + in.DeepCopyInto(out) + return out +} \ No newline at end of file diff --git a/pkg/controller/sparkapplication/controller.go b/pkg/controller/sparkapplication/controller.go index 5d43a72b0a..3e9b373b89 100644 --- a/pkg/controller/sparkapplication/controller.go +++ b/pkg/controller/sparkapplication/controller.go @@ -691,7 +691,7 @@ func (c *Controller) submitSparkApplication(app *v1beta2.SparkApplication) *v1be // Create UI Ingress if ingress-format is set. if c.ingressURLFormat != "" { // We are going to want to use an ingress url. - ingressURL, err := getSparkUIingressURL(c.ingressURLFormat, app.GetName(), app.GetNamespace()) + ingressURL, err := getDriverIngressURL(c.ingressURLFormat, app.GetName(), app.GetNamespace()) if err != nil { glog.Errorf("failed to get the spark ingress url %s/%s: %v", app.Namespace, app.Name, err) } else { @@ -715,6 +715,30 @@ func (c *Controller) submitSparkApplication(app *v1beta2.SparkApplication) *v1be } } + for _, driverIngressConfiguration := range app.Spec.DriverIngressOptions { + service, err := createDriverIngressServiceFromConfiguration(app, &driverIngressConfiguration, c.kubeClient) + if err != nil { + glog.Errorf("failed to create driver ingress service for SparkApplication %s/%s: %v", app.Namespace, app.Name, err) + continue + } + glog.Infof("Created driver ingress service %s (port: %d) for SparkApplication %s/%s", service.serviceName, service.servicePort, app.Namespace, app.Name) + // Create ingress if ingress-format is set. + if driverIngressConfiguration.IngressURLFormat != "" { + // We are going to want to use an ingress url. + ingressURL, err := getDriverIngressURL(driverIngressConfiguration.IngressURLFormat, app.GetName(), app.GetNamespace()) + if err != nil { + glog.Errorf("failed to get the driver ingress url %s/%s: %v", app.Namespace, app.Name, err) + } else { + ingress, err := createDriverIngress(app, &driverIngressConfiguration, *service, ingressURL, c.ingressClassName, c.kubeClient) + if err != nil { + glog.Errorf("failed to create driver ingress for SparkApplication %s/%s: %v", app.Namespace, app.Name, err) + } else { + glog.Infof("Created driver ingress %s (url: %s) for SparkApplication %s/%s", ingress.ingressName, ingress.ingressURL, app.Namespace, app.Name) + } + } + } + } + driverPodName := getDriverPodName(app) driverInfo.PodName = driverPodName submissionID := uuid.New().String() @@ -914,6 +937,26 @@ func (c *Controller) validateSparkApplication(app *v1beta2.SparkApplication) err return fmt.Errorf("NodeSelector property can be defined at SparkApplication or at any of Driver,Executor") } + servicePorts := make(map[int32]bool) + ingressURLFormats := make(map[string]bool) + for _, item := range appSpec.DriverIngressOptions { + if item.ServicePort == nil { + return fmt.Errorf("DriverIngressOptions has nil ServicePort") + } + if servicePorts[*item.ServicePort] { + return fmt.Errorf("DriverIngressOptions has duplicate ServicePort: %d", *item.ServicePort) + } + servicePorts[*item.ServicePort] = true + + if item.IngressURLFormat == "" { + return fmt.Errorf("DriverIngressOptions has empty IngressURLFormat") + } + if ingressURLFormats[item.IngressURLFormat] { + return fmt.Errorf("DriverIngressOptions has duplicate IngressURLFormat: %s", item.IngressURLFormat) + } + ingressURLFormats[item.IngressURLFormat] = true + } + return nil } diff --git a/pkg/controller/sparkapplication/driveringress.go
b/pkg/controller/sparkapplication/driveringress.go new file mode 100644 index 0000000000..08dab31468 --- /dev/null +++ b/pkg/controller/sparkapplication/driveringress.go @@ -0,0 +1,370 @@ +/* +Copyright 2024 spark-operator contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sparkapplication + +import ( + "context" + "fmt" + "net/url" + "regexp" + + "github.com/golang/glog" + apiv1 "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" + networkingv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + clientset "k8s.io/client-go/kubernetes" + + "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + "github.com/kubeflow/spark-operator/pkg/config" + "github.com/kubeflow/spark-operator/pkg/util" +) + +// SparkService encapsulates information about the driver UI service. +type SparkService struct { + serviceName string + serviceType apiv1.ServiceType + servicePort int32 + servicePortName string + targetPort intstr.IntOrString + serviceIP string + serviceAnnotations map[string]string + serviceLabels map[string]string +} + +// SparkIngress encapsulates information about the driver UI ingress. +type SparkIngress struct { + ingressName string + ingressURL *url.URL + ingressClassName string + annotations map[string]string + ingressTLS []networkingv1.IngressTLS +} + +var ingressAppNameURLRegex = regexp.MustCompile("{{\\s*[$]appName\\s*}}") +var ingressAppNamespaceURLRegex = regexp.MustCompile("{{\\s*[$]appNamespace\\s*}}") + +func getDriverIngressURL(ingressURLFormat string, appName string, appNamespace string) (*url.URL, error) { + ingressURL := ingressAppNamespaceURLRegex.ReplaceAllString(ingressAppNameURLRegex.ReplaceAllString(ingressURLFormat, appName), appNamespace) + parsedURL, err := url.Parse(ingressURL) + if err != nil { + return nil, err + } + if parsedURL.Scheme == "" { + // url does not contain any scheme; add http:// so url.Parse can function correctly + parsedURL, err = url.Parse("http://" + ingressURL) + if err != nil { + return nil, err + } + } + return parsedURL, nil +} + +func createDriverIngress(app *v1beta2.SparkApplication, driverIngressConfiguration *v1beta2.DriverIngressConfiguration, service SparkService, ingressURL *url.URL, ingressClassName string, kubeClient clientset.Interface) (*SparkIngress, error) { + if driverIngressConfiguration.ServicePort == nil { + return nil, fmt.Errorf("cannot create Driver Ingress for application %s/%s due to empty ServicePort on driverIngressConfiguration", app.Namespace, app.Name) + } + ingressName := fmt.Sprintf("%s-ing-%d", app.Name, *driverIngressConfiguration.ServicePort) + if util.IngressCapabilities.Has("networking.k8s.io/v1") { + return createDriverIngress_v1(app, service, ingressName, ingressURL, ingressClassName, kubeClient) + } else { + return createDriverIngress_legacy(app, service, ingressName, ingressURL, kubeClient) + } +} + +func createDriverIngress_v1(app *v1beta2.SparkApplication, service SparkService, ingressName string,
ingressURL *url.URL, ingressClassName string, kubeClient clientset.Interface) (*SparkIngress, error) { + ingressResourceAnnotations := getIngressResourceAnnotations(app) + ingressTlsHosts := getIngressTlsHosts(app) + + ingressURLPath := ingressURL.Path + // If we're serving on a subpath, we need to ensure we create capture groups + if ingressURLPath != "" && ingressURLPath != "/" { + ingressURLPath = ingressURLPath + "(/|$)(.*)" + } + + implementationSpecific := networkingv1.PathTypeImplementationSpecific + + ingress := networkingv1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: ingressName, + Namespace: app.Namespace, + Labels: getResourceLabels(app), + OwnerReferences: []metav1.OwnerReference{*getOwnerReference(app)}, + }, + Spec: networkingv1.IngressSpec{ + Rules: []networkingv1.IngressRule{{ + Host: ingressURL.Host, + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{{ + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: service.serviceName, + Port: networkingv1.ServiceBackendPort{ + Number: service.servicePort, + }, + }, + }, + Path: ingressURLPath, + PathType: &implementationSpecific, + }}, + }, + }, + }}, + }, + } + + if len(ingressResourceAnnotations) != 0 { + ingress.ObjectMeta.Annotations = ingressResourceAnnotations + } + + // If we're serving on a subpath, we need to ensure we use the capture groups + if ingressURL.Path != "" && ingressURL.Path != "/" { + if ingress.ObjectMeta.Annotations == nil { + ingress.ObjectMeta.Annotations = make(map[string]string) + } + ingress.ObjectMeta.Annotations["nginx.ingress.kubernetes.io/rewrite-target"] = "/$2" + } + if len(ingressTlsHosts) != 0 { + ingress.Spec.TLS = ingressTlsHosts + } + if len(ingressClassName) != 0 { + ingress.Spec.IngressClassName = &ingressClassName + } + + glog.Infof("Creating an Ingress %s for application %s", ingress.Name, app.Name) + _, err := kubeClient.NetworkingV1().Ingresses(ingress.Namespace).Create(context.TODO(), &ingress, metav1.CreateOptions{}) + if err != nil { + return nil, err + } + return &SparkIngress{ + ingressName: ingress.Name, + ingressURL: ingressURL, + ingressClassName: ingressClassName, + annotations: ingress.Annotations, + ingressTLS: ingressTlsHosts, + }, nil +} + +func createDriverIngress_legacy(app *v1beta2.SparkApplication, service SparkService, ingressName string, ingressURL *url.URL, kubeClient clientset.Interface) (*SparkIngress, error) { + ingressResourceAnnotations := getIngressResourceAnnotations(app) + // ingressTlsHosts is a []networkingv1.IngressTLS that we convert later + // for extensions/v1beta1, but return as is in SparkIngress + ingressTlsHosts := getIngressTlsHosts(app) + + ingressURLPath := ingressURL.Path + // If we're serving on a subpath, we need to ensure we create capture groups + if ingressURLPath != "" && ingressURLPath != "/" { + ingressURLPath = ingressURLPath + "(/|$)(.*)" + } + + ingress := extensions.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: ingressName, + Namespace: app.Namespace, + Labels: getResourceLabels(app), + OwnerReferences: []metav1.OwnerReference{*getOwnerReference(app)}, + }, + Spec: extensions.IngressSpec{ + Rules: []extensions.IngressRule{{ + Host: ingressURL.Host, + IngressRuleValue: extensions.IngressRuleValue{ + HTTP: &extensions.HTTPIngressRuleValue{ + Paths: []extensions.HTTPIngressPath{{ + Backend: extensions.IngressBackend{ + ServiceName: service.serviceName, + ServicePort: intstr.IntOrString{ +
Type: intstr.Int, + IntVal: service.servicePort, + }, + }, + Path: ingressURLPath, + }}, + }, + }, + }}, + }, + } + + if len(ingressResourceAnnotations) != 0 { + ingress.ObjectMeta.Annotations = ingressResourceAnnotations + } + + // If we're serving on a subpath, we need to ensure we use the capture groups + if ingressURL.Path != "" && ingressURL.Path != "/" { + if ingress.ObjectMeta.Annotations == nil { + ingress.ObjectMeta.Annotations = make(map[string]string) + } + ingress.ObjectMeta.Annotations["nginx.ingress.kubernetes.io/rewrite-target"] = "/$2" + } + if len(ingressTlsHosts) != 0 { + ingress.Spec.TLS = convertIngressTlsHostsToLegacy(ingressTlsHosts) + } + glog.Infof("Creating an extensions/v1beta1 Ingress %s for application %s", ingress.Name, app.Name) + _, err := kubeClient.ExtensionsV1beta1().Ingresses(ingress.Namespace).Create(context.TODO(), &ingress, metav1.CreateOptions{}) + if err != nil { + return nil, err + } + return &SparkIngress{ + ingressName: ingress.Name, + ingressURL: ingressURL, + annotations: ingress.Annotations, + ingressTLS: ingressTlsHosts, + }, nil +} + +func convertIngressTlsHostsToLegacy(ingressTlsHosts []networkingv1.IngressTLS) []extensions.IngressTLS { + var ingressTlsHosts_legacy []extensions.IngressTLS + for _, ingressTlsHost := range ingressTlsHosts { + ingressTlsHosts_legacy = append(ingressTlsHosts_legacy, extensions.IngressTLS{ + Hosts: ingressTlsHost.Hosts, + SecretName: ingressTlsHost.SecretName, + }) + } + return ingressTlsHosts_legacy +} + +func createDriverIngressService( + app *v1beta2.SparkApplication, + portName string, + port int32, + targetPort int32, + serviceName string, + serviceType apiv1.ServiceType, + serviceAnnotations map[string]string, + serviceLabels map[string]string, + kubeClient clientset.Interface) (*SparkService, error) { + service := &apiv1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Namespace: app.Namespace, + Labels: getResourceLabels(app), + OwnerReferences: []metav1.OwnerReference{*getOwnerReference(app)}, + }, + Spec: apiv1.ServiceSpec{ + Ports: []apiv1.ServicePort{ + { + Name: portName, + Port: port, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: targetPort, + }, + }, + }, + Selector: map[string]string{ + config.SparkAppNameLabel: app.Name, + config.SparkRoleLabel: config.SparkDriverRole, + }, + Type: serviceType, + }, + } + + if len(serviceAnnotations) != 0 { + service.ObjectMeta.Annotations = serviceAnnotations + } + + if len(serviceLabels) != 0 { + glog.Infof("Setting labels on service %s for the Driver Ingress: %v", service.Name, serviceLabels) + service.ObjectMeta.Labels = serviceLabels + } + + glog.Infof("Creating a service %s for the Driver Ingress for application %s", service.Name, app.Name) + service, err := kubeClient.CoreV1().Services(app.Namespace).Create(context.TODO(), service, metav1.CreateOptions{}) + if err != nil { + return nil, err + } + + return &SparkService{ + serviceName: service.Name, + serviceType: service.Spec.Type, + servicePort: service.Spec.Ports[0].Port, + servicePortName: service.Spec.Ports[0].Name, + targetPort: service.Spec.Ports[0].TargetPort, + serviceIP: service.Spec.ClusterIP, + serviceAnnotations: serviceAnnotations, + serviceLabels: serviceLabels, + }, nil +} + +func getDriverIngressServicePort(driverIngressConfiguration *v1beta2.DriverIngressConfiguration) (int32, error) { + port := driverIngressConfiguration.ServicePort + if port == nil { + return 0, fmt.Errorf("service port is nil on driver ingress configuration") + } + return *port, nil +}
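
Editor's note on the ingressURLFormat handling above: getDriverIngressURL (defined earlier in this new driveringress.go) expands the {{$appName}} and {{$appNamespace}} placeholders and prepends http:// when the format string carries no scheme. The standalone sketch below reproduces just that behavior for illustration; the expandIngressURL name, the main wrapper, and the example hostname are editorial assumptions, not part of the patch.

package main

import (
	"fmt"
	"net/url"
	"regexp"
)

// The same placeholder patterns the patch compiles in driveringress.go.
var (
	appNameRe      = regexp.MustCompile(`{{\s*[$]appName\s*}}`)
	appNamespaceRe = regexp.MustCompile(`{{\s*[$]appNamespace\s*}}`)
)

// expandIngressURL mirrors getDriverIngressURL: substitute the placeholders,
// then parse, defaulting to http:// when no scheme was supplied so that
// url.Parse fills in Host and Path correctly.
func expandIngressURL(format, name, namespace string) (*url.URL, error) {
	s := appNamespaceRe.ReplaceAllString(appNameRe.ReplaceAllString(format, name), namespace)
	u, err := url.Parse(s)
	if err != nil {
		return nil, err
	}
	if u.Scheme == "" {
		return url.Parse("http://" + s)
	}
	return u, nil
}

func main() {
	u, err := expandIngressURL("ingress.cluster.example.com/{{$appNamespace}}/{{$appName}}", "spark-pi", "default")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Scheme, u.Host, u.Path) // http ingress.cluster.example.com /default/spark-pi
}

This is also why validateSparkApplication in controller.go rejects an empty IngressURLFormat: without a format there is nothing to expand into a host rule for the ingress.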
+ +func getDriverIngressServicePortName(driverIngressConfiguration *v1beta2.DriverIngressConfiguration) string { + portName := driverIngressConfiguration.ServicePortName + if portName != nil { + return *portName + } + port := 0 + if driverIngressConfiguration.ServicePort != nil { + port = int(*driverIngressConfiguration.ServicePort) + } + return fmt.Sprintf("driver-ing-%d", port) +} + +func getDriverIngressServiceName(app *v1beta2.SparkApplication, port int32) string { + return fmt.Sprintf("%s-driver-%d", app.Name, port) +} + +func getDriverIngressServiceType(driverIngressConfiguration *v1beta2.DriverIngressConfiguration) apiv1.ServiceType { + if driverIngressConfiguration.ServiceType != nil { + return *driverIngressConfiguration.ServiceType + } + return apiv1.ServiceTypeClusterIP +} + +func getDriverIngressServiceAnnotations(driverIngressConfiguration *v1beta2.DriverIngressConfiguration) map[string]string { + serviceAnnotations := map[string]string{} + if driverIngressConfiguration.ServiceAnnotations != nil { + for key, value := range driverIngressConfiguration.ServiceAnnotations { + serviceAnnotations[key] = value + } + } + return serviceAnnotations +} + +func getDriverIngressServiceLabels(driverIngressConfiguration *v1beta2.DriverIngressConfiguration) map[string]string { + serviceLabels := map[string]string{} + if driverIngressConfiguration.ServiceLabels != nil { + for key, value := range driverIngressConfiguration.ServiceLabels { + serviceLabels[key] = value + } + } + return serviceLabels +} + +func createDriverIngressServiceFromConfiguration( + app *v1beta2.SparkApplication, + driverIngressConfiguration *v1beta2.DriverIngressConfiguration, + kubeClient clientset.Interface) (*SparkService, error) { + portName := getDriverIngressServicePortName(driverIngressConfiguration) + port, err := getDriverIngressServicePort(driverIngressConfiguration) + if err != nil { + return nil, err + } + serviceName := getDriverIngressServiceName(app, port) + serviceType := getDriverIngressServiceType(driverIngressConfiguration) + serviceAnnotations := getDriverIngressServiceAnnotations(driverIngressConfiguration) + serviceLabels := getDriverIngressServiceLabels(driverIngressConfiguration) + return createDriverIngressService(app, portName, port, port, serviceName, serviceType, serviceAnnotations, serviceLabels, kubeClient) +} diff --git a/pkg/controller/sparkapplication/driveringress_test.go b/pkg/controller/sparkapplication/driveringress_test.go new file mode 100644 index 0000000000..ef1dedc672 --- /dev/null +++ b/pkg/controller/sparkapplication/driveringress_test.go @@ -0,0 +1,730 @@ +/* +Copyright 2024 spark-operator contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package sparkapplication + +import ( + "context" + "fmt" + "reflect" + "testing" + + apiv1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/kubernetes/fake" + + "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + "github.com/kubeflow/spark-operator/pkg/config" + "github.com/kubeflow/spark-operator/pkg/util" +) + +func TestCreateDriverIngressService(t *testing.T) { + type testcase struct { + name string + app *v1beta2.SparkApplication + expectedServices []SparkService + expectedSelector map[string]string + expectError bool + } + testFn := func(test testcase, t *testing.T) { + fakeClient := fake.NewSimpleClientset() + util.IngressCapabilities = map[string]bool{"networking.k8s.io/v1": true} + if len(test.expectedServices) != len(test.app.Spec.DriverIngressOptions) { + t.Errorf("%s: size of test.expectedServices (%d) and test.app.Spec.DriverIngressOptions (%d) is different for %s", + test.name, len(test.expectedServices), len(test.app.Spec.DriverIngressOptions), test.app.Name) + } + for i, driverIngressConfiguration := range test.app.Spec.DriverIngressOptions { + sparkService, err := createDriverIngressServiceFromConfiguration(test.app, &driverIngressConfiguration, fakeClient) + if err != nil { + if test.expectError { + return + } + t.Fatal(err) + } + expectedService := test.expectedServices[i] + if sparkService.serviceName != expectedService.serviceName { + t.Errorf("%s: for service name wanted %s got %s", test.name, expectedService.serviceName, sparkService.serviceName) + } + service, err := fakeClient.CoreV1(). + Services(test.app.Namespace). + Get(context.TODO(), sparkService.serviceName, metav1.GetOptions{}) + if err != nil { + if test.expectError { + return + } + t.Fatal(err) + } + if service.Labels[config.SparkAppNameLabel] != test.app.Name { + t.Errorf("%s: service of app %s has the wrong labels", test.name, test.app.Name) + } + if !reflect.DeepEqual(test.expectedSelector, service.Spec.Selector) { + t.Errorf("%s: for label selector wanted %s got %s", test.name, test.expectedSelector, service.Spec.Selector) + } + if service.Spec.Type != expectedService.serviceType { + t.Errorf("%s: for service type wanted %s got %s", test.name, expectedService.serviceType, service.Spec.Type) + } + if len(service.Spec.Ports) != 1 { + t.Errorf("%s: wanted a single port got %d ports", test.name, len(service.Spec.Ports)) + } + port := service.Spec.Ports[0] + if port.Port != expectedService.servicePort { + t.Errorf("%s: unexpected port wanted %d got %d", test.name, expectedService.servicePort, port.Port) + } + if port.Name != expectedService.servicePortName { + t.Errorf("%s: unexpected port name wanted %s got %s", test.name, expectedService.servicePortName, port.Name) + } + serviceAnnotations := service.ObjectMeta.Annotations + if !reflect.DeepEqual(serviceAnnotations, expectedService.serviceAnnotations) { + t.Errorf("%s: unexpected annotations wanted %s got %s", test.name, expectedService.serviceAnnotations, serviceAnnotations) + } + serviceLabels := service.ObjectMeta.Labels + if !reflect.DeepEqual(serviceLabels, expectedService.serviceLabels) { + t.Errorf("%s: unexpected labels wanted %s got %s", test.name, expectedService.serviceLabels, serviceLabels) + } + } + } + serviceNameFormat := "%s-driver-%d" + portNameFormat := "driver-ing-%d" + app1 := &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo1", + Namespace: "default", + UID: 
"foo-123", + }, + Spec: v1beta2.SparkApplicationSpec{ + DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ + { + ServicePort: int32ptr(8888), + }, + }, + }, + Status: v1beta2.SparkApplicationStatus{ + SparkApplicationID: "foo-1", + ExecutionAttempts: 1, + }, + } + app2 := &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo2", + Namespace: "default", + UID: "foo-123", + }, + Spec: v1beta2.SparkApplicationSpec{ + DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ + { + ServicePort: int32ptr(8888), + }, + }, + }, + Status: v1beta2.SparkApplicationStatus{ + SparkApplicationID: "foo-2", + ExecutionAttempts: 2, + }, + } + app3 := &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo3", + Namespace: "default", + UID: "foo-123", + }, + Spec: v1beta2.SparkApplicationSpec{ + DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ + { + ServicePort: nil, + }, + }, + }, + Status: v1beta2.SparkApplicationStatus{ + SparkApplicationID: "foo-3", + }, + } + var appPort int32 = 80 + app4 := &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo4", + Namespace: "default", + UID: "foo-123", + }, + Spec: v1beta2.SparkApplicationSpec{ + DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ + { + ServicePort: &appPort, + }, + }, + SparkConf: map[string]string{ + sparkUIPortConfigurationKey: "4041", + }, + }, + Status: v1beta2.SparkApplicationStatus{ + SparkApplicationID: "foo-3", + }, + } + var serviceTypeNodePort apiv1.ServiceType = apiv1.ServiceTypeNodePort + app5 := &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo5", + Namespace: "default", + UID: "foo-123", + }, + Spec: v1beta2.SparkApplicationSpec{ + DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ + { + ServicePort: int32ptr(8888), + ServiceType: &serviceTypeNodePort, + }, + }, + }, + Status: v1beta2.SparkApplicationStatus{ + SparkApplicationID: "foo-2", + ExecutionAttempts: 2, + }, + } + appPortName := "http-spark-test" + app6 := &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo6", + Namespace: "default", + UID: "foo-123", + }, + Spec: v1beta2.SparkApplicationSpec{ + DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ + { + ServicePort: &appPort, + ServicePortName: &appPortName, + }, + }, + }, + Status: v1beta2.SparkApplicationStatus{ + SparkApplicationID: "foo-6", + }, + } + app7 := &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo7", + Namespace: "default", + UID: "foo-123", + }, + Spec: v1beta2.SparkApplicationSpec{ + DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ + { + ServicePort: int32ptr(8888), + ServiceAnnotations: map[string]string{ + "key": "value", + }, + }, + }, + }, + Status: v1beta2.SparkApplicationStatus{ + SparkApplicationID: "foo-7", + ExecutionAttempts: 1, + }, + } + app8 := &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo8", + Namespace: "default", + UID: "foo-123", + }, + Spec: v1beta2.SparkApplicationSpec{ + DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ + { + ServicePort: int32ptr(8888), + ServiceLabels: map[string]string{ + "sparkoperator.k8s.io/app-name": "foo8", + "key": "value", + }, + }, + }, + }, + Status: v1beta2.SparkApplicationStatus{ + SparkApplicationID: "foo-8", + ExecutionAttempts: 1, + }, + } + testcases := []testcase{ + { + name: "service with custom serviceport and serviceport and target port are same", + app: app1, + expectedServices: []SparkService{ + { + serviceName: 
fmt.Sprintf(serviceNameFormat, app1.GetName(), *app1.Spec.DriverIngressOptions[0].ServicePort), + serviceType: apiv1.ServiceTypeClusterIP, + servicePortName: fmt.Sprintf(portNameFormat, *app1.Spec.DriverIngressOptions[0].ServicePort), + servicePort: *app1.Spec.DriverIngressOptions[0].ServicePort, + serviceLabels: map[string]string{ + "sparkoperator.k8s.io/app-name": "foo1", + }, + targetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: int32(*app1.Spec.DriverIngressOptions[0].ServicePort), + }, + }, + }, + expectedSelector: map[string]string{ + config.SparkAppNameLabel: "foo1", + config.SparkRoleLabel: config.SparkDriverRole, + }, + expectError: false, + }, + { + name: "service with default port", + app: app2, + expectedServices: []SparkService{ + { + serviceName: fmt.Sprintf(serviceNameFormat, app2.GetName(), *app2.Spec.DriverIngressOptions[0].ServicePort), + serviceType: apiv1.ServiceTypeClusterIP, + servicePortName: fmt.Sprintf(portNameFormat, *app2.Spec.DriverIngressOptions[0].ServicePort), + servicePort: int32(*app2.Spec.DriverIngressOptions[0].ServicePort), + serviceLabels: map[string]string{ + "sparkoperator.k8s.io/app-name": "foo2", + }, + }, + }, + expectedSelector: map[string]string{ + config.SparkAppNameLabel: "foo2", + config.SparkRoleLabel: config.SparkDriverRole, + }, + expectError: false, + }, + { + name: "service with custom serviceport and serviceport and target port are different", + app: app4, + expectedServices: []SparkService{ + { + serviceName: fmt.Sprintf(serviceNameFormat, app4.GetName(), *app4.Spec.DriverIngressOptions[0].ServicePort), + serviceType: apiv1.ServiceTypeClusterIP, + servicePortName: fmt.Sprintf(portNameFormat, *app4.Spec.DriverIngressOptions[0].ServicePort), + servicePort: 80, + serviceLabels: map[string]string{ + "sparkoperator.k8s.io/app-name": "foo4", + }, + targetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: int32(4041), + }, + }, + }, + expectedSelector: map[string]string{ + config.SparkAppNameLabel: "foo4", + config.SparkRoleLabel: config.SparkDriverRole, + }, + expectError: false, + }, + { + name: "service with custom servicetype", + app: app5, + expectedServices: []SparkService{ + { + serviceName: fmt.Sprintf(serviceNameFormat, app5.GetName(), *app5.Spec.DriverIngressOptions[0].ServicePort), + serviceType: apiv1.ServiceTypeNodePort, + servicePortName: fmt.Sprintf(portNameFormat, *app5.Spec.DriverIngressOptions[0].ServicePort), + servicePort: *app5.Spec.DriverIngressOptions[0].ServicePort, + serviceLabels: map[string]string{ + "sparkoperator.k8s.io/app-name": "foo5", + }, + }, + }, + expectedSelector: map[string]string{ + config.SparkAppNameLabel: "foo5", + config.SparkRoleLabel: config.SparkDriverRole, + }, + expectError: false, + }, + { + name: "service with custom serviceportname", + app: app6, + expectedServices: []SparkService{ + { + serviceName: fmt.Sprintf(serviceNameFormat, app6.GetName(), *app6.Spec.DriverIngressOptions[0].ServicePort), + serviceType: apiv1.ServiceTypeClusterIP, + servicePortName: "http-spark-test", + servicePort: int32(80), + serviceLabels: map[string]string{ + "sparkoperator.k8s.io/app-name": "foo6", + }, + }, + }, + expectedSelector: map[string]string{ + config.SparkAppNameLabel: "foo6", + config.SparkRoleLabel: config.SparkDriverRole, + }, + expectError: false, + }, + { + name: "service with annotation", + app: app7, + expectedServices: []SparkService{ + { + serviceName: fmt.Sprintf(serviceNameFormat, app7.GetName(), *app7.Spec.DriverIngressOptions[0].ServicePort), + serviceType: 
apiv1.ServiceTypeClusterIP, + servicePortName: fmt.Sprintf(portNameFormat, *app7.Spec.DriverIngressOptions[0].ServicePort), + servicePort: *app7.Spec.DriverIngressOptions[0].ServicePort, + serviceAnnotations: map[string]string{ + "key": "value", + }, + serviceLabels: map[string]string{ + "sparkoperator.k8s.io/app-name": "foo7", + }, + targetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: int32(4041), + }, + }, + }, + expectedSelector: map[string]string{ + config.SparkAppNameLabel: "foo7", + config.SparkRoleLabel: config.SparkDriverRole, + }, + expectError: false, + }, + { + name: "service with custom labels", + app: app8, + expectedServices: []SparkService{ + { + serviceName: fmt.Sprintf(serviceNameFormat, app8.GetName(), *app8.Spec.DriverIngressOptions[0].ServicePort), + serviceType: apiv1.ServiceTypeClusterIP, + servicePortName: fmt.Sprintf(portNameFormat, *app8.Spec.DriverIngressOptions[0].ServicePort), + servicePort: *app8.Spec.DriverIngressOptions[0].ServicePort, + serviceLabels: map[string]string{ + "sparkoperator.k8s.io/app-name": "foo8", + "key": "value", + }, + targetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: int32(4041), + }, + }, + }, + expectedSelector: map[string]string{ + config.SparkAppNameLabel: "foo8", + config.SparkRoleLabel: config.SparkDriverRole, + }, + expectError: false, + }, + { + name: "service with bad port configurations", + app: app3, + expectError: true, + expectedServices: []SparkService{{}}, + }, + } + for _, test := range testcases { + testFn(test, t) + } +} + +func TestCreateDriverIngress(t *testing.T) { + type testcase struct { + name string + app *v1beta2.SparkApplication + expectedIngresses []SparkIngress + expectError bool + } + + testFn := func(test testcase, t *testing.T, ingressURLFormat string, ingressClassName string) { + fakeClient := fake.NewSimpleClientset() + if len(test.expectedIngresses) != len(test.app.Spec.DriverIngressOptions) { + t.Errorf("%s: size of test.expectedIngresses (%d) and test.app.Spec.DriverIngressOptions (%d) is different for %s", + test.name, len(test.expectedIngresses), len(test.app.Spec.DriverIngressOptions), test.app.Name) + } + for i, driverIngressConfiguration := range test.app.Spec.DriverIngressOptions { + sparkService, err := createDriverIngressServiceFromConfiguration(test.app, &driverIngressConfiguration, fakeClient) + if err != nil { + t.Fatal(err) + } + ingressURL, err := getDriverIngressURL(ingressURLFormat, test.app.Name, test.app.Namespace) + if err != nil { + t.Fatal(err) + } + sparkIngress, err := createDriverIngress(test.app, &driverIngressConfiguration, *sparkService, ingressURL, ingressClassName, fakeClient) + if err != nil { + if test.expectError { + return + } + t.Fatal(err) + } + expectedIngress := test.expectedIngresses[i] + if sparkIngress.ingressName != expectedIngress.ingressName { + t.Errorf("Ingress name wanted %s got %s", expectedIngress.ingressName, sparkIngress.ingressName) + } + if sparkIngress.ingressURL.String() != expectedIngress.ingressURL.String() { + t.Errorf("Ingress URL wanted %s got %s", expectedIngress.ingressURL, sparkIngress.ingressURL) + } + ingress, err := fakeClient.NetworkingV1().Ingresses(test.app.Namespace). 
+ Get(context.TODO(), sparkIngress.ingressName, metav1.GetOptions{}) + if err != nil { + t.Fatal(err) + } + if len(ingress.Annotations) != 0 { + for key, value := range ingress.Annotations { + if expectedIngress.annotations[key] != ingress.Annotations[key] { + t.Errorf("Expected annotation: %s=%s but found : %s=%s", key, value, key, ingress.Annotations[key]) + } + } + } + if len(ingress.Spec.TLS) != 0 { + for _, ingressTls := range ingress.Spec.TLS { + if ingressTls.Hosts[0] != expectedIngress.ingressTLS[0].Hosts[0] { + t.Errorf("Expected ingressTls host: %s but found : %s", expectedIngress.ingressTLS[0].Hosts[0], ingressTls.Hosts[0]) + } + if ingressTls.SecretName != expectedIngress.ingressTLS[0].SecretName { + t.Errorf("Expected ingressTls secretName: %s but found : %s", expectedIngress.ingressTLS[0].SecretName, ingressTls.SecretName) + } + } + } + if ingress.Labels[config.SparkAppNameLabel] != test.app.Name { + t.Errorf("Ingress of app %s has the wrong labels", test.app.Name) + } + + if len(ingress.Spec.Rules) != 1 { + t.Errorf("No Ingress rules found.") + } + ingressRule := ingress.Spec.Rules[0] + // If we have a path, then the ingress adds capture groups + if ingressRule.IngressRuleValue.HTTP.Paths[0].Path != "" && ingressRule.IngressRuleValue.HTTP.Paths[0].Path != "/" { + expectedIngress.ingressURL.Path = expectedIngress.ingressURL.Path + "(/|$)(.*)" + } + if ingressRule.Host+ingressRule.IngressRuleValue.HTTP.Paths[0].Path != expectedIngress.ingressURL.Host+expectedIngress.ingressURL.Path { + t.Errorf("Ingress of app %s has the wrong host %s", ingressRule.Host+ingressRule.IngressRuleValue.HTTP.Paths[0].Path, expectedIngress.ingressURL.Host+expectedIngress.ingressURL.Path) + } + + if len(ingressRule.IngressRuleValue.HTTP.Paths) != 1 { + t.Errorf("No Ingress paths found.") + } + ingressPath := ingressRule.IngressRuleValue.HTTP.Paths[0] + if ingressPath.Backend.Service.Name != sparkService.serviceName { + t.Errorf("Service name wanted %s got %s", sparkService.serviceName, ingressPath.Backend.Service.Name) + } + if *ingressPath.PathType != networkingv1.PathTypeImplementationSpecific { + t.Errorf("PathType wanted %s got %s", networkingv1.PathTypeImplementationSpecific, *ingressPath.PathType) + } + if ingressPath.Backend.Service.Port.Number != sparkService.servicePort { + t.Errorf("Service port wanted %v got %v", sparkService.servicePort, ingressPath.Backend.Service.Port.Number) + } + } + } + + ingressNameFormat := "%s-ing-%d" + var appPort int32 = 80 + app1 := &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "default", + UID: "foo-123", + }, + Spec: v1beta2.SparkApplicationSpec{ + DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ + { + ServicePort: &appPort, + }, + }, + }, + Status: v1beta2.SparkApplicationStatus{ + SparkApplicationID: "foo-1", + DriverInfo: v1beta2.DriverInfo{ + WebUIServiceName: "blah-service", + }, + }, + } + app2 := &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "default", + UID: "foo-123", + }, + Spec: v1beta2.SparkApplicationSpec{ + DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ + { + ServicePort: &appPort, + IngressAnnotations: map[string]string{ + "kubernetes.io/ingress.class": "nginx", + "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", + }, + }, + }, + }, + Status: v1beta2.SparkApplicationStatus{ + SparkApplicationID: "foo-1", + DriverInfo: v1beta2.DriverInfo{ + WebUIServiceName: "blah-service", + }, + }, + } + app3 := &v1beta2.SparkApplication{ + 
ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "default", + UID: "foo-123", + }, + Spec: v1beta2.SparkApplicationSpec{ + DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ + { + ServicePort: &appPort, + IngressAnnotations: map[string]string{ + "kubernetes.io/ingress.class": "nginx", + "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", + }, + IngressTLS: []networkingv1.IngressTLS{ + {Hosts: []string{"host1", "host2"}, SecretName: "secret"}, + }, + }, + }, + }, + Status: v1beta2.SparkApplicationStatus{ + SparkApplicationID: "foo-1", + DriverInfo: v1beta2.DriverInfo{ + WebUIServiceName: "blah-service", + }, + }, + } + app4 := &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "default", + UID: "foo-123", + }, + Spec: v1beta2.SparkApplicationSpec{ + DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ + { + ServicePort: &appPort, + IngressAnnotations: map[string]string{ + "kubernetes.io/ingress.class": "nginx", + }, + IngressTLS: []networkingv1.IngressTLS{ + {Hosts: []string{"host1", "host2"}, SecretName: ""}, + }, + }, + }, + }, + Status: v1beta2.SparkApplicationStatus{ + SparkApplicationID: "foo-1", + DriverInfo: v1beta2.DriverInfo{ + WebUIServiceName: "blah-service", + }, + }, + } + + testcases := []testcase{ + { + name: "simple ingress object", + app: app1, + expectedIngresses: []SparkIngress{ + { + ingressName: fmt.Sprintf(ingressNameFormat, app1.GetName(), *app1.Spec.DriverIngressOptions[0].ServicePort), + ingressURL: parseURLAndAssertError(app1.GetName()+".ingress.clusterName.com", t), + }, + }, + expectError: false, + }, + { + name: "ingress with annotations and without tls configuration", + app: app2, + expectedIngresses: []SparkIngress{ + { + ingressName: fmt.Sprintf(ingressNameFormat, app2.GetName(), *app2.Spec.DriverIngressOptions[0].ServicePort), + ingressURL: parseURLAndAssertError(app2.GetName()+".ingress.clusterName.com", t), + annotations: map[string]string{ + "kubernetes.io/ingress.class": "nginx", + "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", + }, + }, + }, + expectError: false, + }, + { + name: "ingress with annotations and tls configuration", + app: app3, + expectedIngresses: []SparkIngress{ + { + ingressName: fmt.Sprintf(ingressNameFormat, app3.GetName(), *app3.Spec.DriverIngressOptions[0].ServicePort), + ingressURL: parseURLAndAssertError(app3.GetName()+".ingress.clusterName.com", t), + annotations: map[string]string{ + "kubernetes.io/ingress.class": "nginx", + "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", + }, + ingressTLS: []networkingv1.IngressTLS{ + {Hosts: []string{"host1", "host2"}, SecretName: "secret"}, + }, + }, + }, + expectError: false, + }, + { + name: "ingress with incomplete list of annotations", + app: app4, + expectedIngresses: []SparkIngress{ + { + ingressName: fmt.Sprintf(ingressNameFormat, app4.GetName(), *app4.Spec.DriverIngressOptions[0].ServicePort), + ingressURL: parseURLAndAssertError(app3.GetName()+".ingress.clusterName.com", t), + annotations: map[string]string{ + "kubernetes.io/ingress.class": "nginx", + "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", + }, + ingressTLS: []networkingv1.IngressTLS{ + {Hosts: []string{"host1", "host2"}, SecretName: ""}, + }, + }, + }, + expectError: true, + }, + } + + for _, test := range testcases { + testFn(test, t, "{{$appName}}.ingress.clusterName.com", "") + } + + testcases = []testcase{ + { + name: "simple ingress object with ingress URL Format with path", + app: app1, + expectedIngresses: 
[]SparkIngress{ + { + ingressName: fmt.Sprintf(ingressNameFormat, app1.GetName(), *app1.Spec.DriverIngressOptions[0].ServicePort), + ingressURL: parseURLAndAssertError("ingress.clusterName.com/"+app1.GetNamespace()+"/"+app1.GetName(), t), + annotations: map[string]string{ + "nginx.ingress.kubernetes.io/rewrite-target": "/$2", + }, + }, + }, + expectError: false, + }, + } + + for _, test := range testcases { + testFn(test, t, "ingress.clusterName.com/{{$appNamespace}}/{{$appName}}", "") + } + + testcases = []testcase{ + { + name: "simple ingress object with ingressClassName set", + app: app1, + expectedIngresses: []SparkIngress{ + { + ingressName: fmt.Sprintf(ingressNameFormat, app1.GetName(), *app1.Spec.DriverIngressOptions[0].ServicePort), + ingressURL: parseURLAndAssertError(app1.GetName()+".ingress.clusterName.com", t), + ingressClassName: "nginx", + }, + }, + expectError: false, + }, + } + for _, test := range testcases { + testFn(test, t, "{{$appName}}.ingress.clusterName.com", "nginx") + } +} diff --git a/pkg/controller/sparkapplication/sparkui.go b/pkg/controller/sparkapplication/sparkui.go index 5ac64062e6..b247974da8 100644 --- a/pkg/controller/sparkapplication/sparkui.go +++ b/pkg/controller/sparkapplication/sparkui.go @@ -17,23 +17,13 @@ limitations under the License. package sparkapplication import ( - "context" "fmt" "net/url" - "regexp" "strconv" - "github.com/golang/glog" - - apiv1 "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" - networkingv1 "k8s.io/api/networking/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" clientset "k8s.io/client-go/kubernetes" "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - "github.com/kubeflow/spark-operator/pkg/config" "github.com/kubeflow/spark-operator/pkg/util" ) @@ -43,204 +33,13 @@ const ( defaultSparkWebUIPortName string = "spark-driver-ui-port" ) -var ingressAppNameURLRegex = regexp.MustCompile("{{\\s*[$]appName\\s*}}") -var ingressAppNamespaceURLRegex = regexp.MustCompile("{{\\s*[$]appNamespace\\s*}}") - -func getSparkUIingressURL(ingressURLFormat string, appName string, appNamespace string) (*url.URL, error) { - ingressURL := ingressAppNamespaceURLRegex.ReplaceAllString(ingressAppNameURLRegex.ReplaceAllString(ingressURLFormat, appName), appNamespace) - parsedURL, err := url.Parse(ingressURL) - if err != nil { - return nil, err - } - if parsedURL.Scheme == "" { - //url does not contain any scheme, adding http:// so url.Parse can function correctly - parsedURL, err = url.Parse("http://" + ingressURL) - if err != nil { - return nil, err - } - } - return parsedURL, nil -} - -// SparkService encapsulates information about the driver UI service. -type SparkService struct { - serviceName string - serviceType apiv1.ServiceType - servicePort int32 - servicePortName string - targetPort intstr.IntOrString - serviceIP string - serviceAnnotations map[string]string - serviceLabels map[string]string -} - -// SparkIngress encapsulates information about the driver UI ingress. 
-type SparkIngress struct { - ingressName string - ingressURL *url.URL - ingressClassName string - annotations map[string]string - ingressTLS []networkingv1.IngressTLS -} - func createSparkUIIngress(app *v1beta2.SparkApplication, service SparkService, ingressURL *url.URL, ingressClassName string, kubeClient clientset.Interface) (*SparkIngress, error) { + ingressName := getDefaultUIIngressName(app) if util.IngressCapabilities.Has("networking.k8s.io/v1") { - return createSparkUIIngress_v1(app, service, ingressURL, ingressClassName, kubeClient) + return createDriverIngress_v1(app, service, ingressName, ingressURL, ingressClassName, kubeClient) } else { - return createSparkUIIngress_legacy(app, service, ingressURL, kubeClient) - } -} - -func createSparkUIIngress_v1(app *v1beta2.SparkApplication, service SparkService, ingressURL *url.URL, ingressClassName string, kubeClient clientset.Interface) (*SparkIngress, error) { - ingressResourceAnnotations := getIngressResourceAnnotations(app) - ingressTlsHosts := getIngressTlsHosts(app) - - ingressURLPath := ingressURL.Path - // If we're serving on a subpath, we need to ensure we create capture groups - if ingressURLPath != "" && ingressURLPath != "/" { - ingressURLPath = ingressURLPath + "(/|$)(.*)" - } - - implementationSpecific := networkingv1.PathTypeImplementationSpecific - - ingress := networkingv1.Ingress{ - ObjectMeta: metav1.ObjectMeta{ - Name: getDefaultUIIngressName(app), - Namespace: app.Namespace, - Labels: getResourceLabels(app), - OwnerReferences: []metav1.OwnerReference{*getOwnerReference(app)}, - }, - Spec: networkingv1.IngressSpec{ - Rules: []networkingv1.IngressRule{{ - Host: ingressURL.Host, - IngressRuleValue: networkingv1.IngressRuleValue{ - HTTP: &networkingv1.HTTPIngressRuleValue{ - Paths: []networkingv1.HTTPIngressPath{{ - Backend: networkingv1.IngressBackend{ - Service: &networkingv1.IngressServiceBackend{ - Name: service.serviceName, - Port: networkingv1.ServiceBackendPort{ - Number: service.servicePort, - }, - }, - }, - Path: ingressURLPath, - PathType: &implementationSpecific, - }}, - }, - }, - }}, - }, - } - - if len(ingressResourceAnnotations) != 0 { - ingress.ObjectMeta.Annotations = ingressResourceAnnotations - } - - // If we're serving on a subpath, we need to ensure we use the capture groups - if ingressURL.Path != "" && ingressURL.Path != "/" { - if ingress.ObjectMeta.Annotations == nil { - ingress.ObjectMeta.Annotations = make(map[string]string) - } - ingress.ObjectMeta.Annotations["nginx.ingress.kubernetes.io/rewrite-target"] = "/$2" - } - if len(ingressTlsHosts) != 0 { - ingress.Spec.TLS = ingressTlsHosts - } - if len(ingressClassName) != 0 { - ingress.Spec.IngressClassName = &ingressClassName - } - - glog.Infof("Creating an Ingress %s for the Spark UI for application %s", ingress.Name, app.Name) - _, err := kubeClient.NetworkingV1().Ingresses(ingress.Namespace).Create(context.TODO(), &ingress, metav1.CreateOptions{}) - if err != nil { - return nil, err - } - return &SparkIngress{ - ingressName: ingress.Name, - ingressURL: ingressURL, - ingressClassName: ingressClassName, - annotations: ingress.Annotations, - ingressTLS: ingressTlsHosts, - }, nil -} - -func createSparkUIIngress_legacy(app *v1beta2.SparkApplication, service SparkService, ingressURL *url.URL, kubeClient clientset.Interface) (*SparkIngress, error) { - ingressResourceAnnotations := getIngressResourceAnnotations(app) - // var ingressTlsHosts networkingv1.IngressTLS[] - // That we convert later for extensionsv1beta1, but return as is in SparkIngress - 
ingressTlsHosts := getIngressTlsHosts(app) - - ingressURLPath := ingressURL.Path - // If we're serving on a subpath, we need to ensure we create capture groups - if ingressURLPath != "" && ingressURLPath != "/" { - ingressURLPath = ingressURLPath + "(/|$)(.*)" - } - - ingress := extensions.Ingress{ - ObjectMeta: metav1.ObjectMeta{ - Name: getDefaultUIIngressName(app), - Namespace: app.Namespace, - Labels: getResourceLabels(app), - OwnerReferences: []metav1.OwnerReference{*getOwnerReference(app)}, - }, - Spec: extensions.IngressSpec{ - Rules: []extensions.IngressRule{{ - Host: ingressURL.Host, - IngressRuleValue: extensions.IngressRuleValue{ - HTTP: &extensions.HTTPIngressRuleValue{ - Paths: []extensions.HTTPIngressPath{{ - Backend: extensions.IngressBackend{ - ServiceName: service.serviceName, - ServicePort: intstr.IntOrString{ - Type: intstr.Int, - IntVal: service.servicePort, - }, - }, - Path: ingressURLPath, - }}, - }, - }, - }}, - }, - } - - if len(ingressResourceAnnotations) != 0 { - ingress.ObjectMeta.Annotations = ingressResourceAnnotations - } - - // If we're serving on a subpath, we need to ensure we use the capture groups - if ingressURL.Path != "" && ingressURL.Path != "/" { - if ingress.ObjectMeta.Annotations == nil { - ingress.ObjectMeta.Annotations = make(map[string]string) - } - ingress.ObjectMeta.Annotations["nginx.ingress.kubernetes.io/rewrite-target"] = "/$2" - } - if len(ingressTlsHosts) != 0 { - ingress.Spec.TLS = convertIngressTlsHostsToLegacy(ingressTlsHosts) - } - glog.Infof("Creating an extensions/v1beta1 Ingress %s for the Spark UI for application %s", ingress.Name, app.Name) - _, err := kubeClient.ExtensionsV1beta1().Ingresses(ingress.Namespace).Create(context.TODO(), &ingress, metav1.CreateOptions{}) - if err != nil { - return nil, err - } - return &SparkIngress{ - ingressName: ingress.Name, - ingressURL: ingressURL, - annotations: ingress.Annotations, - ingressTLS: ingressTlsHosts, - }, nil -} - -func convertIngressTlsHostsToLegacy(ingressTlsHosts []networkingv1.IngressTLS) []extensions.IngressTLS { - var ingressTlsHosts_legacy []extensions.IngressTLS - for _, ingressTlsHost := range ingressTlsHosts { - ingressTlsHosts_legacy = append(ingressTlsHosts_legacy, extensions.IngressTLS{ - Hosts: ingressTlsHost.Hosts, - SecretName: ingressTlsHost.SecretName, - }) + return createDriverIngress_legacy(app, service, ingressName, ingressURL, kubeClient) } - return ingressTlsHosts_legacy } func createSparkUIService( @@ -255,59 +54,11 @@ func createSparkUIService( if err != nil { return nil, fmt.Errorf("invalid Spark UI targetPort: %d", tPort) } - service := &apiv1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: getDefaultUIServiceName(app), - Namespace: app.Namespace, - Labels: getResourceLabels(app), - OwnerReferences: []metav1.OwnerReference{*getOwnerReference(app)}, - }, - Spec: apiv1.ServiceSpec{ - Ports: []apiv1.ServicePort{ - { - Name: portName, - Port: port, - TargetPort: intstr.IntOrString{ - Type: intstr.Int, - IntVal: tPort, - }, - }, - }, - Selector: map[string]string{ - config.SparkAppNameLabel: app.Name, - config.SparkRoleLabel: config.SparkDriverRole, - }, - Type: getUIServiceType(app), - }, - } - + serviceName := getDefaultUIServiceName(app) + serviceType := getUIServiceType(app) serviceAnnotations := getServiceAnnotations(app) - if len(serviceAnnotations) != 0 { - service.ObjectMeta.Annotations = serviceAnnotations - } - serviceLabels := getServiceLabels(app) - if len(serviceLabels) != 0 { - glog.Infof("Creating a service labels %s for the Spark UI: %v", 
service.Name, &serviceLabels) - service.ObjectMeta.Labels = serviceLabels - } - - glog.Infof("Creating a service %s for the Spark UI for application %s", service.Name, app.Name) - service, err = kubeClient.CoreV1().Services(app.Namespace).Create(context.TODO(), service, metav1.CreateOptions{}) - if err != nil { - return nil, err - } - - return &SparkService{ - serviceName: service.Name, - serviceType: service.Spec.Type, - servicePort: service.Spec.Ports[0].Port, - servicePortName: service.Spec.Ports[0].Name, - targetPort: service.Spec.Ports[0].TargetPort, - serviceIP: service.Spec.ClusterIP, - serviceAnnotations: serviceAnnotations, - serviceLabels: serviceLabels, - }, nil + return createDriverIngressService(app, portName, port, tPort, serviceName, serviceType, serviceAnnotations, serviceLabels, kubeClient) } // getWebUITargetPort attempts to get the Spark web UI port from configuration property spark.ui.port diff --git a/pkg/controller/sparkapplication/sparkui_test.go b/pkg/controller/sparkapplication/sparkui_test.go index 6122c88108..6427aa5304 100644 --- a/pkg/controller/sparkapplication/sparkui_test.go +++ b/pkg/controller/sparkapplication/sparkui_test.go @@ -400,7 +400,7 @@ func TestCreateSparkUIIngress(t *testing.T) { if err != nil { t.Fatal(err) } - ingressURL, err := getSparkUIingressURL(ingressURLFormat, test.app.Name, test.app.Namespace) + ingressURL, err := getDriverIngressURL(ingressURLFormat, test.app.Name, test.app.Namespace) if err != nil { t.Fatal(err) } From 3c753762ace8648c2eb273fc031917c59c396002 Mon Sep 17 00:00:00 2001 From: Aakcht Date: Sat, 1 Jun 2024 01:38:41 +0600 Subject: [PATCH 61/87] Chart: add POD_NAME env for leader election (#2039) Signed-off-by: aakcht --- charts/spark-operator-chart/Chart.yaml | 2 +- charts/spark-operator-chart/README.md | 2 +- charts/spark-operator-chart/templates/deployment.yaml | 8 ++++++++ 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/charts/spark-operator-chart/Chart.yaml b/charts/spark-operator-chart/Chart.yaml index ea9fd34809..b611737daf 100644 --- a/charts/spark-operator-chart/Chart.yaml +++ b/charts/spark-operator-chart/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: spark-operator description: A Helm chart for Spark on Kubernetes operator -version: 1.3.0 +version: 1.3.1 appVersion: v1beta2-1.4.2-3.5.0 keywords: - spark diff --git a/charts/spark-operator-chart/README.md b/charts/spark-operator-chart/README.md index 9dff10b3f6..b76bdfb249 100644 --- a/charts/spark-operator-chart/README.md +++ b/charts/spark-operator-chart/README.md @@ -1,6 +1,6 @@ # spark-operator -![Version: 1.3.0](https://img.shields.io/badge/Version-1.3.0-informational?style=flat-square) ![AppVersion: v1beta2-1.4.2-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.4.2--3.5.0-informational?style=flat-square) +![Version: 1.3.1](https://img.shields.io/badge/Version-1.3.1-informational?style=flat-square) ![AppVersion: v1beta2-1.4.2-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.4.2--3.5.0-informational?style=flat-square) A Helm chart for Spark on Kubernetes operator diff --git a/charts/spark-operator-chart/templates/deployment.yaml b/charts/spark-operator-chart/templates/deployment.yaml index 4f1f552e59..f318c27f4b 100644 --- a/charts/spark-operator-chart/templates/deployment.yaml +++ b/charts/spark-operator-chart/templates/deployment.yaml @@ -48,6 +48,14 @@ spec: - name: {{ .Chart.Name }} image: {{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }} imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- 
if gt (int .Values.replicaCount) 1 }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + {{- end }} envFrom: {{- toYaml .Values.envFrom | nindent 10 }} securityContext: From 5a5983b1ae3fc760ca5b9a2bab3549a7ee130b31 Mon Sep 17 00:00:00 2001 From: Yuri Niitsuma Date: Fri, 31 May 2024 16:40:41 -0300 Subject: [PATCH 62/87] :memo: Add Inter&Co to who-is-using.md (#2040) Add [Inter&Co](https://inter.co/) to who is using the SparkOperator. Signed-off-by: Yuri Niitsuma --- docs/who-is-using.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/who-is-using.md b/docs/who-is-using.md index 18a0caa12c..04ed6cf755 100644 --- a/docs/who-is-using.md +++ b/docs/who-is-using.md @@ -45,3 +45,4 @@ | [Qualytics](https://www.qualytics.co/) | @josecsotomorales | Production | Data Quality Platform | | [Roblox](https://www.roblox.com/) | @matschaffer-roblox | Evaluation | Data Infrastructure | | [Rokt](https://www.rokt.com) | @jacobsalway | Production | Data Infrastructure | +| [Inter&Co](https://inter.co/) | @ignitz | Production | Data pipelines | From 05732225b6cea29646090ef6eee5f4182a4dd546 Mon Sep 17 00:00:00 2001 From: Mark Schroering Date: Tue, 4 Jun 2024 01:02:02 -0400 Subject: [PATCH 63/87] Add restartPolicy field to SparkApplication Driver/Executor initContainers CRDs (#2022) * Add restartPolicy field to SparkApplication CRDs Signed-off-by: Mark Schroering * Add restartPolicy field to SparkApplication CRDs Signed-off-by: Mark Schroering * Add restartPolicy field to SparkApplication CRDs Signed-off-by: Mark Schroering --------- Signed-off-by: Mark Schroering --- .../crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml | 4 ++++ .../crds/sparkoperator.k8s.io_sparkapplications.yaml | 4 ++++ .../crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml | 4 ++++ manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml | 4 ++++ 4 files changed, 16 insertions(+) diff --git a/charts/spark-operator-chart/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml b/charts/spark-operator-chart/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml index fe4ba87fee..60e836b083 100644 --- a/charts/spark-operator-chart/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml +++ b/charts/spark-operator-chart/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml @@ -917,6 +917,8 @@ spec: x-kubernetes-int-or-string: true type: object type: object + restartPolicy: + type: string securityContext: properties: seccompProfile: @@ -2748,6 +2750,8 @@ spec: x-kubernetes-int-or-string: true type: object type: object + restartPolicy: + type: string securityContext: properties: seccompProfile: diff --git a/charts/spark-operator-chart/crds/sparkoperator.k8s.io_sparkapplications.yaml b/charts/spark-operator-chart/crds/sparkoperator.k8s.io_sparkapplications.yaml index 927424177d..c67bb2afaa 100644 --- a/charts/spark-operator-chart/crds/sparkoperator.k8s.io_sparkapplications.yaml +++ b/charts/spark-operator-chart/crds/sparkoperator.k8s.io_sparkapplications.yaml @@ -903,6 +903,8 @@ spec: x-kubernetes-int-or-string: true type: object type: object + restartPolicy: + type: string securityContext: properties: seccompProfile: @@ -2734,6 +2736,8 @@ spec: x-kubernetes-int-or-string: true type: object type: object + restartPolicy: + type: string securityContext: properties: seccompProfile: diff --git a/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml b/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml index fe4ba87fee..60e836b083 100644 --- 
a/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml +++ b/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml @@ -917,6 +917,8 @@ spec: x-kubernetes-int-or-string: true type: object type: object + restartPolicy: + type: string securityContext: properties: seccompProfile: @@ -2748,6 +2750,8 @@ spec: x-kubernetes-int-or-string: true type: object type: object + restartPolicy: + type: string securityContext: properties: seccompProfile: diff --git a/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml b/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml index 927424177d..c67bb2afaa 100644 --- a/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml +++ b/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml @@ -903,6 +903,8 @@ spec: x-kubernetes-int-or-string: true type: object type: object + restartPolicy: + type: string securityContext: properties: seccompProfile: @@ -2734,6 +2736,8 @@ spec: x-kubernetes-int-or-string: true type: object type: object + restartPolicy: + type: string securityContext: properties: seccompProfile: From 2219563612395f4d72e74ac23cffd06dd14f8336 Mon Sep 17 00:00:00 2001 From: Yi Chen Date: Wed, 5 Jun 2024 10:11:03 +0800 Subject: [PATCH 64/87] Bump appVersion to v1beta2-1.5.0-3.5.0 (#2044) Signed-off-by: Yi Chen --- charts/spark-operator-chart/Chart.yaml | 4 ++-- charts/spark-operator-chart/README.md | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/charts/spark-operator-chart/Chart.yaml b/charts/spark-operator-chart/Chart.yaml index b611737daf..acfd82d22b 100644 --- a/charts/spark-operator-chart/Chart.yaml +++ b/charts/spark-operator-chart/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 name: spark-operator description: A Helm chart for Spark on Kubernetes operator -version: 1.3.1 -appVersion: v1beta2-1.4.2-3.5.0 +version: 1.3.2 +appVersion: v1beta2-1.5.0-3.5.0 keywords: - spark home: https://github.com/kubeflow/spark-operator diff --git a/charts/spark-operator-chart/README.md b/charts/spark-operator-chart/README.md index b76bdfb249..6e4d34ee33 100644 --- a/charts/spark-operator-chart/README.md +++ b/charts/spark-operator-chart/README.md @@ -1,6 +1,6 @@ # spark-operator -![Version: 1.3.1](https://img.shields.io/badge/Version-1.3.1-informational?style=flat-square) ![AppVersion: v1beta2-1.4.2-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.4.2--3.5.0-informational?style=flat-square) +![Version: 1.3.2](https://img.shields.io/badge/Version-1.3.2-informational?style=flat-square) ![AppVersion: v1beta2-1.5.0-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.5.0--3.5.0-informational?style=flat-square) A Helm chart for Spark on Kubernetes operator From 089546e6bf41dd32eabf4ea5e91e887a453ae8bf Mon Sep 17 00:00:00 2001 From: Yi Chen Date: Wed, 5 Jun 2024 10:12:02 +0800 Subject: [PATCH 65/87] Add ChenYi015 as spark-operator reviewer (#2045) Signed-off-by: Yi Chen --- OWNERS | 2 ++ 1 file changed, 2 insertions(+) diff --git a/OWNERS b/OWNERS index dc9b5fe9f0..17604590b2 100644 --- a/OWNERS +++ b/OWNERS @@ -3,3 +3,5 @@ approvers: - mwielgus - yuchaoran2011 - vara-bonthu +reviewers: + - ChenYi015 From 5ce3dbacff76bba364055b9b786110f1a4ab3174 Mon Sep 17 00:00:00 2001 From: Yi Chen Date: Wed, 5 Jun 2024 22:39:03 +0800 Subject: [PATCH 66/87] Certificates are generated by the operator rather than gencerts.sh (#2016) Signed-off-by: Yi Chen --- Dockerfile | 4 +- Makefile | 2 +- charts/spark-operator-chart/Chart.yaml | 4 +- charts/spark-operator-chart/README.md | 10 +- .../templates/deployment.yaml | 27 +--
.../spark-operator-chart/templates/rbac.yaml | 19 +- .../templates/serviceaccount.yaml | 11 +- .../templates/webhook-cleanup-job.yaml | 62 ------ .../templates/webhook-init-job.yaml | 52 ----- .../templates/webhook/_helpers.tpl | 14 ++ .../templates/webhook/secret.yaml | 13 ++ .../service.yaml} | 14 +- .../tests/deployment_test.yaml | 25 --- .../tests/webhook/secret_test.yaml | 31 +++ .../service_test.yaml} | 4 +- charts/spark-operator-chart/values.yaml | 29 --- hack/gencerts.sh | 182 ----------------- pkg/util/cert.go | 70 +++++++ pkg/util/cert_test.go | 39 ++++ pkg/webhook/certs.go | 185 ++++++++++++------ pkg/webhook/certs_test.go | 118 +++++++++++ pkg/webhook/webhook.go | 114 +++++++---- 22 files changed, 530 insertions(+), 499 deletions(-) delete mode 100644 charts/spark-operator-chart/templates/webhook-cleanup-job.yaml delete mode 100644 charts/spark-operator-chart/templates/webhook-init-job.yaml create mode 100644 charts/spark-operator-chart/templates/webhook/_helpers.tpl create mode 100644 charts/spark-operator-chart/templates/webhook/secret.yaml rename charts/spark-operator-chart/templates/{webhook-service.yaml => webhook/service.yaml} (64%) create mode 100644 charts/spark-operator-chart/tests/webhook/secret_test.yaml rename charts/spark-operator-chart/tests/{webhook-service_test.yaml => webhook/service_test.yaml} (90%) delete mode 100755 hack/gencerts.sh create mode 100644 pkg/util/cert.go create mode 100644 pkg/util/cert_test.go create mode 100644 pkg/webhook/certs_test.go diff --git a/Dockerfile b/Dockerfile index 1d696970a3..c126cca210 100644 --- a/Dockerfile +++ b/Dockerfile @@ -40,9 +40,9 @@ USER root COPY --from=builder /usr/bin/spark-operator /usr/bin/ RUN apt-get update --allow-releaseinfo-change \ && apt-get update \ - && apt-get install -y openssl curl tini \ + && apt-get install -y tini \ && rm -rf /var/lib/apt/lists/* -COPY hack/gencerts.sh /usr/bin/ COPY entrypoint.sh /usr/bin/ + ENTRYPOINT ["/usr/bin/entrypoint.sh"] diff --git a/Makefile b/Makefile index d8947d5634..b7ae2b3781 100644 --- a/Makefile +++ b/Makefile @@ -45,7 +45,7 @@ build-api-docs: -out-file /repo/docs/api-docs.md" helm-unittest: - helm unittest charts/spark-operator-chart --strict + helm unittest charts/spark-operator-chart --strict --file "tests/**/*_test.yaml" helm-lint: docker run --rm --workdir /workspace --volume "$$(pwd):/workspace" quay.io/helmpack/chart-testing:latest ct lint diff --git a/charts/spark-operator-chart/Chart.yaml b/charts/spark-operator-chart/Chart.yaml index acfd82d22b..9107161ade 100644 --- a/charts/spark-operator-chart/Chart.yaml +++ b/charts/spark-operator-chart/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 name: spark-operator description: A Helm chart for Spark on Kubernetes operator -version: 1.3.2 -appVersion: v1beta2-1.5.0-3.5.0 +version: 1.4.0 +appVersion: v1beta2-1.6.0-3.5.0 keywords: - spark home: https://github.com/kubeflow/spark-operator diff --git a/charts/spark-operator-chart/README.md b/charts/spark-operator-chart/README.md index 6e4d34ee33..b84e963604 100644 --- a/charts/spark-operator-chart/README.md +++ b/charts/spark-operator-chart/README.md @@ -1,6 +1,6 @@ # spark-operator -![Version: 1.3.2](https://img.shields.io/badge/Version-1.3.2-informational?style=flat-square) ![AppVersion: v1beta2-1.5.0-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.5.0--3.5.0-informational?style=flat-square) +![Version: 1.4.0](https://img.shields.io/badge/Version-1.4.0-informational?style=flat-square) ![AppVersion: 
v1beta2-1.6.0-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.6.0--3.5.0-informational?style=flat-square) A Helm chart for Spark on Kubernetes operator @@ -131,17 +131,11 @@ See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall) for command docum | uiService.enable | bool | `true` | Enable UI service creation for Spark application | | volumeMounts | list | `[]` | | | volumes | list | `[]` | | -| webhook.cleanupAnnotations | object | `{"helm.sh/hook":"pre-delete, pre-upgrade","helm.sh/hook-delete-policy":"hook-succeeded"}` | The annotations applied to the cleanup job, required for helm lifecycle hooks | -| webhook.cleanupPodLabels | object | `{}` | The podLabels applied to the pod of the cleanup job | -| webhook.cleanupResources | object | `{}` | Resources applied to cleanup job | | webhook.enable | bool | `false` | Enable webhook server | -| webhook.initAnnotations | object | `{"helm.sh/hook":"pre-install, pre-upgrade","helm.sh/hook-weight":"50"}` | The annotations applied to init job, required to restore certs deleted by the cleanup job during upgrade | -| webhook.initPodLabels | object | `{}` | The podLabels applied to the pod of the init job | -| webhook.initResources | object | `{}` | Resources applied to init job | | webhook.namespaceSelector | string | `""` | The webhook server will only operate on namespaces with this label, specified in the form key1=value1,key2=value2. Empty string (default) will operate on all namespaces | | webhook.port | int | `8080` | Webhook service port | | webhook.portName | string | `"webhook"` | Webhook container port name and service target port name | -| webhook.timeout | int | `30` | | +| webhook.timeout | int | `30` | The annotations applied to init job, required to restore certs deleted by the cleanup job during upgrade | ## Maintainers diff --git a/charts/spark-operator-chart/templates/deployment.yaml b/charts/spark-operator-chart/templates/deployment.yaml index f318c27f4b..485620fee6 100644 --- a/charts/spark-operator-chart/templates/deployment.yaml +++ b/charts/spark-operator-chart/templates/deployment.yaml @@ -92,11 +92,13 @@ spec: {{- end }} {{- if .Values.webhook.enable }} - -enable-webhook=true + - -webhook-secret-name={{ include "spark-operator.webhookSecretName" . }} + - -webhook-secret-namespace={{ .Release.Namespace }} + - -webhook-svc-name={{ include "spark-operator.webhookServiceName" . }} - -webhook-svc-namespace={{ .Release.Namespace }} + - -webhook-config-name={{ include "spark-operator.fullname" . }}-webhook-config - -webhook-port={{ .Values.webhook.port }} - -webhook-timeout={{ .Values.webhook.timeout }} - - -webhook-svc-name={{ include "spark-operator.fullname" . }}-webhook - - -webhook-config-name={{ include "spark-operator.fullname" . }}-webhook-config - -webhook-namespace-selector={{ .Values.webhook.namespaceSelector }} {{- end }} - -enable-resource-quota-enforcement={{ .Values.resourceQuotaEnforcement.enable }} @@ -109,30 +111,17 @@ spec: resources: {{- toYaml . | nindent 10 }} {{- end }} - {{- if or .Values.webhook.enable (ne (len .Values.volumeMounts) 0 ) }} - volumeMounts: - {{- end }} - {{- if .Values.webhook.enable }} - - name: webhook-certs - mountPath: /etc/webhook-certs - {{- end }} {{- with .Values.volumeMounts }} - {{- toYaml . | nindent 10 }} + volumeMounts: + {{- toYaml . | nindent 10 }} {{- end }} {{- with .Values.sidecars }} {{- toYaml . 
| nindent 6 }} {{- end }} - {{- if or .Values.webhook.enable (ne (len .Values.volumes) 0 ) }} + {{- with .Values.volumes }} volumes: - {{- end }} - {{- if .Values.webhook.enable }} - - name: webhook-certs - secret: - secretName: {{ include "spark-operator.fullname" . }}-webhook-certs - {{- end }} - {{- with .Values.volumes }} {{- toYaml . | nindent 8 }} - {{- end }} + {{- end }} {{- with .Values.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} diff --git a/charts/spark-operator-chart/templates/rbac.yaml b/charts/spark-operator-chart/templates/rbac.yaml index 3e9b227137..aa110ff497 100644 --- a/charts/spark-operator-chart/templates/rbac.yaml +++ b/charts/spark-operator-chart/templates/rbac.yaml @@ -3,15 +3,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: {{ include "spark-operator.fullname" . }} - annotations: - "helm.sh/hook": pre-install, pre-upgrade - "helm.sh/hook-delete-policy": hook-failed, before-hook-creation - "helm.sh/hook-weight": "-10" - {{- with .Values.rbac.annotations }} - {{- toYaml . | nindent 4 }} - {{- end }} labels: {{- include "spark-operator.labels" . | nindent 4 }} + {{- with .Values.rbac.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} rules: - apiGroups: - "" @@ -134,12 +131,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: {{ include "spark-operator.fullname" . }} - annotations: - "helm.sh/hook": pre-install, pre-upgrade - "helm.sh/hook-delete-policy": hook-failed, before-hook-creation - "helm.sh/hook-weight": "-10" labels: {{- include "spark-operator.labels" . | nindent 4 }} + {{- with .Values.rbac.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} subjects: - kind: ServiceAccount name: {{ include "spark-operator.serviceAccountName" . }} diff --git a/charts/spark-operator-chart/templates/serviceaccount.yaml b/charts/spark-operator-chart/templates/serviceaccount.yaml index 2a5e4f208f..a75f231901 100644 --- a/charts/spark-operator-chart/templates/serviceaccount.yaml +++ b/charts/spark-operator-chart/templates/serviceaccount.yaml @@ -3,13 +3,10 @@ apiVersion: v1 kind: ServiceAccount metadata: name: {{ include "spark-operator.serviceAccountName" . }} - annotations: - "helm.sh/hook": pre-install, pre-upgrade - "helm.sh/hook-delete-policy": hook-failed, before-hook-creation - "helm.sh/hook-weight": "-10" -{{- with .Values.serviceAccounts.sparkoperator.annotations }} -{{ toYaml . | indent 4 }} -{{- end }} labels: {{- include "spark-operator.labels" . | nindent 4 }} + {{- with .Values.serviceAccounts.sparkoperator.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} {{- end }} diff --git a/charts/spark-operator-chart/templates/webhook-cleanup-job.yaml b/charts/spark-operator-chart/templates/webhook-cleanup-job.yaml deleted file mode 100644 index f115d955c5..0000000000 --- a/charts/spark-operator-chart/templates/webhook-cleanup-job.yaml +++ /dev/null @@ -1,62 +0,0 @@ -{{ if .Values.webhook.enable }} -apiVersion: batch/v1 -kind: Job -metadata: - name: {{ include "spark-operator.fullname" . }}-webhook-cleanup - annotations: - {{- toYaml .Values.webhook.cleanupAnnotations | nindent 4 }} - labels: - {{- include "spark-operator.labels" . | nindent 4 }} -spec: - template: - metadata: - name: {{ include "spark-operator.fullname" . 
}}-webhook-cleanup - {{- if .Values.istio.enabled }} - annotations: - "sidecar.istio.io/inject": "false" - {{- end }} - {{- if .Values.webhook.cleanupPodLabels }} - labels: - {{- toYaml .Values.webhook.cleanupPodLabels | nindent 8 }} - {{- end }} - spec: - serviceAccountName: {{ include "spark-operator.serviceAccountName" . }} - restartPolicy: OnFailure - {{- with .Values.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 8 }} - {{- end }} - containers: - - name: clean-secret - image: {{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }} - imagePullPolicy: {{ .Values.image.pullPolicy }} - securityContext: - {{- toYaml .Values.securityContext | nindent 10 }} - command: - - "/bin/sh" - - "-c" - - "curl -ik \ - -X DELETE \ - -H \"Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)\" \ - -H \"Accept: application/json\" \ - -H \"Content-Type: application/json\" \ - https://kubernetes.default.svc/api/v1/namespaces/{{ .Release.Namespace }}/secrets/{{ include "spark-operator.fullname" . }}-webhook-certs \ - && \ - curl -ik \ - -X DELETE \ - -H \"Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)\" \ - -H \"Accept: application/json\" \ - -H \"Content-Type: application/json\" \ - --data \"{\\\"kind\\\":\\\"DeleteOptions\\\",\\\"apiVersion\\\":\\\"batch/v1\\\",\\\"propagationPolicy\\\":\\\"Foreground\\\"}\" \ - https://kubernetes.default.svc/apis/batch/v1/namespaces/{{ .Release.Namespace }}/jobs/{{ include "spark-operator.fullname" . }}-webhook-init" - resources: - {{- toYaml .Values.webhook.cleanupResources | nindent 10 }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} -{{ end }} diff --git a/charts/spark-operator-chart/templates/webhook-init-job.yaml b/charts/spark-operator-chart/templates/webhook-init-job.yaml deleted file mode 100644 index 09f398c4c1..0000000000 --- a/charts/spark-operator-chart/templates/webhook-init-job.yaml +++ /dev/null @@ -1,52 +0,0 @@ -{{ if .Values.webhook.enable }} -apiVersion: batch/v1 -kind: Job -metadata: - name: {{ include "spark-operator.fullname" . }}-webhook-init - annotations: - {{- toYaml .Values.webhook.initAnnotations | nindent 4 }} - labels: - {{- include "spark-operator.labels" . | nindent 4 }} -spec: - template: - metadata: - name: {{ include "spark-operator.fullname" . }}-webhook-init - {{- if .Values.istio.enabled }} - annotations: - "sidecar.istio.io/inject": "false" - {{- end }} - {{- if .Values.webhook.initPodLabels }} - labels: - {{- toYaml .Values.webhook.initPodLabels | nindent 8 }} - {{- end }} - spec: - serviceAccountName: {{ include "spark-operator.serviceAccountName" . }} - restartPolicy: OnFailure - {{- with .Values.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 8 }} - {{- end }} - containers: - - name: main - image: {{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }} - imagePullPolicy: {{ .Values.image.pullPolicy }} - securityContext: - {{- toYaml .Values.securityContext | nindent 10 }} - command: [ - "/usr/bin/gencerts.sh", - "-n", "{{ .Release.Namespace }}", - "-s", "{{ include "spark-operator.fullname" . }}-webhook", - "-r", "{{ include "spark-operator.fullname" . }}-webhook-certs", - "-p" - ] - resources: - {{- toYaml .Values.webhook.initResources | nindent 10 }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . 
| nindent 8 }} - {{- end }} - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} -{{ end }} diff --git a/charts/spark-operator-chart/templates/webhook/_helpers.tpl b/charts/spark-operator-chart/templates/webhook/_helpers.tpl new file mode 100644 index 0000000000..9600011294 --- /dev/null +++ b/charts/spark-operator-chart/templates/webhook/_helpers.tpl @@ -0,0 +1,14 @@ +{{/* +Create the name of the secret to be used by webhook +*/}} +{{- define "spark-operator.webhookSecretName" -}} +{{ include "spark-operator.fullname" . }}-webhook-certs +{{- end -}} + + +{{/* +Create the name of the service to be used by webhook +*/}} +{{- define "spark-operator.webhookServiceName" -}} +{{ include "spark-operator.fullname" . }}-webhook-svc +{{- end -}} diff --git a/charts/spark-operator-chart/templates/webhook/secret.yaml b/charts/spark-operator-chart/templates/webhook/secret.yaml new file mode 100644 index 0000000000..672738f2c0 --- /dev/null +++ b/charts/spark-operator-chart/templates/webhook/secret.yaml @@ -0,0 +1,13 @@ +{{- if .Values.webhook.enable -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "spark-operator.webhookSecretName" . }} + labels: + {{- include "spark-operator.labels" . | nindent 4 }} +data: + ca-key.pem: "" + ca-cert.pem: "" + server-key.pem: "" + server-cert.pem: "" +{{- end }} diff --git a/charts/spark-operator-chart/templates/webhook-service.yaml b/charts/spark-operator-chart/templates/webhook/service.yaml similarity index 64% rename from charts/spark-operator-chart/templates/webhook-service.yaml rename to charts/spark-operator-chart/templates/webhook/service.yaml index a26375db67..e31f8236b5 100644 --- a/charts/spark-operator-chart/templates/webhook-service.yaml +++ b/charts/spark-operator-chart/templates/webhook/service.yaml @@ -1,15 +1,15 @@ -{{ if .Values.webhook.enable }} -kind: Service +{{- if .Values.webhook.enable -}} apiVersion: v1 +kind: Service metadata: - name: {{ include "spark-operator.fullname" . }}-webhook + name: {{ include "spark-operator.webhookServiceName" . }} labels: {{- include "spark-operator.labels" . | nindent 4 }} spec: + selector: + {{- include "spark-operator.selectorLabels" . | nindent 4 }} ports: - port: 443 targetPort: {{ .Values.webhook.portName | quote }} - name: webhook - selector: - {{- include "spark-operator.selectorLabels" . 
| nindent 4 }} -{{ end }} + name: {{ .Values.webhook.portName }} +{{- end }} diff --git a/charts/spark-operator-chart/tests/deployment_test.yaml b/charts/spark-operator-chart/tests/deployment_test.yaml index 34393bd33e..5debda1932 100644 --- a/charts/spark-operator-chart/tests/deployment_test.yaml +++ b/charts/spark-operator-chart/tests/deployment_test.yaml @@ -160,31 +160,6 @@ tests: memory: "128Mi" cpu: "500m" - - it: Should add webhook certs volume if webhook.enable is true - set: - webhook: - enable: true - asserts: - - contains: - path: spec.template.spec.volumes - content: - name: webhook-certs - secret: - secretName: spark-operator-webhook-certs - count: 1 - - - it: Should add webhook certs volume mounts if webhook.enable is true - set: - webhook: - enable: true - asserts: - - contains: - path: spec.template.spec.containers[0].volumeMounts - content: - name: webhook-certs - mountPath: /etc/webhook-certs - count: 1 - - it: Should add sidecars if sidecars is set set: sidecars: diff --git a/charts/spark-operator-chart/tests/webhook/secret_test.yaml b/charts/spark-operator-chart/tests/webhook/secret_test.yaml new file mode 100644 index 0000000000..0e9c3b4cfd --- /dev/null +++ b/charts/spark-operator-chart/tests/webhook/secret_test.yaml @@ -0,0 +1,31 @@ +suite: Test spark operator webhook secret + +templates: + - webhook/secret.yaml + +release: + name: spark-operator + namespace: spark-operator + +tests: + - it: Should not render the webhook secret if webhook.enable is false + asserts: + - hasDocuments: + count: 0 + + - it: Should render the webhook secret with empty data fields + set: + webhook: + enable: true + asserts: + - containsDocument: + apiVersion: v1 + kind: Secret + name: spark-operator-webhook-certs + - equal: + path: data + value: + ca-key.pem: "" + ca-cert.pem: "" + server-key.pem: "" + server-cert.pem: "" diff --git a/charts/spark-operator-chart/tests/webhook-service_test.yaml b/charts/spark-operator-chart/tests/webhook/service_test.yaml similarity index 90% rename from charts/spark-operator-chart/tests/webhook-service_test.yaml rename to charts/spark-operator-chart/tests/webhook/service_test.yaml index 4b57acdf62..d3b6b1cc26 100644 --- a/charts/spark-operator-chart/tests/webhook-service_test.yaml +++ b/charts/spark-operator-chart/tests/webhook/service_test.yaml @@ -1,7 +1,7 @@ suite: Test spark operator webhook service templates: - - webhook-service.yaml + - webhook/service.yaml release: name: spark-operator @@ -24,7 +24,7 @@ tests: - containsDocument: apiVersion: v1 kind: Service - name: spark-operator-webhook + name: spark-operator-webhook-svc - equal: path: spec.ports[0] value: diff --git a/charts/spark-operator-chart/values.yaml b/charts/spark-operator-chart/values.yaml index 2cbd088b33..1dd9a5b15d 100644 --- a/charts/spark-operator-chart/values.yaml +++ b/charts/spark-operator-chart/values.yaml @@ -104,35 +104,6 @@ webhook: # Empty string (default) will operate on all namespaces namespaceSelector: "" # -- The annotations applied to init job, required to restore certs deleted by the cleanup job during upgrade - initAnnotations: - "helm.sh/hook": pre-install, pre-upgrade - "helm.sh/hook-weight": "50" - # -- The podLabels applied to the pod of the init job - initPodLabels: {} - # -- Resources applied to init job - initResources: {} - # limits: - # cpu: 100m - # memory: 300Mi - # requests: - # cpu: 100m - # memory: 300Mi - # -- The annotations applied to the cleanup job, required for helm lifecycle hooks - cleanupAnnotations: - "helm.sh/hook": pre-delete, pre-upgrade - 
"helm.sh/hook-delete-policy": hook-succeeded - # -- Webhook Timeout in seconds - # -- The podLabels applied to the pod of the cleanup job - cleanupPodLabels: {} - # -- Resources applied to cleanup job - cleanupResources: {} - # limits: - # cpu: 100m - # memory: 300Mi - # requests: - # cpu: 100m - # memory: 300Mi - timeout: 30 metrics: diff --git a/hack/gencerts.sh b/hack/gencerts.sh deleted file mode 100755 index 14d3619e74..0000000000 --- a/hack/gencerts.sh +++ /dev/null @@ -1,182 +0,0 @@ -#!/bin/bash -# -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Generates a CA certificate, a server key, and a server certificate signed by the CA. - -set -e -SCRIPT=`basename ${BASH_SOURCE[0]}` -RESOURCE_NAME="spark-webhook-certs" - -function usage { - cat<< EOF - Usage: $SCRIPT - Options: - -h | --help Display help information. - -n | --namespace The namespace where the Spark operator is installed. - -s | --service The name of the webhook service. - -p | --in-pod Whether the script is running inside a pod or not. - -r | --resource-name The spark resource name that will hold the secret [default: $RESOURCE_NAME] -EOF -} - -function parse_arguments { - while [[ $# -gt 0 ]] - do - case "$1" in - -n|--namespace) - if [[ -n "$2" ]]; then - NAMESPACE="$2" - else - echo "-n or --namespace requires a value." - exit 1 - fi - shift 2 - continue - ;; - -s|--service) - if [[ -n "$2" ]]; then - SERVICE="$2" - else - echo "-s or --service requires a value." - exit 1 - fi - shift 2 - continue - ;; - -p|--in-pod) - export IN_POD=true - shift 1 - continue - ;; - -r|--resource-name) - if [[ -n "$2" ]]; then - RESOURCE_NAME="$2" - else - echo "-r or --resource-name requires a value." - exit 1 - fi - shift 2 - continue - ;; - -h|--help) - usage - exit 0 - ;; - --) # End of all options. - shift - break - ;; - '') # End of all options. - break - ;; - *) - echo "Unrecognized option: $1" - exit 1 - ;; - esac - done -} - -# Set the namespace to "sparkoperator" by default if not provided. -# Set the webhook service name to "spark-webhook" by default if not provided. -IN_POD=false -SERVICE="spark-webhook" -NAMESPACE="spark-operator" -parse_arguments "$@" - -TMP_DIR="/tmp/spark-pod-webhook-certs" - -echo "Generating certs for the Spark pod admission webhook in ${TMP_DIR}." -mkdir -p ${TMP_DIR} -cat > ${TMP_DIR}/server.conf << EOF -[req] -req_extensions = v3_req -distinguished_name = req_distinguished_name -[req_distinguished_name] -[ v3_req ] -basicConstraints = CA:FALSE -keyUsage = nonRepudiation, digitalSignature, keyEncipherment -extendedKeyUsage = clientAuth, serverAuth -subjectAltName = DNS:${SERVICE}.${NAMESPACE}.svc -EOF - -# Create a certificate authority. -touch ${TMP_DIR}/.rnd -export RANDFILE=${TMP_DIR}/.rnd -openssl genrsa -out ${TMP_DIR}/ca-key.pem 2048 -openssl req -x509 -new -nodes -key ${TMP_DIR}/ca-key.pem -days 100000 -out ${TMP_DIR}/ca-cert.pem -subj "/CN=${SERVICE}.${NAMESPACE}.svc" - -# Create a server certificate. 
-openssl genrsa -out ${TMP_DIR}/server-key.pem 2048 -# Note the CN is the DNS name of the service of the webhook. -openssl req -new -key ${TMP_DIR}/server-key.pem -out ${TMP_DIR}/server.csr -subj "/CN=${SERVICE}.${NAMESPACE}.svc" -config ${TMP_DIR}/server.conf -openssl x509 -req -in ${TMP_DIR}/server.csr -CA ${TMP_DIR}/ca-cert.pem -CAkey ${TMP_DIR}/ca-key.pem -CAcreateserial -out ${TMP_DIR}/server-cert.pem -days 100000 -extensions v3_req -extfile ${TMP_DIR}/server.conf - -if [[ "$IN_POD" == "true" ]]; then - TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) - - # Base64 encode secrets and then remove the trailing newline to avoid issues in the curl command - ca_cert=$(cat ${TMP_DIR}/ca-cert.pem | base64 | tr -d '\n') - ca_key=$(cat ${TMP_DIR}/ca-key.pem | base64 | tr -d '\n') - server_cert=$(cat ${TMP_DIR}/server-cert.pem | base64 | tr -d '\n') - server_key=$(cat ${TMP_DIR}/server-key.pem | base64 | tr -d '\n') - - # Create the secret resource - echo "Creating a secret for the certificate and keys" - STATUS=$(curl -ik \ - -o ${TMP_DIR}/output \ - -w "%{http_code}" \ - -X POST \ - -H "Authorization: Bearer $TOKEN" \ - -H 'Accept: application/json' \ - -H 'Content-Type: application/json' \ - -d '{ - "kind": "Secret", - "apiVersion": "v1", - "metadata": { - "name": "'"$RESOURCE_NAME"'", - "namespace": "'"$NAMESPACE"'" - }, - "data": { - "ca-cert.pem": "'"$ca_cert"'", - "ca-key.pem": "'"$ca_key"'", - "server-cert.pem": "'"$server_cert"'", - "server-key.pem": "'"$server_key"'" - } - }' \ - https://kubernetes.default.svc/api/v1/namespaces/${NAMESPACE}/secrets) - - cat ${TMP_DIR}/output - - case "$STATUS" in - 201) - printf "\nSuccess - secret created.\n" - ;; - 409) - printf "\nSuccess - secret already exists.\n" - ;; - *) - printf "\nFailed creating secret.\n" - exit 1 - ;; - esac -else - kubectl create secret --namespace=${NAMESPACE} generic ${RESOURCE_NAME} --from-file=${TMP_DIR}/ca-key.pem --from-file=${TMP_DIR}/ca-cert.pem --from-file=${TMP_DIR}/server-key.pem --from-file=${TMP_DIR}/server-cert.pem -fi - -# Clean up after we're done. -printf "\nDeleting ${TMP_DIR}.\n" -rm -rf ${TMP_DIR} diff --git a/pkg/util/cert.go b/pkg/util/cert.go new file mode 100644 index 0000000000..37188f3a34 --- /dev/null +++ b/pkg/util/cert.go @@ -0,0 +1,70 @@ +package util + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "fmt" + "math" + "math/big" + "time" + + "k8s.io/client-go/util/cert" +) + +const ( + RSAKeySize = 2048 +) + +func NewPrivateKey() (*rsa.PrivateKey, error) { + key, err := rsa.GenerateKey(rand.Reader, RSAKeySize) + if err != nil { + return nil, fmt.Errorf("failed to generate private key: %v", err) + } + return key, nil +} + +func NewSignedServerCert(cfg cert.Config, caKey *rsa.PrivateKey, caCert *x509.Certificate, serverKey *rsa.PrivateKey) (*x509.Certificate, error) { + // Generate a random serial number in [1, max). 
+ serial, err := rand.Int(rand.Reader, new(big.Int).SetInt64(math.MaxInt64-1)) + if err != nil { + return nil, fmt.Errorf("failed to generate serial number: %v", err) + } + serial.Add(serial, big.NewInt(1)) + + now := time.Now() + notBefore := now.UTC() + if !cfg.NotBefore.IsZero() { + notBefore = cfg.NotBefore.UTC() + } + + // Create a certificate template for webhook server + certTmpl := x509.Certificate{ + SerialNumber: serial, + Subject: pkix.Name{ + CommonName: cfg.CommonName, + Organization: cfg.Organization, + }, + DNSNames: cfg.AltNames.DNSNames, + IPAddresses: cfg.AltNames.IPs, + NotBefore: notBefore, + NotAfter: now.AddDate(10, 0, 0), + KeyUsage: x509.KeyUsageContentCommitment | x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, + BasicConstraintsValid: true, + IsCA: false, + } + + certBytes, err := x509.CreateCertificate(rand.Reader, &certTmpl, caCert, serverKey.Public(), caKey) + if err != nil { + return nil, fmt.Errorf("failed to generate certificate: %v", err) + } + + serverCert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate: %v", err) + } + + return serverCert, nil +} diff --git a/pkg/util/cert_test.go b/pkg/util/cert_test.go new file mode 100644 index 0000000000..700bc234d4 --- /dev/null +++ b/pkg/util/cert_test.go @@ -0,0 +1,39 @@ +package util + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "testing" + "time" + + "k8s.io/client-go/util/cert" +) + +func TestNewPrivateKey(t *testing.T) { + _, err := NewPrivateKey() + if err != nil { + t.Errorf("failed to generate private key: %v", err) + } +} + +func TestNewSignedServerCert(t *testing.T) { + cfg := cert.Config{ + CommonName: "test-server", + Organization: []string{"test-org"}, + NotBefore: time.Now(), + } + + caKey, _ := rsa.GenerateKey(rand.Reader, RSAKeySize) + caCert := &x509.Certificate{} + serverKey, _ := rsa.GenerateKey(rand.Reader, RSAKeySize) + + serverCert, err := NewSignedServerCert(cfg, caKey, caCert, serverKey) + if err != nil { + t.Errorf("failed to generate signed server certificate: %v", err) + } + + if serverCert == nil { + t.Error("server certificate is nil") + } +} diff --git a/pkg/webhook/certs.go b/pkg/webhook/certs.go index b5b61c3ed1..75e0668332 100644 --- a/pkg/webhook/certs.go +++ b/pkg/webhook/certs.go @@ -17,83 +17,154 @@ limitations under the License. package webhook import ( + "crypto/rsa" "crypto/tls" - "io/ioutil" - "sync" - "time" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + "net" - "github.com/golang/glog" + "k8s.io/client-go/util/cert" + + "github.com/kubeflow/spark-operator/pkg/util" +) + +const ( + Organization = "spark-operator" ) // certProvider is a container of a X509 certificate file and a corresponding key file for the // webhook server, and a CA certificate file for the API server to verify the server certificate. 
type certProvider struct { - serverCertFile string - serverKeyFile string - caCertFile string - reloadInterval time.Duration - ticker *time.Ticker - stopChannel chan interface{} - currentCert *tls.Certificate - certPointerMutex *sync.RWMutex + caKey *rsa.PrivateKey + caCert *x509.Certificate + serverKey *rsa.PrivateKey + serverCert *x509.Certificate } -func NewCertProvider(serverCertFile, serverKeyFile, caCertFile string, reloadInterval time.Duration) (*certProvider, error) { - cert, err := tls.LoadX509KeyPair(serverCertFile, serverKeyFile) +// NewCertProvider creates a new CertProvider instance. +func NewCertProvider(name, namespace string) (*certProvider, error) { + commonName := fmt.Sprintf("%s.%s.svc", name, namespace) + + // Generate CA private caKey + caKey, err := util.NewPrivateKey() if err != nil { - return nil, err - } - return &certProvider{ - serverCertFile: serverCertFile, - serverKeyFile: serverKeyFile, - caCertFile: caCertFile, - reloadInterval: reloadInterval, - currentCert: &cert, - stopChannel: make(chan interface{}), - ticker: time.NewTicker(reloadInterval), - certPointerMutex: &sync.RWMutex{}, - }, nil + return nil, fmt.Errorf("failed to generate CA private key: %v", err) + } + + // Generate self-signed CA certificate + caCfg := cert.Config{ + CommonName: commonName, + Organization: []string{Organization}, + } + caCert, err := cert.NewSelfSignedCACert(caCfg, caKey) + if err != nil { + return nil, fmt.Errorf("failed to generate self-signed CA certificate: %v", err) + } + + // Generate server private key + serverKey, err := util.NewPrivateKey() + if err != nil { + return nil, fmt.Errorf("failed to generate server private key: %v", err) + } + + // Generate signed server certificate + var ips []net.IP + dnsNames := []string{"localhost"} + hostIP := net.ParseIP(commonName) + if hostIP.To4() != nil { + ips = append(ips, hostIP.To4()) + } else { + dnsNames = append(dnsNames, commonName) + } + serverCfg := cert.Config{ + CommonName: commonName, + Organization: []string{Organization}, + AltNames: cert.AltNames{IPs: ips, DNSNames: dnsNames}, + } + serverCert, err := util.NewSignedServerCert(serverCfg, caKey, caCert, serverKey) + if err != nil { + return nil, fmt.Errorf("failed to generate signed server certificate: %v", err) + } + + certProvider := certProvider{ + caKey: caKey, + caCert: caCert, + serverKey: serverKey, + serverCert: serverCert, + } + + return &certProvider, nil } -func (c *certProvider) Start() { - go func() { - for { - select { - case <-c.stopChannel: - return - case <-c.ticker.C: - c.updateCert() - } - } - }() +// CAKey returns the PEM-encoded CA private key. +func (cp *certProvider) CAKey() ([]byte, error) { + if cp.caKey == nil { + return nil, errors.New("CA key is not set") + } + data := pem.EncodeToMemory(&pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: x509.MarshalPKCS1PrivateKey(cp.caKey), + }) + return data, nil } -func (c *certProvider) tlsConfig() *tls.Config { - return &tls.Config{ - GetCertificate: func(ch *tls.ClientHelloInfo) (*tls.Certificate, error) { - c.certPointerMutex.RLock() - defer c.certPointerMutex.RUnlock() - return c.currentCert, nil - }, +// CACert returns the PEM-encoded CA certificate. 
+func (cp *certProvider) CACert() ([]byte, error) { + if cp.caCert == nil { + return nil, errors.New("CA certificate is not set") } + data := pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: cp.serverCert.Raw, + }) + return data, nil } -func (c *certProvider) Stop() { - close(c.stopChannel) - c.ticker.Stop() +// ServerKey returns the PEM-encoded server private key. +func (cp *certProvider) ServerKey() ([]byte, error) { + if cp.serverKey == nil { + return nil, errors.New("server key is not set") + } + data := pem.EncodeToMemory(&pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: x509.MarshalPKCS1PrivateKey(cp.serverKey), + }) + return data, nil } -func (c *certProvider) updateCert() { - cert, err := tls.LoadX509KeyPair(c.serverCertFile, c.serverKeyFile) - if err != nil { - glog.Errorf("could not reload certificate %s (key %s): %v", c.serverCertFile, c.serverKeyFile, err) - return +// ServerCert returns the PEM-encoded server cert. +func (cp *certProvider) ServerCert() ([]byte, error) { + if cp.serverCert == nil { + return nil, errors.New("server cert is not set") } - c.certPointerMutex.Lock() - c.currentCert = &cert - c.certPointerMutex.Unlock() + data := pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: cp.serverCert.Raw, + }) + return data, nil } -func readCertFile(certFile string) ([]byte, error) { - return ioutil.ReadFile(certFile) +// TLSConfig returns the TLS configuration. +func (cp *certProvider) TLSConfig() (*tls.Config, error) { + keyPEMBlock, err := cp.ServerKey() + if err != nil { + return nil, fmt.Errorf("failed to get server key: %v", err) + } + + certPEMBlock, err := cp.ServerCert() + if err != nil { + return nil, fmt.Errorf("failed to get server certificate: %v", err) + } + + tlsCert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock) + if err != nil { + return nil, fmt.Errorf("failed to generate TLS certificate: %v", err) + } + + cfg := &tls.Config{ + Certificates: []tls.Certificate{tlsCert}, + } + return cfg, nil } diff --git a/pkg/webhook/certs_test.go b/pkg/webhook/certs_test.go new file mode 100644 index 0000000000..d8f10ec193 --- /dev/null +++ b/pkg/webhook/certs_test.go @@ -0,0 +1,118 @@ +package webhook + +import "testing" + +// TestNewCertProvider tests the NewCertProvider function. +func TestNewCertProvider(t *testing.T) { + name := "test-name" + namespace := "test-namespace" + + cp, err := NewCertProvider(name, namespace) + if err != nil { + t.Errorf("failed to create CertProvider: %v", err) + } + + // Check if the returned CertProvider has non-nil fields. + if cp.caKey == nil { + t.Error("CA key is nil") + } + if cp.caCert == nil { + t.Error("CA certificate is nil") + } + if cp.serverKey == nil { + t.Error("server key is nil") + } + if cp.serverCert == nil { + t.Error("server certificate is nil") + } +} + +// TestCAKey tests the CAKey method of certProvider. +func TestCAKey(t *testing.T) { + cp, err := NewCertProvider("test-name", "test-namespace") + if err != nil { + t.Errorf("failed to create CertProvider: %v", err) + } + + key, err := cp.CAKey() + if err != nil { + t.Errorf("failed to get CA key: %v", err) + } + + // Check if the returned key is not nil. + if key == nil { + t.Error("CA key is nil") + } +} + +// TestCACert tests the CACert method of certProvider. 
+func TestCACert(t *testing.T) { + cp, err := NewCertProvider("test-name", "test-namespace") + if err != nil { + t.Errorf("failed to create CertProvider: %v", err) + } + + cert, err := cp.CACert() + if err != nil { + t.Errorf("failed to get CA certificate: %v", err) + } + + // Check if the returned certificate is not nil. + if cert == nil { + t.Error("CA certificate is nil") + } +} + +// TestServerKey tests the ServerKey method of certProvider. +func TestServerKey(t *testing.T) { + cp, err := NewCertProvider("test-name", "test-namespace") + if err != nil { + t.Errorf("failed to create CertProvider: %v", err) + } + + key, err := cp.ServerKey() + if err != nil { + t.Errorf("failed to get server key: %v", err) + } + + // Check if the returned key is not nil. + if key == nil { + t.Error("server key is nil") + } +} + +// TestServerCert tests the ServerCert method of certProvider. +func TestServerCert(t *testing.T) { + cp, err := NewCertProvider("test-name", "test-namespace") + if err != nil { + t.Errorf("failed to create CertProvider: %v", err) + } + + cert, err := cp.ServerCert() + if err != nil { + t.Errorf("failed to get server certificate: %v", err) + } + + // Check if the returned certificate is not nil. + if cert == nil { + t.Error("server certificate is nil") + } +} + +// TestTLSConfig tests the TLSConfig method of certProvider. +func TestTLSConfig(t *testing.T) { + cp, err := NewCertProvider("test-name", "test-namespace") + if err != nil { + t.Errorf("failed to create CertProvider: %v", err) + } + + cfg, err := cp.TLSConfig() + if err != nil { + t.Errorf("failed to get TLS configuration: %v", err) + } + + // Check if the returned configuration is not nil. + if cfg == nil { + t.Error("TLS configuration is nil") + } +} diff --git a/pkg/webhook/webhook.go b/pkg/webhook/webhook.go index 661912f29d..a5b54e82c7 100644 --- a/pkg/webhook/webhook.go +++ b/pkg/webhook/webhook.go @@ -21,13 +21,12 @@ import ( "encoding/json" "flag" "fmt" - "io/ioutil" + "io" "net/http" "strings" "time" "github.com/golang/glog" - admissionv1 "k8s.io/api/admission/v1" arv1 "k8s.io/api/admissionregistration/v1" corev1 "k8s.io/api/core/v1" @@ -89,14 +88,12 @@ type WebHook struct { // Configuration parsed from command-line flags type webhookFlags struct { - serverCert string - serverCertKey string - caCert string - certReloadInterval time.Duration - webhookServiceNamespace string + webhookSecretName string + webhookSecretNamespace string webhookServiceName string - webhookPort int + webhookServiceNamespace string webhookConfigName string + webhookPort int webhookFailOnError bool webhookNamespaceSelector string } @@ -104,13 +101,11 @@ type webhookFlags struct { var userConfig webhookFlags func init() { - flag.StringVar(&userConfig.webhookConfigName, "webhook-config-name", "spark-webhook-config", "The name of the MutatingWebhookConfiguration object to create.") - flag.StringVar(&userConfig.serverCert, "webhook-server-cert", "/etc/webhook-certs/server-cert.pem", "Path to the X.509-formatted webhook certificate.") - flag.StringVar(&userConfig.serverCertKey, "webhook-server-cert-key", "/etc/webhook-certs/server-key.pem", "Path to the webhook certificate key.") - flag.StringVar(&userConfig.caCert, "webhook-ca-cert", "/etc/webhook-certs/ca-cert.pem", "Path to the X.509-formatted webhook CA certificate.") - flag.DurationVar(&userConfig.certReloadInterval, "webhook-cert-reload-interval", 15*time.Minute, "Time between webhook cert reloads.") - flag.StringVar(&userConfig.webhookServiceNamespace, "webhook-svc-namespace", 
"spark-operator", "The namespace of the Service for the webhook server.") + flag.StringVar(&userConfig.webhookSecretName, "webhook-secret-name", "spark-operator-tls", "The name of the secret that contains the webhook server's TLS certificate and key.") + flag.StringVar(&userConfig.webhookSecretNamespace, "webhook-secret-namespace", "spark-operator", "The namespace of the secret that contains the webhook server's TLS certificate and key.") flag.StringVar(&userConfig.webhookServiceName, "webhook-svc-name", "spark-webhook", "The name of the Service for the webhook server.") + flag.StringVar(&userConfig.webhookServiceNamespace, "webhook-svc-namespace", "spark-operator", "The namespace of the Service for the webhook server.") + flag.StringVar(&userConfig.webhookConfigName, "webhook-config-name", "spark-webhook-config", "The name of the MutatingWebhookConfiguration object to create.") flag.IntVar(&userConfig.webhookPort, "webhook-port", 8080, "Service port of the webhook server.") flag.BoolVar(&userConfig.webhookFailOnError, "webhook-fail-on-error", false, "Whether Kubernetes should reject requests when the webhook fails.") flag.StringVar(&userConfig.webhookNamespaceSelector, "webhook-namespace-selector", "", "The webhook will only operate on namespaces with this label, specified in the form key1=value1,key2=value2. Required if webhook-fail-on-error is true.") @@ -126,14 +121,12 @@ func New( coreV1InformerFactory informers.SharedInformerFactory, webhookTimeout *int) (*WebHook, error) { - cert, err := NewCertProvider( - userConfig.serverCert, - userConfig.serverCertKey, - userConfig.caCert, - userConfig.certReloadInterval, + certProvider, err := NewCertProvider( + userConfig.webhookServiceName, + userConfig.webhookServiceNamespace, ) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to create certificate provider: %v", err) } path := "/webhook" @@ -142,11 +135,12 @@ func New( Name: userConfig.webhookServiceName, Path: &path, } + hook := &WebHook{ clientset: clientset, informerFactory: informerFactory, lister: informerFactory.Sparkoperator().V1beta2().SparkApplications().Lister(), - certProvider: cert, + certProvider: certProvider, serviceRef: serviceRef, sparkJobNamespace: jobNamespace, deregisterOnExit: deregisterOnExit, @@ -205,8 +199,13 @@ func parseNamespaceSelector(selectorArg string) (*metav1.LabelSelector, error) { // Start starts the admission webhook server and registers itself to the API server. 
func (wh *WebHook) Start(stopCh <-chan struct{}) error { - wh.certProvider.Start() - wh.server.TLSConfig = wh.certProvider.tlsConfig() + wh.updateSecret(userConfig.webhookSecretName, userConfig.webhookSecretNamespace) + + tlsCfg, err := wh.certProvider.TLSConfig() + if err != nil { + return fmt.Errorf("failed to get TLS config: %v", err) + } + wh.server.TLSConfig = tlsCfg if wh.enableResourceQuotaEnforcement { err := wh.resourceQuotaEnforcer.WaitForCacheSync(stopCh) @@ -235,8 +234,6 @@ func (wh *WebHook) Stop() error { } glog.Infof("Webhook %s deregistered", userConfig.webhookConfigName) } - - wh.certProvider.Stop() ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() glog.Info("Stopping the Spark pod admission webhook server") @@ -247,7 +244,7 @@ func (wh *WebHook) serve(w http.ResponseWriter, r *http.Request) { glog.V(2).Info("Serving admission request") var body []byte if r.Body != nil { - data, err := ioutil.ReadAll(r.Body) + data, err := io.ReadAll(r.Body) if err != nil { internalError(w, fmt.Errorf("failed to read the request body")) return @@ -319,6 +316,57 @@ func (wh *WebHook) serve(w http.ResponseWriter, r *http.Request) { } } +func (wh *WebHook) updateSecret(name, namespace string) error { + secret, err := wh.clientset.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get webhook secret: %v", err) + } + + caKey, err := wh.certProvider.CAKey() + if err != nil { + return fmt.Errorf("failed to get CA key: %v", err) + } + + caCert, err := wh.certProvider.CACert() + if err != nil { + return fmt.Errorf("failed to get CA cert: %v", err) + } + + serverKey, err := wh.certProvider.ServerKey() + if err != nil { + return fmt.Errorf("failed to get server key: %v", err) + } + + serverCert, err := wh.certProvider.ServerCert() + if err != nil { + return fmt.Errorf("failed to get server cert: %v", err) + } + + newSecret := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Data: map[string][]byte{ + "ca-key.pem": caKey, + "ca-cert.pem": caCert, + "server-key.pem": serverKey, + "server-cert.pem": serverCert, + }, + } + + if !equality.Semantic.DeepEqual(newSecret, secret) { + secret.Data = newSecret.Data + _, err := wh.clientset.CoreV1().Secrets(namespace).Update(context.TODO(), secret, metav1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("failed to update webhook secret: %v", err) + } + } + + glog.Infof("Updated webhook secret %s/%s", namespace, name) + return nil +} + func unexpectedResourceType(w http.ResponseWriter, kind string) { denyRequest(w, fmt.Sprintf("unexpected resource type: %v", kind), http.StatusUnsupportedMediaType) } @@ -352,14 +400,14 @@ func denyRequest(w http.ResponseWriter, reason string, code int) { } func (wh *WebHook) selfRegistration(webhookConfigName string) error { - mwcClient := wh.clientset.AdmissionregistrationV1().MutatingWebhookConfigurations() - vwcClient := wh.clientset.AdmissionregistrationV1().ValidatingWebhookConfigurations() - - caCert, err := readCertFile(wh.certProvider.caCertFile) + caBundle, err := wh.certProvider.CACert() if err != nil { - return err + return fmt.Errorf("failed to get CA certificate: %v", err) } + mwcClient := wh.clientset.AdmissionregistrationV1().MutatingWebhookConfigurations() + vwcClient := wh.clientset.AdmissionregistrationV1().ValidatingWebhookConfigurations() + mutatingRules := []arv1.RuleWithOperations{ { Operations: []arv1.OperationType{arv1.Create}, @@ -389,7 +437,7 @@ 
func (wh *WebHook) selfRegistration(webhookConfigName string) error { Rules: mutatingRules, ClientConfig: arv1.WebhookClientConfig{ Service: wh.serviceRef, - CABundle: caCert, + CABundle: caBundle, }, FailurePolicy: &wh.failurePolicy, NamespaceSelector: wh.selector, @@ -403,7 +451,7 @@ func (wh *WebHook) selfRegistration(webhookConfigName string) error { Rules: validatingRules, ClientConfig: arv1.WebhookClientConfig{ Service: wh.serviceRef, - CABundle: caCert, + CABundle: caBundle, }, FailurePolicy: &wh.failurePolicy, NamespaceSelector: wh.selector, From 8f7e3251e7db10de241aa835183d85f106b15b5c Mon Sep 17 00:00:00 2001 From: Cian Gallagher Date: Sat, 15 Jun 2024 18:58:33 +0200 Subject: [PATCH 67/87] Update minikube version in CI (#2059) Signed-off-by: Cian Gallagher --- .github/workflows/main.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index 8212230131..40af1cc3b0 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -127,10 +127,10 @@ jobs: run: make detect-crds-drift - name: setup minikube - uses: manusa/actions-setup-minikube@v2.10.0 + uses: manusa/actions-setup-minikube@v2.11.0 with: - minikube version: v1.32.0 - kubernetes version: v1.28.8 + minikube version: v1.33.0 + kubernetes version: v1.30.0 start args: --memory 6g --cpus=2 --addons ingress github token: ${{ inputs.github-token }} @@ -154,10 +154,10 @@ jobs: go-version-file: "go.mod" - name: setup minikube - uses: manusa/actions-setup-minikube@v2.10.0 + uses: manusa/actions-setup-minikube@v2.11.0 with: - minikube version: v1.32.0 - kubernetes version: v1.28.8 + minikube version: v1.33.0 + kubernetes version: v1.30.0 start args: --memory 6g --cpus=2 --addons ingress github token: ${{ inputs.github-token }} From 0b67bae6bbfd1fe74910a9dc268629f82a90da59 Mon Sep 17 00:00:00 2001 From: Praveen Gajulapalli <13733716+pkgajulapalli@users.noreply.github.com> Date: Sat, 15 Jun 2024 23:12:34 +0530 Subject: [PATCH 68/87] Adding an option to set the priority class for spark-operator pod (#2043) * feat: give an option to set the priority class for spark-operator pod Signed-off-by: Praveen Gajulapalli <13733716+pkgajulapalli@users.noreply.github.com> * feat: bumped up helm chart version Signed-off-by: Praveen Gajulapalli <13733716+pkgajulapalli@users.noreply.github.com> * fix: fixed issue with position of priorityClassName Signed-off-by: Praveen Gajulapalli <13733716+pkgajulapalli@users.noreply.github.com> --- charts/spark-operator-chart/Chart.yaml | 2 +- charts/spark-operator-chart/README.md | 1 + charts/spark-operator-chart/templates/deployment.yaml | 3 +++ charts/spark-operator-chart/values.yaml | 3 +++ 4 files changed, 8 insertions(+), 1 deletion(-) diff --git a/charts/spark-operator-chart/Chart.yaml b/charts/spark-operator-chart/Chart.yaml index 9107161ade..7dcec3a527 100644 --- a/charts/spark-operator-chart/Chart.yaml +++ b/charts/spark-operator-chart/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: spark-operator description: A Helm chart for Spark on Kubernetes operator -version: 1.4.0 +version: 1.4.1 appVersion: v1beta2-1.6.0-3.5.0 keywords: - spark diff --git a/charts/spark-operator-chart/README.md b/charts/spark-operator-chart/README.md index b84e963604..50af340de1 100644 --- a/charts/spark-operator-chart/README.md +++ b/charts/spark-operator-chart/README.md @@ -110,6 +110,7 @@ See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall) for command docum | podMonitor.labels | object | `{}` | Pod monitor labels | 
| podMonitor.podMetricsEndpoint | object | `{"interval":"5s","scheme":"http"}` | Prometheus metrics endpoint properties. `metrics.portName` will be used as a port | | podSecurityContext | object | `{}` | Pod security context | +| priorityClassName | string | `""` | Priority class to be used for running spark-operator pod. This helps in managing the pods during preemption. | | rbac.annotations | object | `{}` | Optional annotations for rbac | | rbac.create | bool | `false` | **DEPRECATED** use `createRole` and `createClusterRole` | | rbac.createClusterRole | bool | `true` | Create and use RBAC `ClusterRole` resources | diff --git a/charts/spark-operator-chart/templates/deployment.yaml b/charts/spark-operator-chart/templates/deployment.yaml index 485620fee6..797ea998e7 100644 --- a/charts/spark-operator-chart/templates/deployment.yaml +++ b/charts/spark-operator-chart/templates/deployment.yaml @@ -122,6 +122,9 @@ spec: volumes: {{- toYaml . | nindent 8 }} {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} {{- with .Values.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} diff --git a/charts/spark-operator-chart/values.yaml b/charts/spark-operator-chart/values.yaml index 1dd9a5b15d..df08008dfc 100644 --- a/charts/spark-operator-chart/values.yaml +++ b/charts/spark-operator-chart/values.yaml @@ -181,3 +181,6 @@ istio: # labelSelectorFilter -- A comma-separated list of key=value, or key labels to filter resources during watch and list based on the specified labels. labelSelectorFilter: "" + +# priorityClassName -- A priority class to be used for running spark-operator pod. +priorityClassName: "" From 4774feced4540a6b51cf09fd2f2079052e117ab3 Mon Sep 17 00:00:00 2001 From: Cian Gallagher Date: Mon, 17 Jun 2024 18:32:57 +0200 Subject: [PATCH 69/87] Support objectSelector on mutating webhook (#2058) * feat: add support for setting objectSelector on webhook Signed-off-by: Cian Gallagher * feat: update objectSelector to match expressions Signed-off-by: Cian Gallagher * chore: use out of the box label parser Signed-off-by: Cian Gallagher * chore: update chart version Signed-off-by: Cian Gallagher * chore: update app version Signed-off-by: Cian Gallagher * fix: use parseSelector Signed-off-by: Cian Gallagher * ci: update minikube action to latest release Signed-off-by: Cian Gallagher * revert: undo ci changes. 
create separate PR Signed-off-by: Cian Gallagher * Trigger CI Signed-off-by: Cian Gallagher * chore: update chart version & docs following previous merge Signed-off-by: Cian Gallagher * docs: update docs Signed-off-by: Cian Gallagher --------- Signed-off-by: Cian Gallagher --- charts/spark-operator-chart/Chart.yaml | 4 +- charts/spark-operator-chart/README.md | 5 +- .../templates/deployment.yaml | 1 + charts/spark-operator-chart/values.yaml | 3 + pkg/webhook/webhook.go | 26 ++++-- pkg/webhook/webhook_test.go | 92 ++++++++++++++++++- 6 files changed, 119 insertions(+), 12 deletions(-) diff --git a/charts/spark-operator-chart/Chart.yaml b/charts/spark-operator-chart/Chart.yaml index 7dcec3a527..417418170a 100644 --- a/charts/spark-operator-chart/Chart.yaml +++ b/charts/spark-operator-chart/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 name: spark-operator description: A Helm chart for Spark on Kubernetes operator -version: 1.4.1 -appVersion: v1beta2-1.6.0-3.5.0 +version: 1.4.2 +appVersion: v1beta2-1.6.1-3.5.0 keywords: - spark home: https://github.com/kubeflow/spark-operator diff --git a/charts/spark-operator-chart/README.md b/charts/spark-operator-chart/README.md index 50af340de1..8dfe591d01 100644 --- a/charts/spark-operator-chart/README.md +++ b/charts/spark-operator-chart/README.md @@ -1,6 +1,6 @@ # spark-operator -![Version: 1.4.0](https://img.shields.io/badge/Version-1.4.0-informational?style=flat-square) ![AppVersion: v1beta2-1.6.0-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.6.0--3.5.0-informational?style=flat-square) +![Version: 1.4.2](https://img.shields.io/badge/Version-1.4.2-informational?style=flat-square) ![AppVersion: v1beta2-1.6.1-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.6.1--3.5.0-informational?style=flat-square) A Helm chart for Spark on Kubernetes operator @@ -110,7 +110,7 @@ See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall) for command docum | podMonitor.labels | object | `{}` | Pod monitor labels | | podMonitor.podMetricsEndpoint | object | `{"interval":"5s","scheme":"http"}` | Prometheus metrics endpoint properties. `metrics.portName` will be used as a port | | podSecurityContext | object | `{}` | Pod security context | -| priorityClassName | string | `""` | Priority class to be used for running spark-operator pod. This helps in managing the pods during preemption. | +| priorityClassName | string | `""` | A priority class to be used for running spark-operator pod. | | rbac.annotations | object | `{}` | Optional annotations for rbac | | rbac.create | bool | `false` | **DEPRECATED** use `createRole` and `createClusterRole` | | rbac.createClusterRole | bool | `true` | Create and use RBAC `ClusterRole` resources | @@ -134,6 +134,7 @@ See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall) for command docum | volumes | list | `[]` | | | webhook.enable | bool | `false` | Enable webhook server | | webhook.namespaceSelector | string | `""` | The webhook server will only operate on namespaces with this label, specified in the form key1=value1,key2=value2. Empty string (default) will operate on all namespaces | +| webhook.objectSelector | string | `""` | The webhook will only operate on resources with this label/s, specified in the form key1=value1,key2=value2, OR key in (value1,value2).
Empty string (default) will operate on all objects | | webhook.port | int | `8080` | Webhook service port | | webhook.portName | string | `"webhook"` | Webhook container port name and service target port name | | webhook.timeout | int | `30` | The annotations applied to init job, required to restore certs deleted by the cleanup job during upgrade | diff --git a/charts/spark-operator-chart/templates/deployment.yaml b/charts/spark-operator-chart/templates/deployment.yaml index 797ea998e7..cf12fb2e89 100644 --- a/charts/spark-operator-chart/templates/deployment.yaml +++ b/charts/spark-operator-chart/templates/deployment.yaml @@ -100,6 +100,7 @@ spec: - -webhook-port={{ .Values.webhook.port }} - -webhook-timeout={{ .Values.webhook.timeout }} - -webhook-namespace-selector={{ .Values.webhook.namespaceSelector }} + - -webhook-object-selector={{ .Values.webhook.objectSelector }} {{- end }} - -enable-resource-quota-enforcement={{ .Values.resourceQuotaEnforcement.enable }} {{- if gt (int .Values.replicaCount) 1 }} diff --git a/charts/spark-operator-chart/values.yaml b/charts/spark-operator-chart/values.yaml index df08008dfc..d9f63b6454 100644 --- a/charts/spark-operator-chart/values.yaml +++ b/charts/spark-operator-chart/values.yaml @@ -103,6 +103,9 @@ webhook: # -- The webhook server will only operate on namespaces with this label, specified in the form key1=value1,key2=value2. # Empty string (default) will operate on all namespaces namespaceSelector: "" + # -- The webhook will only operate on resources with this label/s, specified in the form key1=value1,key2=value2, OR key in (value1,value2). + # Empty string (default) will operate on all objects + objectSelector: "" # -- The annotations applied to init job, required to restore certs deleted by the cleanup job during upgrade timeout: 30 diff --git a/pkg/webhook/webhook.go b/pkg/webhook/webhook.go index a5b54e82c7..2984e4641a 100644 --- a/pkg/webhook/webhook.go +++ b/pkg/webhook/webhook.go @@ -78,6 +78,7 @@ type WebHook struct { serviceRef *arv1.ServiceReference failurePolicy arv1.FailurePolicyType selector *metav1.LabelSelector + objectSelector *metav1.LabelSelector sparkJobNamespace string deregisterOnExit bool enableResourceQuotaEnforcement bool @@ -96,6 +97,7 @@ type webhookFlags struct { webhookPort int webhookFailOnError bool webhookNamespaceSelector string + webhookObjectSelector string } var userConfig webhookFlags @@ -109,6 +111,7 @@ func init() { flag.IntVar(&userConfig.webhookPort, "webhook-port", 8080, "Service port of the webhook server.") flag.BoolVar(&userConfig.webhookFailOnError, "webhook-fail-on-error", false, "Whether Kubernetes should reject requests when the webhook fails.") flag.StringVar(&userConfig.webhookNamespaceSelector, "webhook-namespace-selector", "", "The webhook will only operate on namespaces with this label, specified in the form key1=value1,key2=value2. Required if webhook-fail-on-error is true.") + flag.StringVar(&userConfig.webhookObjectSelector, "webhook-object-selector", "", "The webhook will only operate on pods with this label/s, specified in the form key1=value1,key2=value2, OR key in (value1,value2).") } // New creates a new WebHook instance. 
@@ -119,8 +122,8 @@ func New(
 	deregisterOnExit bool,
 	enableResourceQuotaEnforcement bool,
 	coreV1InformerFactory informers.SharedInformerFactory,
-	webhookTimeout *int) (*WebHook, error) {
-
+	webhookTimeout *int,
+) (*WebHook, error) {
 	certProvider, err := NewCertProvider(
 		userConfig.webhookServiceName,
 		userConfig.webhookServiceNamespace,
@@ -159,13 +162,21 @@ func New(
 			return nil, fmt.Errorf("webhook-namespace-selector must be set when webhook-fail-on-error is true")
 		}
 	} else {
-		selector, err := parseNamespaceSelector(userConfig.webhookNamespaceSelector)
+		selector, err := parseSelector(userConfig.webhookNamespaceSelector)
 		if err != nil {
 			return nil, err
 		}
 		hook.selector = selector
 	}
 
+	if userConfig.webhookObjectSelector != "" {
+		selector, err := metav1.ParseToLabelSelector(userConfig.webhookObjectSelector)
+		if err != nil {
+			return nil, err
+		}
+		hook.objectSelector = selector
+	}
+
 	if enableResourceQuotaEnforcement {
 		hook.resourceQuotaEnforcer = resourceusage.NewResourceQuotaEnforcer(informerFactory, coreV1InformerFactory)
 	}
@@ -180,7 +191,7 @@ func New(
 	return hook, nil
 }
 
-func parseNamespaceSelector(selectorArg string) (*metav1.LabelSelector, error) {
+func parseSelector(selectorArg string) (*metav1.LabelSelector, error) {
 	selector := &metav1.LabelSelector{
 		MatchLabels: make(map[string]string),
 	}
@@ -189,7 +200,7 @@ func parseNamespaceSelector(selectorArg string) (*metav1.LabelSelector, error) {
 	for _, selectorStr := range selectorStrs {
 		kv := strings.SplitN(selectorStr, "=", 2)
 		if len(kv) != 2 || kv[0] == "" || kv[1] == "" {
-			return nil, fmt.Errorf("webhook namespace selector must be in the form key1=value1,key2=value2")
+			return nil, fmt.Errorf("webhook selector must be in the form key1=value1,key2=value2")
 		}
 		selector.MatchLabels[kv[0]] = kv[1]
 	}
@@ -441,6 +452,7 @@ func (wh *WebHook) selfRegistration(webhookConfigName string) error {
 			},
 			FailurePolicy:           &wh.failurePolicy,
 			NamespaceSelector:       wh.selector,
+			ObjectSelector:          wh.objectSelector,
 			TimeoutSeconds:          wh.timeoutSeconds,
 			SideEffects:             &sideEffect,
 			AdmissionReviewVersions: []string{"v1"},
@@ -455,6 +467,7 @@ func (wh *WebHook) selfRegistration(webhookConfigName string) error {
 			},
 			FailurePolicy:           &wh.failurePolicy,
 			NamespaceSelector:       wh.selector,
+			ObjectSelector:          wh.objectSelector,
 			TimeoutSeconds:          wh.timeoutSeconds,
 			SideEffects:             &sideEffect,
 			AdmissionReviewVersions: []string{"v1"},
@@ -587,7 +600,8 @@ func admitScheduledSparkApplications(review *admissionv1.AdmissionReview, enforc
 func mutatePods(
 	review *admissionv1.AdmissionReview,
 	lister crdlisters.SparkApplicationLister,
-	sparkJobNs string) (*admissionv1.AdmissionResponse, error) {
+	sparkJobNs string,
+) (*admissionv1.AdmissionResponse, error) {
 	raw := review.Request.Object.Raw
 	pod := &corev1.Pod{}
 	if err := json.Unmarshal(raw, pod); err != nil {
diff --git a/pkg/webhook/webhook_test.go b/pkg/webhook/webhook_test.go
index 63e7b46f17..6f2e2f088a 100644
--- a/pkg/webhook/webhook_test.go
+++ b/pkg/webhook/webhook_test.go
@@ -23,12 +23,15 @@ import (
 	"time"
 
 	"github.com/stretchr/testify/assert"
-	admissionv1 "k8s.io/api/admission/v1"
+	arv1 "k8s.io/api/admissionregistration/v1"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/equality"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/informers"
+	"k8s.io/client-go/kubernetes/fake"
+	gotest "k8s.io/client-go/testing"
 
 	spov1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
 	crdclientfake "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/fake"
@@ -183,8 +186,93 @@ func serializePod(pod *corev1.Pod) ([]byte, error) {
 	return json.Marshal(pod)
 }
 
+func TestSelfRegistrationWithObjectSelector(t *testing.T) {
+	clientset := fake.NewSimpleClientset()
+	informerFactory := crdinformers.NewSharedInformerFactory(nil, 0)
+	coreV1InformerFactory := informers.NewSharedInformerFactory(nil, 0)
+
+	// Setup userConfig with object selector
+	userConfig.webhookObjectSelector = "spark-role in (driver,executor)"
+	webhookTimeout := 30
+
+	// Create webhook instance
+	webhook, err := New(clientset, informerFactory, "default", false, false, coreV1InformerFactory, &webhookTimeout)
+	assert.NoError(t, err)
+
+	// Mock the clientset's Create function to capture the MutatingWebhookConfiguration object
+	var createdWebhookConfig *arv1.MutatingWebhookConfiguration
+	clientset.PrependReactor("create", "mutatingwebhookconfigurations", func(action gotest.Action) (handled bool, ret runtime.Object, err error) {
+		createAction := action.(gotest.CreateAction)
+		createdWebhookConfig = createAction.GetObject().(*arv1.MutatingWebhookConfiguration)
+		return true, createdWebhookConfig, nil
+	})
+
+	// Call the selfRegistration method
+	err = webhook.selfRegistration("test-webhook-config")
+	assert.NoError(t, err)
+
+	// Verify the MutatingWebhookConfiguration was created with the expected object selector
+	assert.NotNil(t, createdWebhookConfig, "MutatingWebhookConfiguration should have been created")
+
+	expectedSelector := &metav1.LabelSelector{
+		MatchExpressions: []metav1.LabelSelectorRequirement{
+			{
+				Key:      "spark-role",
+				Operator: metav1.LabelSelectorOpIn,
+				Values:   []string{"driver", "executor"},
+			},
+		},
+	}
+	actualSelector := createdWebhookConfig.Webhooks[0].ObjectSelector
+
+	assert.True(t, labelSelectorsEqual(expectedSelector, actualSelector), "ObjectSelectors should be equal")
+}
+
+func labelSelectorsEqual(expected, actual *metav1.LabelSelector) bool {
+	if expected == nil || actual == nil {
+		return expected == nil && actual == nil
+	}
+
+	if len(expected.MatchLabels) != len(actual.MatchLabels) {
+		return false
+	}
+
+	for k, v := range expected.MatchLabels {
+		if actual.MatchLabels[k] != v {
+			return false
+		}
+	}
+
+	if len(expected.MatchExpressions) != len(actual.MatchExpressions) {
+		return false
+	}
+
+	for i, expr := range expected.MatchExpressions {
+		if expr.Key != actual.MatchExpressions[i].Key ||
+			expr.Operator != actual.MatchExpressions[i].Operator ||
+			!equalStringSlices(expr.Values, actual.MatchExpressions[i].Values) {
+			return false
+		}
+	}
+
+	return true
+}
+
+func equalStringSlices(a, b []string) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	for i := range a {
+		if a[i] != b[i] {
+			return false
+		}
+	}
+	return true
+}
+
 func testSelector(input string, expected *metav1.LabelSelector, t *testing.T) {
-	selector, err := parseNamespaceSelector(input)
+	selector, err := parseSelector(input)
+
 	if expected == nil {
 		if err == nil {
 			t.Errorf("Expected error parsing '%s', but got %v", input, selector)

From d37ac384d4bf98fd20ea51532510f175bd411323 Mon Sep 17 00:00:00 2001
From: Vikas Saxena <90456542+vikas-saxena02@users.noreply.github.com>
Date: Wed, 19 Jun 2024 03:09:58 +1000
Subject: [PATCH 70/87] Modified README.MD as per changes discussed on https://github.com/kubeflow/spark-operator/pull/2062 (#2066)

Signed-off-by: Vikas Saxena
---
 README.md | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index ab81ee6857..d708bef880 100644
--- a/README.md
+++ b/README.md
@@ -91,5 +91,6 @@ Please check [CONTRIBUTING.md](CONTRIBUTING.md) and the [Developer Guide](docs/d
 
 ## Community
 
-* Join our [Kubeflow Slack Channel](https://kubeflow.slack.com/archives/C06627U3XU3)
-* Check out [who is using the Kubernetes Operator for Apache Spark](docs/who-is-using.md).
+* Join the [CNCF Slack Channel](https://www.kubeflow.org/docs/about/community/#kubeflow-slack-channels) and then join ```#kubeflow-spark-operator``` Channel.
+* Check out our blog post [Announcing the Kubeflow Spark Operator: Building a Stronger Spark on Kubernetes Community](https://blog.kubeflow.org/operators/2024/04/15/kubeflow-spark-operator.html)
+* Check out [who is using the Kubernetes Operator for Apache Spark](docs/who-is-using.md).
\ No newline at end of file

From 012b52ae819c0a9d46a2f64fca2884390ad5680e Mon Sep 17 00:00:00 2001
From: Jacob Salway
Date: Sat, 22 Jun 2024 10:21:07 +1000
Subject: [PATCH 71/87] Remove .gitlab-ci.yml (#2069)

Signed-off-by: Jacob Salway
---
 .gitlab-ci.yml | 30 ------------------------------
 1 file changed, 30 deletions(-)
 delete mode 100644 .gitlab-ci.yml

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
deleted file mode 100644
index b007a3d2a2..0000000000
--- a/.gitlab-ci.yml
+++ /dev/null
@@ -1,30 +0,0 @@
-stages:
-- build
-
-variables:
-  DEP_VERSION: "0.5.3"
-
-build:
-  stage: build
-  image: docker:stable
-  services:
-    - docker:dind
-  before_script:
-    - apk --no-cache add git
-  variables:
-    DOCKER_HOST: tcp://docker:2375
-  script:
-    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY ;
-    - >
-      if [ "${SPARK_REGISTRY}" != "" -a "${SPARK_VERSION}" != "" ] ; then
-        tagStamp=$(git describe --tags --dirty)_${SPARK_VERSION}
-        echo Using SPARK_IMAGE ${SPARK_REGISTRY}:${SPARK_VERSION}
-        echo CI_REGISTRY_IMAGE_TAG is ${CI_REGISTRY_IMAGE}/spark-operator:${tagStamp}
-        docker build --build-arg SPARK_IMAGE=${SPARK_REGISTRY}:${SPARK_VERSION} -t ${CI_REGISTRY_IMAGE}/spark-operator:${tagStamp} .
-      else
-        tagStamp=$(git describe --tags --dirty) ; echo tagStamp is ${tagStamp} ;
-        echo CI_REGISTRY_IMAGE_TAG is ${CI_REGISTRY_IMAGE}/spark-operator:${tagStamp}
-        docker build -t ${CI_REGISTRY_IMAGE}/spark-operator:${tagStamp} .
-      fi
-    - time docker push ${CI_REGISTRY_IMAGE}/spark-operator:${tagStamp}
-    - docker images

From 4bb4b5c8659a8d6201b3456830a2f3dc08ec340d Mon Sep 17 00:00:00 2001
From: Yi Chen
Date: Thu, 27 Jun 2024 02:38:10 +0800
Subject: [PATCH 72/87] Add code of conduct and update contributor guide (#2074)

* Add CNCF Code of Conduct

Signed-off-by: Yi Chen

* Update contributing guide

Signed-off-by: Yi Chen

* Redirect links to kubeflow website

Signed-off-by: Yi Chen

---------

Signed-off-by: Yi Chen
---
 CODE_OF_CONDUCT.md |  3 +++
 CONTRIBUTING.md    | 16 ++++++++--------
 2 files changed, 11 insertions(+), 8 deletions(-)
 create mode 100644 CODE_OF_CONDUCT.md

diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..858136bea9
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1,3 @@
+# Code of Conduct
+
+For the code of conduct, please refer to the [Kubeflow Community Code of Conduct](https://www.kubeflow.org/docs/about/contributing/#follow-the-code-of-conduct).
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index c725819882..fc80d6df34 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,11 +1,11 @@
-# How to Contribute
+# Contributing to Kubeflow Spark Operator
 
-We'd love to accept your patches and contributions to this project. There are
-just a few small guidelines you need to follow.
+Welcome to the Kubeflow Spark Operator project. We'd love to accept your patches and contributions to this project. For detailed information about how to contribute to Kubeflow, please refer to [Contributing to Kubeflow](https://www.kubeflow.org/docs/about/contributing/).
 
-## Code reviews
+## Developer Guide
 
-All submissions, including submissions by project members, require review. We
-use GitHub pull requests for this purpose. Consult
-[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
-information on using pull requests.
+For how to develop with the Spark operator, please refer to [Developer Guide](https://www.kubeflow.org/docs/components/spark-operator/developer-guide/).
+
+## Code Reviews
+
+All submissions, including submissions by project members, require review. We use GitHub pull requests for this purpose. Consult [GitHub Help](https://help.github.com/articles/about-pull-requests/) for more information on using pull requests.

From 16cd35a0a220da0304383517c73a5fdb36f4213c Mon Sep 17 00:00:00 2001
From: Yi Chen
Date: Thu, 27 Jun 2024 19:05:11 +0800
Subject: [PATCH 73/87] Update README and documentation (#2047)

* Update docs

Signed-off-by: Yi Chen

* Remove docs and update README

Signed-off-by: Yi Chen

* Add link to monthly community meeting

Signed-off-by: Yi Chen

---------

Signed-off-by: Yi Chen
---
 docs/who-is-using.md => ADOPTERS.md |  70 +--
 README.md                           |  81 ++-
 docs/_config.yml                    |   1 -
 docs/architecture-diagram.png       | Bin 52534 -> 0 bytes
 docs/design.md                      |  61 --
 docs/developer-guide.md             | 176 ------
 docs/gcp.md                         |  79 ---
 docs/quick-start-guide.md           | 351 ------------
 docs/user-guide.md                  | 848 ----------------------------
 docs/volcano-integration.md         |  94 ---
 10 files changed, 71 insertions(+), 1690 deletions(-)
 rename docs/who-is-using.md => ADOPTERS.md (91%)
 delete mode 100644 docs/_config.yml
 delete mode 100644 docs/architecture-diagram.png
 delete mode 100644 docs/design.md
 delete mode 100644 docs/developer-guide.md
 delete mode 100644 docs/gcp.md
 delete mode 100644 docs/quick-start-guide.md
 delete mode 100644 docs/user-guide.md
 delete mode 100644 docs/volcano-integration.md

diff --git a/docs/who-is-using.md b/ADOPTERS.md
similarity index 91%
rename from docs/who-is-using.md
rename to ADOPTERS.md
index 04ed6cf755..bf7df2a030 100644
--- a/docs/who-is-using.md
+++ b/ADOPTERS.md
@@ -1,48 +1,50 @@
-## Who Is Using the Kubernetes Operator for Apache Spark?
+# Adopters of Kubeflow Spark Operator
+
+Below are the adopters of project Spark Operator. If you are using Spark Operator please add yourself into the following list by a pull request. Please keep the list in alphabetical order.
 
 | Organization | Contact (GitHub User Name) | Environment | Description of Use |
 | ------------- | ------------- | ------------- | ------------- |
-| [Caicloud](https://intl.caicloud.io/) | @gaocegege | Production | Cloud-Native AI Platform |
-| Microsoft (MileIQ) | @dharmeshkakadia | Production | AI & Analytics |
-| Lightbend | @yuchaoran2011 | Production | Data Infrastructure & Operations |
-| StackTome | @emiliauskas-fuzzy | Production | Data pipelines |
-| Salesforce | @khogeland | Production | Data transformation |
+| [Beeline](https://beeline.ru) | @spestua | Evaluation | ML & Data Infrastructure |
 | Bringg | @EladDolev | Production | ML & Analytics Data Platform |
-| [Siigo](https://www.siigo.com) | @Juandavi1 | Production | Data Migrations & Analytics Data Platform |
+| [Caicloud](https://intl.caicloud.io/) | @gaocegege | Production | Cloud-Native AI Platform |
+| Carrefour | @AliGouta | Production | Data Platform |
 | CERN|@mrow4a| Evaluation | Data Mining & Analytics |
-| Lyft |@kumare3| Evaluation | ML & Data Infrastructure |
-| MapR Technologies |@sarjeet2013| Evaluation | ML/AI & Analytics Data Platform |
-| Uber| @chenqin| Evaluation| Spark / ML |
-| HashmapInc| @prem0132 | Evaluation | Analytics Data Platform |
-| Tencent | @runzhliu | Evaluation | ML Analytics Platform |
-| Exacaster | @minutis | Evaluation | Data pipelines |
-| Riskified | @henbh | Evaluation | Analytics Data Platform |
+| [CloudPhysics](https://www.cloudphysics.com) | @jkleckner | Production | ML/AI & Analytics |
 | CloudZone | @iftachsc | Evaluation | Big Data Analytics Consultancy |
 | Cyren | @avnerl | Evaluation | Data pipelines |
-| Shell (Agile Hub) | @TomLous | Production | Data pipelines |
-| Nielsen Identity Engine | @roitvt | Evaluation | Data pipelines |
+| [C2FO](https://www.c2fo.com/) | @vanhoale | Production | Data Platform / Data Infrastructure |
 | [Data Mechanics](https://www.datamechanics.co) | @jrj-d | Production | Managed Spark Platform |
-| [PUBG](https://careers.pubg.com/#/en/) | @jacobhjkim | Production | ML & Data Infrastructure |
-| [Beeline](https://beeline.ru) | @spestua | Evaluation | ML & Data Infrastructure |
-| [Stitch Fix](https://multithreaded.stitchfix.com/) | @nssalian | Evaluation | Data pipelines |
-| [Typeform](https://typeform.com/) | @afranzi | Production | Data & ML pipelines |
-| incrmntal(https://incrmntal.com/) | @scravy | Production | ML & Data Infrastructure |
-| [CloudPhysics](https://www.cloudphysics.com) | @jkleckner | Production | ML/AI & Analytics |
-| [MongoDB](https://www.mongodb.com) | @chickenpopcorn | Production | Data Infrastructure |
-| [MavenCode](https://www.mavencode.com) | @charlesa101 | Production | MLOps & Data Infrastructure |
-| [Gojek](https://www.gojek.io/) | @pradithya | Production | Machine Learning Platform |
-| Fossil | @duyet | Production | Data Platform |
-| Carrefour | @AliGouta | Production | Data Platform |
-| Scaling Smart | @tarek-izemrane | Evaluation | Data Platform |
-| [Tongdun](https://www.tongdun.net/) | @lomoJG | Production | AI/ML & Analytics |
-| [Totvs Labs](https://www.totvslabs.com) | @luizm | Production | Data Platform |
-| [DiDi](https://www.didiglobal.com) | @Run-Lin | Evaluation | Data Infrastructure |
 | [DeepCure](https://www.deepcure.ai) | @mschroering | Production | Spark / ML |
-| [C2FO](https://www.c2fo.com/) | @vanhoale | Production | Data Platform / Data Infrastructure |
-| [Timo](https://timo.vn) | @vanducng | Production | Data Platform |
+| [DiDi](https://www.didiglobal.com) | @Run-Lin | Evaluation | Data Infrastructure |
+| Exacaster | @minutis | Evaluation | Data pipelines |
+| Fossil | @duyet | Production | Data Platform |
+| [Gojek](https://www.gojek.io/) | @pradithya | Production | Machine Learning Platform |
+| HashmapInc| @prem0132 | Evaluation | Analytics Data Platform |
+| [incrmntal](https://incrmntal.com/) | @scravy | Production | ML & Data Infrastructure |
+| [Inter&Co](https://inter.co/) | @ignitz | Production | Data pipelines |
 | [Kognita](https://kognita.com.br/) | @andreclaudino | Production | MLOps, Data Platform / Data Infrastructure, ML/AI |
+| Lightbend | @yuchaoran2011 | Production | Data Infrastructure & Operations |
+| Lyft |@kumare3| Evaluation | ML & Data Infrastructure |
+| MapR Technologies |@sarjeet2013| Evaluation | ML/AI & Analytics Data Platform |
+| [MavenCode](https://www.mavencode.com) | @charlesa101 | Production | MLOps & Data Infrastructure |
+| Microsoft (MileIQ) | @dharmeshkakadia | Production | AI & Analytics |
 | [Molex](https://www.molex.com/) | @AshishPushpSingh | Evaluation/Production | Data Platform |
+| [MongoDB](https://www.mongodb.com) | @chickenpopcorn | Production | Data Infrastructure |
+| Nielsen Identity Engine | @roitvt | Evaluation | Data pipelines |
+| [PUBG](https://careers.pubg.com/#/en/) | @jacobhjkim | Production | ML & Data Infrastructure |
 | [Qualytics](https://www.qualytics.co/) | @josecsotomorales | Production | Data Quality Platform |
+| Riskified | @henbh | Evaluation | Analytics Data Platform |
 | [Roblox](https://www.roblox.com/) | @matschaffer-roblox | Evaluation | Data Infrastructure |
 | [Rokt](https://www.rokt.com) | @jacobsalway | Production | Data Infrastructure |
+| Salesforce | @khogeland | Production | Data transformation |
+| Scaling Smart | @tarek-izemrane | Evaluation | Data Platform |
+| Shell (Agile Hub) | @TomLous | Production | Data pipelines |
+| [Siigo](https://www.siigo.com) | @Juandavi1 | Production | Data Migrations & Analytics Data Platform |
+| StackTome | @emiliauskas-fuzzy | Production | Data pipelines |
+| [Stitch Fix](https://multithreaded.stitchfix.com/) | @nssalian | Evaluation | Data pipelines |
+| Tencent | @runzhliu | Evaluation | ML Analytics Platform |
+| [Timo](https://timo.vn) | @vanducng | Production | Data Platform |
+| [Tongdun](https://www.tongdun.net/) | @lomoJG | Production | AI/ML & Analytics |
+| [Totvs Labs](https://www.totvslabs.com) | @luizm | Production | Data Platform |
+| [Typeform](https://typeform.com/) | @afranzi | Production | Data & ML pipelines |
+| Uber| @chenqin| Evaluation| Spark / ML |
diff --git a/README.md b/README.md
index d708bef880..48d92caa29 100644
--- a/README.md
+++ b/README.md
@@ -1,10 +1,15 @@
 # Kubeflow Spark Operator
+
 [![Go Report Card](https://goreportcard.com/badge/github.com/kubeflow/spark-operator)](https://goreportcard.com/report/github.com/kubeflow/spark-operator)
 
-## Overview
+## What is Spark Operator?
+
 The Kubernetes Operator for Apache Spark aims to make specifying and running [Spark](https://github.com/apache/spark) applications as easy and idiomatic as running other workloads on Kubernetes. It uses
-[Kubernetes custom resources](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/)
-for specifying, running, and surfacing status of Spark applications. For a complete reference of the custom resource definitions, please refer to the [API Definition](docs/api-docs.md). For details on its design, please refer to the [design doc](docs/design.md). It requires Spark 2.3 and above that supports Kubernetes as a native scheduler backend.
+[Kubernetes custom resources](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) for specifying, running, and surfacing status of Spark applications.
+
+## Overview
+
+For a complete reference of the custom resource definitions, please refer to the [API Definition](docs/api-docs.md). For details on its design, please refer to the [Architecture](https://www.kubeflow.org/docs/components/spark-operator/overview/#architecture). It requires Spark 2.3 and above that supports Kubernetes as a native scheduler backend.
 
 The Kubernetes Operator for Apache Spark currently supports the following list of features:
@@ -28,69 +33,53 @@ The Kubernetes Operator for Apache Spark currently supports the following list o
 
 **If you are currently using the `v1beta1` version of the APIs in your manifests, please update them to use the `v1beta2` version by changing `apiVersion: "sparkoperator.k8s.io/"` to `apiVersion: "sparkoperator.k8s.io/v1beta2"`. You will also need to delete the `previous` version of the CustomResourceDefinitions named `sparkapplications.sparkoperator.k8s.io` and `scheduledsparkapplications.sparkoperator.k8s.io`, and replace them with the `v1beta2` version either by installing the latest version of the operator or by running `kubectl create -f manifest/crds`.**
 
-Customization of Spark pods, e.g., mounting arbitrary volumes and setting pod affinity, is implemented using a Kubernetes [Mutating Admission Webhook](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/), which became beta in Kubernetes 1.9. The mutating admission webhook is disabled by default if you install the operator using the Helm [chart](charts/spark-operator-chart). Check out the [Quick Start Guide](docs/quick-start-guide.md#using-the-mutating-admission-webhook) on how to enable the webhook.
-
 ## Prerequisites
 
 * Version >= 1.13 of Kubernetes to use the [`subresource` support for CustomResourceDefinitions](https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#subresources), which became beta in 1.13 and is enabled by default in 1.13 and higher.
 * Version >= 1.16 of Kubernetes to use the `MutatingWebhook` and `ValidatingWebhook` of `apiVersion: admissionregistration.k8s.io/v1`.
 
-## Installation
+## Getting Started
 
-The easiest way to install the Kubernetes Operator for Apache Spark is to use the Helm [chart](charts/spark-operator-chart/).
+For getting started with Spark operator, please refer to [Getting Started](https://www.kubeflow.org/docs/components/spark-operator/getting-started/).
 
-```bash
-$ helm repo add spark-operator https://kubeflow.github.io/spark-operator
+## User Guide
 
-$ helm install my-release spark-operator/spark-operator --namespace spark-operator --create-namespace
-```
+For detailed user guide and API documentation, please refer to [User Guide](https://www.kubeflow.org/docs/components/spark-operator/user-guide/) and [API Specification](docs/api-docs.md).
 
-This will install the Kubernetes Operator for Apache Spark into the namespace `spark-operator`. The operator by default watches and handles `SparkApplication`s in every namespaces. If you would like to limit the operator to watch and handle `SparkApplication`s in a single namespace, e.g., `default` instead, add the following option to the `helm install` command:
-
-```
---set "sparkJobNamespaces={default}"
-```
-
-For configuration options available in the Helm chart, please refer to the chart's [README](charts/spark-operator-chart/README.md).
+If you are running Spark operator on Google Kubernetes Engine (GKE) and want to use Google Cloud Storage (GCS) and/or BigQuery for reading/writing data, also refer to the [GCP guide](https://www.kubeflow.org/docs/components/spark-operator/user-guide/gcp/).
 
 ## Version Matrix
 
 The following table lists the most recent few versions of the operator.
 
-| Operator Version | API Version | Kubernetes Version | Base Spark Version | Operator Image Tag |
-| ------------- | ------------- | ------------- | ------------- | ------------- |
-| `latest` (master HEAD) | `v1beta2` | 1.13+ | `3.0.0` | `latest` |
-| `v1beta2-1.3.3-3.1.1` | `v1beta2` | 1.16+ | `3.1.1` | `v1beta2-1.3.3-3.1.1` |
-| `v1beta2-1.3.2-3.1.1` | `v1beta2` | 1.16+ | `3.1.1` | `v1beta2-1.3.2-3.1.1` |
-| `v1beta2-1.3.0-3.1.1` | `v1beta2` | 1.16+ | `3.1.1` | `v1beta2-1.3.0-3.1.1` |
-| `v1beta2-1.2.3-3.1.1` | `v1beta2` | 1.13+ | `3.1.1` | `v1beta2-1.2.3-3.1.1` |
-| `v1beta2-1.2.0-3.0.0` | `v1beta2` | 1.13+ | `3.0.0` | `v1beta2-1.2.0-3.0.0` |
-| `v1beta2-1.1.2-2.4.5` | `v1beta2` | 1.13+ | `2.4.5` | `v1beta2-1.1.2-2.4.5` |
-| `v1beta2-1.0.1-2.4.4` | `v1beta2` | 1.13+ | `2.4.4` | `v1beta2-1.0.1-2.4.4` |
-| `v1beta2-1.0.0-2.4.4` | `v1beta2` | 1.13+ | `2.4.4` | `v1beta2-1.0.0-2.4.4` |
-| `v1beta1-0.9.0` | `v1beta1` | 1.13+ | `2.4.0` | `v2.4.0-v1beta1-0.9.0` |
-
-When installing using the Helm chart, you can choose to use a specific image tag instead of the default one, using the following option:
+| Operator Version | API Version | Kubernetes Version | Base Spark Version |
+| ------------- | ------------- | ------------- | ------------- |
+| `v1beta2-1.6.x-3.5.0` | `v1beta2` | 1.16+ | `3.5.0` |
+| `v1beta2-1.5.x-3.5.0` | `v1beta2` | 1.16+ | `3.5.0` |
+| `v1beta2-1.4.x-3.5.0` | `v1beta2` | 1.16+ | `3.5.0` |
+| `v1beta2-1.3.x-3.1.1` | `v1beta2` | 1.16+ | `3.1.1` |
+| `v1beta2-1.2.3-3.1.1` | `v1beta2` | 1.13+ | `3.1.1` |
+| `v1beta2-1.2.2-3.0.0` | `v1beta2` | 1.13+ | `3.0.0` |
+| `v1beta2-1.2.1-3.0.0` | `v1beta2` | 1.13+ | `3.0.0` |
+| `v1beta2-1.2.0-3.0.0` | `v1beta2` | 1.13+ | `3.0.0` |
+| `v1beta2-1.1.x-2.4.5` | `v1beta2` | 1.13+ | `2.4.5` |
+| `v1beta2-1.0.x-2.4.4` | `v1beta2` | 1.13+ | `2.4.4` |
 
-```
---set image.tag=
-```
+## Developer Guide
 
-## Get Started
+For developing with Spark Operator, please refer to [Developer Guide](https://www.kubeflow.org/docs/components/spark-operator/developer-guide/).
 
-Get started quickly with the Kubernetes Operator for Apache Spark using the [Quick Start Guide](docs/quick-start-guide.md).
+## Contributor Guide
 
-If you are running the Kubernetes Operator for Apache Spark on Google Kubernetes Engine and want to use Google Cloud Storage (GCS) and/or BigQuery for reading/writing data, also refer to the [GCP guide](docs/gcp.md).
+For contributing to Spark Operator, please refer to [Contributor Guide](CONTRIBUTING.md).
 
-For more information, check the [Design](docs/design.md), [API Specification](docs/api-docs.md) and detailed [User Guide](docs/user-guide.md).
 
-## Contributing
+## Community
 
-Please check [CONTRIBUTING.md](CONTRIBUTING.md) and the [Developer Guide](docs/developer-guide.md) out.
+* Join the [CNCF Slack Channel](https://www.kubeflow.org/docs/about/community/#kubeflow-slack-channels) and then join `#kubeflow-spark-operator` Channel.
+* Check out our blog post [Announcing the Kubeflow Spark Operator: Building a Stronger Spark on Kubernetes Community](https://blog.kubeflow.org/operators/2024/04/15/kubeflow-spark-operator.html).
+* Join our monthly community meeting [Kubeflow Spark Operator Meeting Notes](https://bit.ly/3VGzP4n).
 
-## Community
+## Adopters
 
-* Join the [CNCF Slack Channel](https://www.kubeflow.org/docs/about/community/#kubeflow-slack-channels) and then join ```#kubeflow-spark-operator``` Channel.
-* Check out our blog post [Announcing the Kubeflow Spark Operator: Building a Stronger Spark on Kubernetes Community](https://blog.kubeflow.org/operators/2024/04/15/kubeflow-spark-operator.html)
-* Check out [who is using the Kubernetes Operator for Apache Spark](docs/who-is-using.md).
\ No newline at end of file
+Check out [adopters of Spark Operator](ADOPTERS.md).
diff --git a/docs/_config.yml b/docs/_config.yml
deleted file mode 100644
index 259a24e4d2..0000000000
--- a/docs/_config.yml
+++ /dev/null
@@ -1 +0,0 @@
-theme: jekyll-theme-tactile
\ No newline at end of file
diff --git a/docs/architecture-diagram.png b/docs/architecture-diagram.png
deleted file mode 100644
index 3503ebb486adcd425188f1dbc9588e3dd10b24db..0000000000000000000000000000000000000000
GIT binary patch
[52,534 bytes of base85-encoded binary delta for the deleted architecture diagram image omitted]
z*>%bNjO=_De)=k6Bf#i9nx|G?1f{zeN)^^=$%g)tB|ZG=A@$1Da0WSUtGDt-@pB

diff --git a/docs/developer-guide.md b/docs/developer-guide.md
deleted file mode 100644
--- a/docs/developer-guide.md
+++ /dev/null
-The operator image is built upon a base Spark image that defaults to `spark:3.5.0`. If you want to use your own Spark image (e.g., an image with a different version of Spark or some custom dependencies), specify the argument `SPARK_IMAGE` as the following example shows:
-
-```bash
-docker build --build-arg SPARK_IMAGE=<your-spark-image> -t <image-tag> .
-```
-
-If you want to use the operator on OpenShift clusters, first make sure you have Docker version 18.09.3 or above, then build your operator image using the [OpenShift-specific Dockerfile](../Dockerfile.rh).
-
-```bash
-export DOCKER_BUILDKIT=1
-docker build -t <image-tag> -f Dockerfile.rh .
-``` - -If you'd like to build/test the spark-operator locally, follow the instructions below: - -```bash -mkdir -p $GOPATH/src/github.com/kubeflow -cd $GOPATH/src/github.com/kubeflow -git clone git@github.com:kubeflow/spark-operator.git -cd spark-operator -``` - -To update the auto-generated code, run the following command. (This step is only required if the CRD types have been changed): - -```bash -hack/update-codegen.sh -``` - -To update the auto-generated CRD definitions, run the following command. After doing so, you must update the list of required fields under each `ports` field to add the `protocol` field to the list. Skipping this step will make the CRDs incompatible with Kubernetes v1.18+. - -```bash -GO111MODULE=off go get -u sigs.k8s.io/controller-tools/cmd/controller-gen -controller-gen crd:trivialVersions=true,maxDescLen=0,crdVersions=v1beta1 paths="./pkg/apis/sparkoperator.k8s.io/v1beta2" output:crd:artifacts:config=./manifest/crds/ -``` - -You can verify the current auto-generated code is up to date with: - -```bash -hack/verify-codegen.sh -``` - -To build the operator, run the following command: - -```bash -GOOS=linux go build -o spark-operator -``` - -To run unit tests, run the following command: - -```bash -go test ./... -``` - -## Build the API Specification Doc - -When you update the API, or specifically the `SparkApplication` and `ScheduledSparkApplication` specifications, the API specification doc needs to be updated. To update the API specification doc, run the following command: - -```bash -make build-api-docs -``` - -Running the above command will update the file `docs/api-docs.md`. - -## Develop with the Helm Chart - -### Run helm chart lint - -```shell -$ make helm-lint -Linting charts... - ------------------------------------------------------------------------------------------------------------------------- - Charts to be processed: ------------------------------------------------------------------------------------------------------------------------- - spark-operator => (version: "1.2.4", path: "charts/spark-operator-chart") ------------------------------------------------------------------------------------------------------------------------- - -Linting chart "spark-operator => (version: \"1.2.4\", path: \"charts/spark-operator-chart\")" -Checking chart "spark-operator => (version: \"1.2.4\", path: \"charts/spark-operator-chart\")" for a version bump... -Old chart version: 1.2.1 -New chart version: 1.2.4 -Chart version ok. -Validating /Users/user/go/src/github.com/kubeflow/spark-operator/charts/spark-operator-chart/Chart.yaml... -Validation success! 👍 -Validating maintainers... - -Linting chart with values file "charts/spark-operator-chart/ci/ci-values.yaml"... 
-
-==> Linting charts/spark-operator-chart
-[INFO] Chart.yaml: icon is recommended
-
-1 chart(s) linted, 0 chart(s) failed
-
------------------------------------------------------------------------------------------------------------------------
- ✔︎ spark-operator => (version: "1.2.4", path: "charts/spark-operator-chart")
------------------------------------------------------------------------------------------------------------------------
-All charts linted successfully
-```
-
-### Run helm chart unit tests
-
-First, you need to install the helm chart unit test plugin as follows:
-
-```shell
-helm plugin install https://github.com/helm-unittest/helm-unittest.git
-```
-
-For more information about how to write helm chart unit tests, please refer to [helm-unittest](https://github.com/helm-unittest/helm-unittest).
-
-Then, run `make helm-unittest` to run the helm chart unit tests:
-
-```shell
-$ make helm-unittest
-
-### Chart [ spark-operator ] charts/spark-operator-chart
-
- PASS Test spark operator deployment charts/spark-operator-chart/tests/deployment_test.yaml
- PASS Test spark operator rbac charts/spark-operator-chart/tests/rbac_test.yaml
- PASS Test spark operator service account charts/spark-operator-chart/tests/serviceaccount_test.yaml
- PASS Test spark rbac charts/spark-operator-chart/tests/spark-rbac_test.yaml
- PASS Test spark service account charts/spark-operator-chart/tests/spark-serviceaccount_test.yaml
- PASS Test spark operator webhook service charts/spark-operator-chart/tests/webhook-service_test.yaml
-
-Charts: 1 passed, 1 total
-Test Suites: 6 passed, 6 total
-Tests: 46 passed, 46 total
-Snapshot: 0 passed, 0 total
-Time: 107.861083ms
-```
-
-### Build the Helm Docs
-
-The Helm chart `README.md` file is generated by the [helm-docs](https://github.com/norwoodj/helm-docs) tool. If you want to update the Helm docs, remember to modify `README.md.gotmpl` rather than `README.md`, then run `make helm-docs` to generate the `README.md` file:
-
-```shell
-$ make helm-docs
-INFO[2024-04-14T07:29:26Z] Found Chart directories [charts/spark-operator-chart]
-INFO[2024-04-14T07:29:26Z] Generating README Documentation for chart charts/spark-operator-chart
-```
-
-Note that if git pre-commit hooks are set up, `helm-docs` will automatically run before committing any changes. If there are any changes to the `README.md` file, the commit process will be aborted.
diff --git a/docs/gcp.md b/docs/gcp.md
deleted file mode 100644
index 9e6f0f3e5e..0000000000
--- a/docs/gcp.md
+++ /dev/null
@@ -1,79 +0,0 @@
-# Integration with Google Cloud Storage and BigQuery
-
-This document describes how to use Google Cloud services, e.g., Google Cloud Storage (GCS) and BigQuery, as data sources
-or sinks in `SparkApplication`s. For a detailed tutorial on building Spark applications that access GCS and BigQuery,
-please refer to [Using Spark on Kubernetes Engine to Process Data in BigQuery](https://cloud.google.com/solutions/spark-on-kubernetes-engine).
-
-A Spark application requires the [GCS](https://cloud.google.com/dataproc/docs/concepts/connectors/cloud-storage) and
-[BigQuery](https://cloud.google.com/dataproc/docs/concepts/connectors/bigquery) connectors to access GCS and BigQuery
-using the Hadoop `FileSystem` API. One way to make the connectors available to the driver and executors is to use a
-custom Spark image with the connectors built-in, as this example [Dockerfile](https://github.com/GoogleCloudPlatform/spark-on-k8s-gcp-examples/blob/master/dockerfiles/spark-gcs/Dockerfile) shows.
-An image built from this Dockerfile is located at `gcr.io/ynli-k8s/spark:v2.3.0-gcs`.
-
-The connectors require certain Hadoop properties to be set properly to function. Setting Hadoop properties can be done
-either through a custom Hadoop configuration file, namely, `core-site.xml` in a custom image, or via the `spec.hadoopConf`
-section in a `SparkApplication`. The example Dockerfile mentioned above shows the use of a custom `core-site.xml` and a
-custom `spark-env.sh` that points the environment variable `HADOOP_CONF_DIR` to the directory in the container where
-`core-site.xml` is located. The example `core-site.xml` and `spark-env.sh` can be found
-[here](https://github.com/GoogleCloudPlatform/spark-on-k8s-gcp-examples/tree/master/conf).
-
-The GCS and BigQuery connectors need to authenticate with the GCS and BigQuery services before they can use the services.
-The connectors support using a [GCP service account JSON key file](https://cloud.google.com/iam/docs/creating-managing-service-account-keys)
-for authentication. The service account must have the necessary IAM roles for accessing GCS and/or BigQuery granted. The
-[tutorial](https://cloud.google.com/solutions/spark-on-kubernetes-engine) has detailed information on how to create a
-service account, grant it the right roles, furnish a key, and download a JSON key file. To tell the connectors to use
-a service account JSON key file for authentication, the following Hadoop configuration properties
-must be set:
-
-```
-google.cloud.auth.service.account.enable=true
-google.cloud.auth.service.account.json.keyfile=<path to the service account JSON key file>
-```
-
-The most common way of getting the service account JSON key file into the driver and executor containers is to mount the key
-file in through a Kubernetes secret volume. Detailed information on how to create a secret can be found in the
-[tutorial](https://cloud.google.com/solutions/spark-on-kubernetes-engine).
-
-Below is an example `SparkApplication` using the custom image at `gcr.io/ynli-k8s/spark:v2.3.0-gcs` with the GCS/BigQuery
-connectors and the custom Hadoop configuration files above built-in. Note that some of the necessary Hadoop configuration
-properties are set using `spec.hadoopConf`. Those Hadoop configuration properties are in addition to the ones set in the
-built-in `core-site.xml`. They are set here instead of in `core-site.xml` because of their application-specific nature.
-The ones set in `core-site.xml` apply to all applications using the image. Also note how the Kubernetes secret named
-`gcs-bq` that stores the service account JSON key file gets mounted into both the driver and executors. The environment
-variable `GCS_PROJECT_ID` must be set when using the image at `gcr.io/ynli-k8s/spark:v2.3.0-gcs`.
-
-```yaml
-apiVersion: "sparkoperator.k8s.io/v1beta2"
-kind: SparkApplication
-metadata:
-  name: foo-gcs-bg
-spec:
-  type: Java
-  mode: cluster
-  image: gcr.io/ynli-k8s/spark:v2.3.0-gcs
-  imagePullPolicy: Always
-  hadoopConf:
-    "fs.gs.project.id": "foo"
-    "fs.gs.system.bucket": "foo-bucket"
-    "google.cloud.auth.service.account.enable": "true"
-    "google.cloud.auth.service.account.json.keyfile": "/mnt/secrets/key.json"
-  driver:
-    cores: 1
-    secrets:
-      - name: "gcs-bq"
-        path: "/mnt/secrets"
-        secretType: GCPServiceAccount
-    envVars:
-      GCS_PROJECT_ID: foo
-    serviceAccount: spark
-  executor:
-    instances: 2
-    cores: 1
-    memory: "512m"
-    secrets:
-      - name: "gcs-bq"
-        path: "/mnt/secrets"
-        secretType: GCPServiceAccount
-    envVars:
-      GCS_PROJECT_ID: foo
-```
diff --git a/docs/quick-start-guide.md b/docs/quick-start-guide.md
deleted file mode 100644
index e7045c0820..0000000000
--- a/docs/quick-start-guide.md
+++ /dev/null
@@ -1,351 +0,0 @@
-# Quick Start Guide
-
-For a more detailed guide on how to use, compose, and work with `SparkApplication`s, please refer to the
-[User Guide](user-guide.md). If you are running the Kubernetes Operator for Apache Spark on Google Kubernetes Engine and want to use Google Cloud Storage (GCS) and/or BigQuery for reading/writing data, also refer to the [GCP guide](gcp.md). The Kubernetes Operator for Apache Spark will simply be referred to as the operator for the rest of this guide.
-
-## Table of Contents
-- [Quick Start Guide](#quick-start-guide)
-  - [Table of Contents](#table-of-contents)
-  - [Installation](#installation)
-  - [Running the Examples](#running-the-examples)
-  - [Configuration](#configuration)
-  - [Upgrade](#upgrade)
-  - [About the Spark Job Namespace](#about-the-spark-job-namespace)
-  - [About the Service Account for Driver Pods](#about-the-service-account-for-driver-pods)
-  - [About the Service Account for Executor Pods](#about-the-service-account-for-executor-pods)
-  - [Enable Metric Exporting to Prometheus](#enable-metric-exporting-to-prometheus)
-    - [Spark Application Metrics](#spark-application-metrics)
-    - [Work Queue Metrics](#work-queue-metrics)
-  - [Driver UI Access and Ingress](#driver-ui-access-and-ingress)
-  - [About the Mutating Admission Webhook](#about-the-mutating-admission-webhook)
-    - [Mutating Admission Webhooks on a private GKE or EKS cluster](#mutating-admission-webhooks-on-a-private-gke-or-eks-cluster)
-
-## Installation
-
-To install the operator, use the Helm [chart](../charts/spark-operator-chart).
-
-```bash
-$ helm repo add spark-operator https://kubeflow.github.io/spark-operator
-
-$ helm install my-release spark-operator/spark-operator --namespace spark-operator --create-namespace
-```
-
-Installing the chart will create a namespace `spark-operator` if it doesn't exist, and helm will set up RBAC for the operator to run in the namespace. It will also set up RBAC in the `default` namespace for driver pods of your Spark applications to be able to manipulate executor pods. In addition, the chart will create a Deployment in the namespace `spark-operator`. The chart's [Spark Job Namespace](#about-the-spark-job-namespace) defaults to the Helm release namespace. The chart by default does not enable the [Mutating Admission Webhook](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/) for Spark pod customization. When enabled, a webhook service and a secret named `spark-webhook-certs` storing the x509 certificate are created for that purpose.
-To install the operator **with** the mutating admission webhook on a Kubernetes cluster, install the chart with the flag `webhook.enable=true`:
-
-```bash
-$ helm install my-release spark-operator/spark-operator --namespace spark-operator --set webhook.enable=true
-```
-
-Due to a [known issue](https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control#defining_permissions_in_a_role) in GKE, you will need to first grant yourself cluster-admin privileges before you can create custom roles and role bindings on a GKE cluster versioned 1.6 and up. Run the following command before installing the chart on GKE:
-
-```bash
-$ kubectl create clusterrolebinding <user>-cluster-admin-binding --clusterrole=cluster-admin --user=<user>@<domain>
-```
-
-Now you should see the operator running in the cluster by checking the status of the Helm release.
-
-```bash
-$ helm status --namespace spark-operator my-release
-```
-### Installation using kustomize
-
-You can also install `spark-operator` using [kustomize](https://github.com/kubernetes-sigs/kustomize). Run
-
-```
-kubectl apply -k {manifest_directory}
-```
-The default kustomize manifest directory is part of the repo [here](https://github.com/kubeflow/spark-operator/tree/master/manifest/spark-operator-with-webhook-install).
-
-The manifest directory contains primarily the `crds` and `spark-operator-with-webhook.yaml`, which holds the configuration of the spark operator init job, a webhook service, and a deployment.
-
-The manifest above installs `spark-operator` in the default namespace `spark-operator` with the default webhook service `spark-webhook`. If you wish to install `spark-operator` in a namespace other than `spark-operator`, and with a webhook service name other than `spark-webhook`, the `Job` manifest in `spark-operator-with-webhook.yaml` should look like the one below. You need to pass the desired namespace name and service name as arguments in the `command` field in `containers`.
-
-```
-apiVersion: batch/v1
-kind: Job
-metadata:
-  name: sparkoperator-init
-  namespace: myorg-spark-operator
-  labels:
-    app.kubernetes.io/name: sparkoperator
-    app.kubernetes.io/version: v2.4.0-v1beta1
-spec:
-  backoffLimit: 3
-  template:
-    metadata:
-      labels:
-        app.kubernetes.io/name: sparkoperator
-        app.kubernetes.io/version: v2.4.0-v1beta1
-    spec:
-      serviceAccountName: sparkoperator
-      restartPolicy: Never
-      containers:
-      - name: main
-        image: gcr.io/spark-operator/spark-operator:v2.4.0-v1beta1-latest
-        imagePullPolicy: IfNotPresent
-        command: ["/usr/bin/gencerts.sh", "-p", "--namespace", "myorg-spark-operator", "--service", "myorg-spark-webhook"]
-```
-And the Service will be:
-
-```
-kind: Service
-apiVersion: v1
-metadata:
-  name: myorg-spark-webhook
-...
-```
-
-And `args` in the `Deployment` will look like:
-
-```
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: sparkoperator
-...
-      args:
-        - -logtostderr
-        - -enable-webhook=true
-        - -v=2
-        - webhook-svc-namespace=myorg-spark-operator
-        - webhook-svc-name=myorg-spark-webhook
-```
-
-This will install `spark-operator` in the `myorg-spark-operator` namespace, and the webhook service will be called `myorg-spark-webhook`.
-
-To uninstall the operator, run:
-```
-kustomize build '{manifest_directory}' | kubectl delete -f -
-```
-## Running the Examples
-
-To run the Spark Pi example, run the following command:
-
-```bash
-$ kubectl apply -f examples/spark-pi.yaml
-```
-
-Note that `spark-pi.yaml` configures the driver pod to use the `spark` service account to communicate with the Kubernetes API server.
-You might need to replace it with the appropriate service account before submitting the job. If you installed the operator using the Helm chart and overrode `sparkJobNamespaces`, the service account name ends with `-spark` and starts with the Helm release name. For example, if you would like your Spark jobs to run in a namespace called `test-ns`, first make sure it already exists, and then install the chart with the command:
-
-```bash
-$ helm install my-release spark-operator/spark-operator --namespace spark-operator --set "sparkJobNamespaces={test-ns}"
-```
-
-Then the chart will set up a service account for your Spark jobs to use in that namespace.
-
-See the section on the [Spark Job Namespace](#about-the-spark-job-namespace) for details on the behavior of the default Spark Job Namespace.
-
-Running the above command will create a `SparkApplication` object named `spark-pi`. Check the object by running the following command:
-
-```bash
-$ kubectl get sparkapplications spark-pi -o=yaml
-```
-
-This will show something similar to the following:
-
-```yaml
-apiVersion: sparkoperator.k8s.io/v1beta2
-kind: SparkApplication
-metadata:
-  ...
-spec:
-  deps: {}
-  driver:
-    coreLimit: 1200m
-    cores: 1
-    labels:
-      version: 2.3.0
-    memory: 512m
-    serviceAccount: spark
-  executor:
-    cores: 1
-    instances: 1
-    labels:
-      version: 2.3.0
-    memory: 512m
-  image: gcr.io/ynli-k8s/spark:v3.1.1
-  mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.1.1.jar
-  mainClass: org.apache.spark.examples.SparkPi
-  mode: cluster
-  restartPolicy:
-    type: OnFailure
-    onFailureRetries: 3
-    onFailureRetryInterval: 10
-    onSubmissionFailureRetries: 5
-    onSubmissionFailureRetryInterval: 20
-  type: Scala
-status:
-  sparkApplicationId: spark-5f4ba921c85ff3f1cb04bef324f9154c9
-  applicationState:
-    state: COMPLETED
-  completionTime: 2018-02-20T23:33:55Z
-  driverInfo:
-    podName: spark-pi-83ba921c85ff3f1cb04bef324f9154c9-driver
-    webUIAddress: 35.192.234.248:31064
-    webUIPort: 31064
-    webUIServiceName: spark-pi-2402118027-ui-svc
-    webUIIngressName: spark-pi-ui-ingress
-    webUIIngressAddress: spark-pi.ingress.cluster.com
-  executorState:
-    spark-pi-83ba921c85ff3f1cb04bef324f9154c9-exec-1: COMPLETED
-  LastSubmissionAttemptTime: 2018-02-20T23:32:27Z
-```
-
-To check events for the `SparkApplication` object, run the following command:
-
-```bash
-$ kubectl describe sparkapplication spark-pi
-```
-
-This will show events similar to the following:
-
-```
-Events:
-  Type    Reason                      Age   From            Message
-  ----    ------                      ----  ----            -------
-  Normal  SparkApplicationAdded       5m    spark-operator  SparkApplication spark-pi was added, enqueued it for submission
-  Normal  SparkApplicationTerminated  4m    spark-operator  SparkApplication spark-pi terminated with state: COMPLETED
-```
-
-The operator submits the Spark Pi example to run once it receives an event indicating the `SparkApplication` object was added.
-
-## Configuration
-
-The operator is typically deployed and run using the Helm chart. However, users can still run it outside a Kubernetes cluster and make it talk to the Kubernetes API server of a cluster by specifying the path to a `kubeconfig` file, which can be done using the `-kubeconfig` flag.
-
-The operator uses multiple workers in the `SparkApplication` controller. The number of worker threads is controlled by the command-line flag `-controller-threads`, which defaults to 10.
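-
-For example, a minimal sketch of a local run against an existing cluster, assuming the operator binary has been built as `spark-operator` (the flag values here are illustrative):
-
-```bash
-# Run the operator outside the cluster against an existing kubeconfig.
-# Flag values are illustrative only.
-./spark-operator -kubeconfig=$HOME/.kube/config -controller-threads=20
-```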
-
-The operator enables cache resynchronization, so the informers used by the operator will periodically re-list the existing objects they manage and re-trigger resource events. The resynchronization interval in seconds can be configured using the flag `-resync-interval`, with a default value of 30 seconds.
-
-By default, the operator will install the [CustomResourceDefinitions](https://kubernetes.io/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/) for the custom resources it manages. This can be disabled by setting the flag `-install-crds=false`, in which case the CustomResourceDefinitions can be installed manually using `kubectl apply -f manifest/spark-operator-crds.yaml`.
-
-The mutating admission webhook is an **optional** component and can be enabled or disabled using the `-enable-webhook` flag, which defaults to `false`.
-
-By default, the operator will manage custom resource objects of the managed CRD types for the whole cluster. It can be configured to manage only the custom resource objects in a specific namespace with the flag `-namespace=<namespace>`.
-
-## Upgrade
-
-To upgrade the operator, e.g., to use a newer version container image with a new tag, run the following command with updated parameters for the Helm release:
-
-```bash
-$ helm upgrade <release-name> --set image.repository=org/image --set image.tag=newTag
-```
-
-Refer to the Helm [documentation](https://helm.sh/docs/helm/helm_upgrade/) for more details on `helm upgrade`.
-
-## About Spark Job Namespaces
-
-The Spark Job Namespaces value defines the namespaces where `SparkApplications` can be deployed. The Helm chart value for the Spark Job Namespaces is `sparkJobNamespaces`, and its default value is `[]`. As defined in the Helm chart's [README](../charts/spark-operator-chart/README.md), when the list of namespaces is empty the Helm chart will create a service account in the namespace where the spark-operator is deployed.
-
-If you installed the operator using the Helm chart and overrode the `sparkJobNamespaces` to some other, pre-existing namespace, the Helm chart will create the necessary service account and RBAC in the specified namespace.
-
-The Spark Operator uses the Spark Job Namespace to identify and filter relevant events for the `SparkApplication` CRD. If you specify a namespace for Spark Jobs, and then submit a SparkApplication resource to another namespace, the Spark Operator will filter out the event, and the resource will not get deployed. If you don't specify a namespace, the Spark Operator will see only `SparkApplication` events for the Spark Operator namespace.
-
-## About the Service Account for Driver Pods
-
-A Spark driver pod needs a Kubernetes service account in the pod's namespace that has permissions to create, get, list, and delete executor pods, and to create a Kubernetes headless service for the driver. The driver will fail and exit without the service account, unless the default service account in the pod's namespace has the needed permissions. To submit and run a `SparkApplication` in a namespace, please make sure there is a service account with the needed permissions in the namespace and set `.spec.driver.serviceAccount` to the name of the service account. Please refer to [spark-rbac.yaml](../manifest/spark-rbac.yaml) for an example RBAC setup that creates a driver service account named `spark` in the `default` namespace, with an RBAC role binding giving the service account the needed permissions.
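-
-For reference, the shape of such a setup is sketched below (an illustrative fragment modeled on that file, not a verbatim copy; the exact resource names, resources, and verbs may differ):
-
-```yaml
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: spark
-  namespace: default
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: Role
-metadata:
-  name: spark-role
-  namespace: default
-rules:
-- apiGroups: [""]
-  resources: ["pods", "services", "configmaps"]
-  verbs: ["create", "get", "list", "watch", "delete"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: RoleBinding
-metadata:
-  name: spark-role-binding
-  namespace: default
-subjects:
-- kind: ServiceAccount
-  name: spark
-  namespace: default
-roleRef:
-  kind: Role
-  name: spark-role
-  apiGroup: rbac.authorization.k8s.io
-```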
-
-## About the Service Account for Executor Pods
-
-A Spark executor pod may be configured with a Kubernetes service account in the pod namespace. To submit and run a `SparkApplication` in a namespace, please make sure there is a service account with the required permissions in the namespace and set `.spec.executor.serviceAccount` to the name of the service account.
-
-## Enable Metric Exporting to Prometheus
-
-The operator exposes a set of metrics via the metric endpoint to be scraped by `Prometheus`. The Helm chart by default installs the operator with the additional flag to enable metrics (`-enable-metrics=true`) as well as other annotations used by Prometheus to scrape the metric endpoint. If `podMonitor.enable` is enabled, the Helm chart will submit a pod monitor for the operator's pod. To install the operator **without** metrics enabled, pass the appropriate flag during `helm install`:
-
-```bash
-$ helm install my-release spark-operator/spark-operator --namespace spark-operator --set metrics.enable=false
-```
-
-If enabled, the operator generates the following metrics:
-
-#### Spark Application Metrics
-| Metric | Description |
-| ------------- | ------------- |
-| `spark_app_count` | Total number of SparkApplication handled by the Operator.|
-| `spark_app_submit_count` | Total number of SparkApplication spark-submitted by the Operator.|
-| `spark_app_success_count` | Total number of SparkApplication which completed successfully.|
-| `spark_app_failure_count` | Total number of SparkApplication which failed to complete. |
-| `spark_app_running_count` | Total number of SparkApplication which are currently running.|
-| `spark_app_success_execution_time_microseconds` | Execution time for applications which succeeded.|
-| `spark_app_failure_execution_time_microseconds` | Execution time for applications which failed. |
-| `spark_app_start_latency_microseconds` | Start latency of SparkApplication as type of [Prometheus Summary](https://prometheus.io/docs/concepts/metric_types/#summary). |
-| `spark_app_start_latency_seconds` | Start latency of SparkApplication as type of [Prometheus Histogram](https://prometheus.io/docs/concepts/metric_types/#histogram). |
-| `spark_app_executor_success_count` | Total number of Spark Executors which completed successfully. |
-| `spark_app_executor_failure_count` | Total number of Spark Executors which failed. |
-| `spark_app_executor_running_count` | Total number of Spark Executors which are currently running. |
-
-#### Work Queue Metrics
-| Metric | Description |
-| ------------- | ------------- |
-| `spark_application_controller_depth` | Current depth of workqueue |
-| `spark_application_controller_adds` | Total number of adds handled by workqueue |
-| `spark_application_controller_latency` | Latency for workqueue |
-| `spark_application_controller_work_duration` | How long processing an item from workqueue takes |
-| `spark_application_controller_retries` | Total number of retries handled by workqueue |
-| `spark_application_controller_unfinished_work_seconds` | Unfinished work in seconds |
-| `spark_application_controller_longest_running_processor_microseconds` | Longest running processor in microseconds |
-
-
-The following is a list of all the configurations the operator supports for metrics:
-
-```bash
--enable-metrics=true
--metrics-port=10254
--metrics-endpoint=/metrics
--metrics-prefix=myServiceName
--metrics-label=label1Key
--metrics-label=label2Key
-```
-All configs except `-enable-metrics` are optional. If port and/or endpoint are specified, please ensure that the annotations `prometheus.io/port`, `prometheus.io/path` and `containerPort` in `spark-operator-with-metrics.yaml` are updated as well.
-
-A note about `metrics-labels`: In `Prometheus`, every unique combination of key-value label pairs represents a new time series, which can dramatically increase the amount of data stored. Hence, labels should not be used to store high-cardinality dimensions with potentially large or unbounded value ranges.
-
-Additionally, these metrics are best-effort for the current operator run and will be reset on an operator restart. Also, some of these metrics are generated by listening to pod state updates for the driver/executors; deleting those pods outside the operator might lead to incorrect values for some of these metrics.
-
-## Driver UI Access and Ingress
-
-The operator, by default, makes the Spark UI accessible by creating a service of type `ClusterIP` which exposes the UI. This is only accessible from within the cluster.
-
-The operator also supports creating an optional Ingress for the UI. This can be turned on by setting the `ingress-url-format` command-line flag. The `ingress-url-format` should be a template like `{{$appName}}.{ingress_suffix}/{{$appNamespace}}/{{$appName}}`. The `{ingress_suffix}` should be replaced by the user to indicate the cluster's ingress URL, and the operator will replace `{{$appName}}` and `{{$appNamespace}}` with the appropriate values. Please note that Ingress support requires that the cluster's ingress URL routing is correctly set up. For example, if the `ingress-url-format` is `{{$appName}}.ingress.cluster.com`, anything matching `*.ingress.cluster.com` must be routed to the ingress controller on the Kubernetes cluster.
-
-The operator sets both `WebUIAddress`, which is accessible from within the cluster, and `WebUIIngressAddress` as part of the `DriverInfo` field of the `SparkApplication`.
-
-The operator generates ingress resources intended for use with the [Ingress NGINX Controller](https://kubernetes.github.io/ingress-nginx/). Include the following in your application spec to ensure the controller recognizes the ingress and provides appropriate routes to your Spark UI:
-
-```yaml
-spec:
-  sparkUIOptions:
-    ingressAnnotations:
-      kubernetes.io/ingress.class: nginx
-```
-
-## About the Mutating Admission Webhook
-
-The Kubernetes Operator for Apache Spark comes with an optional mutating admission webhook for customizing Spark driver and executor pods based on the specification in `SparkApplication` objects, e.g., mounting user-specified ConfigMaps and volumes, setting pod affinity/anti-affinity, and adding tolerations.
-
-The webhook requires an X509 certificate for TLS for pod admission requests and responses between the Kubernetes API server and the webhook server running inside the operator. For that, the certificate and key files must be accessible by the webhook server. The location of these certs is configurable, and they will be reloaded on a configurable interval.
-The Kubernetes Operator for Spark ships with a tool at `hack/gencerts.sh` for generating the CA and server certificate and putting the certificate and key files into a secret named `spark-webhook-certs` in the namespace `spark-operator`. This secret will be mounted into the operator pod.
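-
-The tool can also be invoked by hand inside the operator image; a sketch reusing the flags shown in the kustomize init `Job` earlier in this guide (the namespace and service names are illustrative):
-
-```bash
-# Generate the CA and server certificate and store them in the
-# spark-webhook-certs secret (flags as used by the init Job above).
-/usr/bin/gencerts.sh -p --namespace spark-operator --service spark-webhook
-```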
-
-Run the following command to create the secret with the certificate and key files using a batch Job, and install the operator Deployment with the mutating admission webhook:
-
-```bash
-$ kubectl apply -f manifest/spark-operator-with-webhook.yaml
-```
-
-This will create a Deployment named `sparkoperator` and a Service named `spark-webhook` for the webhook in namespace `spark-operator`.
-
-### Mutating Admission Webhooks on a private GKE or EKS cluster
-
-If you are deploying the operator on a GKE cluster with the [Private cluster](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters) setting enabled, or on an enterprise AWS EKS cluster, and you wish to deploy the operator with the [Mutating Admission Webhook](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/), then make sure to change the `webhookPort` to `443`. Alternatively, you can choose to allow connections to the default port (8080).
-
-> By default, firewall rules restrict your cluster master to only initiate TCP connections to your nodes on ports 443 (HTTPS) and 10250 (kubelet). For some Kubernetes features, you might need to add firewall rules to allow access on additional ports. For example, in Kubernetes 1.9 and older, kubectl top accesses heapster, which needs a firewall rule to allow TCP connections on port 8080. To grant such access, you can add firewall rules.
-[From the docs](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters#add_firewall_rules)
-
-To install the operator with a custom port, pass the appropriate flag during `helm install`:
-
-```bash
-$ helm install my-release spark-operator/spark-operator --namespace spark-operator --set "sparkJobNamespaces={spark}" --set webhook.enable=true --set webhook.port=443
-```
diff --git a/docs/user-guide.md b/docs/user-guide.md
deleted file mode 100644
index 60354843bd..0000000000
--- a/docs/user-guide.md
+++ /dev/null
@@ -1,848 +0,0 @@
-# User Guide
-
-For a quick introduction on how to build and install the Kubernetes Operator for Apache Spark, and how to run some example applications, please refer to the [Quick Start Guide](quick-start-guide.md). For a complete reference of the API definition of the `SparkApplication` and `ScheduledSparkApplication` custom resources, please refer to the [API Specification](api-docs.md).
-
-The Kubernetes Operator for Apache Spark ships with a command-line tool called `sparkctl` that offers additional features beyond what `kubectl` is able to do. Documentation on `sparkctl` can be found in the [README](../sparkctl/README.md). If you are running the Spark Operator on Google Kubernetes Engine and want to use Google Cloud Storage (GCS) and/or BigQuery for reading/writing data, also refer to the [GCP guide](gcp.md). The Kubernetes Operator for Apache Spark will simply be referred to as the operator for the rest of this guide.
-
-## Table of Contents
-
-- [User Guide](#user-guide)
-  - [Table of Contents](#table-of-contents)
-  - [Using a SparkApplication](#using-a-sparkapplication)
-  - [Writing a SparkApplication Spec](#writing-a-sparkapplication-spec)
-    - [Specifying Deployment Mode](#specifying-deployment-mode)
-    - [Specifying Application Dependencies](#specifying-application-dependencies)
-    - [Specifying Spark Configuration](#specifying-spark-configuration)
-    - [Specifying Hadoop Configuration](#specifying-hadoop-configuration)
-    - [Writing Driver Specification](#writing-driver-specification)
-    - [Writing Executor Specification](#writing-executor-specification)
-    - [Specifying Extra Java Options](#specifying-extra-java-options)
-    - [Specifying Environment Variables](#specifying-environment-variables)
-    - [Requesting GPU Resources](#requesting-gpu-resources)
-    - [Host Network](#host-network)
-    - [Mounting Secrets](#mounting-secrets)
-    - [Mounting ConfigMaps](#mounting-configmaps)
-      - [Mounting a ConfigMap storing Spark Configuration Files](#mounting-a-configmap-storing-spark-configuration-files)
-      - [Mounting a ConfigMap storing Hadoop Configuration Files](#mounting-a-configmap-storing-hadoop-configuration-files)
-    - [Mounting Volumes](#mounting-volumes)
-    - [Using Secrets As Environment Variables](#using-secrets-as-environment-variables)
-    - [Using Image Pull Secrets](#using-image-pull-secrets)
-    - [Using Pod Affinity](#using-pod-affinity)
-    - [Using Tolerations](#using-tolerations)
-    - [Using Security Context](#using-security-context)
-    - [Using Sidecar Containers](#using-sidecar-containers)
-    - [Using Init-Containers](#using-init-containers)
-    - [Using DNS Settings](#using-dns-settings)
-    - [Using Volume For Scratch Space](#using-volume-for-scratch-space)
-    - [Using Termination Grace Period](#using-termination-grace-period)
-    - [Using Container LifeCycle Hooks](#using-container-lifecycle-hooks)
-    - [Python Support](#python-support)
-    - [Monitoring](#monitoring)
-    - [Dynamic Allocation](#dynamic-allocation)
-  - [Working with SparkApplications](#working-with-sparkapplications)
-    - [Creating a New SparkApplication](#creating-a-new-sparkapplication)
-    - [Deleting a SparkApplication](#deleting-a-sparkapplication)
-    - [Updating a SparkApplication](#updating-a-sparkapplication)
-    - [Checking a SparkApplication](#checking-a-sparkapplication)
-    - [Configuring Automatic Application Restart and Failure Handling](#configuring-automatic-application-restart-and-failure-handling)
-    - [Setting TTL for a SparkApplication](#setting-ttl-for-a-sparkapplication)
-  - [Running Spark Applications on a Schedule using a ScheduledSparkApplication](#running-spark-applications-on-a-schedule-using-a-scheduledsparkapplication)
-  - [Enabling Leader Election for High Availability](#enabling-leader-election-for-high-availability)
-  - [Enabling Resource Quota Enforcement](#enabling-resource-quota-enforcement)
-  - [Running Multiple Instances Of The Operator Within The Same K8s Cluster](#running-multiple-instances-of-the-operator-within-the-same-k8s-cluster)
-  - [Customizing the Operator](#customizing-the-operator)
-
-## Using a SparkApplication
-The operator runs Spark applications specified in Kubernetes objects of the `SparkApplication` custom resource type. The most common way of using a `SparkApplication` is to store the `SparkApplication` specification in a YAML file and use the `kubectl` command, or alternatively the `sparkctl` command, to work with the `SparkApplication`.
-The operator automatically submits the application as configured in a `SparkApplication` to run on the Kubernetes cluster, and uses the `SparkApplication` to collect and surface the status of the driver and executors to the user.
-
-## Writing a SparkApplication Spec
-
-As with all other Kubernetes API objects, a `SparkApplication` needs the `apiVersion`, `kind`, and `metadata` fields. For general information about working with manifests, see [object management using kubectl](https://kubernetes.io/docs/concepts/overview/object-management-kubectl/overview/).
-
-A `SparkApplication` also needs a [`.spec` section](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status). This section contains fields for specifying various aspects of an application including its type (`Scala`, `Java`, `Python`, or `R`), deployment mode (`cluster` or `client`), main application resource URI (e.g., the URI of the application jar), main class, arguments, etc. Node selectors are also supported via the optional field `.spec.nodeSelector`.
-
-It also has fields for specifying the unified container image (to use for both the driver and executors) and the image pull policy, namely, `.spec.image` and `.spec.imagePullPolicy` respectively. If a custom init-container image (in both the driver and executor pods) needs to be used, the optional field `.spec.initContainerImage` can be used to specify it. If set, `.spec.initContainerImage` overrides `.spec.image` for the init-container image. Otherwise, the image specified by `.spec.image` will be used for the init-container. It is invalid if neither `.spec.image` nor `.spec.initContainerImage` is set.
-
-Below is an example showing part of a `SparkApplication` specification:
-
-```yaml
-apiVersion: sparkoperator.k8s.io/v1beta2
-kind: SparkApplication
-metadata:
-  name: spark-pi
-  namespace: default
-spec:
-  type: Scala
-  mode: cluster
-  image: gcr.io/spark/spark:v3.1.1
-  mainClass: org.apache.spark.examples.SparkPi
-  mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.1.1.jar
-```
-
-### Specifying Deployment Mode
-
-A `SparkApplication` should set `.spec.deployMode` to `cluster`, as `client` is not currently implemented. The driver pod will then run `spark-submit` in `client` mode internally to run the driver program. Additional details of how `SparkApplication`s are run can be found in the [design documentation](design.md#architecture).
-
-
-### Specifying Application Dependencies
-
-Spark applications often need additional files, in addition to the main application resource, to run. Such application dependencies can include, for example, jars and data files the application needs at runtime. When using the `spark-submit` script to submit a Spark application, such dependencies are specified using the `--jars` and `--files` options. To support specification of application dependencies, a `SparkApplication` uses an optional field `.spec.deps` that in turn supports specifying jars and files. More specifically, the optional fields `.spec.deps.jars` and `.spec.deps.files` correspond to the `--jars` and `--files` options of the `spark-submit` script, respectively.
-
-Additionally, `.spec.deps` also has fields for specifying the locations in the driver and executor containers where jars and files should be downloaded to, namely, `.spec.deps.jarsDownloadDir` and `.spec.deps.filesDownloadDir`.
-The optional fields `.spec.deps.downloadTimeout` and `.spec.deps.maxSimultaneousDownloads` are used to control the timeout and maximum parallelism of downloading dependencies that are hosted remotely, e.g., on an HTTP server, or in external storage such as HDFS, Google Cloud Storage, or AWS S3.
-
-The following is an example specification with both container-local (i.e., within the container) and remote dependencies:
-
-```yaml
-spec:
-  deps:
-    jars:
-      - local:///opt/spark-jars/gcs-connector.jar
-    files:
-      - gs://spark-data/data-file-1.txt
-      - gs://spark-data/data-file-2.txt
-```
-
-It's also possible to specify additional jars to obtain from a remote repository by adding maven coordinates to `.spec.deps.packages`. Conflicting transitive dependencies can be addressed by adding to the exclusion list with `.spec.deps.excludePackages`. Additional repositories can be added to the `.spec.deps.repositories` list. These directly translate to the `spark-submit` parameters `--packages`, `--exclude-packages`, and `--repositories`.
-
-NOTE:
-- Each package in the `packages` list must be of the form "groupId:artifactId:version"
-- Each package in the `excludePackages` list must be of the form "groupId:artifactId"
-
-The following example shows how to use these parameters.
-
-```yaml
-spec:
-  deps:
-    repositories:
-      - https://repository.example.com/prod
-    packages:
-      - com.example:some-package:1.0.0
-    excludePackages:
-      - com.example:other-package
-```
-
-### Specifying Spark Configuration
-
-There are two ways to add Spark configuration: setting individual Spark configuration properties using the optional field `.spec.sparkConf`, or mounting a special Kubernetes ConfigMap storing Spark configuration files (e.g. `spark-defaults.conf`, `spark-env.sh`, `log4j.properties`) using the optional field `.spec.sparkConfigMap`. If `.spec.sparkConfigMap` is used, in addition to mounting the ConfigMap into the driver and executors, the operator also sets the environment variable `SPARK_CONF_DIR` to point to the mount path of the ConfigMap.
-
-```yaml
-spec:
-  sparkConf:
-    spark.ui.port: "4045"
-    spark.eventLog.enabled: "true"
-    spark.eventLog.dir: "hdfs://hdfs-namenode-1:8020/spark/spark-events"
-```
-
-### Specifying Hadoop Configuration
-
-There are two ways to add Hadoop configuration: setting individual Hadoop configuration properties using the optional field `.spec.hadoopConf`, or mounting a special Kubernetes ConfigMap storing Hadoop configuration files (e.g. `core-site.xml`) using the optional field `.spec.hadoopConfigMap`. The operator automatically adds the prefix `spark.hadoop.` to the names of individual Hadoop configuration properties in `.spec.hadoopConf`. If `.spec.hadoopConfigMap` is used, in addition to mounting the ConfigMap into the driver and executors, the operator also sets the environment variable `HADOOP_CONF_DIR` to point to the mount path of the ConfigMap.
-
-The following is an example showing the use of individual Hadoop configuration properties:
-
-```yaml
-spec:
-  hadoopConf:
-    "fs.gs.project.id": spark
-    "fs.gs.system.bucket": spark
-    "google.cloud.auth.service.account.enable": "true"
-    "google.cloud.auth.service.account.json.keyfile": /mnt/secrets/key.json
-```
-
-### Writing Driver Specification
-
-The `.spec` section of a `SparkApplication` has a `.spec.driver` field for configuring the driver. It allows users to set the memory and CPU resources to request for the driver pod, and the container image the driver should use.
-It also has fields for optionally specifying labels, annotations, and environment variables for the driver pod. By default, the driver pod name of an application is automatically generated by the Spark submission client. If instead you want to use a particular name for the driver pod, the optional field `.spec.driver.podName` can be used. The driver pod by default uses the `default` service account in the namespace it is running in to talk to the Kubernetes API server. The `default` service account, however, may or may not have sufficient permissions to create executor pods and the headless service used by the executors to connect to the driver. If it does not, a custom service account that has the right permissions should be used instead; the optional field `.spec.driver.serviceAccount` can be used to specify the name of the custom service account. When a custom container image is needed for the driver, the field `.spec.driver.image` can be used to specify it. This overrides the image specified in `.spec.image` if it is also set. It is invalid if neither `.spec.image` nor `.spec.driver.image` is set.
-
-For applications that need to mount Kubernetes [Secrets](https://kubernetes.io/docs/concepts/configuration/secret/) or [ConfigMaps](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/) into the driver pod, fields `.spec.driver.secrets` and `.spec.driver.configMaps` can be used. For more details, please refer to
-[Mounting Secrets](#mounting-secrets) and [Mounting ConfigMaps](#mounting-configmaps).
-
-The following is an example driver specification:
-
-```yaml
-spec:
-  driver:
-    cores: 1
-    coreLimit: 200m
-    memory: 512m
-    labels:
-      version: 3.1.1
-    serviceAccount: spark
-```
-
-### Writing Executor Specification
-
-The `.spec` section of a `SparkApplication` has a `.spec.executor` field for configuring the executors. It allows users to set the memory and CPU resources to request for the executor pods, and the container image the executors should use. It also has fields for optionally specifying labels, annotations, and environment variables for the executor pods. By default, a single executor is requested for an application. If more than one executor is needed, the optional field `.spec.executor.instances` can be used to specify the number of executors to request. When a custom container image is needed for the executors, the field `.spec.executor.image` can be used to specify it. This overrides the image specified in `.spec.image` if it is also set. It is invalid if neither `.spec.image` nor `.spec.executor.image` is set.
-
-For applications that need to mount Kubernetes [Secrets](https://kubernetes.io/docs/concepts/configuration/secret/) or [ConfigMaps](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/) into the executor pods, fields `.spec.executor.secrets` and `.spec.executor.configMaps` can be used. For more details, please refer to
-[Mounting Secrets](#mounting-secrets) and [Mounting ConfigMaps](#mounting-configmaps).
-
-An example executor specification is shown below:
-
-```yaml
-spec:
-  executor:
-    cores: 1
-    instances: 1
-    memory: 512m
-    labels:
-      version: 3.1.1
-    serviceAccount: spark
-```
-
-### Specifying Extra Java Options
-
-A `SparkApplication` can specify extra Java options for the driver or executors, using the optional field `.spec.driver.javaOptions` for the driver and `.spec.executor.javaOptions` for executors.
Below is an example: - -```yaml -spec: - executor: - javaOptions: "-XX:+UnlockExperimentalVMOptions -XX:+UseCGroupMemoryLimitForHeap" -``` - -Values specified using those two fields get converted to Spark configuration properties `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions`, respectively. **Prefer using the above two fields over configuration properties `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions`** as the fields work well with other fields that might modify what gets set for `spark.driver.extraJavaOptions` or `spark.executor.extraJavaOptions`. - -### Specifying Environment Variables - -There are two fields for specifying environment variables for the driver and/or executor containers, namely `.spec.driver.env` (or `.spec.executor.env` for the executor container) and `.spec.driver.envFrom` (or `.spec.executor.envFrom` for the executor container). Specifically, `.spec.driver.env` (and `.spec.executor.env`) takes a list of [EnvVar](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvar-v1-core), each of which specifies an environment variable or the source of an environment variable, e.g., a name-value pair, a ConfigMap key, a Secret key, etc. Alternatively, `.spec.driver.envFrom` (and `.spec.executor.envFrom`) takes a list of [EnvFromSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envfromsource-v1-core) and allows [using all key-value pairs in a ConfigMap or Secret as environment variables](https://v1-15.docs.kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#configure-all-key-value-pairs-in-a-configmap-as-container-environment-variables). The `SparkApplication` snippet below shows the use of both fields: - -```yaml -spec: - driver: - env: - - name: ENV1 - value: VAL1 - - name: ENV2 - value: VAL2 - - name: ENV3 - valueFrom: - configMapKeyRef: - name: some-config-map - key: env3-key - - name: AUTH_KEY - valueFrom: - secretKeyRef: - name: some-secret - key: auth-key - envFrom: - - configMapRef: - name: env-config-map - - secretRef: - name: env-secret - executor: - env: - - name: ENV1 - value: VAL1 - - name: ENV2 - value: VAL2 - - name: ENV3 - valueFrom: - configMapKeyRef: - name: some-config-map - key: env3-key - - name: AUTH_KEY - valueFrom: - secretKeyRef: - name: some-secret - key: auth-key - envFrom: - - configMapRef: - name: my-env-config-map - - secretRef: - name: my-env-secret -``` - -**Note: legacy field `envVars` that can also be used for specifying environment variables is deprecated and will be removed in a future API version.** - -### Requesting GPU Resources - -A `SparkApplication` can specify GPU resources for the driver or executor pod, using the optional field `.spec.driver.gpu` or `.spec.executor.gpu`. Below is an example: - -```yaml -spec: - driver: - cores: 0.1 - coreLimit: "200m" - memory: "512m" - gpu: - name: "amd.com/gpu" # GPU resource name - quantity: 1 # number of GPUs to request - labels: - version: 3.1.1 - serviceAccount: spark - executor: - cores: 1 - instances: 1 - memory: "512m" - serviceAccount: spark - gpu: - name: "nvidia.com/gpu" - quantity: 1 -``` -Note that the mutating admission webhook is needed to use this feature. Please refer to the [Quick Start Guide](quick-start-guide.md) on how to enable the mutating admission webhook. - -### Host Network - -A `SparkApplication` can specify `hostNetwork` for the driver or executor pod, using the optional field `.spec.driver.hostNetwork` or `.spec.executor.hostNetwork`. 
When `hostNetwork` is `true`, the operator sets pods' `spec.hostNetwork` to `true` and sets pods' `spec.dnsPolicy` to `ClusterFirstWithHostNet`. Below is an example: - -```yaml -spec: - driver: - cores: 0.1 - coreLimit: "200m" - memory: "512m" - hostNetwork: true - labels: - version: 3.1.1 - serviceAccount: spark - executor: - cores: 1 - instances: 1 - memory: "512m" -``` -Note that the mutating admission webhook is needed to use this feature. Please refer to the [Quick Start Guide](quick-start-guide.md) on how to enable the mutating admission webhook. - - -### Mounting Secrets - -As mentioned above, both the driver specification and executor specification have an optional field `secrets` for configuring the list of Kubernetes Secrets to be mounted into the driver and executors, respectively. The field is a map with the names of the Secrets as keys and values specifying the mount path and type of each Secret. For instance, the following example shows a driver specification with a Secret named `gcp-svc-account` of type `GCPServiceAccount` to be mounted to `/mnt/secrets` in the driver pod. - -```yaml -spec: - driver: - secrets: - - name: gcp-svc-account - path: /mnt/secrets - secretType: GCPServiceAccount -``` - -The type of a Secret as specified by the `secretType` field is a hint to the operator on what extra configuration it needs to take care of for the specific type of Secrets. For example, if a Secret is of type **`GCPServiceAccount`**, the operator additionally sets the environment variable **`GOOGLE_APPLICATION_CREDENTIALS`** to point to the JSON key file stored in the secret. Please refer to -[Getting Started with Authentication](https://cloud.google.com/docs/authentication/getting-started) for more information on how to authenticate with GCP services using a service account JSON key file. Note that the operator assumes that the key of the service account JSON key file in the Secret data map is **`key.json`** so it is able to set the environment variable automatically. Similarly, if the type of a Secret is **`HadoopDelegationToken`**, the operator additionally sets the environment variable **`HADOOP_TOKEN_FILE_LOCATION`** to point to the file storing the Hadoop delegation token. In this case, the operator assumes that the key of the delegation token file in the Secret data map is **`hadoop.token`**. -The `secretType` field should have the value `Generic` if no extra configuration is required. - -Note that the mutating admission webhook is needed to use this feature. Please refer to the [Quick Start Guide](quick-start-guide.md) on how to enable the mutating admission webhook. - -### Mounting ConfigMaps - -Both the driver specification and executor specifications have an optional field for configuring -the list of Kubernetes ConfigMaps to be mounted into the driver and executors, respectively. The field is a map with keys being the names of the ConfigMaps and values specifying the mount path of each ConfigMap. For instance, the following example shows a driver specification with a ConfigMap named `configmap1` to be mounted to `/mnt/config-maps` in the driver pod. - -```yaml -spec: - driver: - configMaps: - - name: configmap1 - path: /mnt/config-maps -``` - -Note that the mutating admission webhook is needed to use this feature. Please refer to the [Quick Start Guide](quick-start-guide.md) on how to enable the mutating admission webhook. 
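-Below is a minimal sketch of this field; it assumes a ConfigMap named `spark-conf-files` (a hypothetical name) holding e.g. `spark-defaults.conf` already exists in the same namespace:
-
-```yaml
-spec:
-  sparkConfigMap: spark-conf-files  # hypothetical ConfigMap; must already exist in the app's namespace
-```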
-Note that the mutating admission webhook is needed to use this feature. Please refer to the
-[Quick Start Guide](quick-start-guide.md) on how to enable the mutating admission webhook.
-
-#### Mounting a ConfigMap storing Hadoop Configuration Files
-
-A `SparkApplication` can specify a Kubernetes ConfigMap storing Hadoop configuration files such as `core-site.xml` using the optional field `.spec.hadoopConfigMap` whose value is the name of the ConfigMap. The ConfigMap is assumed to be in the same namespace as that of the `SparkApplication`. The operator mounts the ConfigMap onto path `/etc/hadoop/conf` in both the driver and executors, and also sets the environment variable `HADOOP_CONF_DIR` to point to `/etc/hadoop/conf` in the driver and executors.
-
-Note that the mutating admission webhook is needed to use this feature. Please refer to the [Quick Start Guide](quick-start-guide.md) on how to enable the mutating admission webhook.
-
-### Mounting Volumes
-
-The operator also supports mounting user-specified Kubernetes volumes into the driver and executors. A `SparkApplication` has an optional field `.spec.volumes` for specifying the list of [volumes](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.9/#volume-v1-core) the driver and the executors need collectively. Both the driver and executor specifications then have an optional field `volumeMounts` that specifies the [volume mounts](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.9/#volumemount-v1-core) for the volumes needed by the driver and executors, respectively. The following is an example showing a `SparkApplication` with both driver and executor volume mounts.
-
-```yaml
-spec:
-  volumes:
-    - name: spark-data
-      persistentVolumeClaim:
-        claimName: my-pvc
-    - name: spark-work
-      emptyDir:
-        sizeLimit: 5Gi
-  driver:
-    volumeMounts:
-      - name: spark-work
-        mountPath: /mnt/spark/work
-  executor:
-    volumeMounts:
-      - name: spark-data
-        mountPath: /mnt/spark/data
-      - name: spark-work
-        mountPath: /mnt/spark/work
-```
-
-Note that the mutating admission webhook is needed to use this feature. Please refer to the [Quick Start Guide](quick-start-guide.md) on how to enable the mutating admission webhook.
-
-### Using Secrets As Environment Variables
-
-**Note: `envSecretKeyRefs` is deprecated and will be removed in a future API version.**
-
-A `SparkApplication` can use [secrets as environment variables](https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets-as-environment-variables), through the optional field `.spec.driver.envSecretKeyRefs` for the driver pod and the optional field
-`.spec.executor.envSecretKeyRefs` for the executor pods. An `envSecretKeyRefs` is a map from environment variable names to pairs consisting of a secret name and a secret key. Below is an example:
-
-```yaml
-spec:
-  driver:
-    envSecretKeyRefs:
-      SECRET_USERNAME:
-        name: mysecret
-        key: username
-      SECRET_PASSWORD:
-        name: mysecret
-        key: password
-```
-
-### Using Image Pull Secrets
-
-**Note that this feature requires an image based on the latest Spark master branch.**
-
-For images that need image-pull secrets to be pulled, a `SparkApplication` has an optional field `.spec.imagePullSecrets` for specifying a list of image-pull secrets. Below is an example:
-
-```yaml
-spec:
-  imagePullSecrets:
-    - secret1
-    - secret2
-```
-
-### Using Pod Affinity
-
-A `SparkApplication` can specify an `Affinity` for the driver or executor pod, using the optional field `.spec.driver.affinity` or `.spec.executor.affinity`. Below is an example:
-
-```yaml
-spec:
-  driver:
-    affinity:
-      podAffinity:
-        requiredDuringSchedulingIgnoredDuringExecution:
-        ...
-  executor:
-    affinity:
-      podAntiAffinity:
-        requiredDuringSchedulingIgnoredDuringExecution:
-        ...
-```
-
-Note that the mutating admission webhook is needed to use this feature. Please refer to the [Quick Start Guide](quick-start-guide.md) on how to enable the mutating admission webhook.
-
-### Using Tolerations
-
-A `SparkApplication` can specify `Tolerations` for the driver or executor pod, using the optional field `.spec.driver.tolerations` or `.spec.executor.tolerations`. Below is an example:
-
-```yaml
-spec:
-  driver:
-    tolerations:
-      - key: Key
-        operator: Exists
-        effect: NoSchedule
-  executor:
-    tolerations:
-      - key: Key
-        operator: Equal
-        value: Value
-        effect: NoSchedule
-```
-
-Note that the mutating admission webhook is needed to use this feature. Please refer to the
-[Quick Start Guide](quick-start-guide.md) on how to enable the mutating admission webhook.
-
-### Using Security Context
-
-A `SparkApplication` can specify a `SecurityContext` for the driver or executor containers, using the optional field `.spec.driver.securityContext` or `.spec.executor.securityContext`.
-A `SparkApplication` can also specify a `PodSecurityContext` for the driver or executor pod, using the optional field `.spec.driver.podSecurityContext` or `.spec.executor.podSecurityContext`. Below is an example:
-
-```yaml
-spec:
-  driver:
-    podSecurityContext:
-      runAsUser: 1000
-    securityContext:
-      allowPrivilegeEscalation: false
-      runAsUser: 2000
-  executor:
-    podSecurityContext:
-      runAsUser: 1000
-    securityContext:
-      allowPrivilegeEscalation: false
-      runAsUser: 2000
-```
-
-Note that the mutating admission webhook is needed to use this feature. Please refer to the
-[Quick Start Guide](quick-start-guide.md) on how to enable the mutating admission webhook.
-
-### Using Sidecar Containers
-
-A `SparkApplication` can specify one or more optional sidecar containers for the driver or executor pod, using the optional field `.spec.driver.sidecars` or `.spec.executor.sidecars`. The specification of each sidecar container follows the [Container](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.14/#container-v1-core) API definition. Below is an example:
-
-```yaml
-spec:
-  driver:
-    sidecars:
-      - name: "sidecar1"
-        image: "sidecar1:latest"
-        ...
-  executor:
-    sidecars:
-      - name: "sidecar1"
-        image: "sidecar1:latest"
-        ...
-```
-
-Note that the mutating admission webhook is needed to use this feature. Please refer to the
-[Quick Start Guide](quick-start-guide.md) on how to enable the mutating admission webhook.
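-As a more concrete sketch of the sidecar feature above, the snippet below runs a hypothetical log-forwarding sidecar next to the driver; the container name and volume name are placeholders, and the `spark-logs` volume is assumed to be declared under `.spec.volumes`:
-
-```yaml
-spec:
-  driver:
-    sidecars:
-      - name: log-forwarder              # hypothetical sidecar name
-        image: fluent/fluent-bit:latest  # any log shipper image works here
-        volumeMounts:
-          - name: spark-logs             # assumed to be declared in .spec.volumes
-            mountPath: /var/log/spark
-```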
-### Using Init-Containers
-
-A `SparkApplication` can optionally specify one or more [init-containers](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) for the driver or executor pod, using the optional field `.spec.driver.initContainers` or `.spec.executor.initContainers`, respectively. The specification of each init-container follows the [Container](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.14/#container-v1-core) API definition. Below is an example:
-
-```yaml
-spec:
-  driver:
-    initContainers:
-      - name: "init-container1"
-        image: "init-container1:latest"
-        ...
-  executor:
-    initContainers:
-      - name: "init-container1"
-        image: "init-container1:latest"
-        ...
-```
-
-Note that the mutating admission webhook is needed to use this feature. Please refer to the
-[Quick Start Guide](quick-start-guide.md) on how to enable the mutating admission webhook.
-
-### Using DNS Settings
-
-A `SparkApplication` can define DNS settings for the driver and/or executor pod by adding the standard Kubernetes [DNS](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-config) settings. The fields for this configuration are `.spec.driver.dnsConfig` and `.spec.executor.dnsConfig`. Example:
-
-```yaml
-spec:
-  driver:
-    dnsConfig:
-      nameservers:
-        - 1.2.3.4
-      searches:
-        - ns1.svc.cluster.local
-        - my.dns.search.suffix
-      options:
-        - name: ndots
-          value: "2"
-        - name: edns0
-```
-
-Note that the mutating admission webhook is needed to use this feature. Please refer to the
-[Quick Start Guide](quick-start-guide.md) on how to enable the mutating admission webhook.
-
-### Using Volume For Scratch Space
-
-By default, Spark uses temporary scratch space to spill data to disk during shuffles and other operations. The scratch directory defaults to `/tmp` of the container. If that storage isn't enough, or if you want to use a specific path, you can use one or more volumes. The volume names should start with `spark-local-dir-`.
-
-```yaml
-spec:
-  volumes:
-    - name: "spark-local-dir-1"
-      hostPath:
-        path: "/tmp/spark-local-dir"
-  executor:
-    volumeMounts:
-      - name: "spark-local-dir-1"
-        mountPath: "/tmp/spark-local-dir"
-    ...
-```
-
-The pod then gets `SPARK_LOCAL_DIRS` set to `/tmp/spark-local-dir`, as shown below.
-
-```yaml
-Environment:
-  SPARK_USER:                 root
-  SPARK_DRIVER_BIND_ADDRESS:  (v1:status.podIP)
-  SPARK_LOCAL_DIRS:           /tmp/spark-local-dir
-  SPARK_CONF_DIR:             /opt/spark/conf
-```
-
-> Note: Multiple volumes can be used together.
-
-```yaml
-spec:
-  volumes:
-    - name: "spark-local-dir-1"
-      hostPath:
-        path: "/mnt/dir1"
-    - name: "spark-local-dir-2"
-      hostPath:
-        path: "/mnt/dir2"
-  executor:
-    volumeMounts:
-      - name: "spark-local-dir-1"
-        mountPath: "/tmp/dir1"
-      - name: "spark-local-dir-2"
-        mountPath: "/tmp/dir2"
-    ...
-```
-
-> Note: Besides `hostPath`, `persistentVolumeClaim` can be used as well.
-
-```yaml
-spec:
-  volumes:
-    - name: "spark-local-dir-1"
-      persistentVolumeClaim:
-        claimName: network-file-storage
-  executor:
-    volumeMounts:
-      - name: "spark-local-dir-1"
-        mountPath: "/tmp/dir1"
-```
-
-### Using Termination Grace Period
-
-A Spark application can optionally specify a termination grace period, in seconds, for the driver and executor pods. See [Termination of Pods](https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods) for more information.
-
-```yaml
-spec:
-  driver:
-    terminationGracePeriodSeconds: 60
-```
-
-### Using Container LifeCycle Hooks
-
-A Spark application can optionally specify [Container Lifecycle Hooks](https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks) for the driver. This is useful in cases where you need PreStop or PostStart hooks on the driver and executor.
-
-```yaml
-spec:
-  driver:
-    lifecycle:
-      preStop:
-        exec:
-          command:
-            - /bin/bash
-            - -c
-            - touch /var/run/killspark && sleep 65
-```
-
-In cases like Spark Streaming or Spark Structured Streaming applications, you can test whether a file exists to start a graceful shutdown and stop all streaming queries manually.
-
-### Python Support
-
-Python support can be enabled by setting `.spec.mainApplicationFile` to the path of your Python application. Optionally, the `.spec.pythonVersion` field can be used to set the major Python version of the docker image used to run the driver and executor containers. Below is an example showing part of a `SparkApplication` specification:
-
-```yaml
-spec:
-  type: Python
-  pythonVersion: 2
-  mainApplicationFile: local:///opt/spark/examples/src/main/python/pyfiles.py
-```
-
-Some PySpark applications need additional Python packages to run. Such dependencies are specified using the optional field `.spec.deps.pyFiles`, which translates to the `--py-files` option of the spark-submit command.
-
-```yaml
-spec:
-  deps:
-    pyFiles:
-      - local:///opt/spark/examples/src/main/python/py_container_checks.py
-      - gs://spark-data/python-dep.zip
-```
-
-In order to use the dependencies that are hosted remotely, the following PySpark code can be used in Spark 2.4 (it assumes an existing `SparkSession` named `spark`):
-
-```python
-from pyspark import SparkFiles
-
-# Resolve the local path of the downloaded archive and register it with the context.
-python_dep_file_path = SparkFiles.get("python-dep.zip")
-spark.sparkContext.addPyFile(python_dep_file_path)
-```
-
-Note that the Python binding for PySpark is available in Apache Spark 2.4.
-
-### Monitoring
-
-The operator supports using the Spark metric system to expose metrics to a variety of sinks. In particular, it is able to automatically configure the metric system to expose metrics to [Prometheus](https://prometheus.io/). Specifically, the field `.spec.monitoring` specifies how application monitoring is handled, and particularly how metrics are to be reported. The metric system is configured through the configuration file `metrics.properties`, which gets its content from the field `.spec.monitoring.metricsProperties`. The content of [metrics.properties](../spark-docker/conf/metrics.properties) will be used by default if `.spec.monitoring.metricsProperties` is not specified. `.spec.monitoring.metricsPropertiesFile` overrides the value of `spark.metrics.conf` in spark.properties, in which case the content of `.spec.monitoring.metricsProperties` is not used. You can choose to enable or disable reporting driver and executor metrics using the fields `.spec.monitoring.exposeDriverMetrics` and `.spec.monitoring.exposeExecutorMetrics`, respectively.
-
-Further, the field `.spec.monitoring.prometheus` specifies how metrics are exposed to Prometheus using the [Prometheus JMX exporter](https://github.com/prometheus/jmx_exporter). When `.spec.monitoring.prometheus` is specified, the operator automatically configures the JMX exporter to run as a Java agent. The only required field of `.spec.monitoring.prometheus` is `jmxExporterJar`, which specifies the path to the Prometheus JMX exporter Java agent jar in the container. If you use the image `gcr.io/spark-operator/spark:v3.1.1-gcs-prometheus`, the jar is located at `/prometheus/jmx_prometheus_javaagent-0.11.0.jar`. The field `.spec.monitoring.prometheus.port` specifies the port the JMX exporter Java agent binds to, and defaults to `8090` if not specified. The field `.spec.monitoring.prometheus.configuration` specifies the content of the configuration to be used with the JMX exporter. The content of [prometheus.yaml](../spark-docker/conf/prometheus.yaml) will be used by default if `.spec.monitoring.prometheus.configuration` is not specified.
-
-Below is an example that shows how to configure the metric system to expose metrics to Prometheus using the Prometheus JMX exporter. Note that the JMX exporter Java agent jar is listed as a dependency and will be downloaded to where `.spec.dep.jarsDownloadDir` points to in Spark 2.3.x, which is `/var/spark-data/spark-jars` by default. In Spark 2.4, dependencies are instead downloaded to the local working directory. A complete example can be found in [examples/spark-pi-prometheus.yaml](../examples/spark-pi-prometheus.yaml).
-
-```yaml
-spec:
-  deps:
-    jars:
-      - http://central.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.11.0/jmx_prometheus_javaagent-0.11.0.jar
-  monitoring:
-    exposeDriverMetrics: true
-    prometheus:
-      jmxExporterJar: "/var/spark-data/spark-jars/jmx_prometheus_javaagent-0.11.0.jar"
-```
-
-The operator automatically adds annotations such as `prometheus.io/scrape=true` on the driver and/or executor pods (depending on the values of `.spec.monitoring.exposeDriverMetrics` and `.spec.monitoring.exposeExecutorMetrics`) so the metrics exposed on the pods can be scraped by the Prometheus server in the same cluster.
-
-### Dynamic Allocation
-
-The operator supports a limited form of [Spark Dynamic Resource Allocation](http://spark.apache.org/docs/latest/job-scheduling.html#dynamic-resource-allocation) through the shuffle tracking enhancement introduced in Spark 3.0.0, *without needing an external shuffle service* (which is not available in the Kubernetes mode). See this [issue](https://issues.apache.org/jira/browse/SPARK-27963) for details on the enhancement. To enable this limited form of dynamic allocation, follow the example below:
-
-```yaml
-spec:
-  dynamicAllocation:
-    enabled: true
-    initialExecutors: 2
-    minExecutors: 2
-    maxExecutors: 10
-```
-
-Note that if dynamic allocation is enabled, the number of executors to request initially is set to the bigger of `.spec.dynamicAllocation.initialExecutors` and `.spec.executor.instances` if both are set.
-
-## Working with SparkApplications
-
-### Creating a New SparkApplication
-
-A `SparkApplication` can be created from a YAML file storing the `SparkApplication` specification using either the `kubectl apply -f <YAML file path>` command or the `sparkctl create <YAML file path>` command. Please refer to the `sparkctl` [README](../sparkctl/README.md#create) for usage of the `sparkctl create` command. Once a `SparkApplication` is successfully created, the operator receives it and submits the application, as configured in the specification, to run on the Kubernetes cluster.
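-For instance, assuming a manifest file named `spark-pi.yaml` (a hypothetical name) containing a `SparkApplication`, either of the following commands submits it:
-
-```bash
-kubectl apply -f spark-pi.yaml
-# or, equivalently, with sparkctl:
-sparkctl create spark-pi.yaml
-```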
-### Deleting a SparkApplication
-
-A `SparkApplication` can be deleted using either the `kubectl delete <name>` command or the `sparkctl delete <name>` command. Please refer to the `sparkctl` [README](../sparkctl/README.md#delete) for usage of the `sparkctl delete` command. Deleting a `SparkApplication` deletes the Spark application associated with it. If the application is running when the deletion happens, the application is killed and all Kubernetes resources associated with the application are deleted or garbage collected.
-
-### Updating a SparkApplication
-
-A `SparkApplication` can be updated using the `kubectl apply -f <updated YAML file>` command. When a `SparkApplication` is successfully updated, the operator receives both the updated and old `SparkApplication` objects. If the specification of the `SparkApplication` has changed, the operator submits the application to run, using the updated specification. If the application is currently running, the operator kills the running application before submitting a new run with the updated specification. There is planned work to enhance the way `SparkApplication` updates are handled. For example, if the change was to increase the number of executor instances, instead of killing the currently running application and starting a new run, it would be a much better user experience to incrementally launch the additional executor pods.
-
-### Checking a SparkApplication
-
-A `SparkApplication` can be checked using the `kubectl describe sparkapplications <name>` command. The output of the command shows the specification and status of the `SparkApplication` as well as events associated with it. The events communicate the overall process and errors of the `SparkApplication`.
-
-### Configuring Automatic Application Restart and Failure Handling
-
-The operator supports automatic application restart with a configurable `RestartPolicy` using the optional field `.spec.restartPolicy`. The following is an example of a sample `RestartPolicy`:
-
-```yaml
-  restartPolicy:
-    type: OnFailure
-    onFailureRetries: 3
-    onFailureRetryInterval: 10
-    onSubmissionFailureRetries: 5
-    onSubmissionFailureRetryInterval: 20
-```
-
-The valid types of restartPolicy include `Never`, `OnFailure`, and `Always`. Upon termination of an application, the operator determines if the application is subject to restart based on its termination state and the `RestartPolicy` in the specification. If the application is subject to restart, the operator restarts it by submitting a new run of it. For `OnFailure`, the operator further supports setting limits on the number of retries via the `onFailureRetries` and `onSubmissionFailureRetries` fields. Additionally, if the submission retry limit has not been reached, the operator retries submitting the application using a linear backoff with the interval specified by `onFailureRetryInterval` and `onSubmissionFailureRetryInterval`, which are required for both the `OnFailure` and `Always` `RestartPolicy` types. Old resources like the driver pod and UI service/ingress are deleted if they still exist before submitting the new run, and a new driver pod is created by the submission client, so effectively the driver gets restarted.
-
-### Setting TTL for a SparkApplication
-
-The `v1beta2` version of the `SparkApplication` API supports TTL for `SparkApplication`s through a new optional field named `.spec.timeToLiveSeconds`, which, if set, defines the Time-To-Live (TTL) duration in seconds for a `SparkApplication` after its termination. The `SparkApplication` object will be garbage collected if the current time is more than `.spec.timeToLiveSeconds` past its termination time. The example below illustrates how to use the field:
-
-```yaml
-spec:
-  timeToLiveSeconds: 3600
-```
-
-Note that this feature requires informer cache resync to be enabled, which is true by default with a resync interval of 30 seconds. You can change the resync interval by setting the flag `-resync-interval=<interval>`.
-
-## Running Spark Applications on a Schedule using a ScheduledSparkApplication
-
-The operator supports running a Spark application on a standard [cron](https://en.wikipedia.org/wiki/Cron) schedule using objects of the `ScheduledSparkApplication` custom resource type. A `ScheduledSparkApplication` object specifies a cron schedule on which the application should run and a `SparkApplication` template from which a `SparkApplication` object for each run of the application is created. The following is an example `ScheduledSparkApplication`:
-
-```yaml
-apiVersion: "sparkoperator.k8s.io/v1beta2"
-kind: ScheduledSparkApplication
-metadata:
-  name: spark-pi-scheduled
-  namespace: default
-spec:
-  schedule: "@every 5m"
-  concurrencyPolicy: Allow
-  successfulRunHistoryLimit: 1
-  failedRunHistoryLimit: 3
-  template:
-    type: Scala
-    mode: cluster
-    image: gcr.io/spark/spark:v3.1.1
-    mainClass: org.apache.spark.examples.SparkPi
-    mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.1.1.jar
-    driver:
-      cores: 1
-      memory: 512m
-    executor:
-      cores: 1
-      instances: 1
-      memory: 512m
-    restartPolicy:
-      type: Never
-```
-
-The concurrency of runs of an application is controlled by `.spec.concurrencyPolicy`, whose valid values are `Allow`, `Forbid`, and `Replace`, with `Allow` being the default. The meaning of each value is described below:
-* `Allow`: more than one run of an application is allowed, e.g., if the next run of the application is due even though the previous run has not completed yet.
-* `Forbid`: no more than one run of an application is allowed. The next run of the application can only start if the previous run has completed.
-* `Replace`: no more than one run of an application is allowed. When the next run of the application is due, the previous run is killed and the next run starts as a replacement.
-
-A `ScheduledSparkApplication` can be temporarily suspended (so that no future scheduled runs of the application are triggered) by setting `.spec.suspend` to `true`. The schedule can be resumed by removing `.spec.suspend` or setting it to `false`. A `ScheduledSparkApplication` can have the names of `SparkApplication` objects for past runs of the application tracked in the `Status` section, as discussed below. The numbers of past successful runs and past failed runs to keep track of are controlled by the fields `.spec.successfulRunHistoryLimit` and `.spec.failedRunHistoryLimit`, respectively. The example above allows 1 past successful run and 3 past failed runs to be tracked.
-
-The `Status` section of a `ScheduledSparkApplication` object shows the time of the last run and the proposed time of the next run of the application, through `.status.lastRun` and `.status.nextRun`, respectively. The name of the `SparkApplication` object for the most recent run (which may or may not be running) of the application is stored in `.status.lastRunName`. The names of `SparkApplication` objects of the past successful runs of the application are stored in `.status.pastSuccessfulRunNames`. Similarly, the names of `SparkApplication` objects of the past failed runs of the application are stored in `.status.pastFailedRunNames`.
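-For instance, a minimal sketch for suspending the `spark-pi-scheduled` application defined above (only the relevant fields are shown):
-
-```yaml
-spec:
-  schedule: "@every 5m"
-  suspend: true
-```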
-Note that certain restart policies (specified in `.spec.template.restartPolicy`) may not work well with the specified schedule and concurrency policy of a `ScheduledSparkApplication`. For example, a restart policy of `Always` should never be used with a `ScheduledSparkApplication`. In most cases, a restart policy of `OnFailure` is also not a good choice, as the next run usually picks up where the previous run left off anyway. For these reasons, it's often the right choice to use a restart policy of `Never`, as the example above shows.
-
-## Enabling Leader Election for High Availability
-
-The operator supports a high-availability (HA) mode, in which there can be more than one replica of the operator, with only one of the replicas (the leader replica) actively operating. If the leader replica fails, the leader election process is engaged again to determine a new leader from the replicas available. Leader election, and hence the HA mode, is disabled by default but can be enabled via a command-line flag. The following table summarizes the command-line flags relevant to leader election:
-
-| Flag | Default Value | Description |
-| ------------- | ------------- | ------------- |
-| `leader-election` | `false` | Whether to enable leader election (or the HA mode) or not. |
-| `leader-election-lock-namespace` | `spark-operator` | Kubernetes namespace of the lock resource used for leader election. |
-| `leader-election-lock-name` | `spark-operator-lock` | Name of the lock resource used for leader election. |
-| `leader-election-lease-duration` | 15 seconds | Leader election lease duration. |
-| `leader-election-renew-deadline` | 14 seconds | Leader election renew deadline. |
-| `leader-election-retry-period` | 4 seconds | Leader election retry period. |
-
-## Enabling Resource Quota Enforcement
-
-The Spark Operator provides limited support for resource quota enforcement using a validating webhook. It counts the resources of non-terminal-phase SparkApplications and Pods, and determines whether a requested SparkApplication will fit given the remaining resources. ResourceQuota scope selectors are not supported; any ResourceQuota object that does not match the entire namespace will be ignored. Like the native Pod quota enforcement, current usage is updated asynchronously, so some overscheduling is possible.
-
-If you are running Spark applications in namespaces that are subject to resource quota constraints, consider enabling this feature to avoid driver resource starvation. Quota enforcement can be enabled with the command-line argument `-enable-resource-quota-enforcement=true`. It is recommended to also set `-webhook-fail-on-error=true`.
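-For example, assuming the operator runs as a Deployment whose container accepts these flags, the relevant container arguments might look like the following sketch (the surrounding Deployment fields are omitted):
-
-```yaml
-args:
-  - -enable-resource-quota-enforcement=true
-  - -webhook-fail-on-error=true
-```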
-## Running Multiple Instances Of The Operator Within The Same K8s Cluster
-
-If you need to run multiple instances of the operator within the same Kubernetes cluster, you need to make sure that the running instances do not compete for the same custom resources or pods. You can achieve this in one of two ways.
-
-Either:
-* Specify a different `namespace` flag for each instance of the operator.
-
-Or, if you want your operator to watch specific resources that may exist in different namespaces:
-
-* Add custom labels to resources by defining a different set of labels in `-label-selector-filter (e.g. env=dev,app-type=spark)` for each instance of the operator.
-* Run different `webhook` instances by specifying a different `-webhook-config-name` flag for each deployment of the operator.
-* Specify a different `webhook-svc-name` and/or `webhook-svc-namespace` for each instance of the operator.
-* Edit the `webhook-init` job that generates the certificates by specifying the namespace and the service name of each instance of the operator, e.g. `command: ["/usr/bin/gencerts.sh", "-n", "ns-op1", "-s", "spark-op1-webhook", "-p"]`, where `spark-op1-webhook` should match what you have specified in `webhook-svc-name`. For instance, if you use the following [helm chart](https://github.com/helm/charts/tree/master/incubator/sparkoperator) to deploy the operator, you may specify different `--namespace` and `--name-template` arguments for each instance of the operator to make sure a different certificate is generated for each instance, e.g.:
-```
-helm install spark-op1 incubator/sparkoperator --namespace ns-op1
-helm install spark-op2 incubator/sparkoperator --namespace ns-op2
-```
-This will run two `webhook-init` jobs, each executing one of the following commands, respectively:
-```
-command: ["/usr/bin/gencerts.sh", "-n", "ns-op1", "-s", "spark-op1-webhook", "-p"]
-command: ["/usr/bin/gencerts.sh", "-n", "ns-op2", "-s", "spark-op2-webhook", "-p"]
-```
-
-* Although resources are already filtered with respect to the labels specified on them, you may also specify different labels in `-webhook-namespace-selector` and attach these labels to the namespaces that you want the webhook to listen to.
-
-## Customizing the Operator
-
-To customize the operator, you can follow the steps below:
-
-1. Compile the Spark distribution with Kubernetes support as per the [Spark documentation](https://spark.apache.org/docs/latest/building-spark.html#building-with-kubernetes-support).
-2. Create docker images to be used for Spark with the [docker-image tool](https://spark.apache.org/docs/latest/running-on-kubernetes.html#docker-images).
-3. Create a new operator image based on the above image. You need to modify the `FROM` tag in the [Dockerfile](https://github.com/kubeflow/spark-operator/blob/master/Dockerfile) with your Spark image.
-4. Build and push the operator image built above.
-5. Deploy the new image by modifying the [/manifest/spark-operator-install/spark-operator.yaml](https://github.com/kubeflow/spark-operator/blob/master/manifest/spark-operator-install/spark-operator.yaml) file and specifying your operator image.
diff --git a/docs/volcano-integration.md b/docs/volcano-integration.md
deleted file mode 100644
index 7d67276a94..0000000000
--- a/docs/volcano-integration.md
+++ /dev/null
@@ -1,94 +0,0 @@
-# Integration with Volcano for Batch Scheduling
-
-[Volcano](https://github.com/volcano-sh/volcano) is a batch system built on Kubernetes. It provides a suite of mechanisms currently missing from Kubernetes that are commonly required by many classes of batch & elastic workloads. With the Volcano integration, Spark application pods can be scheduled with better scheduling efficiency.
-
-# Requirements
-
-## Volcano components
-
-Before using the Kubernetes Operator for Apache Spark with Volcano enabled, users need to ensure Volcano has been successfully installed in the same environment; please refer to the Volcano [Quick Start Guide](https://github.com/volcano-sh/volcano#quick-start-guide) for Volcano installation.
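-A quick sanity check, assuming Volcano was installed into its default `volcano-system` namespace (adjust if your installation differs):
-
-```bash
-kubectl get pods -n volcano-system
-```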
-## Install Kubernetes Operator for Apache Spark with Volcano enabled
-
-With the help of the Helm chart, the Kubernetes Operator for Apache Spark with Volcano can be easily installed with the commands below:
-```bash
-$ helm repo add spark-operator https://kubeflow.github.io/spark-operator
-$ helm install my-release spark-operator/spark-operator --namespace spark-operator --set batchScheduler.enable=true --set webhook.enable=true
-```
-
-# Run Spark Application with Volcano scheduler
-
-Now we can run an updated version of a Spark application (with `batchScheduler` configured), for instance:
-```yaml
-apiVersion: "sparkoperator.k8s.io/v1beta2"
-kind: SparkApplication
-metadata:
-  name: spark-pi
-  namespace: default
-spec:
-  type: Scala
-  mode: cluster
-  image: "gcr.io/spark-operator/spark:v3.1.1"
-  imagePullPolicy: Always
-  mainClass: org.apache.spark.examples.SparkPi
-  mainApplicationFile: "local:///opt/spark/examples/jars/spark-examples_2.12-3.1.1.jar"
-  sparkVersion: "3.1.1"
-  batchScheduler: "volcano"   # Note: the batch scheduler name must be specified as `volcano`
-  restartPolicy:
-    type: Never
-  volumes:
-    - name: "test-volume"
-      hostPath:
-        path: "/tmp"
-        type: Directory
-  driver:
-    cores: 1
-    coreLimit: "1200m"
-    memory: "512m"
-    labels:
-      version: 3.1.1
-    serviceAccount: spark
-    volumeMounts:
-      - name: "test-volume"
-        mountPath: "/tmp"
-  executor:
-    cores: 1
-    instances: 1
-    memory: "512m"
-    labels:
-      version: 3.1.1
-    volumeMounts:
-      - name: "test-volume"
-        mountPath: "/tmp"
-```
-When the application is running, the pod events can be used to verify whether the pods have been scheduled via Volcano:
-```
-Type    Reason     Age   From     Message
-----    ------     ----  ----     -------
-Normal  Scheduled  23s   volcano  Successfully assigned default/spark-pi-driver to integration-worker2
-```
-
-# Technical Details
-
-If a SparkApplication is configured to run with Volcano, there are some details under the hood that make the two systems integrate:
-
-1. The Kubernetes Operator for Apache Spark's webhook patches the pods' `schedulerName` according to the `batchScheduler` in the SparkApplication spec.
-2. Before submitting the Spark application, the Kubernetes Operator for Apache Spark creates a Volcano-native resource, [`PodGroup`](https://github.com/volcano-sh/volcano/blob/a8fb05ce6c6902e366cb419d6630d66fc759121e/pkg/apis/scheduling/v1alpha2/types.go#L93), for the whole application. As a brief introduction, most of Volcano's advanced scheduling features, such as pod delay creation, resource fairness, and gang scheduling, all depend on this resource. A new pod annotation named `scheduling.k8s.io/group-name` is also added.
-3. The Volcano scheduler takes over all of the pods that have both the `schedulerName` and the annotation correctly configured for scheduling.
-
-The Kubernetes Operator for Apache Spark enables end users to have fine-grained control over batch scheduling via the attribute `BatchSchedulerOptions`. `BatchSchedulerOptions` is a string dictionary that different batch schedulers can use to expose different attributes. For now, Volcano supports the attributes below:
-
-| Name | Description | Example |
-|-------|----------------------------------------------------------------------------|----------------------------------------------------------------|
-| queue | Used to specify which Volcano queue this Spark application belongs to | batchSchedulerOptions:<br>    queue: "queue1" |
-| priorityClassName | Used to specify which priorityClass this Spark application will use | batchSchedulerOptions:<br>    priorityClassName: "pri1" |

From b8c901397c042dbaaa8ff5ab15d16181b4a325e7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Carlos=20S=C3=A1nchez=20P=C3=A1ez?=
Date: Wed, 3 Jul 2024 09:35:46 +0200
Subject: [PATCH 74/87] Add PodDisruptionBudget to chart (#2078)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Add PodDisruptionBudget to chart

Signed-off-by: Carlos Sánchez Páez
Signed-off-by: Carlos Sánchez Páez
Signed-off-by: Carlos Sánchez Páez

* PR comments

Signed-off-by: Carlos Sánchez Páez

---------

Signed-off-by: Carlos Sánchez Páez
Signed-off-by: Carlos Sánchez Páez
---
 charts/spark-operator-chart/Chart.yaml        |   2 +-
 charts/spark-operator-chart/README.md         | 124 +++++++++---------
 .../templates/poddisruptionbudget.yaml        |  17 +++
 .../tests/poddisruptionbudget_test.yaml       |  39 ++++++
 charts/spark-operator-chart/values.yaml       |   9 ++
 5 files changed, 129 insertions(+), 62 deletions(-)
 create mode 100644 charts/spark-operator-chart/templates/poddisruptionbudget.yaml
 create mode 100644 charts/spark-operator-chart/tests/poddisruptionbudget_test.yaml

diff --git a/charts/spark-operator-chart/Chart.yaml b/charts/spark-operator-chart/Chart.yaml
index 417418170a..b735a995c7 100644
--- a/charts/spark-operator-chart/Chart.yaml
+++ b/charts/spark-operator-chart/Chart.yaml
@@ -1,7 +1,7 @@
 apiVersion: v2
 name: spark-operator
 description: A Helm chart for Spark on Kubernetes operator
-version: 1.4.2
+version: 1.4.3
 appVersion: v1beta2-1.6.1-3.5.0
 keywords:
 - spark
diff --git a/charts/spark-operator-chart/README.md b/charts/spark-operator-chart/README.md
index 8dfe591d01..a5a9c1bdc8 100644
--- a/charts/spark-operator-chart/README.md
+++ b/charts/spark-operator-chart/README.md
@@ -77,67 +77,69 @@ See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall) for command documentation.
 
 ## Values
 
-| Key | Type | Default | Description |
-|-----|------|---------|-------------|
-| affinity | object | `{}` | Affinity for pod assignment |
-| batchScheduler.enable | bool | `false` | Enable batch scheduler for spark jobs scheduling. If enabled, users can specify batch scheduler name in spark application |
-| commonLabels | object | `{}` | Common labels to add to the resources |
-| controllerThreads | int | `10` | Operator concurrency, higher values might increase memory usage |
-| envFrom | list | `[]` | Pod environment variable sources |
-| fullnameOverride | string | `""` | String to override release name |
-| image.pullPolicy | string | `"IfNotPresent"` | Image pull policy |
-| image.repository | string | `"docker.io/kubeflow/spark-operator"` | Image repository |
-| image.tag | string | `""` | if set, override the image tag whose default is the chart appVersion. |
-| imagePullSecrets | list | `[]` | Image pull secrets |
-| ingressUrlFormat | string | `""` | Ingress URL format. Requires the UI service to be enabled by setting `uiService.enable` to true. |
-| istio.enabled | bool | `false` | When using `istio`, spark jobs need to run without a sidecar to properly terminate |
-| labelSelectorFilter | string | `""` | A comma-separated list of key=value, or key labels to filter resources during watch and list based on the specified labels. |
-| leaderElection.lockName | string | `"spark-operator-lock"` | Leader election lock name. Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-leader-election-for-high-availability. |
-| leaderElection.lockNamespace | string | `""` | Optionally store the lock in another namespace. Defaults to operator's namespace |
-| logLevel | int | `2` | Set higher levels for more verbose logging |
-| metrics.enable | bool | `true` | Enable prometheus metric scraping |
-| metrics.endpoint | string | `"/metrics"` | Metrics serving endpoint |
-| metrics.port | int | `10254` | Metrics port |
-| metrics.portName | string | `"metrics"` | Metrics port name |
-| metrics.prefix | string | `""` | Metric prefix, will be added to all exported metrics |
-| nameOverride | string | `""` | String to partially override `spark-operator.fullname` template (will maintain the release name) |
-| nodeSelector | object | `{}` | Node labels for pod assignment |
-| podAnnotations | object | `{}` | Additional annotations to add to the pod |
-| podLabels | object | `{}` | Additional labels to add to the pod |
-| podMonitor | object | `{"enable":false,"jobLabel":"spark-operator-podmonitor","labels":{},"podMetricsEndpoint":{"interval":"5s","scheme":"http"}}` | Prometheus pod monitor for operator's pod. |
-| podMonitor.enable | bool | `false` | If enabled, a pod monitor for operator's pod will be submitted. Note that prometheus metrics should be enabled as well. |
-| podMonitor.jobLabel | string | `"spark-operator-podmonitor"` | The label to use to retrieve the job name from |
-| podMonitor.labels | object | `{}` | Pod monitor labels |
-| podMonitor.podMetricsEndpoint | object | `{"interval":"5s","scheme":"http"}` | Prometheus metrics endpoint properties. `metrics.portName` will be used as a port |
-| podSecurityContext | object | `{}` | Pod security context |
-| priorityClassName | string | `""` | A priority class to be used for running spark-operator pod. |
-| rbac.annotations | object | `{}` | Optional annotations for rbac |
-| rbac.create | bool | `false` | **DEPRECATED** use `createRole` and `createClusterRole` |
-| rbac.createClusterRole | bool | `true` | Create and use RBAC `ClusterRole` resources |
-| rbac.createRole | bool | `true` | Create and use RBAC `Role` resources |
-| replicaCount | int | `1` | Desired number of pods, leaderElection will be enabled if this is greater than 1 |
-| resourceQuotaEnforcement.enable | bool | `false` | Whether to enable the ResourceQuota enforcement for SparkApplication resources. Requires the webhook to be enabled by setting `webhook.enable` to true. Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-resource-quota-enforcement. |
-| resources | object | `{}` | Pod resource requests and limits Note, that each job submission will spawn a JVM within the Spark Operator Pod using "/usr/local/openjdk-11/bin/java -Xmx128m". Kubernetes may kill these Java processes at will to enforce resource limits. When that happens, you will see the following error: 'failed to run spark-submit for SparkApplication [...]: signal: killed' - when this happens, you may want to increase memory limits. |
-| resyncInterval | int | `30` | Operator resync interval. Note that the operator will respond to events (e.g. create, update) unrelated to this setting |
-| securityContext | object | `{}` | Operator container security context |
-| serviceAccounts.spark.annotations | object | `{}` | Optional annotations for the spark service account |
-| serviceAccounts.spark.create | bool | `true` | Create a service account for spark apps |
-| serviceAccounts.spark.name | string | `""` | Optional name for the spark service account |
-| serviceAccounts.sparkoperator.annotations | object | `{}` | Optional annotations for the operator service account |
-| serviceAccounts.sparkoperator.create | bool | `true` | Create a service account for the operator |
-| serviceAccounts.sparkoperator.name | string | `""` | Optional name for the operator service account |
-| sidecars | list | `[]` | Sidecar containers |
-| sparkJobNamespaces | list | `[""]` | List of namespaces where to run spark jobs |
-| tolerations | list | `[]` | List of node taints to tolerate |
-| uiService.enable | bool | `true` | Enable UI service creation for Spark application |
-| volumeMounts | list | `[]` |  |
-| volumes | list | `[]` |  |
-| webhook.enable | bool | `false` | Enable webhook server |
-| webhook.namespaceSelector | string | `""` | The webhook server will only operate on namespaces with this label, specified in the form key1=value1,key2=value2. Empty string (default) will operate on all namespaces |
-| webhook.objectSelector | string | `""` | The webhook will only operate on resources with this label/s, specified in the form key1=value1,key2=value2, OR key in (value1,value2). Empty string (default) will operate on all objects |
-| webhook.port | int | `8080` | Webhook service port |
-| webhook.portName | string | `"webhook"` | Webhook container port name and service target port name |
-| webhook.timeout | int | `30` | The annotations applied to init job, required to restore certs deleted by the cleanup job during upgrade |
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| affinity | object | `{}` | Affinity for pod assignment |
+| batchScheduler.enable | bool | `false` | Enable batch scheduler for spark jobs scheduling. If enabled, users can specify batch scheduler name in spark application |
+| commonLabels | object | `{}` | Common labels to add to the resources |
+| controllerThreads | int | `10` | Operator concurrency, higher values might increase memory usage |
+| envFrom | list | `[]` | Pod environment variable sources |
+| fullnameOverride | string | `""` | String to override release name |
+| image.pullPolicy | string | `"IfNotPresent"` | Image pull policy |
+| image.repository | string | `"docker.io/kubeflow/spark-operator"` | Image repository |
+| image.tag | string | `""` | if set, override the image tag whose default is the chart appVersion. |
+| imagePullSecrets | list | `[]` | Image pull secrets |
+| ingressUrlFormat | string | `""` | Ingress URL format. Requires the UI service to be enabled by setting `uiService.enable` to true. |
+| istio.enabled | bool | `false` | When using `istio`, spark jobs need to run without a sidecar to properly terminate |
+| labelSelectorFilter | string | `""` | A comma-separated list of key=value, or key labels to filter resources during watch and list based on the specified labels. |
+| leaderElection.lockName | string | `"spark-operator-lock"` | Leader election lock name. Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-leader-election-for-high-availability. |
+| leaderElection.lockNamespace | string | `""` | Optionally store the lock in another namespace. Defaults to operator's namespace |
+| logLevel | int | `2` | Set higher levels for more verbose logging |
+| metrics.enable | bool | `true` | Enable prometheus metric scraping |
+| metrics.endpoint | string | `"/metrics"` | Metrics serving endpoint |
+| metrics.port | int | `10254` | Metrics port |
+| metrics.portName | string | `"metrics"` | Metrics port name |
+| metrics.prefix | string | `""` | Metric prefix, will be added to all exported metrics |
+| nameOverride | string | `""` | String to partially override `spark-operator.fullname` template (will maintain the release name) |
+| nodeSelector | object | `{}` | Node labels for pod assignment |
+| podDisruptionBudget.enable | bool | `false` | Whether to deploy a PodDisruptionBudget |
+| podDisruptionBudget.minAvailable | int | `1` | An eviction is allowed if at least "minAvailable" pods selected by "selector" will still be available after the eviction |
+| podAnnotations | object | `{}` | Additional annotations to add to the pod |
+| podLabels | object | `{}` | Additional labels to add to the pod |
+| podMonitor | object | `{"enable":false,"jobLabel":"spark-operator-podmonitor","labels":{},"podMetricsEndpoint":{"interval":"5s","scheme":"http"}}` | Prometheus pod monitor for operator's pod. |
+| podMonitor.enable | bool | `false` | If enabled, a pod monitor for operator's pod will be submitted. Note that prometheus metrics should be enabled as well. |
+| podMonitor.jobLabel | string | `"spark-operator-podmonitor"` | The label to use to retrieve the job name from |
+| podMonitor.labels | object | `{}` | Pod monitor labels |
+| podMonitor.podMetricsEndpoint | object | `{"interval":"5s","scheme":"http"}` | Prometheus metrics endpoint properties. `metrics.portName` will be used as a port |
+| podSecurityContext | object | `{}` | Pod security context |
+| priorityClassName | string | `""` | A priority class to be used for running spark-operator pod. |
+| rbac.annotations | object | `{}` | Optional annotations for rbac |
+| rbac.create | bool | `false` | **DEPRECATED** use `createRole` and `createClusterRole` |
+| rbac.createClusterRole | bool | `true` | Create and use RBAC `ClusterRole` resources |
+| rbac.createRole | bool | `true` | Create and use RBAC `Role` resources |
+| replicaCount | int | `1` | Desired number of pods, leaderElection will be enabled if this is greater than 1 |
+| resourceQuotaEnforcement.enable | bool | `false` | Whether to enable the ResourceQuota enforcement for SparkApplication resources. Requires the webhook to be enabled by setting `webhook.enable` to true. Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-resource-quota-enforcement. |
+| resources | object | `{}` | Pod resource requests and limits Note, that each job submission will spawn a JVM within the Spark Operator Pod using "/usr/local/openjdk-11/bin/java -Xmx128m". Kubernetes may kill these Java processes at will to enforce resource limits. When that happens, you will see the following error: 'failed to run spark-submit for SparkApplication [...]: signal: killed' - when this happens, you may want to increase memory limits. |
+| resyncInterval | int | `30` | Operator resync interval. Note that the operator will respond to events (e.g. create, update) unrelated to this setting |
+| securityContext | object | `{}` | Operator container security context |
+| serviceAccounts.spark.annotations | object | `{}` | Optional annotations for the spark service account |
+| serviceAccounts.spark.create | bool | `true` | Create a service account for spark apps |
+| serviceAccounts.spark.name | string | `""` | Optional name for the spark service account |
+| serviceAccounts.sparkoperator.annotations | object | `{}` | Optional annotations for the operator service account |
+| serviceAccounts.sparkoperator.create | bool | `true` | Create a service account for the operator |
+| serviceAccounts.sparkoperator.name | string | `""` | Optional name for the operator service account |
+| sidecars | list | `[]` | Sidecar containers |
+| sparkJobNamespaces | list | `[""]` | List of namespaces where to run spark jobs |
+| tolerations | list | `[]` | List of node taints to tolerate |
+| uiService.enable | bool | `true` | Enable UI service creation for Spark application |
+| volumeMounts | list | `[]` |  |
+| volumes | list | `[]` |  |
+| webhook.enable | bool | `false` | Enable webhook server |
+| webhook.namespaceSelector | string | `""` | The webhook server will only operate on namespaces with this label, specified in the form key1=value1,key2=value2. Empty string (default) will operate on all namespaces |
+| webhook.objectSelector | string | `""` | The webhook will only operate on resources with this label/s, specified in the form key1=value1,key2=value2, OR key in (value1,value2). Empty string (default) will operate on all objects |
+| webhook.port | int | `8080` | Webhook service port |
+| webhook.portName | string | `"webhook"` | Webhook container port name and service target port name |
+| webhook.timeout | int | `30` | The annotations applied to init job, required to restore certs deleted by the cleanup job during upgrade |
 
 ## Maintainers
diff --git a/charts/spark-operator-chart/templates/poddisruptionbudget.yaml b/charts/spark-operator-chart/templates/poddisruptionbudget.yaml
new file mode 100644
index 0000000000..317f8bdb9d
--- /dev/null
+++ b/charts/spark-operator-chart/templates/poddisruptionbudget.yaml
@@ -0,0 +1,17 @@
+{{- if $.Values.podDisruptionBudget.enable }}
+{{- if (gt (int $.Values.replicaCount) 1) }}
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+  name: {{ include "spark-operator.fullname" . }}-pdb
+  labels:
+    {{- include "spark-operator.labels" . | nindent 4 }}
+spec:
+  selector:
+    matchLabels:
+      {{- include "spark-operator.selectorLabels" . | nindent 6 }}
+  minAvailable: {{ $.Values.podDisruptionBudget.minAvailable }}
+{{- else }}
+{{- fail "replicaCount must be greater than 1 to enable PodDisruptionBudget" }}
+{{- end }}
+{{- end }}
diff --git a/charts/spark-operator-chart/tests/poddisruptionbudget_test.yaml b/charts/spark-operator-chart/tests/poddisruptionbudget_test.yaml
new file mode 100644
index 0000000000..3f702fd105
--- /dev/null
+++ b/charts/spark-operator-chart/tests/poddisruptionbudget_test.yaml
@@ -0,0 +1,39 @@
+suite: Test spark operator podDisruptionBudget
+
+templates:
+  - poddisruptionbudget.yaml
+
+release:
+  name: spark-operator
+
+tests:
+  - it: Should not render spark operator podDisruptionBudget if podDisruptionBudget.enable is false
+    set:
+      podDisruptionBudget:
+        enable: false
+    asserts:
+      - hasDocuments:
+          count: 0
+
+  - it: Should render spark operator podDisruptionBudget if podDisruptionBudget.enable is true
+    set:
+      replicaCount: 2
+      podDisruptionBudget:
+        enable: true
+    documentIndex: 0
+    asserts:
+      - containsDocument:
+          apiVersion: policy/v1
+          kind: PodDisruptionBudget
+          name: spark-operator-pdb
+
+  - it: Should set minAvailable from values
+    set:
+      replicaCount: 2
+      podDisruptionBudget:
+        enable: true
+        minAvailable: 3
+    asserts:
+      - equal:
+          path: spec.minAvailable
+          value: 3
diff --git a/charts/spark-operator-chart/values.yaml b/charts/spark-operator-chart/values.yaml
index d9f63b6454..6eefe666bf 100644
--- a/charts/spark-operator-chart/values.yaml
+++ b/charts/spark-operator-chart/values.yaml
@@ -134,6 +134,15 @@ podMonitor:
     scheme: http
     interval: 5s
 
+# -- podDisruptionBudget to avoid service degradation
+podDisruptionBudget:
+  # -- Specifies whether to enable pod disruption budget.
+  # Ref: [Specifying a Disruption Budget for your Application](https://kubernetes.io/docs/tasks/run-application/configure-pdb/)
+  enable: false
+  # -- The number of pods that must be available.
+ # Require `replicaCount` to be greater than 1 + minAvailable: 1 + # nodeSelector -- Node labels for pod assignment nodeSelector: {} From eca3fc8702a18bded7320f7835d47ebc33c7531f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20S=C3=A1nchez=20P=C3=A1ez?= Date: Mon, 22 Jul 2024 06:03:00 +0200 Subject: [PATCH 75/87] Update helm docs (#2081) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Carlos Sánchez Páez --- charts/spark-operator-chart/Chart.yaml | 2 +- charts/spark-operator-chart/README.md | 129 +++++++++++++------------ 2 files changed, 66 insertions(+), 65 deletions(-) diff --git a/charts/spark-operator-chart/Chart.yaml b/charts/spark-operator-chart/Chart.yaml index b735a995c7..61935c48fa 100644 --- a/charts/spark-operator-chart/Chart.yaml +++ b/charts/spark-operator-chart/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: spark-operator description: A Helm chart for Spark on Kubernetes operator -version: 1.4.3 +version: 1.4.4 appVersion: v1beta2-1.6.1-3.5.0 keywords: - spark diff --git a/charts/spark-operator-chart/README.md b/charts/spark-operator-chart/README.md index a5a9c1bdc8..41d71347bc 100644 --- a/charts/spark-operator-chart/README.md +++ b/charts/spark-operator-chart/README.md @@ -1,6 +1,6 @@ # spark-operator -![Version: 1.4.2](https://img.shields.io/badge/Version-1.4.2-informational?style=flat-square) ![AppVersion: v1beta2-1.6.1-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.6.1--3.5.0-informational?style=flat-square) +![Version: 1.4.4](https://img.shields.io/badge/Version-1.4.4-informational?style=flat-square) ![AppVersion: v1beta2-1.6.1-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.6.1--3.5.0-informational?style=flat-square) A Helm chart for Spark on Kubernetes operator @@ -77,69 +77,70 @@ See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall) for command docum ## Values -| Key | Type | Default | Description | -|-------------------------------------------|--------|------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| affinity | object | `{}` | Affinity for pod assignment | -| batchScheduler.enable | bool | `false` | Enable batch scheduler for spark jobs scheduling. If enabled, users can specify batch scheduler name in spark application | -| commonLabels | object | `{}` | Common labels to add to the resources | -| controllerThreads | int | `10` | Operator concurrency, higher values might increase memory usage | -| envFrom | list | `[]` | Pod environment variable sources | -| fullnameOverride | string | `""` | String to override release name | -| image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | -| image.repository | string | `"docker.io/kubeflow/spark-operator"` | Image repository | -| image.tag | string | `""` | if set, override the image tag whose default is the chart appVersion. | -| imagePullSecrets | list | `[]` | Image pull secrets | -| ingressUrlFormat | string | `""` | Ingress URL format. Requires the UI service to be enabled by setting `uiService.enable` to true. 
| -| istio.enabled | bool | `false` | When using `istio`, spark jobs need to run without a sidecar to properly terminate | -| labelSelectorFilter | string | `""` | A comma-separated list of key=value, or key labels to filter resources during watch and list based on the specified labels. | -| leaderElection.lockName | string | `"spark-operator-lock"` | Leader election lock name. Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-leader-election-for-high-availability. | -| leaderElection.lockNamespace | string | `""` | Optionally store the lock in another namespace. Defaults to operator's namespace | -| logLevel | int | `2` | Set higher levels for more verbose logging | -| metrics.enable | bool | `true` | Enable prometheus metric scraping | -| metrics.endpoint | string | `"/metrics"` | Metrics serving endpoint | -| metrics.port | int | `10254` | Metrics port | -| metrics.portName | string | `"metrics"` | Metrics port name | -| metrics.prefix | string | `""` | Metric prefix, will be added to all exported metrics | -| nameOverride | string | `""` | String to partially override `spark-operator.fullname` template (will maintain the release name) | -| nodeSelector | object | `{}` | Node labels for pod assignment | -| podDisruptionBudget.enabled | bool | `false` | Whether to deploy a PodDisruptionBudget | -| podDisruptionBudget.minAvailable | int | `1` | An eviction is allowed if at least "minAvailable" pods selected by "selector" will still be available after the eviction | -| podAnnotations | object | `{}` | Additional annotations to add to the pod | -| podLabels | object | `{}` | Additional labels to add to the pod | -| podMonitor | object | `{"enable":false,"jobLabel":"spark-operator-podmonitor","labels":{},"podMetricsEndpoint":{"interval":"5s","scheme":"http"}}` | Prometheus pod monitor for operator's pod. | -| podMonitor.enable | bool | `false` | If enabled, a pod monitor for operator's pod will be submitted. Note that prometheus metrics should be enabled as well. | -| podMonitor.jobLabel | string | `"spark-operator-podmonitor"` | The label to use to retrieve the job name from | -| podMonitor.labels | object | `{}` | Pod monitor labels | -| podMonitor.podMetricsEndpoint | object | `{"interval":"5s","scheme":"http"}` | Prometheus metrics endpoint properties. `metrics.portName` will be used as a port | -| podSecurityContext | object | `{}` | Pod security context | -| priorityClassName | string | `""` | A priority class to be used for running spark-operator pod. | -| rbac.annotations | object | `{}` | Optional annotations for rbac | -| rbac.create | bool | `false` | **DEPRECATED** use `createRole` and `createClusterRole` | -| rbac.createClusterRole | bool | `true` | Create and use RBAC `ClusterRole` resources | -| rbac.createRole | bool | `true` | Create and use RBAC `Role` resources | -| replicaCount | int | `1` | Desired number of pods, leaderElection will be enabled if this is greater than 1 | -| resourceQuotaEnforcement.enable | bool | `false` | Whether to enable the ResourceQuota enforcement for SparkApplication resources. Requires the webhook to be enabled by setting `webhook.enable` to true. Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-resource-quota-enforcement. | -| resources | object | `{}` | Pod resource requests and limits Note, that each job submission will spawn a JVM within the Spark Operator Pod using "/usr/local/openjdk-11/bin/java -Xmx128m". 
Kubernetes may kill these Java processes at will to enforce resource limits. When that happens, you will see the following error: 'failed to run spark-submit for SparkApplication [...]: signal: killed' - when this happens, you may want to increase memory limits. | -| resyncInterval | int | `30` | Operator resync interval. Note that the operator will respond to events (e.g. create, update) unrelated to this setting | -| securityContext | object | `{}` | Operator container security context | -| serviceAccounts.spark.annotations | object | `{}` | Optional annotations for the spark service account | -| serviceAccounts.spark.create | bool | `true` | Create a service account for spark apps | -| serviceAccounts.spark.name | string | `""` | Optional name for the spark service account | -| serviceAccounts.sparkoperator.annotations | object | `{}` | Optional annotations for the operator service account | -| serviceAccounts.sparkoperator.create | bool | `true` | Create a service account for the operator | -| serviceAccounts.sparkoperator.name | string | `""` | Optional name for the operator service account | -| sidecars | list | `[]` | Sidecar containers | -| sparkJobNamespaces | list | `[""]` | List of namespaces where to run spark jobs | -| tolerations | list | `[]` | List of node taints to tolerate | -| uiService.enable | bool | `true` | Enable UI service creation for Spark application | -| volumeMounts | list | `[]` | | -| volumes | list | `[]` | | -| webhook.enable | bool | `false` | Enable webhook server | -| webhook.namespaceSelector | string | `""` | The webhook server will only operate on namespaces with this label, specified in the form key1=value1,key2=value2. Empty string (default) will operate on all namespaces | -| webhook.objectSelector | string | `""` | The webhook will only operate on resources with this label/s, specified in the form key1=value1,key2=value2, OR key in (value1,value2). Empty string (default) will operate on all objects | -| webhook.port | int | `8080` | Webhook service port | -| webhook.portName | string | `"webhook"` | Webhook container port name and service target port name | -| webhook.timeout | int | `30` | The annotations applied to init job, required to restore certs deleted by the cleanup job during upgrade | +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity for pod assignment | +| batchScheduler.enable | bool | `false` | Enable batch scheduler for spark jobs scheduling. If enabled, users can specify batch scheduler name in spark application | +| commonLabels | object | `{}` | Common labels to add to the resources | +| controllerThreads | int | `10` | Operator concurrency, higher values might increase memory usage | +| envFrom | list | `[]` | Pod environment variable sources | +| fullnameOverride | string | `""` | String to override release name | +| image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | +| image.repository | string | `"docker.io/kubeflow/spark-operator"` | Image repository | +| image.tag | string | `""` | if set, override the image tag whose default is the chart appVersion. | +| imagePullSecrets | list | `[]` | Image pull secrets | +| ingressUrlFormat | string | `""` | Ingress URL format. Requires the UI service to be enabled by setting `uiService.enable` to true. 
| +| istio.enabled | bool | `false` | When using `istio`, spark jobs need to run without a sidecar to properly terminate | +| labelSelectorFilter | string | `""` | A comma-separated list of key=value, or key labels to filter resources during watch and list based on the specified labels. | +| leaderElection.lockName | string | `"spark-operator-lock"` | Leader election lock name. Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-leader-election-for-high-availability. | +| leaderElection.lockNamespace | string | `""` | Optionally store the lock in another namespace. Defaults to operator's namespace | +| logLevel | int | `2` | Set higher levels for more verbose logging | +| metrics.enable | bool | `true` | Enable prometheus metric scraping | +| metrics.endpoint | string | `"/metrics"` | Metrics serving endpoint | +| metrics.port | int | `10254` | Metrics port | +| metrics.portName | string | `"metrics"` | Metrics port name | +| metrics.prefix | string | `""` | Metric prefix, will be added to all exported metrics | +| nameOverride | string | `""` | String to partially override `spark-operator.fullname` template (will maintain the release name) | +| nodeSelector | object | `{}` | Node labels for pod assignment | +| podAnnotations | object | `{}` | Additional annotations to add to the pod | +| podDisruptionBudget | object | `{"enable":false,"minAvailable":1}` | podDisruptionBudget to avoid service degradation | +| podDisruptionBudget.enable | bool | `false` | Specifies whether to enable pod disruption budget. Ref: [Specifying a Disruption Budget for your Application](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) | +| podDisruptionBudget.minAvailable | int | `1` | The number of pods that must be available. Require `replicaCount` to be greater than 1 | +| podLabels | object | `{}` | Additional labels to add to the pod | +| podMonitor | object | `{"enable":false,"jobLabel":"spark-operator-podmonitor","labels":{},"podMetricsEndpoint":{"interval":"5s","scheme":"http"}}` | Prometheus pod monitor for operator's pod. | +| podMonitor.enable | bool | `false` | If enabled, a pod monitor for operator's pod will be submitted. Note that prometheus metrics should be enabled as well. | +| podMonitor.jobLabel | string | `"spark-operator-podmonitor"` | The label to use to retrieve the job name from | +| podMonitor.labels | object | `{}` | Pod monitor labels | +| podMonitor.podMetricsEndpoint | object | `{"interval":"5s","scheme":"http"}` | Prometheus metrics endpoint properties. `metrics.portName` will be used as a port | +| podSecurityContext | object | `{}` | Pod security context | +| priorityClassName | string | `""` | A priority class to be used for running spark-operator pod. | +| rbac.annotations | object | `{}` | Optional annotations for rbac | +| rbac.create | bool | `false` | **DEPRECATED** use `createRole` and `createClusterRole` | +| rbac.createClusterRole | bool | `true` | Create and use RBAC `ClusterRole` resources | +| rbac.createRole | bool | `true` | Create and use RBAC `Role` resources | +| replicaCount | int | `1` | Desired number of pods, leaderElection will be enabled if this is greater than 1 | +| resourceQuotaEnforcement.enable | bool | `false` | Whether to enable the ResourceQuota enforcement for SparkApplication resources. Requires the webhook to be enabled by setting `webhook.enable` to true. Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-resource-quota-enforcement. 
| +| resources | object | `{}` | Pod resource requests and limits Note, that each job submission will spawn a JVM within the Spark Operator Pod using "/usr/local/openjdk-11/bin/java -Xmx128m". Kubernetes may kill these Java processes at will to enforce resource limits. When that happens, you will see the following error: 'failed to run spark-submit for SparkApplication [...]: signal: killed' - when this happens, you may want to increase memory limits. | +| resyncInterval | int | `30` | Operator resync interval. Note that the operator will respond to events (e.g. create, update) unrelated to this setting | +| securityContext | object | `{}` | Operator container security context | +| serviceAccounts.spark.annotations | object | `{}` | Optional annotations for the spark service account | +| serviceAccounts.spark.create | bool | `true` | Create a service account for spark apps | +| serviceAccounts.spark.name | string | `""` | Optional name for the spark service account | +| serviceAccounts.sparkoperator.annotations | object | `{}` | Optional annotations for the operator service account | +| serviceAccounts.sparkoperator.create | bool | `true` | Create a service account for the operator | +| serviceAccounts.sparkoperator.name | string | `""` | Optional name for the operator service account | +| sidecars | list | `[]` | Sidecar containers | +| sparkJobNamespaces | list | `[""]` | List of namespaces where to run spark jobs | +| tolerations | list | `[]` | List of node taints to tolerate | +| uiService.enable | bool | `true` | Enable UI service creation for Spark application | +| volumeMounts | list | `[]` | | +| volumes | list | `[]` | | +| webhook.enable | bool | `false` | Enable webhook server | +| webhook.namespaceSelector | string | `""` | The webhook server will only operate on namespaces with this label, specified in the form key1=value1,key2=value2. Empty string (default) will operate on all namespaces | +| webhook.objectSelector | string | `""` | The webhook will only operate on resources with this label/s, specified in the form key1=value1,key2=value2, OR key in (value1,value2). 
Empty string (default) will operate on all objects |
+| webhook.port | int | `8080` | Webhook service port |
+| webhook.portName | string | `"webhook"` | Webhook container port name and service target port name |
+| webhook.timeout | int | `30` | The annotations applied to init job, required to restore certs deleted by the cleanup job during upgrade |

## Maintainers

From 8894a4fedc17738c4c1f4e0073ba220402a188d0 Mon Sep 17 00:00:00 2001
From: Yi Chen
Date: Mon, 22 Jul 2024 12:05:00 +0800
Subject: [PATCH 76/87] Add workflow for closing stale issues and PRs (#2073)

* Add workflow for closing stale issues and PRs

Signed-off-by: Yi Chen

* Add job permissions

Signed-off-by: Yi Chen

---------

Signed-off-by: Yi Chen
---
 .github/workflows/stale.yaml | 35 +++++++++++++++++++++++++++++++++++
 1 file changed, 35 insertions(+)
 create mode 100644 .github/workflows/stale.yaml

diff --git a/.github/workflows/stale.yaml b/.github/workflows/stale.yaml
new file mode 100644
index 0000000000..61d10d9748
--- /dev/null
+++ b/.github/workflows/stale.yaml
@@ -0,0 +1,35 @@
+name: Close stale issues and PRs
+
+on:
+  schedule:
+    - cron: "0 1 * * *"
+
+jobs:
+  stale:
+    runs-on: ubuntu-latest
+
+    permissions:
+      issues: write
+      pull-requests: write
+
+    steps:
+      - uses: actions/stale@v9
+        with:
+          days-before-issue-stale: 60
+          days-before-issue-close: 30
+          days-before-pr-stale: 60
+          days-before-pr-close: 30
+          stale-issue-message: >
+            This issue has been automatically marked as stale because it has been open 60 days with no activity.
+            Remove stale label or comment or this will be closed in 30 days.
+            Thank you for your contributions.
+          close-issue-message: >
+            This issue has been automatically closed because it has been stalled for 30 days with no activity.
+            Please comment "/reopen" to reopen it.
+          stale-pr-message: >
+            This pull request has been automatically marked as stale because it has been open 60 days with no activity.
+            Remove stale label or comment or this will be closed in 30 days.
+            Thank you for your contributions.
+          close-pr-message: >
+            This pull request has been automatically closed because it has been stalled for 30 days with no activity.
+            Please comment "/reopen" to reopen it.
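Taken together, the PodDisruptionBudget patches above gate the new template on two chart values: `podDisruptionBudget.enable` must be true and `replicaCount` must be greater than 1, otherwise rendering aborts via the `fail` call baked into the template. A minimal values override exercising that path might look like the following sketch (the file name is illustrative, not part of the chart):

```yaml
# values-pdb.yaml -- hypothetical override file for the spark-operator chart.
# replicaCount must be greater than 1, otherwise the template fails rendering
# with "replicaCount must be greater than 1 to enable PodDisruptionBudget".
replicaCount: 2

podDisruptionBudget:
  enable: true
  # At most (replicaCount - minAvailable) operator pods may be evicted
  # voluntarily at any given time.
  minAvailable: 1
```

Rendered against the chart, this emits a `policy/v1` PodDisruptionBudget named after the chart fullname with a `-pdb` suffix, selecting the operator pods via the chart's standard selector labels.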
From 779ea3debc7885f422a759e6cac6cbcd466c8e5c Mon Sep 17 00:00:00 2001 From: Yi Chen Date: Mon, 22 Jul 2024 13:10:00 +0800 Subject: [PATCH 77/87] Update the process to build api-docs, generate CRD manifests and code (#2046) * Update .gitignore Signed-off-by: Yi Chen * Update .dockerignore Signed-off-by: Yi Chen * Update Makefile Signed-off-by: Yi Chen * Update the process to generate api docs Signed-off-by: Yi Chen * Update the workflow to generate api docs Signed-off-by: Yi Chen * Use controller-gen to generate CRD and deep copy related methods Signed-off-by: Yi Chen * Update helm chart CRDs Signed-off-by: Yi Chen * Update workflow for building spark operator Signed-off-by: Yi Chen * Update README.md Signed-off-by: Yi Chen --------- Signed-off-by: Yi Chen --- .dockerignore | 32 +- .github/workflows/main.yaml | 18 +- .gitignore | 12 +- Makefile | 301 +- README.md | 2 +- charts/spark-operator-chart/Chart.yaml | 4 +- charts/spark-operator-chart/README.md | 2 +- ...tor.k8s.io_scheduledsparkapplications.yaml | 15137 ++++++++++++---- ...parkoperator.k8s.io_sparkapplications.yaml | 15091 +++++++++++---- ...tor.k8s.io_scheduledsparkapplications.yaml | 11611 ++++++++++++ ...parkoperator.k8s.io_sparkapplications.yaml | 11553 ++++++++++++ config/crd/kustomization.yaml | 24 + config/crd/kustomizeconfig.yaml | 19 + docs/api-docs.md | 54 +- hack/api-docs/Dockerfile | 18 - hack/api-docs/api-docs-config.json | 28 - hack/api-docs/config.json | 28 + .../members.tpl | 0 .../{api-docs-template => template}/pkg.tpl | 2 +- .../placeholder.go | 0 .../{api-docs-template => template}/type.tpl | 0 ...-boilerplate.go.txt => boilerplate.go.txt} | 6 +- manifest/crds/kustomization.yaml | 21 - ...tor.k8s.io_scheduledsparkapplications.yaml | 4496 ----- ...parkoperator.k8s.io_sparkapplications.yaml | 4506 ----- .../sparkoperator.k8s.io/v1beta1/types.go | 6 +- .../v1beta1/zz_generated.deepcopy.go | 32 +- .../sparkoperator.k8s.io/v1beta2/types.go | 16 +- .../v1beta2/zz_generated.deepcopy.go | 164 +- 29 files changed, 45869 insertions(+), 17314 deletions(-) create mode 100644 config/crd/bases/sparkoperator.k8s.io_scheduledsparkapplications.yaml create mode 100644 config/crd/bases/sparkoperator.k8s.io_sparkapplications.yaml create mode 100644 config/crd/kustomization.yaml create mode 100644 config/crd/kustomizeconfig.yaml delete mode 100644 hack/api-docs/Dockerfile delete mode 100644 hack/api-docs/api-docs-config.json create mode 100644 hack/api-docs/config.json rename hack/api-docs/{api-docs-template => template}/members.tpl (100%) rename hack/api-docs/{api-docs-template => template}/pkg.tpl (85%) rename hack/api-docs/{api-docs-template => template}/placeholder.go (100%) rename hack/api-docs/{api-docs-template => template}/type.tpl (100%) rename hack/{custom-boilerplate.go.txt => boilerplate.go.txt} (86%) delete mode 100644 manifest/crds/kustomization.yaml delete mode 100644 manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml delete mode 100644 manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml diff --git a/.dockerignore b/.dockerignore index 22d0d82f80..9b0bebd900 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1 +1,31 @@ -vendor +.github/ +.idea/ +.vscode/ +bin/ +charts/ +docs/ +config/ +examples/ +hack/ +manifest/ +spark-docker/ +sparkctl/ +test/ +vendor/ +.dockerignore +.DS_Store +.gitignore +.gitlab-ci.yaml +.golangci.yaml +.pre-commit-config.yaml +ADOPTERS.md +CODE_OF_CONDUCT.md +codecov.ymal +CONTRIBUTING.md +cover.out +Dockerfile +LICENSE +OWNERS +PROJECT +README.md +test.sh diff --git 
a/.github/workflows/main.yaml b/.github/workflows/main.yaml index 40af1cc3b0..ba0ee2a657 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -18,6 +18,11 @@ jobs: with: fetch-depth: "0" + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version-file: go.mod + - name: The API documentation hasn't changed run: | make build-api-docs @@ -42,7 +47,7 @@ jobs: - name: build sparkctl run: | - make all + make build-sparkctl build-spark-operator: runs-on: ubuntu-latest @@ -57,18 +62,17 @@ jobs: with: go-version-file: "go.mod" - - name: Run gofmt check - run: make fmt-check + - name: Run go fmt check + run: make go-fmt - - name: Run static analysis - run: make static-analysis + - name: Run go vet + run: make go-vet - name: Run unit tests run: make unit-test - name: Build Spark-Operator Docker Image - run: | - docker build -t docker.io/kubeflow/spark-operator:latest . + run: make docker-build IMAGE_TAG=latest - name: Check changes in resources used in docker file run: | diff --git a/.gitignore b/.gitignore index c4f0f0048a..022a3ad542 100644 --- a/.gitignore +++ b/.gitignore @@ -1,9 +1,11 @@ -.vscode/ +bin/ vendor/ -spark-operator -.idea/ -**/*.iml +cover.out sparkctl/sparkctl -spark-on-k8s-operator sparkctl/sparkctl-linux-amd64 sparkctl/sparkctl-darwin-amd64 +**/*.iml + +# Various IDEs +.idea/ +.vscode/ \ No newline at end of file diff --git a/Makefile b/Makefile index b7ae2b3781..966e027ddd 100644 --- a/Makefile +++ b/Makefile @@ -1,16 +1,116 @@ - .SILENT: -.PHONY: clean-sparkctl +# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) +ifeq (,$(shell go env GOBIN)) +GOBIN=$(shell go env GOPATH)/bin +else +GOBIN=$(shell go env GOBIN) +endif + +# Setting SHELL to bash allows bash commands to be executed by recipes. +# Options are set to exit when a recipe line exits non-zero or a piped command fails. +SHELL = /usr/bin/env bash -o pipefail +.SHELLFLAGS = -ec + +REPO=github.com/kubeflow/spark-operator SPARK_OPERATOR_GOPATH=/go/src/github.com/kubeflow/spark-operator +SPARK_OPERATOR_CHART_PATH=charts/spark-operator-chart +OPERATOR_VERSION ?= $$(grep appVersion $(SPARK_OPERATOR_CHART_PATH)/Chart.yaml | awk '{print $$2}') DEP_VERSION:=`grep DEP_VERSION= Dockerfile | awk -F\" '{print $$2}'` BUILDER=`grep "FROM golang:" Dockerfile | awk '{print $$2}'` UNAME:=`uname | tr '[:upper:]' '[:lower:]'` -REPO=github.com/kubeflow/spark-operator -all: clean-sparkctl build-sparkctl install-sparkctl +# CONTAINER_TOOL defines the container tool to be used for building images. +# Be aware that the target commands are only tested with Docker which is +# scaffolded by default. However, you might want to replace it to use other +# tools. (i.e. podman) +CONTAINER_TOOL ?= docker + +# Image URL to use all building/pushing image targets +IMAGE_REPOSITORY ?= docker.io/kubeflow/spark-operator +IMAGE_TAG ?= $(OPERATOR_VERSION) +OPERATOR_IMAGE ?= $(IMAGE_REPOSITORY):$(IMAGE_TAG) + +##@ General -build-sparkctl: +# The help target prints out all targets with their descriptions organized +# beneath their categories. The categories are represented by '##@' and the +# target descriptions by '##'. The awk command is responsible for reading the +# entire set of makefiles included in this invocation, looking for lines of the +# file as xyz: ## something, and then pretty-format the target and help. Then, +# if there's a line with ##@ something, that gets pretty-printed as a category. 
+# More info on the usage of ANSI control characters for terminal formatting: +# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters +# More info on the awk command: +# http://linuxcommand.org/lc3_adv_awk.php + +.PHONY: help +help: ## Display this help. + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +##@ Development + +.PHONY: manifests +manifests: controller-gen ## Generate CustomResourceDefinition, RBAC and WebhookConfiguration manifests. + $(CONTROLLER_GEN) crd rbac:roleName=spark-operator-controller webhook paths="./..." output:crd:artifacts:config=config/crd/bases + +.PHONY: generate +generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. + $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." + +.PHONY: update-crd +update-crd: manifests ## Update CRD files in the Helm chart. + cp config/crd/bases/* charts/spark-operator-chart/crds/ + +.PHONY: clean +clean: ## Clean up caches and output. + @echo "cleaning up caches and output" + go clean -cache -testcache -r -x 2>&1 >/dev/null + -rm -rf _output + +.PHONY: go-fmt +go-fmt: ## Run go fmt against code. + @echo "Running go fmt..." + if [ -n "$(shell go fmt ./...)" ]; then \ + echo "Go code is not formatted, need to run \"make go-fmt\" and commit the changes."; \ + false; \ + else \ + echo "Go code is formatted."; \ + fi + +.PHONY: go-vet +go-vet: ## Run go vet against code. + @echo "Running go vet..." + go vet ./... + +.PHONY: lint +lint: golangci-lint ## Run golangci-lint linter. + @echo "Running golangci-lint run..." + $(GOLANGCI_LINT) run + +.PHONY: lint-fix +lint-fix: golangci-lint ## Run golangci-lint linter and perform fixes. + @echo "Running golangci-lint run --fix..." + $(GOLANGCI_LINT) run --fix + +.PHONY: unit-test +unit-test: clean ## Run go unit tests. + @echo "running unit tests" + go test $(shell go list ./... | grep -v /e2e) -coverprofile cover.out + +.PHONY: e2e-test +e2e-test: clean ## Run go integration tests. + @echo "running integration tests" + go test -v ./test/e2e/ --kubeconfig "$(HOME)/.kube/config" --operator-image=docker.io/spark-operator/spark-operator:local + +##@ Build + +.PHONY: build-operator +build-operator: ## Build spark-operator binary. + go build -o bin/spark-operator main.go + +.PHONY: build-sparkctl +build-sparkctl: ## Build sparkctl binary. [ ! -f "sparkctl/sparkctl-darwin-amd64" ] || [ ! -f "sparkctl/sparkctl-linux-amd64" ] && \ echo building using $(BUILDER) && \ docker run -w $(SPARK_OPERATOR_GOPATH) \ @@ -19,10 +119,8 @@ build-sparkctl: cd sparkctl && \ ./build.sh" || true -clean-sparkctl: - rm -f sparkctl/sparkctl-darwin-amd64 sparkctl/sparkctl-linux-amd64 - -install-sparkctl: | sparkctl/sparkctl-darwin-amd64 sparkctl/sparkctl-linux-amd64 +.PHONY: install-sparkctl +install-sparkctl: | sparkctl/sparkctl-darwin-amd64 sparkctl/sparkctl-linux-amd64 ## Install sparkctl binary. 
@if [ "$(UNAME)" = "linux" ]; then \ echo "installing linux binary to /usr/local/bin/sparkctl"; \ sudo cp sparkctl/sparkctl-linux-amd64 /usr/local/bin/sparkctl; \ @@ -35,52 +133,161 @@ install-sparkctl: | sparkctl/sparkctl-darwin-amd64 sparkctl/sparkctl-linux-amd64 echo "$(UNAME) not supported"; \ fi -build-api-docs: - docker build -t temp-api-ref-docs hack/api-docs - docker run -v $$(pwd):/repo/ temp-api-ref-docs \ - sh -c "cd /repo/ && /go/bin/gen-crd-api-reference-docs \ - -config /repo/hack/api-docs/api-docs-config.json \ - -api-dir github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2 \ - -template-dir /repo/hack/api-docs/api-docs-template \ - -out-file /repo/docs/api-docs.md" +.PHONY: clean-sparkctl +clean-sparkctl: ## Clean sparkctl binary. + rm -f sparkctl/sparkctl-darwin-amd64 sparkctl/sparkctl-linux-amd64 + +.PHONY: build-api-docs +build-api-docs: gen-crd-api-reference-docs ## Build api documentaion. + $(GEN_CRD_API_REFERENCE_DOCS) \ + -config hack/api-docs/config.json \ + -api-dir github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2 \ + -template-dir hack/api-docs/template \ + -out-file docs/api-docs.md + +# If you wish to build the operator image targeting other platforms you can use the --platform flag. +# (i.e. docker build --platform linux/arm64). However, you must enable docker buildKit for it. +# More info: https://docs.docker.com/develop/develop-images/build_enhancements/ +.PHONY: docker-build +docker-build: ## Build docker image with the operator. + $(CONTAINER_TOOL) build -t ${IMAGE_REPOSITORY}:${IMAGE_TAG} . + +.PHONY: docker-push +docker-push: ## Push docker image with the operator. + $(CONTAINER_TOOL) push ${IMAGE_REPOSITORY}:${IMAGE_TAG} + +# PLATFORMS defines the target platforms for the operator image be built to provide support to multiple +# architectures. (i.e. make docker-buildx IMG=myregistry/mypoperator:0.0.1). To use this option you need to: +# - be able to use docker buildx. More info: https://docs.docker.com/build/buildx/ +# - have enabled BuildKit. More info: https://docs.docker.com/develop/develop-images/build_enhancements/ +# - be able to push the image to your registry (i.e. if you do not set a valid value via IMG=> then the export will fail) +# To adequately provide solutions that are compatible with multiple platforms, you should consider using this option. +PLATFORMS ?= linux/amd64,linux/arm64 +.PHONY: docker-buildx +docker-buildx: ## Build and push docker image for the operator for cross-platform support. + # copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile + sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross + - $(CONTAINER_TOOL) buildx create --name spark-operator-builder + $(CONTAINER_TOOL) buildx use spark-operator-builder + - $(CONTAINER_TOOL) buildx build --push --platform=$(PLATFORMS) --tag ${IMAGE_REPOSITORY}:${IMAGE_TAG} -f Dockerfile.cross . + - $(CONTAINER_TOOL) buildx rm spark-operator-builder + rm Dockerfile.cross + +##@ Helm -helm-unittest: +.PHONY: detect-crds-drift +detect-crds-drift: + diff -q charts/spark-operator-chart/crds config/crd/bases + +.PHONY: helm-unittest +helm-unittest: helm-unittest-plugin ## Run Helm chart unittests. 
helm unittest charts/spark-operator-chart --strict --file "tests/**/*_test.yaml" -helm-lint: - docker run --rm --workdir /workspace --volume "$$(pwd):/workspace" quay.io/helmpack/chart-testing:latest ct lint +.PHONY: helm-lint +helm-lint: ## Run Helm chart lint test. + docker run --rm --workdir /workspace --volume "$$(pwd):/workspace" quay.io/helmpack/chart-testing:latest ct lint --target-branch master -helm-docs: +.PHONY: helm-docs +helm-docs: ## Generates markdown documentation for helm charts from requirements and values files. docker run --rm --volume "$$(pwd):/helm-docs" -u "$(id -u)" jnorwood/helm-docs:latest -fmt-check: clean - @echo "running fmt check"; cd "$(dirname $0)"; \ - if [ -n "$(go fmt ./...)" ]; \ - then \ - echo "Go code is not formatted, please run 'go fmt ./...'." >&2; \ - exit 1; \ - else \ - echo "Go code is formatted"; \ - fi +##@ Deployment -detect-crds-drift: - diff -q charts/spark-operator-chart/crds manifest/crds --exclude=kustomization.yaml +ifndef ignore-not-found + ignore-not-found = false +endif -clean: - @echo "cleaning up caches and output" - go clean -cache -testcache -r -x 2>&1 >/dev/null - -rm -rf _output +.PHONY: install-crds +install-crds: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config. + $(KUSTOMIZE) build config/crd | $(KUBECTL) create -f - -unit-test: clean - @echo "running unit tests" - go test -v ./... -covermode=atomic +.PHONY: uninstall-crds +uninstall-crds: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. + $(KUSTOMIZE) build config/crd | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f - -integration-test: clean - @echo "running integration tests" - go test -v ./test/e2e/ --kubeconfig "$(HOME)/.kube/config" --operator-image=gcr.io/spark-operator/spark-operator:local +.PHONY: deploy +deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config. + cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} + $(KUSTOMIZE) build config/default | $(KUBECTL) apply -f - + +.PHONY: undeploy +undeploy: kustomize ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. + $(KUSTOMIZE) build config/default | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f - + +##@ Dependencies + +## Location to install dependencies to +LOCALBIN ?= $(shell pwd)/bin +$(LOCALBIN): + mkdir -p $(LOCALBIN) + +## Tool Binaries +KUBECTL ?= kubectl +KUSTOMIZE ?= $(LOCALBIN)/kustomize-$(KUSTOMIZE_VERSION) +CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen-$(CONTROLLER_TOOLS_VERSION) +KIND ?= $(LOCALBIN)/kind-$(KIND_VERSION) +ENVTEST ?= $(LOCALBIN)/setup-envtest-$(ENVTEST_VERSION) +GOLANGCI_LINT = $(LOCALBIN)/golangci-lint-$(GOLANGCI_LINT_VERSION) +GEN_CRD_API_REFERENCE_DOCS ?= $(LOCALBIN)/gen-crd-api-reference-docs-$(GEN_CRD_API_REFERENCE_DOCS_VERSION) +HELM ?= helm +HELM_UNITTEST ?= unittest + +## Tool Versions +KUSTOMIZE_VERSION ?= v5.4.1 +CONTROLLER_TOOLS_VERSION ?= v0.15.0 +KIND_VERSION ?= v0.23.0 +ENVTEST_VERSION ?= release-0.18 +GOLANGCI_LINT_VERSION ?= v1.57.2 +GEN_CRD_API_REFERENCE_DOCS_VERSION ?= v0.3.0 +HELM_UNITTEST_VERSION ?= 0.5.1 + +.PHONY: kustomize +kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. 
+$(KUSTOMIZE): $(LOCALBIN) + $(call go-install-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v5,$(KUSTOMIZE_VERSION)) + +.PHONY: controller-gen +controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary. +$(CONTROLLER_GEN): $(LOCALBIN) + $(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen,$(CONTROLLER_TOOLS_VERSION)) + +.PHONY: kind +kind: $(KIND) ## Download kind locally if necessary. +$(KIND): $(LOCALBIN) + $(call go-install-tool,$(KIND),sigs.k8s.io/kind,$(KIND_VERSION)) + +.PHONY: envtest +envtest: $(ENVTEST) ## Download setup-envtest locally if necessary. +$(ENVTEST): $(LOCALBIN) + $(call go-install-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest,$(ENVTEST_VERSION)) + +.PHONY: golangci-lint +golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary. +$(GOLANGCI_LINT): $(LOCALBIN) + $(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint,${GOLANGCI_LINT_VERSION}) + +.PHONY: gen-crd-api-reference-docs +gen-crd-api-reference-docs: $(GEN_CRD_API_REFERENCE_DOCS) ## Download gen-crd-api-reference-docs locally if necessary. +$(GEN_CRD_API_REFERENCE_DOCS): $(LOCALBIN) + $(call go-install-tool,$(GEN_CRD_API_REFERENCE_DOCS),github.com/ahmetb/gen-crd-api-reference-docs,$(GEN_CRD_API_REFERENCE_DOCS_VERSION)) + +.PHONY: helm-unittest-plugin +helm-unittest-plugin: ## Download helm unittest plugin locally if necessary. + if [ -z "$(shell helm plugin list | grep unittest)" ]; then \ + echo "Installing helm unittest plugin..."; \ + helm plugin install https://github.com/helm-unittest/helm-unittest.git --version $(HELM_UNITTEST_VERSION); \ + fi -static-analysis: - @echo "running go vet" - # echo "Building using $(BUILDER)" - # go vet ./... - go vet $(REPO)... +# go-install-tool will 'go install' any package with custom target and name of binary, if it doesn't exist +# $1 - target path with name of binary (ideally with version) +# $2 - package url which can be installed +# $3 - specific version of package +define go-install-tool +@[ -f $(1) ] || { \ +set -e; \ +package=$(2)@$(3) ;\ +echo "Downloading $${package}" ;\ +GOBIN=$(LOCALBIN) go install $${package} ;\ +mv "$$(echo "$(1)" | sed "s/-$(3)$$//")" $(1) ;\ +} +endef diff --git a/README.md b/README.md index 48d92caa29..d907569544 100644 --- a/README.md +++ b/README.md @@ -31,7 +31,7 @@ The Kubernetes Operator for Apache Spark currently supports the following list o **Current API version:** *`v1beta2`* -**If you are currently using the `v1beta1` version of the APIs in your manifests, please update them to use the `v1beta2` version by changing `apiVersion: "sparkoperator.k8s.io/"` to `apiVersion: "sparkoperator.k8s.io/v1beta2"`. You will also need to delete the `previous` version of the CustomResourceDefinitions named `sparkapplications.sparkoperator.k8s.io` and `scheduledsparkapplications.sparkoperator.k8s.io`, and replace them with the `v1beta2` version either by installing the latest version of the operator or by running `kubectl create -f manifest/crds`.** +**If you are currently using the `v1beta1` version of the APIs in your manifests, please update them to use the `v1beta2` version by changing `apiVersion: "sparkoperator.k8s.io/"` to `apiVersion: "sparkoperator.k8s.io/v1beta2"`. 
You will also need to delete the `previous` version of the CustomResourceDefinitions named `sparkapplications.sparkoperator.k8s.io` and `scheduledsparkapplications.sparkoperator.k8s.io`, and replace them with the `v1beta2` version either by installing the latest version of the operator or by running `kubectl create -f config/crd/bases`.** ## Prerequisites diff --git a/charts/spark-operator-chart/Chart.yaml b/charts/spark-operator-chart/Chart.yaml index 61935c48fa..8e78d3f4ad 100644 --- a/charts/spark-operator-chart/Chart.yaml +++ b/charts/spark-operator-chart/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 name: spark-operator description: A Helm chart for Spark on Kubernetes operator -version: 1.4.4 -appVersion: v1beta2-1.6.1-3.5.0 +version: 1.4.5 +appVersion: v1beta2-1.6.2-3.5.0 keywords: - spark home: https://github.com/kubeflow/spark-operator diff --git a/charts/spark-operator-chart/README.md b/charts/spark-operator-chart/README.md index 41d71347bc..cab86da27c 100644 --- a/charts/spark-operator-chart/README.md +++ b/charts/spark-operator-chart/README.md @@ -1,6 +1,6 @@ # spark-operator -![Version: 1.4.4](https://img.shields.io/badge/Version-1.4.4-informational?style=flat-square) ![AppVersion: v1beta2-1.6.1-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.6.1--3.5.0-informational?style=flat-square) +![Version: 1.4.5](https://img.shields.io/badge/Version-1.4.5-informational?style=flat-square) ![AppVersion: v1beta2-1.6.2-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.6.2--3.5.0-informational?style=flat-square) A Helm chart for Spark on Kubernetes operator diff --git a/charts/spark-operator-chart/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml b/charts/spark-operator-chart/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml index 60e836b083..b37b7a0008 100644 --- a/charts/spark-operator-chart/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml +++ b/charts/spark-operator-chart/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml @@ -1,11 +1,10 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: (unknown) api-approved.kubernetes.io: https://github.com/kubeflow/spark-operator/pull/1298 + controller-gen.kubebuilder.io/version: v0.15.0 name: scheduledsparkapplications.sparkoperator.k8s.io spec: group: sparkoperator.k8s.io @@ -18,1104 +17,1865 @@ spec: singular: scheduledsparkapplication scope: Namespaced versions: - - name: v1beta2 - served: true - storage: true - subresources: - status: {} - additionalPrinterColumns: - - jsonPath: .spec.schedule - name: Schedule - type: string - - jsonPath: .spec.suspend - name: Suspend - type: boolean - - jsonPath: .status.lastRun - name: Last Run - type: date - - jsonPath: .status.lastRunName - name: Last Run Name - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - schema: - openAPIV3Schema: - properties: - apiVersion: - type: string - kind: - type: string - metadata: - type: object - spec: - properties: - concurrencyPolicy: - type: string - failedRunHistoryLimit: - format: int32 - type: integer - schedule: - type: string - successfulRunHistoryLimit: - format: int32 - type: integer - suspend: - type: boolean - template: - properties: - arguments: - items: - type: string - type: array - batchScheduler: + - additionalPrinterColumns: + - jsonPath: .spec.schedule + name: Schedule + type: string + - jsonPath: .spec.suspend + name: Suspend + type: string + - jsonPath: .status.lastRun + name: Last Run + 
type: date + - jsonPath: .status.lastRunName + name: Last Run Name + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + properties: + concurrencyPolicy: + description: ConcurrencyPolicy is the policy governing concurrent + SparkApplication runs. + type: string + failedRunHistoryLimit: + description: |- + FailedRunHistoryLimit is the number of past failed runs of the application to keep. + Defaults to 1. + format: int32 + type: integer + schedule: + description: Schedule is a cron schedule on which the application + should run. + type: string + successfulRunHistoryLimit: + description: |- + SuccessfulRunHistoryLimit is the number of past successful runs of the application to keep. + Defaults to 1. + format: int32 + type: integer + suspend: + description: |- + Suspend is a flag telling the controller to suspend subsequent runs of the application if set to true. + Defaults to false. + type: boolean + template: + description: Template is a template from which SparkApplication instances + can be created. + properties: + arguments: + description: Arguments is a list of arguments to be passed to + the application. + items: type: string - batchSchedulerOptions: - properties: - priorityClassName: + type: array + batchScheduler: + description: BatchScheduler configures which batch scheduler will + be used for scheduling + type: string + batchSchedulerOptions: + description: BatchSchedulerOptions provides fine-grained control + on how to batch scheduling. + properties: + priorityClassName: + description: PriorityClassName stands for the name of k8s + PriorityClass resource, it's being used in Volcano batch + scheduler. + type: string + queue: + description: Queue stands for the resource queue which the + application belongs to, it's being used in Volcano batch + scheduler. + type: string + resources: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Resources stands for the resource list custom request for. Usually it is used to define the lower-bound limit. + If specified, volcano scheduler will consider it as the resources requested. + type: object + type: object + deps: + description: Deps captures all possible types of dependencies + of a Spark application. + properties: + excludePackages: + description: |- + ExcludePackages is a list of "groupId:artifactId", to exclude while resolving the + dependencies provided in Packages to avoid dependency conflicts. + items: type: string - queue: + type: array + files: + description: Files is a list of files the Spark application + depends on. 
+ items: type: string - resources: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - type: object - deps: - properties: - excludePackages: - items: - type: string - type: array - files: - items: - type: string - type: array - jars: - items: - type: string - type: array - packages: - items: - type: string - type: array - pyFiles: - items: - type: string - type: array - repositories: - items: - type: string - type: array - type: object - driver: - properties: - affinity: - properties: - nodeAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: - properties: - preference: - properties: - matchExpressions: - items: - properties: - key: + type: array + jars: + description: Jars is a list of JAR files the Spark application + depends on. + items: + type: string + type: array + packages: + description: |- + Packages is a list of maven coordinates of jars to include on the driver and executor + classpaths. This will search the local maven repo, then maven central and any additional + remote repositories given by the "repositories" option. + Each package should be of the form "groupId:artifactId:version". + items: + type: string + type: array + pyFiles: + description: PyFiles is a list of Python files the Spark application + depends on. + items: + type: string + type: array + repositories: + description: |- + Repositories is a list of additional remote repositories to search for the maven coordinate + given with the "packages" option. + items: + type: string + type: array + type: object + driver: + description: Driver is the driver specification. + properties: + affinity: + description: Affinity specifies the affinity/anti-affinity + settings for the pod. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. 
+ type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: type: string - operator: + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - items: - properties: - key: + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in the + range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
+ items: type: string - operator: + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) properties: - nodeSelectorTerms: - items: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. type: string - type: array - required: - - key - - operator + operator: + description: |- + operator represents a key's relationship to a set of values. 
+ Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object - type: array - matchFields: - items: - properties: - key: - type: string - operator: - type: string - values: - items: + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. type: string - type: array - required: - - key - - operator + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+                                            type: string
+                                          values:
+                                            description: |-
+                                              values is an array of string values. If the operator is In or NotIn,
+                                              the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                              the values array must be empty. This array is replaced during a strategic
+                                              merge patch.
+                                            items:
+                                              type: string
+                                            type: array
+                                          required:
+                                          - key
+                                          - operator
+                                          type: object
+                                        type: array
+                                      matchLabels:
+                                        additionalProperties:
+                                          type: string
+                                        description: |-
+                                          matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+                                          map is equivalent to an element of matchExpressions, whose key field is "key", the
+                                          operator is "In", and the values array contains only "value". The requirements are ANDed.
+                                        type: object
+                                    type: object
+                                    x-kubernetes-map-type: atomic
+                                  namespaces:
+                                    description: |-
+                                      namespaces specifies a static list of namespace names that the term applies to.
+                                      The term is applied to the union of the namespaces listed in this field
+                                      and the ones selected by namespaceSelector.
+                                      null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+                                    items:
+                                      type: string
+                                    type: array
+                                  topologyKey:
+                                    description: |-
+                                      This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+                                      the labelSelector in the specified namespaces, where co-located is defined as running on a node
+                                      whose value of the label with key topologyKey matches that of any node on which any of the
+                                      selected pods is running.
+                                      Empty topologyKey is not allowed.
+                                    type: string
+                                  required:
+                                  - topologyKey
+                                  type: object
+                                weight:
+                                  description: |-
+                                    weight associated with matching the corresponding podAffinityTerm,
+                                    in the range 1-100.
+                                  format: int32
+                                  type: integer
+                                required:
+                                - podAffinityTerm
+                                - weight
+                              type: object
+                            type: array
+                          requiredDuringSchedulingIgnoredDuringExecution:
+                            description: |-
+                              If the affinity requirements specified by this field are not met at
+                              scheduling time, the pod will not be scheduled onto the node.
+                              If the affinity requirements specified by this field cease to be met
+                              at some point during pod execution (e.g. due to a pod label update), the
+                              system may or may not try to eventually evict the pod from its node.
+                              When there are multiple elements, the lists of nodes corresponding to each
+                              podAffinityTerm are intersected, i.e. all terms must be satisfied.
+                            items:
+                              description: |-
+                                Defines a set of pods (namely those matching the labelSelector
+                                relative to the given namespace(s)) that this pod should be
+                                co-located (affinity) or not co-located (anti-affinity) with,
+                                where co-located is defined as running on a node whose value of
+                                the label with key <topologyKey> matches that of any node on which
+                                a pod of the set of pods is running
+                              properties:
+                                labelSelector:
+                                  description: |-
+                                    A label query over a set of resources, in this case pods.
+                                    If it's null, this PodAffinityTerm matches with no Pods.
+                                  properties:
+                                    matchExpressions:
+                                      description: matchExpressions is a list
+                                        of label selector requirements. The requirements
+                                        are ANDed.
+                                      items:
+                                        description: |-
+                                          A label selector requirement is a selector that contains values, a key, and an operator that
+                                          relates the key and values.
+                                        properties:
+                                          key:
+                                            description: key is the label key
+                                              that the selector applies to.
+ type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: type: string - type: object + type: array + required: + - key + - operator type: object - namespaces: - items: - type: string - type: array - topologyKey: + type: array + matchLabels: + additionalProperties: type: string - required: - - topologyKey - type: object - weight: - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - items: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". 
+ An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: type: string - values: - items: + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. 
+ If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. type: string - type: array - required: - - key - - operator + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. 
+ properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string - type: array - topologyKey: - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: - properties: - podAffinityTerm: - properties: - labelSelector: + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+                          items:
+                            description: |-
+                              Defines a set of pods (namely those matching the labelSelector
+                              relative to the given namespace(s)) that this pod should be
+                              co-located (affinity) or not co-located (anti-affinity) with,
+                              where co-located is defined as running on a node whose value of
+                              the label with key <topologyKey> matches that of any node on which
+                              a pod of the set of pods is running
+                            properties:
+                              labelSelector:
+                                description: |-
+                                  A label query over a set of resources, in this case pods.
+                                  If it's null, this PodAffinityTerm matches with no Pods.
+                                properties:
+                                  matchExpressions:
+                                    description: matchExpressions is a list
+                                      of label selector requirements. The requirements
+                                      are ANDed.
+                                    items:
+                                      description: |-
+                                        A label selector requirement is a selector that contains values, a key, and an operator that
+                                        relates the key and values.
                                        properties:
+                                          key:
+                                            description: key is the label key
+                                              that the selector applies to.
+                                            type: string
+                                          operator:
+                                            description: |-
+                                              operator represents a key's relationship to a set of values.
+                                              Valid operators are In, NotIn, Exists and DoesNotExist.
+                                            type: string
+                                          values:
+                                            description: |-
+                                              values is an array of string values. If the operator is In or NotIn,
+                                              the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                              the values array must be empty. This array is replaced during a strategic
+                                              merge patch.
                                            items:
-                                              properties:
-                                                key:
-                                                  type: string
-                                                operator:
+                                              type: string
+                                            type: array
+                                          required:
+                                          - key
+                                          - operator
                                          type: object
-                                            type: array
-                                          matchLabels:
-                                            additionalProperties:
                                              type: string
-                                            type: object
+                                        description: |-
+                                          matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+                                          map is equivalent to an element of matchExpressions, whose key field is "key", the
+                                          operator is "In", and the values array contains only "value". The requirements are ANDed.
+                                          type: object
+                                      type: object
+                                      x-kubernetes-map-type: atomic
+                                  matchLabelKeys:
+                                    description: |-
+                                      MatchLabelKeys is a set of pod label keys to select which pods will
+                                      be taken into consideration. The keys are used to lookup values from the
+                                      incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)`
+                                      to select the group of existing pods which pods will be taken into consideration
+                                      for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+                                      pod labels will be ignored. The default value is empty.
+                                      The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.
+                                      Also, MatchLabelKeys cannot be set when LabelSelector isn't set.
+                                      This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+                                    items:
+                                      type: string
+                                    type: array
+                                    x-kubernetes-list-type: atomic
+                                  mismatchLabelKeys:
+                                    description: |-
+                                      MismatchLabelKeys is a set of pod label keys to select which pods will
+                                      be taken into consideration.
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string + type: array + required: + - key + - operator type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ items: type: string - required: - - topologyKey - type: object - type: array - type: object - type: object - annotations: - additionalProperties: - type: string - type: object - configMaps: - items: - properties: - name: - type: string - path: - type: string - required: - - name - - path + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array type: object - type: array - coreLimit: - type: string - coreRequest: + type: object + annotations: + additionalProperties: type: string - cores: - format: int32 - minimum: 1 - type: integer - dnsConfig: + description: Annotations are the Kubernetes annotations to + be added to the pod. + type: object + configMaps: + description: ConfigMaps carries information of other ConfigMaps + to add to the pod. + items: + description: NamePath is a pair of a name and a path to + which the named objects should be mounted to. properties: - nameservers: - items: - type: string - type: array - options: - items: - properties: - name: - type: string - value: - type: string - type: object - type: array - searches: - items: - type: string - type: array + name: + type: string + path: + type: string + required: + - name + - path type: object - env: - items: - properties: - name: - type: string - value: - type: string - valueFrom: - properties: - configMapKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - secretKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - type: object - required: - - name - type: object - type: array - envFrom: - items: - properties: - configMapRef: - properties: - name: - type: string - optional: - type: boolean - type: object - prefix: - type: string - secretRef: - properties: - name: - type: string - optional: - type: boolean - type: object - type: object - type: array - envSecretKeyRefs: - additionalProperties: - properties: - key: - type: string - name: - type: string - required: - - key - - name - type: object - type: object - envVars: - additionalProperties: - type: string - type: object - gpu: - properties: - name: + type: array + coreLimit: + description: |- + CoreLimit specifies a hard limit on CPU cores for the pod. + Optional + type: string + coreRequest: + description: |- + CoreRequest is the physical CPU core request for the driver. + Maps to `spark.kubernetes.driver.request.cores` that is available since Spark 3.0. + type: string + cores: + description: Cores maps to `spark.driver.cores` or `spark.executor.cores` + for the driver and executors, respectively. 
+                        format: int32
+                        minimum: 1
+                        type: integer
+                      dnsConfig:
+                        description: DnsConfig dns settings for the pod, following
+                          the Kubernetes specifications.
+                        properties:
+                          nameservers:
+                            description: |-
+                              A list of DNS name server IP addresses.
+                              This will be appended to the base nameservers generated from DNSPolicy.
+                              Duplicated nameservers will be removed.
+                            items:
                              type: string
+                            type: array
+                          options:
+                            description: |-
+                              A list of DNS resolver options.
+                              This will be merged with the base options generated from DNSPolicy.
+                              Duplicated entries will be removed. Resolution options given in Options
+                              will override those that appear in the base DNSPolicy.
+                            items:
+                              description: PodDNSConfigOption defines DNS resolver
+                                options of a pod.
+                              properties:
+                                name:
+                                  description: Required.
                                  type: string
+                                value:
                                  type: string
+                              type: object
+                            type: array
+                          searches:
+                            description: |-
+                              A list of DNS search domains for host-name lookup.
+                              This will be appended to the base search paths generated from DNSPolicy.
+                              Duplicated search paths will be removed.
+                            items:
+                              type: string
+                            type: array
+                        type: object
+                      env:
+                        description: Env carries the environment variables to add
+                          to the pod.
+                        items:
+                          description: EnvVar represents an environment variable present
+                            in a Container.
+                          properties:
+                            name:
+                              description: Name of the environment variable. Must
+                                be a C_IDENTIFIER.
+                              type: string
+                            value:
+                              description: |-
+                                Variable references $(VAR_NAME) are expanded
+                                using the previously defined environment variables in the container and
+                                any service environment variables. If a variable cannot be resolved,
+                                the reference in the input string will be unchanged. Double $$ are reduced
+                                to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+                                "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+                                Escaped references will never be expanded, regardless of whether the variable
+                                exists or not.
+                                Defaults to "".
+                              type: string
+                            valueFrom:
+                              description: Source for the environment variable's value.
+                                Cannot be used if value is not empty.
+                              properties:
+                                configMapKeyRef:
+                                  description: Selects a key of a ConfigMap.
+                                  properties:
+                                    key:
+                                      description: The key to select.
+                                      type: string
+                                    name:
+                                      description: |-
+                                        Name of the referent.
+                                        More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                                        TODO: Add other useful fields. apiVersion, kind, uid?
+                                      type: string
+                                    optional:
+                                      description: Specify whether the ConfigMap or
+                                        its key must be defined
+                                      type: boolean
+                                  required:
+                                  - key
+                                  type: object
+                                  x-kubernetes-map-type: atomic
+                                fieldRef:
+                                  description: |-
+                                    Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+                                    spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+                                  properties:
+                                    apiVersion:
+                                      description: Version of the schema the FieldPath
+                                        is written in terms of, defaults to "v1".
+                                      type: string
+                                    fieldPath:
+                                      description: Path of the field to select in
+                                        the specified API version.
type: string - valueFrom: - properties: - configMapKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - secretKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - type: object required: - - name + - fieldPath type: object - type: array - envFrom: - items: + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. properties: - configMapRef: - properties: - name: - type: string - optional: - type: boolean - type: object - prefix: + containerName: + description: 'Container name: required for volumes, + optional for env vars' type: string - secretRef: - properties: - name: - type: string - optional: - type: boolean - type: object + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource type: object - type: array - image: + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: EnvFrom is a list of sources to populate environment + variables in the container. + items: + description: EnvFromSource represents the source of a set + of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each + key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + envSecretKeyRefs: + additionalProperties: + description: NameKey represents the name and key of a SecretKeyRef. + properties: + key: + type: string + name: + type: string + required: + - key + - name + type: object + description: |- + EnvSecretKeyRefs holds a mapping from environment variable names to SecretKeyRefs. + Deprecated. Consider using `env` instead. + type: object + envVars: + additionalProperties: + type: string + description: |- + EnvVars carries the environment variables to add to the pod. + Deprecated. Consider using `env` instead. + type: object + gpu: + description: GPU specifies GPU requirement for the pod. + properties: + name: + description: 'Name is GPU resource name, such as: nvidia.com/gpu + or amd.com/gpu' + type: string + quantity: + description: Quantity is the number of GPUs to request + for driver or executor. + format: int64 + type: integer + required: + - name + - quantity + type: object + hostAliases: + description: HostAliases settings for the pod, following the + Kubernetes specifications. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + ip: + description: IP address of the host file entry. + type: string + type: object + type: array + hostNetwork: + description: HostNetwork indicates whether to request host + networking for the pod or not. + type: boolean + image: + description: Image is the container image to use. Overrides + Spec.Image if set. + type: string + initContainers: + description: InitContainers is a list of init-containers that + run to completion before the main Spark container. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: type: string - imagePullPolicy: + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. 
+                              More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+                            items:
                              type: string
+                            type: array
+                          env:
+                            description: |-
+                              List of environment variables to set in the container.
+                              Cannot be updated.
+                            items:
+                              description: EnvVar represents an environment variable
+                                present in a Container.
+                              properties:
+                                name:
+                                  description: Name of the environment variable.
+                                    Must be a C_IDENTIFIER.
+                                  type: string
+                                value:
+                                  description: |-
+                                    Variable references $(VAR_NAME) are expanded
+                                    using the previously defined environment variables in the container and
+                                    any service environment variables. If a variable cannot be resolved,
+                                    the reference in the input string will be unchanged. Double $$ are reduced
+                                    to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+                                    "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+                                    Escaped references will never be expanded, regardless of whether the variable
+                                    exists or not.
+                                    Defaults to "".
+                                  type: string
+                                valueFrom:
+                                  description: Source for the environment variable's
+                                    value. Cannot be used if value is not empty.
+                                  properties:
+                                    configMapKeyRef:
+                                      description: Selects a key of a ConfigMap.
+                                      properties:
+                                        key:
+                                          description: The key to select.
+                                          type: string
+                                        name:
+                                          description: |-
+                                            Name of the referent.
+                                            More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                                            TODO: Add other useful fields. apiVersion, kind, uid?
+                                          type: string
+                                        optional:
+                                          description: Specify whether the ConfigMap
+                                            or its key must be defined
+                                          type: boolean
+                                      required:
+                                      - key
+                                      type: object
+                                      x-kubernetes-map-type: atomic
+                                    fieldRef:
+                                      description: |-
+                                        Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+                                        spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+                                      properties:
+                                        apiVersion:
+                                          description: Version of the schema the
+                                            FieldPath is written in terms of, defaults
+                                            to "v1".
+                                          type: string
+                                        fieldPath:
+                                          description: Path of the field to select
+                                            in the specified API version.
properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: + containerName: + description: 'Container name: required + for volumes, optional for env vars' type: string - port: + divisor: anyOf: - type: integer - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - scheme: + resource: + description: 'Required: resource to select' type: string required: - - port + - resource type: object - tcpSocket: + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace properties: - host: + key: + description: The key of the secret to + select from. Must be a valid secret + key. type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean required: - - port + - key type: object + x-kubernetes-map-type: atomic type: object + required: + - name type: object - livenessProbe: + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps properties: - exec: + configMapRef: + description: The ConfigMap to select from properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string - required: - - port + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from properties: - host: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port + optional: + description: Specify whether the Secret must + be defined + type: boolean type: object - timeoutSeconds: - format: int32 - type: integer + x-kubernetes-map-type: atomic type: object - name: - type: string - ports: - items: + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: - containerPort: - format: int32 - type: integer - hostIP: - type: string - hostPort: - format: int32 - type: integer - name: - type: string - protocol: - type: string - required: - - containerPort - - protocol - type: object - type: array - x-kubernetes-list-map-keys: - - containerPort - - protocol - x-kubernetes-list-type: map - readinessProbe: - properties: - exec: - properties: - command: - items: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. 
type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - resources: - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - type: object - restartPolicy: - type: string - securityContext: - properties: - seccompProfile: - type: object - properties: - type: - type: string - allowPrivilegeEscalation: - type: boolean - capabilities: - properties: - add: - items: + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. type: string - type: array - drop: - items: + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string - type: array - type: object - privileged: - type: boolean - procMount: - type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object - type: object - startupProbe: - properties: - exec: - properties: - command: - items: + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. 
TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - stdin: - type: boolean - stdinOnce: - type: boolean - terminationMessagePath: - type: string - terminationMessagePolicy: - type: string - tty: - type: boolean - volumeDevices: - items: - properties: - devicePath: - type: string - name: - type: string - required: - - devicePath - - name + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object type: object - type: array - volumeMounts: - items: + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: - mountPath: - type: string - mountPropagation: - type: string - name: - type: string - readOnly: - type: boolean - subPath: - type: string - subPathExpr: - type: string - required: - - mountPath - - name + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. 
+ type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object type: object - type: array - workingDir: - type: string - required: - - name - type: object - type: array - javaOptions: - type: string - kubernetesMaster: - type: string - labels: - additionalProperties: - type: string - type: object - lifecycle: - properties: - postStart: + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes properties: exec: + description: Exec specifies the action to take. properties: command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). 
+ + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object httpGet: + description: HTTPGet specifies the http request + to perform. properties: host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. type: string httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. items: + description: HTTPHeader describes a custom + header to be used in HTTP probes properties: name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: + description: The header field value type: string required: - name @@ -1123,49 +1883,212 @@ spec: type: object type: array path: + description: Path to access on the HTTP server. type: string port: anyOf: - type: integer - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. properties: host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' type: string port: anyOf: - type: integer - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer type: object - preStop: - properties: + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: exec: + description: Exec specifies the action to take. properties: command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object httpGet: + description: HTTPGet specifies the http request + to perform. 
properties: host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. type: string httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. items: + description: HTTPHeader describes a custom + header to be used in HTTP probes properties: name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: + description: The header field value type: string required: - name @@ -1173,3324 +2096,9516 @@ spec: type: object type: array path: + description: Path to access on the HTTP server. type: string port: anyOf: - type: integer - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. properties: host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' type: string port: anyOf: - type: integer - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer type: object - type: object - memory: - type: string - memoryOverhead: - type: string - nodeSelector: - additionalProperties: - type: string - type: object - podName: - pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*' - type: string - podSecurityContext: - properties: - fsGroup: - format: int64 - type: integer - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - supplementalGroups: - items: - format: int64 - type: integer - type: array - sysctls: + resizePolicy: + description: Resources resize policy for the container. items: + description: ContainerResizePolicy represents resource + resize policy for the container. properties: - name: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. type: string - value: + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. type: string required: - - name - - value + - resourceName + - restartPolicy type: object type: array - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object - type: object - schedulerName: - type: string - secrets: - items: - properties: - name: - type: string - path: - type: string - secretType: - type: string - required: - - name - - path - - secretType - type: object - type: array - securityContext: - properties: - seccompProfile: - type: object - properties: - type: - type: string - allowPrivilegeEscalation: - type: boolean - capabilities: + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ properties: - add: - items: - type: string - type: array - drop: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. items: - type: string + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object type: object - privileged: - type: boolean - procMount: + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object - type: object - serviceAccount: - type: string - serviceAnnotations: - additionalProperties: - type: string - type: object - serviceLabels: - additionalProperties: - type: string - type: object - shareProcessNamespace: - type: boolean - sidecars: - items: - properties: - args: - items: - type: string - type: array - command: - items: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. 
+ Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. type: string - type: array - env: - items: + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. properties: - name: + level: + description: Level is SELinux level label that + applies to the container. type: string - value: + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. 
If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. type: string - valueFrom: - properties: - configMapKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - secretKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - type: object required: - - name + - type type: object - type: array - envFrom: - items: + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. properties: - configMapRef: - properties: - name: - type: string - optional: - type: boolean - type: object - prefix: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. 
type: string - secretRef: - properties: - name: - type: string - optional: - type: boolean - type: object type: object - type: array - image: - type: string - imagePullPolicy: - type: string - lifecycle: - properties: - postStart: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
type: string - required: - - port - type: object - tcpSocket: - properties: - host: + value: + description: The header field value type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true required: - - port - type: object - type: object - preStop: - properties: - exec: - properties: - command: - items: - type: string - type: array + - name + - value type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - livenessProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - name: - type: string - ports: - items: - properties: - containerPort: - format: int32 - type: integer - hostIP: - type: string - hostPort: - format: int32 - type: integer - name: - type: string - protocol: + type: array + path: + description: Path to access on the HTTP server. 
type: string - required: - - containerPort - - protocol - type: object - type: array - x-kubernetes-list-map-keys: - - containerPort - - protocol - x-kubernetes-list-type: map - readinessProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - resources: - properties: - limits: - additionalProperties: + port: anyOf: - type: integer - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true - type: object - requests: - additionalProperties: + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: anyOf: - type: integer - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true - type: object + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. 
+ Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name type: object - securityContext: + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. 
properties: - seccompProfile: - type: object - properties: - type: - type: string - allowPrivilegeEscalation: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. type: boolean - capabilities: - properties: - add: - items: + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + javaOptions: + description: |- + JavaOptions is a string of extra JVM options to pass to the driver. For instance, + GC settings or other logging. + type: string + kubernetesMaster: + description: |- + KubernetesMaster is the URL of the Kubernetes master used by the driver to manage executor pods and + other Kubernetes resources. Default to https://kubernetes.default.svc. + type: string + labels: + additionalProperties: + type: string + description: Labels are the Kubernetes labels to be added + to the pod. + type: object + lifecycle: + description: Lifecycle for running preStop or postStart commands + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to + perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string - type: array - drop: - items: + value: + description: The header field value type: string - type: array - type: object - privileged: - type: boolean - procMount: + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the + container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. format: int64 type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object + required: + - seconds type: object - startupProbe: + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. properties: - exec: - properties: - command: - items: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. 
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to + perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the + container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 type: integer + required: + - seconds type: object - stdin: - type: boolean - stdinOnce: - type: boolean - terminationMessagePath: + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + memory: + description: Memory is the amount of memory to request for + the pod. + type: string + memoryOverhead: + description: MemoryOverhead is the amount of off-heap memory + to allocate in cluster mode, in MiB unless otherwise specified. + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is the Kubernetes node selector to be added to the driver and executor pods. 
+ This field is mutually exclusive with nodeSelector at SparkApplication level (which will be deprecated). + type: object + podName: + description: |- + PodName is the name of the driver pod that the user creates. This is used for the + in-cluster client mode in which the user creates a client pod where the driver of + the user application runs. It's an error to set this field if Mode is not + in-cluster-client. + pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*' + type: string + podSecurityContext: + description: PodSecurityContext specifies the PodSecurityContext + to apply. + properties: + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. 
+ type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. type: string - terminationMessagePolicy: + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. type: string - tty: + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in addition + to the container's primary GID, the fsGroup (if specified), and group memberships + defined in the container image for the uid of the container process. If unspecified, + no additional groups are added to any container. Note that group memberships + defined in the container image for the uid of the container process are still effective, + even if they are not included in this list. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be + set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. 
type: boolean - volumeDevices: + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + ports: + description: Ports settings for the pods, following the Kubernetes + specifications. + items: + description: Port represents the port definition in the + pods objects. + properties: + containerPort: + format: int32 + type: integer + name: + type: string + protocol: + type: string + required: + - containerPort + - name + - protocol + type: object + type: array + schedulerName: + description: SchedulerName specifies the scheduler that will + be used for scheduling + type: string + secrets: + description: Secrets carries information of secrets to add + to the pod. + items: + description: SecretInfo captures information of a secret. + properties: + name: + type: string + path: + type: string + secretType: + description: SecretType tells the type of a secret. + type: string + required: + - name + - path + - secretType + type: object + type: array + securityContext: + description: SecurityContext specifies the container's SecurityContext + to apply. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities items: - properties: - devicePath: - type: string - name: - type: string - required: - - devicePath - - name - type: object + description: Capability represent POSIX capabilities + type + type: string type: array - volumeMounts: + drop: + description: Removed capabilities items: - properties: - mountPath: - type: string - mountPropagation: - type: string - name: - type: string - readOnly: - type: boolean - subPath: - type: string - subPathExpr: - type: string - required: - - mountPath - - name - type: object + description: Capability represent POSIX capabilities + type + type: string type: array - workingDir: - type: string - required: - - name type: object - type: array - terminationGracePeriodSeconds: - format: int64 - type: integer - tolerations: - items: + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. 
+ type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. properties: - effect: + level: + description: Level is SELinux level label that applies + to the container. type: string - key: + role: + description: Role is a SELinux role label that applies + to the container. type: string - operator: + type: + description: Type is a SELinux type label that applies + to the container. type: string - tolerationSeconds: - format: int64 - type: integer - value: + user: + description: User is a SELinux user label that applies + to the container. type: string type: object - type: array - volumeMounts: - items: + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. properties: - mountPath: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. type: string - mountPropagation: - type: string - name: + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. 
type: string - readOnly: + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. type: boolean - subPath: - type: string - subPathExpr: + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. type: string - required: - - mountPath - - name type: object - type: array - type: object - dynamicAllocation: - properties: - enabled: - type: boolean - initialExecutors: - format: int32 - type: integer - maxExecutors: - format: int32 - type: integer - minExecutors: - format: int32 - type: integer - shuffleTrackingTimeout: - format: int64 - type: integer - type: object - executor: - properties: - affinity: + type: object + serviceAccount: + description: ServiceAccount is the name of the custom Kubernetes + service account used by the pod. + type: string + serviceAnnotations: + additionalProperties: + type: string + description: |- + ServiceAnnotations defines the annotations to be added to the Kubernetes headless service used by + executors to connect to the driver. + type: object + serviceLabels: + additionalProperties: + type: string + description: |- + ServiceLabels defines the labels to be added to the Kubernetes headless service used by + executors to connect to the driver. + type: object + shareProcessNamespace: + description: ShareProcessNamespace settings for the pod, following + the Kubernetes specifications. + type: boolean + sidecars: + description: Sidecars is a list of sidecar containers that + run along side the main Spark container. + items: + description: A single application container that you want + to run within a pod. properties: - nodeAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". 
Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. properties: - preference: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - properties: - nodeSelectorTerms: - items: + configMapKeyRef: + description: Selects a key of a ConfigMap. properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: - properties: - podAffinityTerm: + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. type: string required: - - topologyKey + - fieldPath type: object - weight: - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - items: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: - properties: - podAffinityTerm: + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' type: string required: - - topologyKey + - resource type: object - weight: - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - items: - properties: - labelSelector: + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key type: object - namespaces: - items: - type: string - type: array - topologyKey: - type: string - required: - - topologyKey + x-kubernetes-map-type: atomic type: object - type: array - type: object - type: object - annotations: - additionalProperties: - type: string - type: object - configMaps: - items: - properties: - name: - type: string - path: - type: string - required: - - name - - path - type: object - type: array - coreLimit: - type: string - coreRequest: - type: string - cores: - format: int32 - minimum: 1 - type: integer - deleteOnTermination: - type: boolean - dnsConfig: - properties: - nameservers: - items: - type: string - type: array - options: - items: - properties: - name: - type: string - value: - type: string + required: + - name type: object type: array - searches: + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. 
items: - type: string - type: array - type: object - env: - items: - properties: - name: - type: string - value: - type: string - valueFrom: + description: EnvFromSource represents the source of + a set of ConfigMaps properties: - configMapKeyRef: + configMapRef: + description: The ConfigMap to select from properties: - key: - type: string name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: + description: Specify whether the ConfigMap + must be defined type: boolean - required: - - key - type: object - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource type: object - secretKeyRef: + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from properties: - key: - type: string name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: + description: Specify whether the Secret must + be defined type: boolean - required: - - key type: object + x-kubernetes-map-type: atomic type: object - required: - - name - type: object - type: array - envFrom: - items: - properties: - configMapRef: - properties: - name: - type: string - optional: - type: boolean - type: object - prefix: - type: string - secretRef: - properties: - name: - type: string - optional: - type: boolean - type: object - type: object - type: array - envSecretKeyRefs: - additionalProperties: - properties: - key: - type: string - name: - type: string - required: - - key - - name - type: object - type: object - envVars: - additionalProperties: - type: string - type: object - gpu: - properties: - name: + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. 
type: string - quantity: - format: int64 - type: integer - required: - - name - - quantity - type: object - hostAliases: - items: - properties: - hostnames: - items: - type: string - type: array - ip: - type: string - type: object - type: array - hostNetwork: - type: boolean - image: - type: string - initContainers: - items: - properties: - args: - items: - type: string - type: array - command: - items: - type: string - type: array - env: - items: - properties: - name: - type: string - value: - type: string - valueFrom: - properties: - configMapKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - secretKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - type: object - required: - - name - type: object - type: array - envFrom: - items: + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: - configMapRef: + exec: + description: Exec specifies the action to take. properties: - name: - type: string - optional: - type: boolean + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array type: object - prefix: - type: string - secretRef: + httpGet: + description: HTTPGet specifies the http request + to perform. properties: - name: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. 
type: string - optional: - type: boolean - type: object - type: object - type: array - image: - type: string - imagePullPolicy: - type: string - lifecycle: - properties: - postStart: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - tcpSocket: - properties: - host: + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. 
To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string - required: - - port - type: object - tcpSocket: - properties: - host: + value: + description: The header field value type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true required: - - port + - name + - value type: object - type: object - type: object - livenessProbe: + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. format: int32 type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. 
format: int32 type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort type: object - name: - type: string - ports: - items: + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. properties: - containerPort: - format: int32 - type: integer - hostIP: - type: string - hostPort: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. format: int32 type: integer - name: - type: string - protocol: + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. type: string required: - - containerPort - - protocol + - port type: object - type: array - x-kubernetes-list-map-keys: - - containerPort - - protocol - x-kubernetes-list-type: map - readinessProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - resources: - properties: - limits: - additionalProperties: + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. 
You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: anyOf: - type: integer - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true - type: object - requests: - additionalProperties: + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: anyOf: - type: integer - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true - type: object - type: object - restartPolicy: - type: string - securityContext: + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. properties: - seccompProfile: - type: object - properties: - type: - type: string - allowPrivilegeEscalation: - type: boolean - capabilities: - properties: - add: - items: - type: string - type: array - drop: - items: - type: string - type: array - type: object - privileged: - type: boolean - procMount: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy type: object - startupProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. type: string required: - - port + - name type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. 
+ properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - stdin: - type: boolean - stdinOnce: - type: boolean - terminationMessagePath: - type: string - terminationMessagePolicy: - type: string - tty: - type: boolean - volumeDevices: - items: + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. properties: - devicePath: + level: + description: Level is SELinux level label that + applies to the container. type: string - name: + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. 
+ type: string + user: + description: User is a SELinux user label that + applies to the container. type: string - required: - - devicePath - - name type: object - type: array - volumeMounts: - items: + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. properties: - mountPath: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. type: string - mountPropagation: + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. type: string - name: + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. type: string - readOnly: - type: boolean - subPath: + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. type: string - subPathExpr: + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. 
type: string - required: - - mountPath - - name type: object - type: array - workingDir: - type: string - required: - - name - type: object - type: array - instances: - format: int32 - minimum: 1 - type: integer - javaOptions: - type: string - labels: - additionalProperties: - type: string - type: object - memory: - type: string - memoryOverhead: - type: string - nodeSelector: - additionalProperties: - type: string - type: object - podSecurityContext: - properties: - fsGroup: - format: int64 - type: integer - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - supplementalGroups: - items: - format: int64 - type: integer - type: array - sysctls: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object - type: object - schedulerName: - type: string - secrets: - items: - properties: - name: - type: string - path: - type: string - secretType: - type: string - required: - - name - - path - - secretType - type: object - type: array - securityContext: - properties: - seccompProfile: type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes properties: - type: - type: string - allowPrivilegeEscalation: - type: boolean - capabilities: - properties: - add: - items: - type: string - type: array - drop: - items: - type: string - type: array + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. 
+ type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer type: object - privileged: + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false type: boolean - procMount: + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. type: string - readOnlyRootFilesystem: + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. type: boolean - runAsGroup: + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. 
+ type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + terminationGracePeriodSeconds: + description: Termination grace period seconds for the pod + format: int64 + type: integer + tolerations: + description: Tolerations specifies the tolerations listed + in ".spec.tolerations" to be applied to the pod. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. format: int64 type: integer - runAsNonRoot: + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + volumeMounts: + description: VolumeMounts specifies the volumes listed in + ".spec.volumes" to mount into the main container's filesystem. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. 
type: boolean
+ subPath:
+ description: |-
+ Path within the volume from which the container's volume should be mounted.
+ Defaults to "" (volume's root).
+ type: string
+ subPathExpr:
+ description: |-
+ Expanded path within the volume from which the container's volume should be mounted.
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+ Defaults to "" (volume's root).
+ SubPathExpr and SubPath are mutually exclusive.
+ type: string
+ required:
+ - mountPath
+ - name
type: object
+ type: array
+ type: object
+ driverIngressOptions:
+ description: DriverIngressOptions allows configuring the Service
+ and the Ingress to expose ports inside the Spark driver
+ items:
+ description: DriverIngressConfiguration is for driver ingress
+ specific configuration parameters.
+ properties:
+ ingressAnnotations:
+ additionalProperties:
+ type: string
+ description: IngressAnnotations is a map of key,value pairs
+ of annotations that might be added to the ingress object,
+ e.g. to specify nginx as the ingress class
+ type: object
+ ingressTLS:
+ description: IngressTLS specifies the SSL certificates to
+ declare on the ingress object
items:
+ description: IngressTLS describes the transport layer
+ security associated with an ingress.
properties:
+ hosts:
+ description: |-
+ hosts is a list of hosts included in the TLS certificate. The values in
+ this list must match the name/s used in the tlsSecret. Defaults to the
+ wildcard host setting for the loadbalancer controller fulfilling this
+ Ingress, if left unspecified.
items:
type: string
type: array
+ x-kubernetes-list-type: atomic
+ secretName:
+ description: |-
+ secretName is the name of the secret used to terminate TLS traffic on
+ port 443. Field is left optional to allow TLS routing based on SNI
+ hostname alone. If the SNI host in a listener conflicts with the "Host"
+ header field used by an IngressRule, the SNI host is used for termination
+ and value of the "Host" header is used for routing.
+ type: string
+ type: object
+ type: array
+ ingressURLFormat:
+ description: IngressURLFormat is the URL format for the
+ ingress.
+ type: string
+ serviceAnnotations:
+ additionalProperties:
+ type: string
+ description: ServiceAnnotations is a map of key,value pairs
+ of annotations that might be added to the service object.
+ type: object
+ serviceLabels:
+ additionalProperties:
+ type: string
+ description: ServiceLabels is a map of key,value pairs of
+ labels that might be added to the service object.
+ type: object
+ servicePort:
+ description: ServicePort allows configuring the port at
+ service level that might be different from the targetPort.
+ format: int32
+ type: integer
+ servicePortName:
+ description: |-
+ ServicePortName allows configuring the name of the service port.
+ This may be useful for sidecar proxies like Envoy injected by Istio which require specific port names to treat traffic as proper HTTP.
+ type: string + serviceType: + description: ServiceType allows configuring the type of + the service. Defaults to ClusterIP. + type: string + required: + - servicePort + - servicePortName + type: object + type: array + dynamicAllocation: + description: |- + DynamicAllocation configures dynamic allocation that becomes available for the Kubernetes + scheduler backend since Spark 3.0. + properties: + enabled: + description: Enabled controls whether dynamic allocation is + enabled or not. + type: boolean + initialExecutors: + description: |- + InitialExecutors is the initial number of executors to request. If .spec.executor.instances + is also set, the initial number of executors is set to the bigger of that and this option. + format: int32 + type: integer + maxExecutors: + description: MaxExecutors is the upper bound for the number + of executors if dynamic allocation is enabled. + format: int32 + type: integer + minExecutors: + description: MinExecutors is the lower bound for the number + of executors if dynamic allocation is enabled. + format: int32 + type: integer + shuffleTrackingTimeout: + description: |- + ShuffleTrackingTimeout controls the timeout in milliseconds for executors that are holding + shuffle data if shuffle tracking is enabled (true by default if dynamic allocation is enabled). + format: int64 + type: integer + type: object + executor: + description: Executor is the executor specification. + properties: + affinity: + description: Affinity specifies the affinity/anti-affinity + settings for the pod. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). properties: - name: - type: string - value: - type: string - valueFrom: + preference: + description: A node selector term, associated + with the corresponding weight. 
properties: - configMapKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - secretKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in the + range 1-100. + format: int32 + type: integer required: - - name - type: object - type: array - envFrom: - items: - properties: - configMapRef: - properties: - name: - type: string - optional: - type: boolean - type: object - prefix: - type: string - secretRef: - properties: - name: - type: string - optional: - type: boolean - type: object + - preference + - weight type: object type: array - image: - type: string - imagePullPolicy: - type: string - lifecycle: + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. 
+ If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. properties: - postStart: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: type: string - value: + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
+ items: type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
type: object - type: array - path: + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: type: string - required: - - port - type: object - tcpSocket: - properties: - host: + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - livenessProbe: - properties: - exec: - properties: - command: - items: + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object - type: array - path: + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - name: - type: string - ports: + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) properties: - containerPort: - format: int32 - type: integer - hostIP: - type: string - hostPort: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. 
+ type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. format: int32 type: integer - name: - type: string - protocol: - type: string required: - - containerPort - - protocol + - podAffinityTerm + - weight type: object type: array - x-kubernetes-list-map-keys: - - containerPort - - protocol - x-kubernetes-list-type: map - readinessProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. 
The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object - type: array - path: + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
+ items: type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - resources: - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - type: object - securityContext: - properties: - seccompProfile: - type: object - properties: - type: - type: string - allowPrivilegeEscalation: - type: boolean - capabilities: - properties: - add: - items: - type: string - type: array - drop: - items: - type: string - type: array - type: object - privileged: - type: boolean - procMount: - type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object - type: object - startupProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - stdin: - type: boolean - stdinOnce: - type: boolean - terminationMessagePath: - type: string - terminationMessagePolicy: - type: string - tty: - type: boolean - volumeDevices: - items: - properties: - devicePath: - type: string - name: - type: string - required: - - devicePath - - name - type: object - type: array - volumeMounts: - items: - properties: - mountPath: - type: string - mountPropagation: - type: string - name: - type: string - readOnly: - type: boolean - subPath: - type: string - subPathExpr: + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. 
type: string required: - - mountPath - - name + - topologyKey type: object type: array - workingDir: - type: string - required: - - name - type: object - type: array - terminationGracePeriodSeconds: - format: int64 - type: integer - tolerations: - items: - properties: - effect: - type: string - key: - type: string - operator: - type: string - tolerationSeconds: - format: int64 - type: integer - value: - type: string - type: object - type: array - volumeMounts: - items: - properties: - mountPath: - type: string - mountPropagation: - type: string - name: - type: string - readOnly: - type: boolean - subPath: - type: string - subPathExpr: - type: string - required: - - mountPath - - name type: object - type: array - type: object - failureRetries: - format: int32 - type: integer - hadoopConf: - additionalProperties: - type: string - type: object - hadoopConfigMap: - type: string - image: - type: string - imagePullPolicy: - type: string - imagePullSecrets: - items: - type: string - type: array - mainApplicationFile: - type: string - mainClass: - type: string - memoryOverheadFactor: - type: string - mode: - enum: - - cluster - - client - type: string - monitoring: - properties: - exposeDriverMetrics: - type: boolean - exposeExecutorMetrics: - type: boolean - metricsProperties: - type: string - metricsPropertiesFile: + type: object + annotations: + additionalProperties: type: string - prometheus: + description: Annotations are the Kubernetes annotations to + be added to the pod. + type: object + configMaps: + description: ConfigMaps carries information of other ConfigMaps + to add to the pod. + items: + description: NamePath is a pair of a name and a path to + which the named objects should be mounted to. properties: - configFile: - type: string - configuration: - type: string - jmxExporterJar: + name: type: string - port: - format: int32 - maximum: 49151 - minimum: 1024 - type: integer - portName: + path: type: string required: - - jmxExporterJar + - name + - path type: object - required: - - exposeDriverMetrics - - exposeExecutorMetrics - type: object - nodeSelector: - additionalProperties: + type: array + coreLimit: + description: |- + CoreLimit specifies a hard limit on CPU cores for the pod. + Optional type: string - type: object - proxyUser: - type: string - pythonVersion: - enum: - - "2" - - "3" - type: string - restartPolicy: - properties: - onFailureRetries: - format: int32 - minimum: 0 - type: integer - onFailureRetryInterval: - format: int64 - minimum: 1 - type: integer - onSubmissionFailureRetries: - format: int32 - minimum: 0 - type: integer - onSubmissionFailureRetryInterval: - format: int64 - minimum: 1 - type: integer - type: - enum: - - Never - - Always - - OnFailure - type: string - type: object - retryInterval: - format: int64 - type: integer - sparkConf: - additionalProperties: + coreRequest: + description: |- + CoreRequest is the physical CPU core request for the executors. + Maps to `spark.kubernetes.executor.request.cores` that is available since Spark 2.4. 
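As a rough illustration of how the neighboring CPU fields relate (values are made up): `coreRequest` sets the pod-level CPU request independently of the Spark core count, while `coreLimit` caps it:

```yaml
# Sketch only: executor CPU sizing in a SparkApplication spec.
executor:
  cores: 4              # spark.executor.cores: task slots per executor
  coreRequest: "500m"   # pod CPU request (spark.kubernetes.executor.request.cores)
  coreLimit: "4"        # hard CPU limit on the executor pods
```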
type: string - type: object - sparkConfigMap: - type: string - sparkUIOptions: - properties: - serviceAnnotations: - additionalProperties: - type: string - type: object - serviceLabels: - additionalProperties: - type: string - type: object - ingressAnnotations: - additionalProperties: - type: string - type: object - ingressTLS: - items: - properties: - hosts: - items: - type: string - type: array - secretName: - type: string - type: object - type: array - servicePort: - format: int32 - type: integer - serviceType: - type: string - type: object - driverIngressOptions: - items: + cores: + description: Cores maps to `spark.driver.cores` or `spark.executor.cores` + for the driver and executors, respectively. + format: int32 + minimum: 1 + type: integer + deleteOnTermination: + description: |- + DeleteOnTermination specify whether executor pods should be deleted in case of failure or normal termination. + Maps to `spark.kubernetes.executor.deleteOnTermination` that is available since Spark 3.0. + type: boolean + dnsConfig: + description: DnsConfig dns settings for the pod, following + the Kubernetes specifications. properties: - serviceAnnotations: - additionalProperties: - type: string - type: object - serviceLabels: - additionalProperties: - type: string - type: object - ingressURLFormat: - type: string - ingressAnnotations: - additionalProperties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: type: string - type: object - ingressTLS: + type: array + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. items: + description: PodDNSConfigOption defines DNS resolver + options of a pod. properties: - hosts: - items: - type: string - type: array - secretName: + name: + description: Required. + type: string + value: type: string type: object type: array - servicePort: - format: int32 - type: integer - servicePortName: - type: string - serviceType: - type: string + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. 
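A hedged example of the `dnsConfig` block described above, following the Kubernetes `PodDNSConfig` shape; the resolver IP and search domain are placeholders:

```yaml
# Illustrative pod DNS overrides; merged with the DNSPolicy-generated config.
driver:
  dnsConfig:
    nameservers:
      - "10.0.0.53"            # placeholder resolver IP
    searches:
      - example.internal       # placeholder search domain
    options:
      - name: ndots
        value: "2"
```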
+ items: + type: string + type: array type: object - type: array - sparkVersion: - type: string - timeToLiveSeconds: - format: int64 - type: integer - type: - enum: - - Java - - Python - - Scala - - R - type: string - volumes: - items: - properties: - awsElasticBlockStore: - properties: - fsType: - type: string - partition: - format: int32 - type: integer - readOnly: - type: boolean - volumeID: - type: string - required: - - volumeID - type: object - azureDisk: - properties: - cachingMode: - type: string - diskName: - type: string - diskURI: - type: string - fsType: - type: string - kind: - type: string - readOnly: - type: boolean - required: - - diskName - - diskURI - type: object - azureFile: - properties: - readOnly: - type: boolean - secretName: - type: string - shareName: - type: string - required: - - secretName - - shareName - type: object - cephfs: - properties: - monitors: - items: - type: string - type: array - path: - type: string - readOnly: - type: boolean - secretFile: - type: string - secretRef: - properties: - name: - type: string - type: object - user: - type: string - required: - - monitors - type: object - cinder: - properties: - fsType: - type: string - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - volumeID: - type: string - required: - - volumeID - type: object - configMap: - properties: - defaultMode: - format: int32 - type: integer - items: - items: + env: + description: Env carries the environment variables to add + to the pod. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. properties: key: + description: The key to select. type: string - mode: - format: int32 - type: integer - path: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean required: - key - - path type: object - type: array - name: + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: EnvFrom is a list of sources to populate environment + variables in the container. + items: + description: EnvFromSource represents the source of a set + of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each + key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + envSecretKeyRefs: + additionalProperties: + description: NameKey represents the name and key of a SecretKeyRef. + properties: + key: + type: string + name: + type: string + required: + - key + - name + type: object + description: |- + EnvSecretKeyRefs holds a mapping from environment variable names to SecretKeyRefs. + Deprecated. Consider using `env` instead. + type: object + envVars: + additionalProperties: + type: string + description: |- + EnvVars carries the environment variables to add to the pod. + Deprecated. Consider using `env` instead. + type: object + gpu: + description: GPU specifies GPU requirement for the pod. 
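Since `envVars` and `envSecretKeyRefs` are marked deprecated above in favor of `env`, here is a minimal sketch of the preferred form; the Secret name and key are hypothetical:

```yaml
# Illustrative: a plain value plus a Secret-backed variable via valueFrom.
driver:
  env:
    - name: LOG_LEVEL
      value: INFO
    - name: DB_PASSWORD
      valueFrom:
        secretKeyRef:
          name: db-credentials   # hypothetical Secret
          key: password
```

Per the schema, `value` and `valueFrom` are mutually exclusive for a given variable.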
+ properties: + name: + description: 'Name is GPU resource name, such as: nvidia.com/gpu + or amd.com/gpu' + type: string + quantity: + description: Quantity is the number of GPUs to request + for driver or executor. + format: int64 + type: integer + required: + - name + - quantity + type: object + hostAliases: + description: HostAliases settings for the pod, following the + Kubernetes specifications. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: type: string - optional: - type: boolean - type: object - csi: - properties: - driver: + type: array + ip: + description: IP address of the host file entry. + type: string + type: object + type: array + hostNetwork: + description: HostNetwork indicates whether to request host + networking for the pod or not. + type: boolean + image: + description: Image is the container image to use. Overrides + Spec.Image if set. + type: string + initContainers: + description: InitContainers is a list of init-containers that + run to completion before the main Spark container. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: type: string - fsType: + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: type: string - nodePublishSecretRef: + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. properties: name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. 
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name type: object - readOnly: - type: boolean - volumeAttributes: - additionalProperties: - type: string - type: object - required: - - driver - type: object - downwardAPI: - properties: - defaultMode: - format: int32 - type: integer + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. 
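A minimal sketch of an entry in `initContainers` as described above; the image, host, and port are assumptions for illustration:

```yaml
# Illustrative init container that blocks until a dependency is reachable,
# so the main Spark container only starts once the service is up.
driver:
  initContainers:
    - name: wait-for-metastore
      image: busybox:1.36
      command: ["sh", "-c", "until nc -z metastore 9083; do sleep 2; done"]
```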
items: - items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: - fieldRef: + exec: + description: Exec specifies the action to take. properties: - apiVersion: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. type: string - fieldPath: + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - - fieldPath + - port type: object - mode: - format: int32 - type: integer - path: - type: string - resourceFieldRef: + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. properties: - containerName: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' type: string - divisor: + port: anyOf: - type: integer - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true - resource: - type: string required: - - resource + - port type: object - required: - - path type: object - type: array - type: object - emptyDir: - properties: - medium: - type: string - sizeLimit: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - ephemeral: - properties: - volumeClaimTemplate: - properties: - spec: - properties: - accessModes: - items: + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
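This `LifecycleHandler` shape is the same `corev1.Lifecycle` type that `spec.driver.lifecycle` and the newly added `spec.executor.lifecycle` accept; a hedged sketch with a hypothetical cleanup script:

```yaml
# Illustrative executor preStop hook. Per the description above, the
# termination grace-period countdown begins before the hook runs.
executor:
  lifecycle:
    preStop:
      exec:
        command: ["/bin/sh", "-c", "/opt/scripts/flush-local-state.sh"]
```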
+ items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. type: string - type: array - resources: - properties: - requests: + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes properties: - storage: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string - type: object - type: object - storageClassName: - type: string - type: object - type: object - type: object - fc: - properties: - fsType: - type: string - lun: - format: int32 - type: integer - readOnly: - type: boolean - targetWWNs: - items: - type: string - type: array - wwids: - items: - type: string - type: array - type: object - flexVolume: - properties: - driver: - type: string - fsType: - type: string - options: - additionalProperties: - type: string - type: object - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - required: - - driver - type: object - flocker: - properties: - datasetName: - type: string - datasetUUID: - type: string - type: object - gcePersistentDisk: - properties: - fsType: - type: string - partition: - format: int32 - type: integer - pdName: - type: string - readOnly: - type: boolean - required: - - pdName - type: object - gitRepo: - properties: - directory: - type: string - repository: - type: string - revision: - type: string - required: - - repository - type: object - glusterfs: - properties: - endpoints: - type: string - path: - type: string - readOnly: - type: boolean - required: - - endpoints - - path - type: object - hostPath: - properties: - path: - type: string - type: - type: string - required: - - path - type: object - iscsi: - properties: - chapAuthDiscovery: - type: boolean - chapAuthSession: - type: boolean - fsType: - type: string - initiatorName: - type: string - iqn: - type: string - iscsiInterface: - type: string - lun: - format: int32 - type: integer - portals: - items: - type: string - type: array - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - targetPortal: - type: string - required: - - iqn - - lun - - targetPortal - type: object - name: - type: string - nfs: - properties: - path: - type: string - readOnly: - type: boolean - server: - type: string - required: - - path - - server - type: object - persistentVolumeClaim: - properties: - claimName: - type: string - readOnly: - type: boolean - required: - - claimName - type: object - photonPersistentDisk: - properties: - fsType: - type: string - pdID: - type: string - required: - - pdID - type: object - portworxVolume: - properties: - fsType: - type: string - readOnly: - type: boolean - volumeID: - type: string - required: - - volumeID - type: object - projected: - properties: - defaultMode: - format: int32 - type: integer - sources: - items: - properties: - configMap: - properties: - items: - items: - properties: - key: - type: string - mode: - format: int32 - type: integer - path: - type: string - required: - - key - - path + value: + description: The header field value + type: string + required: + - name + - value type: object type: array - name: + path: + description: Path to access on the 
HTTP + server. type: string - optional: - type: boolean - type: object - downwardAPI: - properties: - items: - items: - properties: - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - mode: - format: int32 - type: integer - path: - type: string - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - required: - - path - type: object - type: array - type: object - secret: - properties: - items: - items: - properties: - key: - type: string - mode: - format: int32 - type: integer - path: - type: string - required: - - key - - path - type: object - type: array - name: + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string - optional: - type: boolean + required: + - port type: object - serviceAccountToken: + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. properties: - audience: - type: string - expirationSeconds: + seconds: + description: Seconds is the number of seconds + to sleep. format: int64 type: integer - path: + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true required: - - path + - port type: object type: object - type: array - required: - - sources - type: object - quobyte: - properties: - group: - type: string - readOnly: - type: boolean - registry: - type: string - tenant: - type: string - user: - type: string - volume: - type: string - required: - - registry - - volume - type: object - rbd: - properties: - fsType: - type: string - image: - type: string - keyring: - type: string - monitors: - items: - type: string - type: array - pool: - type: string - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - user: - type: string - required: - - image - - monitors - type: object - scaleIO: - properties: - fsType: - type: string - gateway: - type: string - protectionDomain: - type: string - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - sslEnabled: - type: boolean - storageMode: - type: string - storagePool: - type: string - system: - type: string - volumeName: - type: string - required: - - gateway - - secretRef - - system - type: object - secret: - properties: - defaultMode: - format: int32 - type: integer - items: - items: + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. 
+ Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. properties: - key: - type: string - mode: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. format: int32 type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - - key - - path + - port type: object - type: array - optional: - type: boolean - secretName: - type: string - type: object - storageos: - properties: - fsType: - type: string - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - volumeName: - type: string - volumeNamespace: - type: string - type: object - vsphereVolume: - properties: - fsType: - type: string - storagePolicyID: - type: string - storagePolicyName: - type: string - volumePath: - type: string + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. 
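Putting the probe fields above together, a sketch of a liveness probe as it might appear on a sidecar-style init container; the endpoint and port are assumptions:

```yaml
# Illustrative probe; the documented defaults are shown explicitly.
livenessProbe:
  httpGet:
    path: /healthz         # hypothetical health endpoint
    port: 8080
  initialDelaySeconds: 10  # wait before the first probe
  periodSeconds: 10        # the documented default interval
  failureThreshold: 3      # restart after three consecutive failures
```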
+ format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". 
+ type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
+ format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. 
+ The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. 
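A hedged sketch combining the `securityContext` fields above into a locked-down container profile; the UID is an assumption about the image:

```yaml
# Illustrative hardened securityContext.
securityContext:
  runAsNonRoot: true
  runAsUser: 185                  # assumed non-root UID baked into the image
  allowPrivilegeEscalation: false
  readOnlyRootFilesystem: true
  capabilities:
    drop: ["ALL"]
  seccompProfile:
    type: RuntimeDefault          # container runtime's default seccomp profile
```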
+ type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. 
+ properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. 
If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. 
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + instances: + description: Instances is the number of executor instances. + format: int32 + minimum: 1 + type: integer + javaOptions: + description: |- + JavaOptions is a string of extra JVM options to pass to the executors. For instance, + GC settings or other logging. + type: string + labels: + additionalProperties: + type: string + description: Labels are the Kubernetes labels to be added + to the pod. + type: object + lifecycle: + description: Lifecycle for running preStop or postStart commands + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to + perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the + container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. 
TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to + perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the + container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. 
There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + memory: + description: Memory is the amount of memory to request for + the pod. + type: string + memoryOverhead: + description: MemoryOverhead is the amount of off-heap memory + to allocate in cluster mode, in MiB unless otherwise specified. + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is the Kubernetes node selector to be added to the driver and executor pods. + This field is mutually exclusive with nodeSelector at SparkApplication level (which will be deprecated). + type: object + podSecurityContext: + description: PodSecurityContext specifies the PodSecurityContext + to apply. + properties: + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. 
+ format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in addition + to the container's primary GID, the fsGroup (if specified), and group memberships + defined in the container image for the uid of the container process. If unspecified, + no additional groups are added to any container. Note that group memberships + defined in the container image for the uid of the container process are still effective, + even if they are not included in this list. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be + set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. 
+ type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + ports: + description: Ports settings for the pods, following the Kubernetes + specifications. + items: + description: Port represents the port definition in the + pods objects. + properties: + containerPort: + format: int32 + type: integer + name: + type: string + protocol: + type: string + required: + - containerPort + - name + - protocol + type: object + type: array + schedulerName: + description: SchedulerName specifies the scheduler that will + be used for scheduling + type: string + secrets: + description: Secrets carries information of secrets to add + to the pod. + items: + description: SecretInfo captures information of a secret. + properties: + name: + type: string + path: + type: string + secretType: + description: SecretType tells the type of a secret. + type: string + required: + - name + - path + - secretType + type: object + type: array + securityContext: + description: SecurityContext specifies the container's SecurityContext + to apply. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. 
+ type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string required: - - volumePath + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. 
+ If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string type: object + type: object + serviceAccount: + description: ServiceAccount is the name of the custom Kubernetes + service account used by the pod. + type: string + shareProcessNamespace: + description: ShareProcessNamespace settings for the pod, following + the Kubernetes specifications. + type: boolean + sidecars: + description: Sidecars is a list of sidecar containers that + run along side the main Spark container. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. 
+ properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. 
All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. 
+ type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. 
Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. 
+ type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. 
+ If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. 
+ This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. 
+ properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. 
+ type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + terminationGracePeriodSeconds: + description: Termination grace period seconds for the pod + format: int64 + type: integer + tolerations: + description: Tolerations specifies the tolerations listed + in ".spec.tolerations" to be applied to the pod. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple <key,value,effect> using the matching operator <operator>. + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint.
By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + volumeMounts: + description: VolumeMounts specifies the volumes listed in + ".spec.volumes" to mount into the main container's filesystem. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + type: object + failureRetries: + description: |- + FailureRetries is the number of times to retry a failed application before giving up. + This is best effort and actual retry attempts can be >= the value specified. + format: int32 + type: integer + hadoopConf: + additionalProperties: + type: string + description: |- + HadoopConf carries user-specified Hadoop configuration properties as they would use the "--conf" option + in spark-submit. The SparkApplication controller automatically adds prefix "spark.hadoop." to Hadoop + configuration properties. + type: object + hadoopConfigMap: + description: |- + HadoopConfigMap carries the name of the ConfigMap containing Hadoop configuration files such as core-site.xml. + The controller will add environment variable HADOOP_CONF_DIR to the path where the ConfigMap is mounted to. + type: string + image: + description: |- + Image is the container image for the driver, executor, and init-container. Any custom container images for the + driver, executor, or init-container take precedence over this. + type: string + imagePullPolicy: + description: ImagePullPolicy is the image pull policy for the + driver, executor, and init-container. + type: string + imagePullSecrets: + description: ImagePullSecrets is the list of image-pull secrets. + items: + type: string + type: array + mainApplicationFile: + description: MainFile is the path to a bundled JAR, Python, or + R file of the application. + type: string + mainClass: + description: |- + MainClass is the fully-qualified main class of the Spark application. + This only applies to Java/Scala Spark applications.
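For orientation, the top-level fields above (`image`, `mainApplicationFile`, `mainClass`, `hadoopConf`) map one-to-one onto a SparkApplication manifest. A minimal sketch with illustrative values (the name, image, class, and jar path follow the operator's examples and are not mandated by this schema):

```yaml
apiVersion: "sparkoperator.k8s.io/v1beta2"
kind: SparkApplication
metadata:
  name: spark-pi   # hypothetical name
spec:
  type: Scala
  mode: cluster
  image: "gcr.io/spark-operator/spark:v3.1.1"   # shared by driver and executor unless overridden
  mainClass: org.apache.spark.examples.SparkPi  # Java/Scala applications only
  mainApplicationFile: "local:///opt/spark/examples/jars/spark-examples_2.12-3.1.1.jar"
  sparkVersion: "3.1.1"
  hadoopConf:
    "fs.s3a.endpoint": "s3.amazonaws.com"       # keys are prefixed with "spark.hadoop." by the controller
```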
+ type: string + memoryOverheadFactor: + description: |- + This sets the Memory Overhead Factor that will allocate memory to non-JVM memory. + For JVM-based jobs this value will default to 0.10, for non-JVM jobs 0.40. Value of this field will + be overridden by `Spec.Driver.MemoryOverhead` and `Spec.Executor.MemoryOverhead` if they are set. + type: string + mode: + description: Mode is the deployment mode of the Spark application. + enum: + - cluster + - client + type: string + monitoring: + description: Monitoring configures how monitoring is handled. + properties: + exposeDriverMetrics: + description: ExposeDriverMetrics specifies whether to expose + metrics on the driver. + type: boolean + exposeExecutorMetrics: + description: ExposeExecutorMetrics specifies whether to expose + metrics on the executors. + type: boolean + metricsProperties: + description: |- + MetricsProperties is the content of a custom metrics.properties for configuring the Spark metric system. + If not specified, the content in spark-docker/conf/metrics.properties will be used. + type: string + metricsPropertiesFile: + description: |- + MetricsPropertiesFile is the container local path of file metrics.properties for configuring + the Spark metric system. If not specified, value /etc/metrics/conf/metrics.properties will be used. + type: string + prometheus: + description: Prometheus is for configuring the Prometheus + JMX exporter. + properties: + configFile: + description: |- + ConfigFile is the path to the custom Prometheus configuration file provided in the Spark image. + ConfigFile takes precedence over Configuration, which is shown below. + type: string + configuration: + description: |- + Configuration is the content of the Prometheus configuration needed by the Prometheus JMX exporter. + If not specified, the content in spark-docker/conf/prometheus.yaml will be used. + Configuration has no effect if ConfigFile is set. + type: string + jmxExporterJar: + description: JmxExporterJar is the path to the Prometheus + JMX exporter jar in the container. + type: string + port: + description: |- + Port is the port of the HTTP server run by the Prometheus JMX exporter. + If not specified, 8090 will be used as the default. + format: int32 + maximum: 49151 + minimum: 1024 + type: integer + portName: + description: |- + PortName is the port name of prometheus JMX exporter port. + If not specified, jmx-exporter will be used as the default. + type: string required: - - name + - jmxExporterJar type: object - type: array - required: - - driver - - executor - - sparkVersion - - type - type: object - required: - - schedule - - template - type: object - status: - properties: - lastRun: - format: date-time - nullable: true - type: string - lastRunName: - type: string - nextRun: - format: date-time - nullable: true - type: string - pastFailedRunNames: - items: + required: + - exposeDriverMetrics + - exposeExecutorMetrics + type: object + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is the Kubernetes node selector to be added to the driver and executor pods. + This field is mutually exclusive with nodeSelector at podSpec level (driver or executor). + This field will be deprecated in future versions (at SparkApplicationSpec level). + type: object + proxyUser: + description: |- + ProxyUser specifies the user to impersonate when submitting the application. + It maps to the command-line flag "--proxy-user" in spark-submit. 
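As a hedged illustration of the monitoring block defined above, the following enables the Prometheus JMX exporter on both driver and executors. The jar path mirrors the operator's documentation and must actually exist in the image; the port matches the schema default and could be omitted:

```yaml
spec:
  monitoring:
    exposeDriverMetrics: true    # required by the schema
    exposeExecutorMetrics: true  # required by the schema
    prometheus:
      jmxExporterJar: "/prometheus/jmx_prometheus_javaagent-0.11.0.jar"  # required; path inside the container
      port: 8090                 # default when unset
```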
+ type: string + pythonVersion: + description: |- + This sets the major Python version of the docker + image used to run the driver and executor containers. Can either be 2 or 3, default 2. + enum: + - "2" + - "3" + type: string + restartPolicy: + description: RestartPolicy defines the policy on if and in which + conditions the controller should restart an application. + properties: + onFailureRetries: + description: OnFailureRetries is the number of times to retry + running an application before giving up. + format: int32 + minimum: 0 + type: integer + onFailureRetryInterval: + description: OnFailureRetryInterval is the interval in seconds + between retries on failed runs. + format: int64 + minimum: 1 + type: integer + onSubmissionFailureRetries: + description: |- + OnSubmissionFailureRetries is the number of times to retry submitting an application before giving up. + This is best effort and actual retry attempts can be >= the value specified due to caching. + These are required if RestartPolicy is OnFailure. + format: int32 + minimum: 0 + type: integer + onSubmissionFailureRetryInterval: + description: OnSubmissionFailureRetryInterval is the interval + in seconds between retries on failed submissions. + format: int64 + minimum: 1 + type: integer + type: + description: Type specifies the RestartPolicyType. + enum: + - Never + - Always + - OnFailure + type: string + type: object + retryInterval: + description: RetryInterval is the unit of intervals in seconds + between submission retries. + format: int64 + type: integer + sparkConf: + additionalProperties: + type: string + description: |- + SparkConf carries user-specified Spark configuration properties as they would use the "--conf" option in + spark-submit. + type: object + sparkConfigMap: + description: |- + SparkConfigMap carries the name of the ConfigMap containing Spark configuration files such as log4j.properties. + The controller will add environment variable SPARK_CONF_DIR to the path where the ConfigMap is mounted to. + type: string + sparkUIOptions: + description: SparkUIOptions allows configuring the Service and + the Ingress to expose the sparkUI + properties: + ingressAnnotations: + additionalProperties: + type: string + description: IngressAnnotations is a map of key,value pairs + of annotations that might be added to the ingress object. + i.e. specify nginx as ingress.class + type: object + ingressTLS: + description: IngressTLS is useful if we need to declare SSL + certificates to the ingress object + items: + description: IngressTLS describes the transport layer security + associated with an ingress. + properties: + hosts: + description: |- + hosts is a list of hosts included in the TLS certificate. The values in + this list must match the name/s used in the tlsSecret. Defaults to the + wildcard host setting for the loadbalancer controller fulfilling this + Ingress, if left unspecified. + items: + type: string + type: array + x-kubernetes-list-type: atomic + secretName: + description: |- + secretName is the name of the secret used to terminate TLS traffic on + port 443. Field is left optional to allow TLS routing based on SNI + hostname alone. If the SNI host in a listener conflicts with the "Host" + header field used by an IngressRule, the SNI host is used for termination + and value of the "Host" header is used for routing.
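The restartPolicy block above composes as follows; a sketch of an OnFailure policy with bounded retries (the numbers are illustrative):

```yaml
spec:
  restartPolicy:
    type: OnFailure
    onFailureRetries: 3
    onFailureRetryInterval: 10            # seconds between retries of failed runs
    onSubmissionFailureRetries: 5
    onSubmissionFailureRetryInterval: 20  # seconds between retries of failed submissions
```

Per the field descriptions, the retry counts are best effort, so actual attempts can exceed the configured values.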
+ type: string + type: object + type: array + serviceAnnotations: + additionalProperties: + type: string + description: ServiceAnnotations is a map of key,value pairs + of annotations that might be added to the service object. + type: object + serviceLabels: + additionalProperties: + type: string + description: ServiceLabels is a map of key,value pairs of + labels that might be added to the service object. + type: object + servicePort: + description: |- + ServicePort allows configuring the port at service level that might be different from the targetPort. + TargetPort should be the same as the one defined in spark.ui.port + format: int32 + type: integer + servicePortName: + description: |- + ServicePortName allows configuring the name of the service port. + This may be useful for sidecar proxies like Envoy injected by Istio which require specific port names to treat traffic as proper HTTP. + Defaults to spark-driver-ui-port. + type: string + serviceType: + description: ServiceType allows configuring the type of the + service. Defaults to ClusterIP. + type: string + type: object + sparkVersion: + description: SparkVersion is the version of Spark the application + uses. type: string + timeToLiveSeconds: + description: |- + TimeToLiveSeconds defines the Time-To-Live (TTL) duration in seconds for this SparkApplication + after its termination. + The SparkApplication object will be garbage collected if the current time is more than the + TimeToLiveSeconds since its termination. + format: int64 + type: integer + type: + description: Type tells the type of the Spark application. + enum: + - Java + - Python + - Scala + - R type: string + volumes: + description: Volumes is the list of Kubernetes volumes that can + be mounted by the driver and/or executors. + items: + description: Volume represents a named volume in a pod that + may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume).
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk mount + on the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk in + the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in the + blob storage + type: string + fsType: + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage account Managed: azure managed + data disk (only in managed availability set). defaults + to shared' + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service + mount on the host and bind mount to the pod. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the host + that shares a pod's lifetime + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. 
+ May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents + ephemeral storage that is handled by certain external + CSI drivers (Beta feature). + properties: + driver: + description: |- + driver is the name of the CSI driver that handles this volume. + Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: |- + fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated CSI driver + which will determine the default filesystem to apply. + type: string + nodePublishSecretRef: + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to complete the CSI + NodePublishVolume and NodeUnpublishVolume calls. + This field is optional, and may be empty if no secret is required. If the + secret object contains more than one secret, all secret references are passed. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: |- + readOnly specifies a read-only configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: |- + volumeAttributes stores driver-specific properties that are passed to the CSI + driver. Consult your driver's documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about the + pod that should populate this volume + properties: + defaultMode: + description: |- + Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the + pod: only annotations, labels, name and namespace + are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version.
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must not + be absolute or contain the ''..'' path. Must + be utf-8 encoded. The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: |- + ephemeral represents a volume that is handled by a cluster storage driver. + The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, + and deleted when the pod is removed. + + + Use this if: + a) the volume is only needed while the pod runs, + b) features of normal volumes like restoring from snapshot or capacity + tracking are needed, + c) the storage driver is specified through a storage class, and + d) the storage driver supports dynamic volume provisioning through + a PersistentVolumeClaim (see EphemeralVolumeSource for more + information on the connection between this volume type + and PersistentVolumeClaim). 
+ + + Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. + + + Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to + be used that way - see the documentation of the driver for + more information. + + + A pod can use both types of ephemeral volumes and + persistent volumes at the same time. + properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. + The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `<pod name>-<volume name>` where + `<volume name>` is the name from the `PodSpec.Volumes` array + entry. Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). + + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to be updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired.
This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. 
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass + (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource that + is attached to a kubelet's host machine and then exposed + to the pod. 
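Since admitting `ephemeral.volumeClaimTemplate` is the point of this patch, a sketch of a SparkApplication volume using it follows. Storage class, size, and mount path are illustrative; the `spark-local-dir-` name prefix follows the operator's convention for scratch space:

```yaml
spec:
  volumes:
    - name: spark-local-dir-scratch
      ephemeral:
        volumeClaimTemplate:
          spec:
            accessModes: ["ReadWriteOnce"]
            storageClassName: "standard"   # assumes a dynamic provisioner is available
            resources:
              requests:
                storage: 10Gi
  executor:
    volumeMounts:
      - name: spark-local-dir-scratch
        mountPath: /tmp/scratch
```

Per the schema text above, the PVC is created per pod and deleted together with it, which suits executor scratch space better than a long-lived PersistentVolumeClaim.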
+ properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target worldwide + names (WWNs)' + items: + type: string + type: array + wwids: + description: |- + wwids Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. + items: + type: string + type: array + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to use + for this volume. + type: string + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds + extra command options if any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugin scripts. This may be + empty if no secret object is specified. If the secret object + contains more than one secret, all secrets are passed to the plugin + scripts. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached + to a kubelet's host machine. This depends on the Flocker + control service being running + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. + This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the specified + revision. + type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + More info: https://examples.k8s.io/volumes/glusterfs/README.md + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + path: + description: |- + path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. This is generally + used for system agents or other privileged things that are allowed + to see the host machine. Most containers will NOT need this. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- + TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not + mount host directories as read/write. + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + type: + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + required: + - path + type: object + iscsi: + description: |- + iscsi represents an ISCSI Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://examples.k8s.io/volumes/iscsi/README.md + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support + iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support + iSCSI Session CHAP authentication + type: boolean + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + initiatorName: + description: |- + initiatorName is the custom iSCSI Initiator Name. + If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: |- + portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI + target and initiator authentication + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: |- + targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host + machine + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon Controller + persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume + attached and mounted on kubelets host machine + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources secrets, + configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected along + with other supported volume types + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. 
Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume + root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap + data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path + within a volume. + properties: + key: + description: key is the key to project. 
+ type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: optional specify whether the + ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the + downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema + the FieldPath is written in terms + of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to + select in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file to + be created. Must not be absolute or + contain the ''..'' path. Must be utf-8 + encoded. The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. 
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env + vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + secret: + description: secret information about the secret + data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path + within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. 
+ format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + description: quobyte represents a Quobyte mount on the host + that shares a pod's lifetime + properties: + group: + description: |- + group to map volume access to + Default is no group + type: string + readOnly: + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple entries are separated with commas) + which acts as the central registry for volumes + type: string + tenant: + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: |- + user to map volume access to + Defaults to serivceaccount user + type: string + volume: + description: volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + More info: https://examples.k8s.io/volumes/rbd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + image: + description: |- + image is the rados image name. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + pool: + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is the rados user name. + Default is admin. 
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the ScaleIO + API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the ScaleIO + Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL communication + with Gateway, default false + type: boolean + storageMode: + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage Pool + associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. 
+ type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the Secret + or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy Based + Management (SPBM) profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. 
+ type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + required: + - driver + - executor + - sparkVersion + - type + type: object + required: + - schedule + - template + type: object + status: + properties: + lastRun: + description: LastRun is the time when the last run of the application + started. + format: date-time + nullable: true + type: string + lastRunName: + description: LastRunName is the name of the SparkApplication for the + most recent run of the application. + type: string + nextRun: + description: NextRun is the time when the next run of the application + will start. + format: date-time + nullable: true + type: string + pastFailedRunNames: + description: PastFailedRunNames keeps the names of SparkApplications + for past failed runs. + items: type: string - scheduleState: + type: array + pastSuccessfulRunNames: + description: PastSuccessfulRunNames keeps the names of SparkApplications + for past successful runs. + items: type: string - type: object - required: - - metadata - - spec - type: object - -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] + type: array + reason: + description: Reason tells why the ScheduledSparkApplication is in + the particular ScheduleState. + type: string + scheduleState: + description: ScheduleState is the current scheduling state of the + application. + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/charts/spark-operator-chart/crds/sparkoperator.k8s.io_sparkapplications.yaml b/charts/spark-operator-chart/crds/sparkoperator.k8s.io_sparkapplications.yaml index c67bb2afaa..c23d69264a 100644 --- a/charts/spark-operator-chart/crds/sparkoperator.k8s.io_sparkapplications.yaml +++ b/charts/spark-operator-chart/crds/sparkoperator.k8s.io_sparkapplications.yaml @@ -1,11 +1,10 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: (unknown) api-approved.kubernetes.io: https://github.com/kubeflow/spark-operator/pull/1298 + controller-gen.kubebuilder.io/version: v0.15.0 name: sparkapplications.sparkoperator.k8s.io spec: group: sparkoperator.k8s.io @@ -18,1090 +17,1826 @@ spec: singular: sparkapplication scope: Namespaced versions: - - name: v1beta2 - served: true - storage: true - subresources: - status: { } - additionalPrinterColumns: - - jsonPath: .status.applicationState.state - name: Status - type: string - - jsonPath: .status.executionAttempts - name: Attempts - type: string - - jsonPath: .status.lastSubmissionAttemptTime - name: Start - type: string - - jsonPath: .status.terminationTime - name: Finish - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - schema: - openAPIV3Schema: - properties: - apiVersion: - type: string - kind: - type: string - metadata: - type: object - spec: - properties: - arguments: - items: - type: string - type: array - batchScheduler: + - additionalPrinterColumns: + - jsonPath: .status.applicationState.state + name: Status + type: string + - jsonPath: .status.executionAttempts + name: Attempts + type: string + - jsonPath: .status.lastSubmissionAttemptTime + name: Start + type: string + - jsonPath: .status.terminationTime + name: Finish + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + 
name: v1beta2 + schema: + openAPIV3Schema: + description: SparkApplication represents a Spark application running on and + using Kubernetes as a cluster manager. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + SparkApplicationSpec describes the specification of a Spark application using Kubernetes as a cluster manager. + It carries every pieces of information a spark-submit command takes and recognizes. + properties: + arguments: + description: Arguments is a list of arguments to be passed to the + application. + items: type: string - batchSchedulerOptions: - properties: - priorityClassName: + type: array + batchScheduler: + description: BatchScheduler configures which batch scheduler will + be used for scheduling + type: string + batchSchedulerOptions: + description: BatchSchedulerOptions provides fine-grained control on + how to batch scheduling. + properties: + priorityClassName: + description: PriorityClassName stands for the name of k8s PriorityClass + resource, it's being used in Volcano batch scheduler. + type: string + queue: + description: Queue stands for the resource queue which the application + belongs to, it's being used in Volcano batch scheduler. + type: string + resources: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Resources stands for the resource list custom request for. Usually it is used to define the lower-bound limit. + If specified, volcano scheduler will consider it as the resources requested. + type: object + type: object + deps: + description: Deps captures all possible types of dependencies of a + Spark application. + properties: + excludePackages: + description: |- + ExcludePackages is a list of "groupId:artifactId", to exclude while resolving the + dependencies provided in Packages to avoid dependency conflicts. + items: type: string - queue: + type: array + files: + description: Files is a list of files the Spark application depends + on. 
+ items: type: string - resources: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - type: object - deps: - properties: - excludePackages: - items: - type: string - type: array - files: - items: - type: string - type: array - jars: - items: - type: string - type: array - packages: - items: - type: string - type: array - pyFiles: - items: - type: string - type: array - repositories: - items: - type: string - type: array - type: object - driver: - properties: - affinity: - properties: - nodeAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: - properties: - preference: - properties: - matchExpressions: - items: - properties: - key: + type: array + jars: + description: Jars is a list of JAR files the Spark application + depends on. + items: + type: string + type: array + packages: + description: |- + Packages is a list of maven coordinates of jars to include on the driver and executor + classpaths. This will search the local maven repo, then maven central and any additional + remote repositories given by the "repositories" option. + Each package should be of the form "groupId:artifactId:version". + items: + type: string + type: array + pyFiles: + description: PyFiles is a list of Python files the Spark application + depends on. + items: + type: string + type: array + repositories: + description: |- + Repositories is a list of additional remote repositories to search for the maven coordinate + given with the "packages" option. + items: + type: string + type: array + type: object + driver: + description: Driver is the driver specification. + properties: + affinity: + description: Affinity specifies the affinity/anti-affinity settings + for the pod. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for + the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with + the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. 
+ type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: type: string - operator: + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - items: - properties: - key: + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the + corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
+ items: type: string - operator: + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) properties: - nodeSelectorTerms: - items: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. type: string - type: array - required: - - key - - operator + operator: + description: |- + operator represents a key's relationship to a set of values. 
+ Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object - type: array - matchFields: - items: - properties: - key: - type: string - operator: - type: string - values: - items: + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. type: string - type: array - required: - - key - - operator + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object - type: array - type: object - type: array + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer required: - - nodeSelectorTerms + - podAffinityTerm + - weight type: object - type: object - podAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: - properties: - podAffinityTerm: - properties: - labelSelector: + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. properties: - matchExpressions: + key: + description: key is the label key that + the selector applies to. 
+ type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: type: string - type: object + type: array + required: + - key + - operator type: object - namespaces: - items: - type: string - type: array - topologyKey: + type: array + matchLabels: + additionalProperties: type: string - required: - - topologyKey - type: object - weight: - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - items: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". 
+ An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: - properties: - podAffinityTerm: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object type: array - matchLabels: - additionalProperties: - type: string - type: object + required: + - key + - operator type: object - namespaces: - items: - type: string - type: array - topologyKey: + type: array + matchLabels: + additionalProperties: type: string - required: - - topologyKey - type: object - weight: - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - items: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: - type: string - required: - - topologyKey - type: object - type: array - type: object - type: object - annotations: - additionalProperties: - type: string - type: object - configMaps: - items: - properties: - name: - type: string - path: - type: string - required: - - name - - path - type: object - type: array - coreLimit: - type: string - coreRequest: - type: string - cores: - format: int32 - minimum: 1 - type: integer - dnsConfig: - properties: - nameservers: - items: - type: string - type: array - options: - items: - properties: - name: - type: string - value: - type: string - type: object - type: array - searches: - items: - type: string - type: array - type: object - env: - items: - properties: - name: - type: string - value: - type: string - valueFrom: - properties: - configMapKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - resourceFieldRef: 
- properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - secretKeyRef: - properties: - key: - type: string - name: + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: type: string - optional: - type: boolean - required: - - key - type: object - type: object - required: - - name - type: object - type: array - envFrom: - items: - properties: - configMapRef: - properties: - name: - type: string - optional: - type: boolean - type: object - prefix: - type: string - secretRef: - properties: - name: - type: string - optional: - type: boolean - type: object - type: object - type: array - envSecretKeyRefs: - additionalProperties: - properties: - key: - type: string - name: - type: string - required: - - key - - name - type: object - type: object - envVars: - additionalProperties: - type: string - type: object - gpu: - properties: - name: - type: string - quantity: - format: int64 - type: integer - required: - - name - - quantity - type: object - hostAliases: - items: - properties: - hostnames: - items: - type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object type: array - ip: - type: string type: object - type: array - hostNetwork: - type: boolean - image: - type: string - initContainers: - items: + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, etc. + as some other pod(s)). properties: - args: - items: - type: string - type: array - command: - items: - type: string - type: array - env: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. 
items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) properties: - name: - type: string - value: - type: string - valueFrom: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. properties: - configMapKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - fieldRef: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object type: object - secretKeyRef: + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. 
+ format: int32 + type: integer required: - - name + - podAffinityTerm + - weight type: object type: array - envFrom: + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running properties: - configMapRef: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. properties: - name: - type: string - optional: - type: boolean + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object type: object - prefix: - type: string - secretRef: + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. properties: - name: - type: string - optional: - type: boolean + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey type: object type: array - image: + type: object + type: object + annotations: + additionalProperties: + type: string + description: Annotations are the Kubernetes annotations to be + added to the pod. + type: object + configMaps: + description: ConfigMaps carries information of other ConfigMaps + to add to the pod. + items: + description: NamePath is a pair of a name and a path to which + the named objects should be mounted to. + properties: + name: + type: string + path: + type: string + required: + - name + - path + type: object + type: array + coreLimit: + description: |- + CoreLimit specifies a hard limit on CPU cores for the pod. + Optional + type: string + coreRequest: + description: |- + CoreRequest is the physical CPU core request for the driver. + Maps to `spark.kubernetes.driver.request.cores` that is available since Spark 3.0. + type: string + cores: + description: Cores maps to `spark.driver.cores` or `spark.executor.cores` + for the driver and executors, respectively. + format: int32 + minimum: 1 + type: integer + dnsConfig: + description: DnsConfig dns settings for the pod, following the + Kubernetes specifications. + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: + type: string + type: array + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver options + of a pod. + properties: + name: + description: Required. + type: string + value: + type: string + type: object + type: array + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + type: object + env: + description: Env carries the environment variables to add to the + pod. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: EnvFrom is a list of sources to populate environment + variables in the container. + items: + description: EnvFromSource represents the source of a set of + ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each key + in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + envSecretKeyRefs: + additionalProperties: + description: NameKey represents the name and key of a SecretKeyRef. 
+ properties: + key: + type: string + name: + type: string + required: + - key + - name + type: object + description: |- + EnvSecretKeyRefs holds a mapping from environment variable names to SecretKeyRefs. + Deprecated. Consider using `env` instead. + type: object + envVars: + additionalProperties: + type: string + description: |- + EnvVars carries the environment variables to add to the pod. + Deprecated. Consider using `env` instead. + type: object + gpu: + description: GPU specifies GPU requirement for the pod. + properties: + name: + description: 'Name is GPU resource name, such as: nvidia.com/gpu + or amd.com/gpu' + type: string + quantity: + description: Quantity is the number of GPUs to request for + driver or executor. + format: int64 + type: integer + required: + - name + - quantity + type: object + hostAliases: + description: HostAliases settings for the pod, following the Kubernetes + specifications. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: type: string - imagePullPolicy: + type: array + ip: + description: IP address of the host file entry. + type: string + type: object + type: array + hostNetwork: + description: HostNetwork indicates whether to request host networking + for the pod or not. + type: boolean + image: + description: Image is the container image to use. Overrides Spec.Image + if set. + type: string + initContainers: + description: InitContainers is a list of init-containers that + run to completion before the main Spark container. + items: + description: A single application container that you want to + run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: type: string - lifecycle: + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. 
properties: - postStart: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: + configMapKeyRef: + description: Selects a key of a ConfigMap. properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: + key: + description: The key to select. type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean required: - - port + - key type: object - tcpSocket: + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. properties: - host: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true required: - - port - type: object - type: object - preStop: - properties: - exec: - properties: - command: - items: - type: string - type: array + - fieldPath type: object - httpGet: + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: + containerName: + description: 'Container name: required for + volumes, optional for env vars' type: string - port: + divisor: anyOf: - type: integer - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - scheme: + resource: + description: 'Required: resource to select' type: string required: - - port + - resource type: object - tcpSocket: + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace properties: - host: + key: + description: The key of the secret to select + from. Must be a valid secret key. type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean required: - - port + - key type: object + x-kubernetes-map-type: atomic type: object + required: + - name type: object - livenessProbe: + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of a + set of ConfigMaps properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: + configMapRef: + description: The ConfigMap to select from properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string - required: - - port + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from properties: - host: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port + optional: + description: Specify whether the Secret must be + defined + type: boolean type: object - timeoutSeconds: - format: int32 - type: integer + x-kubernetes-map-type: atomic type: object - name: - type: string - ports: - items: + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: - containerPort: - format: int32 - type: integer - hostIP: - type: string - hostPort: - format: int32 - type: integer - name: - type: string - protocol: - type: string - required: - - containerPort - - protocol - type: object - type: array - x-kubernetes-list-map-keys: - - containerPort - - protocol - x-kubernetes-list-type: map - readinessProbe: - properties: - exec: - properties: - command: - items: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. 
type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - resources: - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - type: object - restartPolicy: - type: string - securityContext: - properties: - seccompProfile: - type: object - properties: - type: - type: string - allowPrivilegeEscalation: - type: boolean - capabilities: - properties: - add: - items: + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. type: string - type: array - drop: - items: + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string - type: array - type: object - privileged: - type: boolean - procMount: - type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object - type: object - startupProbe: - properties: - exec: - properties: - command: - items: + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. 
TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - stdin: - type: boolean - stdinOnce: - type: boolean - terminationMessagePath: - type: string - terminationMessagePolicy: - type: string - tty: - type: boolean - volumeDevices: - items: - properties: - devicePath: - type: string - name: - type: string - required: - - devicePath - - name + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object type: object - type: array - volumeMounts: - items: + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: - mountPath: - type: string - mountPropagation: - type: string - name: - type: string - readOnly: - type: boolean - subPath: - type: string - subPathExpr: - type: string - required: - - mountPath - - name + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. 
+ type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object type: object - type: array - workingDir: - type: string - required: - - name - type: object - type: array - javaOptions: - type: string - kubernetesMaster: - type: string - labels: - additionalProperties: - type: string - type: object - lifecycle: - properties: - postStart: + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes properties: exec: + description: Exec specifies the action to take. properties: command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). 
+ + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object httpGet: + description: HTTPGet specifies the http request to perform. properties: host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. type: string httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. items: + description: HTTPHeader describes a custom header + to be used in HTTP probes properties: name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: + description: The header field value type: string required: - name @@ -1109,49 +1844,211 @@ spec: type: object type: array path: + description: Path to access on the HTTP server. type: string port: anyOf: - type: integer - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. properties: host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' type: string port: anyOf: - type: integer - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer type: object - preStop: - properties: - exec: + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port in + a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port + to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. properties: command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object httpGet: + description: HTTPGet specifies the http request to perform. 
properties: host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. type: string httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. items: + description: HTTPHeader describes a custom header + to be used in HTTP probes properties: name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: + description: The header field value type: string required: - name @@ -1159,3348 +2056,9498 @@ spec: type: object type: array path: + description: Path to access on the HTTP server. type: string port: anyOf: - type: integer - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - port type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. properties: host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' type: string port: anyOf: - type: integer - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. 
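For containers that expose the standard gRPC health-checking service rather than an HTTP endpoint, the grpc action sketched in this schema could be used instead of httpGet; the port and service name below are illustrative:

```yaml
readinessProbe:
  grpc:
    port: 50051
    service: my-health-service   # optional name sent in the HealthCheckRequest
  periodSeconds: 10
```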
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer type: object - type: object - memory: - type: string - memoryOverhead: - type: string - nodeSelector: - additionalProperties: - type: string - type: object - podName: - pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*' - type: string - podSecurityContext: - properties: - fsGroup: - format: int64 - type: integer - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - supplementalGroups: - items: - format: int64 - type: integer - type: array - sysctls: + resizePolicy: + description: Resources resize policy for the container. items: + description: ContainerResizePolicy represents resource + resize policy for the container. properties: - name: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. type: string - value: + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. type: string required: - - name - - value + - resourceName + - restartPolicy type: object type: array - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object - type: object - schedulerName: - type: string - secrets: - items: - properties: - name: - type: string - path: - type: string - secretType: - type: string - required: - - name - - path - - secretType - type: object - type: array - securityContext: - properties: - seccompProfile: - type: object - properties: - type: - type: string - allowPrivilegeEscalation: - type: boolean - capabilities: + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ properties: - add: - items: - type: string - type: array - drop: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. items: - type: string + description: ResourceClaim references one entry in + PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
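On a sidecar container entry, these resource fields follow the usual Kubernetes quantity syntax; a sketch with illustrative values (the Spark driver and executor containers themselves are normally sized through the operator's cores and memory fields):

```yaml
resources:
  requests:
    cpu: 250m
    memory: 256Mi
  limits:
    memory: 512Mi   # cap memory only; CPU is left unbounded
```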
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object type: object - privileged: - type: boolean - procMount: + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - windowsOptions: + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object - type: object - serviceAccount: - type: string - serviceAnnotations: - additionalProperties: - type: string - type: object - serviceLabels: - additionalProperties: - type: string - type: object - shareProcessNamespace: - type: boolean - sidecars: - items: - properties: - args: - items: - type: string - type: array - command: - items: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. 
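The restartPolicy: Always behavior described here is what turns an init container into a long-running sidecar. A minimal sketch, assuming a cluster with the SidecarContainers feature enabled and an illustrative log-shipper image:

```yaml
spec:
  driver:
    initContainers:
      - name: log-shipper               # illustrative name
        image: fluent/fluent-bit:2.2.0  # illustrative image
        restartPolicy: Always           # keeps running until all regular containers finish
```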
+ Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. type: string - type: array - env: - items: + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. properties: - name: + level: + description: Level is SELinux level label that applies + to the container. type: string - value: + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. 
If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. type: string - valueFrom: - properties: - configMapKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - secretKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - type: object required: - - name + - type type: object - type: array - envFrom: - items: + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. properties: - configMapRef: - properties: - name: - type: string - optional: - type: boolean - type: object - prefix: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. 
type: string - secretRef: - properties: - name: - type: string - optional: - type: boolean - type: object type: object - type: array - image: - type: string - imagePullPolicy: - type: string - lifecycle: - properties: - postStart: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
type: string - required: - - port - type: object - tcpSocket: - properties: - host: + value: + description: The header field value type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true required: - - port - type: object - type: object - preStop: - properties: - exec: - properties: - command: - items: - type: string - type: array + - name + - value type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - livenessProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - name: - type: string - ports: - items: - properties: - containerPort: - format: int32 - type: integer - hostIP: - type: string - hostPort: - format: int32 - type: integer - name: - type: string - protocol: + type: array + path: + description: Path to access on the HTTP server. 
type: string - required: - - containerPort - - protocol - type: object - type: array - x-kubernetes-list-map-keys: - - containerPort - - protocol - x-kubernetes-list-type: map - readinessProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - resources: - properties: - limits: - additionalProperties: + port: anyOf: - type: integer - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true - type: object - requests: - additionalProperties: + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: anyOf: - type: integer - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true - type: object + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. 
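A startup probe of this shape is useful when a container loads data or warms a cache before it can serve traffic; the endpoint and budget below are illustrative:

```yaml
startupProbe:
  httpGet:
    path: /ready
    port: 8080
  periodSeconds: 10
  failureThreshold: 30   # allows up to 30 x 10s = 5 minutes to initialize
```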
+ Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a raw + block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the + container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name type: object - securityContext: + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume + within a container. 
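Volume mounts declared on a container pair with volumes defined at the SparkApplication spec level; a minimal sketch with illustrative names:

```yaml
spec:
  volumes:
    - name: scratch
      emptyDir: {}
  driver:
    volumeMounts:
      - name: scratch           # must match a volume name declared above
        mountPath: /tmp/scratch
```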
properties: - seccompProfile: - type: object - properties: - type: - type: string - allowPrivilegeEscalation: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. type: boolean - capabilities: - properties: - add: - items: + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + javaOptions: + description: |- + JavaOptions is a string of extra JVM options to pass to the driver. For instance, + GC settings or other logging. + type: string + kubernetesMaster: + description: |- + KubernetesMaster is the URL of the Kubernetes master used by the driver to manage executor pods and + other Kubernetes resources. Default to https://kubernetes.default.svc. + type: string + labels: + additionalProperties: + type: string + description: Labels are the Kubernetes labels to be added to the + pod. + type: object + lifecycle: + description: Lifecycle for running preStop or postStart commands + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string - type: array - drop: - items: + value: + description: The header field value type: string - type: array - type: object - privileged: - type: boolean - procMount: + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the container + should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds to sleep. format: int64 type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object + required: + - seconds type: object - startupProbe: + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. properties: - exec: - properties: - command: - items: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. 
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the container + should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds to sleep. + format: int64 type: integer + required: + - seconds type: object - stdin: - type: boolean - stdinOnce: - type: boolean - terminationMessagePath: + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + memory: + description: Memory is the amount of memory to request for the + pod. + type: string + memoryOverhead: + description: MemoryOverhead is the amount of off-heap memory to + allocate in cluster mode, in MiB unless otherwise specified. + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is the Kubernetes node selector to be added to the driver and executor pods. 
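Both the driver and executor specs accept a lifecycle block of this shape; a minimal sketch with illustrative commands (the sleep handler additionally assumes a Kubernetes version with the PodLifecycleSleepAction feature gate enabled):

```yaml
spec:
  driver:
    lifecycle:
      postStart:
        exec:
          command: ["/bin/sh", "-c", "echo started > /tmp/status"]
  executor:
    lifecycle:
      preStop:
        sleep:
          seconds: 10   # pause before termination so in-flight work can drain
```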
+ This field is mutually exclusive with nodeSelector at SparkApplication level (which will be deprecated). + type: object + podName: + description: |- + PodName is the name of the driver pod that the user creates. This is used for the + in-cluster client mode in which the user creates a client pod where the driver of + the user application runs. It's an error to set this field if Mode is not + in-cluster-client. + pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*' + type: string + podSecurityContext: + description: PodSecurityContext specifies the PodSecurityContext + to apply. + properties: + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. 
+ type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. type: string - terminationMessagePolicy: + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. type: string - tty: + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in addition + to the container's primary GID, the fsGroup (if specified), and group memberships + defined in the container image for the uid of the container process. If unspecified, + no additional groups are added to any container. Note that group memberships + defined in the container image for the uid of the container process are still effective, + even if they are not included in this list. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. 
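Pod-level security settings such as fsGroup apply to every container in the pod; a sketch with illustrative IDs:

```yaml
spec:
  driver:
    podSecurityContext:
      runAsUser: 1000
      runAsNonRoot: true
      fsGroup: 2000                        # mounted volumes become group-owned by GID 2000
      fsGroupChangePolicy: OnRootMismatch  # skip recursive chown when ownership already matches
```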
type: boolean - volumeDevices: + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + ports: + description: Ports settings for the pods, following the Kubernetes + specifications. + items: + description: Port represents the port definition in the pods + objects. + properties: + containerPort: + format: int32 + type: integer + name: + type: string + protocol: + type: string + required: + - containerPort + - name + - protocol + type: object + type: array + schedulerName: + description: SchedulerName specifies the scheduler that will be + used for scheduling + type: string + secrets: + description: Secrets carries information of secrets to add to + the pod. + items: + description: SecretInfo captures information of a secret. + properties: + name: + type: string + path: + type: string + secretType: + description: SecretType tells the type of a secret. + type: string + required: + - name + - path + - secretType + type: object + type: array + securityContext: + description: SecurityContext specifies the container's SecurityContext + to apply. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities items: - properties: - devicePath: - type: string - name: - type: string - required: - - devicePath - - name - type: object + description: Capability represent POSIX capabilities + type + type: string type: array - volumeMounts: + drop: + description: Removed capabilities items: - properties: - mountPath: - type: string - mountPropagation: - type: string - name: - type: string - readOnly: - type: boolean - subPath: - type: string - subPathExpr: - type: string - required: - - mountPath - - name - type: object + description: Capability represent POSIX capabilities + type + type: string type: array - workingDir: - type: string - required: - - name type: object - type: array - terminationGracePeriodSeconds: - format: int64 - type: integer - tolerations: - items: + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. 
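The operator-specific secrets entries mount a named Kubernetes Secret at a path and, for recognized secretType values, also wire up matching configuration; the names below are illustrative:

```yaml
spec:
  driver:
    secrets:
      - name: gcp-svc-account           # illustrative Secret name
        path: /mnt/secrets
        secretType: GCPServiceAccount   # also exports GOOGLE_APPLICATION_CREDENTIALS
```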
+ type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. properties: - effect: + level: + description: Level is SELinux level label that applies + to the container. type: string - key: + role: + description: Role is a SELinux role label that applies + to the container. type: string - operator: + type: + description: Type is a SELinux type label that applies + to the container. type: string - tolerationSeconds: - format: int64 - type: integer - value: + user: + description: User is a SELinux user label that applies + to the container. type: string type: object - type: array - volumeMounts: - items: + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. properties: - mountPath: - type: string - mountPropagation: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. type: string - name: + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. 
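Taken together, these container-level fields support a common hardening baseline; a sketch assuming an image that can run unprivileged:

```yaml
spec:
  driver:
    securityContext:
      allowPrivilegeEscalation: false
      readOnlyRootFilesystem: true
      capabilities:
        drop: ["ALL"]          # drop all Linux capabilities
      seccompProfile:
        type: RuntimeDefault   # use the container runtime's default profile
```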
type: string - readOnly: + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. type: boolean - subPath: + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. type: string - subPathExpr: - type: string - required: - - mountPath - - name type: object - type: array - type: object - dynamicAllocation: - properties: - enabled: - type: boolean - initialExecutors: - format: int32 - type: integer - maxExecutors: - format: int32 - type: integer - minExecutors: - format: int32 - type: integer - shuffleTrackingTimeout: - format: int64 - type: integer - type: object - executor: - properties: - affinity: + type: object + serviceAccount: + description: ServiceAccount is the name of the custom Kubernetes + service account used by the pod. + type: string + serviceAnnotations: + additionalProperties: + type: string + description: |- + ServiceAnnotations defines the annotations to be added to the Kubernetes headless service used by + executors to connect to the driver. + type: object + serviceLabels: + additionalProperties: + type: string + description: |- + ServiceLabels defines the labels to be added to the Kubernetes headless service used by + executors to connect to the driver. + type: object + shareProcessNamespace: + description: ShareProcessNamespace settings for the pod, following + the Kubernetes specifications. + type: boolean + sidecars: + description: Sidecars is a list of sidecar containers that run + along side the main Spark container. + items: + description: A single application container that you want to + run within a pod. properties: - nodeAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". 
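serviceAccount, serviceAnnotations, and serviceLabels shape the driver pod's identity and the headless service executors use to reach it; a sketch with illustrative values:

```yaml
spec:
  driver:
    serviceAccount: spark-driver-sa      # illustrative; must exist in the namespace
    serviceAnnotations:
      example.com/owner: data-platform   # illustrative annotation key
    serviceLabels:
      app.kubernetes.io/component: spark-driver
```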
Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. properties: - preference: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - properties: - nodeSelectorTerms: - items: + configMapKeyRef: + description: Selects a key of a ConfigMap. properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string
+ optional:
+ description: Specify whether the ConfigMap
+ or its key must be defined
+ type: boolean
+ required:
+ - key
 type: object
- type: array
- requiredDuringSchedulingIgnoredDuringExecution:
- items:
- properties:
- podAffinityTerm:
+ x-kubernetes-map-type: atomic
+ fieldRef:
+ description: |-
+ Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+ spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
 properties:
- labelSelector:
- properties:
- matchExpressions:
- items:
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- type: object
- type: object
- namespaces:
- items:
- type: string
- type: array
- topologyKey:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in
+ the specified API version.
 type: string
 required:
- - topologyKey
+ - fieldPath
 type: object
- weight:
- format: int32
- type: integer
- required:
- - podAffinityTerm
- - weight
- type: object
- type: array
- requiredDuringSchedulingIgnoredDuringExecution:
- items:
- properties:
- labelSelector:
- properties:
- matchExpressions:
- items:
- properties:
- key:
- type: string
- operator:
- type: string
- values:
- items:
- type: string
- type: array
- required:
- - key
- - operator
- type: object
- type: array
- matchLabels:
- additionalProperties:
- type: string
- type: object
- type: object
- namespaces:
- items:
- type: string
- type: array
- topologyKey:
- type: string
- required:
- - topologyKey
- type: object
- type: array
- type: object
- podAntiAffinity:
- properties:
- preferredDuringSchedulingIgnoredDuringExecution:
- items:
- properties:
- podAffinityTerm:
+ x-kubernetes-map-type: atomic
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' type: string required: - - topologyKey + - resource type: object - weight: - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - items: - properties: - labelSelector: + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key type: object - namespaces: - items: - type: string - type: array - topologyKey: - type: string - required: - - topologyKey + x-kubernetes-map-type: atomic type: object - type: array - type: object - type: object - annotations: - additionalProperties: - type: string - type: object - configMaps: - items: - properties: - name: - type: string - path: - type: string - required: - - name - - path - type: object - type: array - coreLimit: - type: string - coreRequest: - type: string - cores: - format: int32 - minimum: 1 - type: integer - deleteOnTermination: - type: boolean - dnsConfig: - properties: - nameservers: - items: - type: string - type: array - options: - items: - properties: - name: - type: string - value: - type: string + required: + - name type: object type: array - searches: + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. 
items: - type: string - type: array - type: object - env: - items: - properties: - name: - type: string - value: - type: string - valueFrom: + description: EnvFromSource represents the source of a + set of ConfigMaps properties: - configMapKeyRef: + configMapRef: + description: The ConfigMap to select from properties: - key: - type: string name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: + description: Specify whether the ConfigMap must + be defined type: boolean - required: - - key - type: object - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource type: object - secretKeyRef: + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from properties: - key: - type: string name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: + description: Specify whether the Secret must be + defined type: boolean - required: - - key type: object + x-kubernetes-map-type: atomic type: object - required: - - name - type: object - type: array - envFrom: - items: - properties: - configMapRef: - properties: - name: - type: string - optional: - type: boolean - type: object - prefix: - type: string - secretRef: - properties: - name: - type: string - optional: - type: boolean - type: object - type: object - type: array - envSecretKeyRefs: - additionalProperties: - properties: - key: - type: string - name: - type: string - required: - - key - - name - type: object - type: object - envVars: - additionalProperties: - type: string - type: object - gpu: - properties: - name: + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. 
type: string - quantity: - format: int64 - type: integer - required: - - name - - quantity - type: object - hostAliases: - items: - properties: - hostnames: - items: - type: string - type: array - ip: - type: string - type: object - type: array - hostNetwork: - type: boolean - image: - type: string - initContainers: - items: - properties: - args: - items: - type: string - type: array - command: - items: - type: string - type: array - env: - items: - properties: - name: - type: string - value: - type: string - valueFrom: - properties: - configMapKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - secretKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - type: object - required: - - name - type: object - type: array - envFrom: - items: + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: - configMapRef: + exec: + description: Exec specifies the action to take. properties: - name: - type: string - optional: - type: boolean + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array type: object - prefix: - type: string - secretRef: + httpGet: + description: HTTPGet specifies the http request + to perform. properties: - name: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. type: string - optional: - type: boolean - type: object + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object type: object - type: array - image: - type: string - imagePullPolicy: - type: string - lifecycle: - properties: - postStart: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - tcpSocket: - properties: - host: + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
+ items: type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. 
+ properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string - required: - - port - type: object - tcpSocket: - properties: - host: + value: + description: The header field value type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true required: - - port + - name + - value type: object - type: object - type: object - livenessProbe: + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
+ The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port in + a single container. properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. format: int32 type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: + hostIP: + description: What host IP to bind the external port + to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. format: int32 type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". 
+ type: string + required: + - containerPort type: object - name: - type: string - ports: - items: + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. properties: - containerPort: - format: int32 - type: integer - hostIP: - type: string - hostPort: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. format: int32 type: integer - name: - type: string - protocol: + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. type: string required: - - containerPort - - protocol + - port type: object - type: array - x-kubernetes-list-map-keys: - - containerPort - - protocol - x-kubernetes-list-type: map - readinessProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - resources: - properties: - limits: - additionalProperties: + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. 
+ This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: anyOf: - type: integer - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true - type: object - requests: - additionalProperties: + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: anyOf: - type: integer - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true - type: object - type: object - restartPolicy: - type: string - securityContext: + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. 
properties: - seccompProfile: - type: object - properties: - type: - type: string - allowPrivilegeEscalation: - type: boolean - capabilities: - properties: - add: - items: - type: string - type: array - drop: - items: - type: string - type: array - type: object - privileged: - type: boolean - procMount: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy type: object - startupProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in + PodSpec.ResourceClaims. properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. type: string required: - - port + - name type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. 
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - stdin: - type: boolean - stdinOnce: - type: boolean - terminationMessagePath: - type: string - terminationMessagePolicy: - type: string - tty: - type: boolean - volumeDevices: - items: + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. 
+ The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. properties: - devicePath: + level: + description: Level is SELinux level label that applies + to the container. type: string - name: + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. type: string - required: - - devicePath - - name type: object - type: array - volumeMounts: - items: + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. properties: - mountPath: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. type: string - mountPropagation: + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. 
+ RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. type: string - name: + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. type: string - readOnly: - type: boolean - subPath: + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. type: string - subPathExpr: + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. type: string - required: - - mountPath - - name type: object - type: array - workingDir: - type: string - required: - - name - type: object - type: array - instances: - format: int32 - minimum: 1 - type: integer - javaOptions: - type: string - labels: - additionalProperties: - type: string - type: object - memory: - type: string - memoryOverhead: - type: string - nodeSelector: - additionalProperties: - type: string - type: object - podSecurityContext: - properties: - fsGroup: - format: int64 - type: integer - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - supplementalGroups: - items: - format: int64 - type: integer - type: array - sysctls: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object - type: object - schedulerName: - type: string - secrets: - items: - properties: - name: - type: string - path: - type: string - secretType: - type: string - required: - - name - - path - - secretType - type: object - type: array - securityContext: - properties: - seccompProfile: type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. 
+ This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes properties: - type: - type: string - allowPrivilegeEscalation: - type: boolean - capabilities: - properties: - add: - items: - type: string - type: array - drop: - items: - type: string - type: array + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. 
Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer type: object - privileged: + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. type: boolean - procMount: + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. 
+ Defaults to File.
+ Cannot be updated.
 type: string
- readOnlyRootFilesystem:
+ tty:
+ description: |-
+ Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+ Default is false.
 type: boolean
- runAsGroup:
+ volumeDevices:
+ description: volumeDevices is the list of block devices
+ to be used by the container.
+ items:
+ description: volumeDevice describes a mapping of a raw
+ block device within a container.
+ properties:
+ devicePath:
+ description: devicePath is the path inside of the
+ container that the device will be mapped to.
+ type: string
+ name:
+ description: name must match the name of a persistentVolumeClaim
+ in the pod
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ description: |-
+ Pod volumes to mount into the container's filesystem.
+ Cannot be updated.
+ items:
+ description: VolumeMount describes a mounting of a Volume
+ within a container.
+ properties:
+ mountPath:
+ description: |-
+ Path within the container at which the volume should be mounted. Must
+ not contain ':'.
+ type: string
+ mountPropagation:
+ description: |-
+ mountPropagation determines how mounts are propagated from the host
+ to container and the other way around.
+ When not set, MountPropagationNone is used.
+ This field is beta in 1.10.
+ type: string
+ name:
+ description: This must match the Name of a Volume.
+ type: string
+ readOnly:
+ description: |-
+ Mounted read-only if true, read-write otherwise (false or unspecified).
+ Defaults to false.
+ type: boolean
+ subPath:
+ description: |-
+ Path within the volume from which the container's volume should be mounted.
+ Defaults to "" (volume's root).
+ type: string
+ subPathExpr:
+ description: |-
+ Expanded path within the volume from which the container's volume should be mounted.
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+ Defaults to "" (volume's root).
+ SubPathExpr and SubPath are mutually exclusive.
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ description: |-
+ Container's working directory.
+ If not specified, the container runtime's default will be used, which
+ might be configured in the container image.
+ Cannot be updated.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ terminationGracePeriodSeconds:
+ description: Termination grace period seconds for the pod
+ format: int64
+ type: integer
+ tolerations:
+ description: Tolerations specifies the tolerations listed in ".spec.tolerations"
+ to be applied to the pod.
+ items:
+ description: |-
+ The pod this Toleration is attached to tolerates any taint that matches
+ the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: |-
+ Effect indicates the taint effect to match. Empty means match all taint effects.
+ When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: |-
+ Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: |-
+ Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal.
+ Exists is equivalent to wildcard for value, so that a pod can
+ tolerate all taints of a particular category.
+ type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. format: int64 type: integer - runAsNonRoot: + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + volumeMounts: + description: VolumeMounts specifies the volumes listed in ".spec.volumes" + to mount into the main container's filesystem. + items: + description: VolumeMount describes a mounting of a Volume within + a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name type: object - serviceAccount: - type: string - shareProcessNamespace: - type: boolean - sidecars: + type: array + type: object + driverIngressOptions: + description: DriverIngressOptions allows configuring the Service and + the Ingress to expose ports inside Spark Driver + items: + description: DriverIngressConfiguration is for driver ingress specific + configuration parameters. + properties: + ingressAnnotations: + additionalProperties: + type: string + description: IngressAnnotations is a map of key,value pairs + of annotations that might be added to the ingress object. + i.e. specify nginx as ingress.class + type: object + ingressTLS: + description: TlsHosts is useful If we need to declare SSL certificates + to the ingress object items: + description: IngressTLS describes the transport layer security + associated with an ingress. properties: - args: - items: - type: string - type: array - command: + hosts: + description: |- + hosts is a list of hosts included in the TLS certificate. The values in + this list must match the name/s used in the tlsSecret. Defaults to the + wildcard host setting for the loadbalancer controller fulfilling this + Ingress, if left unspecified. 
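# Illustrative sketch: tolerations and volumeMounts as they would appear on a
# driver or executor spec under this schema. The taint key and volume name are
# hypothetical; the volume name must match an entry in .spec.volumes:
#
#   spec:
#     driver:
#       tolerations:
#       - key: dedicated
#         operator: Equal
#         value: spark
#         effect: NoSchedule
#       volumeMounts:
#       - name: spark-data
#         mountPath: /data
#         readOnly: true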
items: type: string type: array - env: + x-kubernetes-list-type: atomic + secretName: + description: |- + secretName is the name of the secret used to terminate TLS traffic on + port 443. Field is left optional to allow TLS routing based on SNI + hostname alone. If the SNI host in a listener conflicts with the "Host" + header field used by an IngressRule, the SNI host is used for termination + and value of the "Host" header is used for routing. + type: string + type: object + type: array + ingressURLFormat: + description: IngressURLFormat is the URL for the ingress. + type: string + serviceAnnotations: + additionalProperties: + type: string + description: ServiceAnnotations is a map of key,value pairs + of annotations that might be added to the service object. + type: object + serviceLabels: + additionalProperties: + type: string + description: ServiceLables is a map of key,value pairs of labels + that might be added to the service object. + type: object + servicePort: + description: ServicePort allows configuring the port at service + level that might be different from the targetPort. + format: int32 + type: integer + servicePortName: + description: |- + ServicePortName allows configuring the name of the service port. + This may be useful for sidecar proxies like Envoy injected by Istio which require specific ports names to treat traffic as proper HTTP. + type: string + serviceType: + description: ServiceType allows configuring the type of the + service. Defaults to ClusterIP. + type: string + required: + - servicePort + - servicePortName + type: object + type: array + dynamicAllocation: + description: |- + DynamicAllocation configures dynamic allocation that becomes available for the Kubernetes + scheduler backend since Spark 3.0. + properties: + enabled: + description: Enabled controls whether dynamic allocation is enabled + or not. + type: boolean + initialExecutors: + description: |- + InitialExecutors is the initial number of executors to request. If .spec.executor.instances + is also set, the initial number of executors is set to the bigger of that and this option. + format: int32 + type: integer + maxExecutors: + description: MaxExecutors is the upper bound for the number of + executors if dynamic allocation is enabled. + format: int32 + type: integer + minExecutors: + description: MinExecutors is the lower bound for the number of + executors if dynamic allocation is enabled. + format: int32 + type: integer + shuffleTrackingTimeout: + description: |- + ShuffleTrackingTimeout controls the timeout in milliseconds for executors that are holding + shuffle data if shuffle tracking is enabled (true by default if dynamic allocation is enabled). + format: int64 + type: integer + type: object + executor: + description: Executor is the executor specification. + properties: + affinity: + description: Affinity specifies the affinity/anti-affinity settings + for the pod. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for + the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
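# Illustrative sketch: enabling dynamic allocation (available on the Kubernetes
# scheduler backend since Spark 3.0) and exposing an extra driver port through
# driverIngressOptions; the port number and URL format are hypothetical:
#
#   spec:
#     dynamicAllocation:
#       enabled: true
#       initialExecutors: 2
#       minExecutors: 2
#       maxExecutors: 10
#       shuffleTrackingTimeout: 120000   # milliseconds
#     driverIngressOptions:
#     - servicePort: 4040
#       servicePortName: spark-ui        # an explicit name helps proxies like Istio/Envoy treat the traffic as HTTP
#       serviceType: ClusterIP
#       ingressURLFormat: "{{$appName}}.example.com"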
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). properties: - name: - type: string - value: - type: string - valueFrom: + preference: + description: A node selector term, associated with + the corresponding weight. properties: - configMapKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - secretKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the + corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer required: - - name - type: object - type: array - envFrom: - items: - properties: - configMapRef: - properties: - name: - type: string - optional: - type: boolean - type: object - prefix: - type: string - secretRef: - properties: - name: - type: string - optional: - type: boolean - type: object + - preference + - weight type: object type: array - image: - type: string - imagePullPolicy: - type: string - lifecycle: + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. properties: - postStart: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: type: string - value: + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
+ items: type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
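# Illustrative sketch: a required nodeAffinity term matching the schema above,
# pinning executors to nodes with a hypothetical instance-type label:
#
#   spec:
#     executor:
#       affinity:
#         nodeAffinity:
#           requiredDuringSchedulingIgnoredDuringExecution:
#             nodeSelectorTerms:
#             - matchExpressions:
#               - key: node.kubernetes.io/instance-type
#                 operator: In
#                 values:
#                 - m5.2xlarge
#                 - m5.4xlarge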
type: object - type: array - path: + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: type: string - required: - - port - type: object - tcpSocket: - properties: - host: + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+                                      type: object
+                                    type: object
+                                    x-kubernetes-map-type: atomic
+                                  namespaces:
+                                    description: |-
+                                      namespaces specifies a static list of namespace names that the term applies to.
+                                      The term is applied to the union of the namespaces listed in this field
+                                      and the ones selected by namespaceSelector.
+                                      null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+                                    items:
                                       type: string
-                                    port:
-                                      anyOf:
-                                      - type: integer
-                                      - type: string
-                                      x-kubernetes-int-or-string: true
-                                  required:
-                                  - port
-                                  type: object
-                              type: object
-                            type: object
-                          livenessProbe:
-                            properties:
-                              exec:
-                                properties:
-                                  command:
-                                    items:
+                                    type: array
+                                  topologyKey:
+                                    description: |-
+                                      This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+                                      the labelSelector in the specified namespaces, where co-located is defined as running on a node
+                                      whose value of the label with key <topologyKey> matches that of any node on which any of the
+                                      selected pods is running.
+                                      Empty topologyKey is not allowed.
                                       type: string
-                                    type: array
-                                type: object
-                              failureThreshold:
-                                format: int32
-                                type: integer
-                              httpGet:
-                                properties:
-                                  host:
-                                    type: string
-                                  httpHeaders:
-                                    items:
-                                      properties:
-                                        name:
-                                          type: string
-                                        value:
-                                          type: string
-                                      required:
-                                      - name
-                                      - value
+                                required:
+                                - topologyKey
+                                type: object
+                              weight:
+                                description: |-
+                                  weight associated with matching the corresponding podAffinityTerm,
+                                  in the range 1-100.
+                                format: int32
+                                type: integer
+                            required:
+                            - podAffinityTerm
+                            - weight
+                            type: object
+                          type: array
+                        requiredDuringSchedulingIgnoredDuringExecution:
+                          description: |-
+                            If the affinity requirements specified by this field are not met at
+                            scheduling time, the pod will not be scheduled onto the node.
+                            If the affinity requirements specified by this field cease to be met
+                            at some point during pod execution (e.g. due to a pod label update), the
+                            system may or may not try to eventually evict the pod from its node.
+                            When there are multiple elements, the lists of nodes corresponding to each
+                            podAffinityTerm are intersected, i.e. all terms must be satisfied.
+                          items:
+                            description: |-
+                              Defines a set of pods (namely those matching the labelSelector
+                              relative to the given namespace(s)) that this pod should be
+                              co-located (affinity) or not co-located (anti-affinity) with,
+                              where co-located is defined as running on a node whose value of
+                              the label with key <topologyKey> matches that of any node on which
+                              a pod of the set of pods is running
+                            properties:
+                              labelSelector:
+                                description: |-
+                                  A label query over a set of resources, in this case pods.
+                                  If it's null, this PodAffinityTerm matches with no Pods.
+                                properties:
+                                  matchExpressions:
+                                    description: matchExpressions is a list of label
+                                      selector requirements. The requirements are
+                                      ANDed.
+                                    items:
+                                      description: |-
+                                        A label selector requirement is a selector that contains values, a key, and an operator that
+                                        relates the key and values.
+                                      properties:
+                                        key:
+                                          description: key is the label key that
+                                            the selector applies to.
+                                          type: string
+                                        operator:
+                                          description: |-
+                                            operator represents a key's relationship to a set of values.
+                                            Valid operators are In, NotIn, Exists and DoesNotExist.
+                                          type: string
+                                        values:
+                                          description: |-
+                                            values is an array of string values. If the operator is In or NotIn,
+                                            the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                            the values array must be empty. This array is replaced during a strategic
+                                            merge patch.
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object - type: array - path: + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - name: - type: string - ports: + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, etc. + as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) properties: - containerPort: - format: int32 - type: integer - hostIP: - type: string - hostPort: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. 
+ type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+                                          type: string
+                                        values:
+                                          description: |-
+                                            values is an array of string values. If the operator is In or NotIn,
+                                            the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                            the values array must be empty. This array is replaced during a strategic
+                                            merge patch.
+                                          items:
+                                            type: string
+                                          type: array
+                                      required:
+                                      - key
+                                      - operator
+                                      type: object
+                                    type: array
+                                  matchLabels:
+                                    additionalProperties:
+                                      type: string
+                                    description: |-
+                                      matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+                                      map is equivalent to an element of matchExpressions, whose key field is "key", the
+                                      operator is "In", and the values array contains only "value". The requirements are ANDed.
+                                    type: object
+                                type: object
+                                x-kubernetes-map-type: atomic
+                              namespaces:
+                                description: |-
+                                  namespaces specifies a static list of namespace names that the term applies to.
+                                  The term is applied to the union of the namespaces listed in this field
+                                  and the ones selected by namespaceSelector.
+                                  null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+                                items:
+                                  type: string
+                                type: array
+                              topologyKey:
+                                description: |-
+                                  This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+                                  the labelSelector in the specified namespaces, where co-located is defined as running on a node
+                                  whose value of the label with key <topologyKey> matches that of any node on which any of the
+                                  selected pods is running.
+                                  Empty topologyKey is not allowed.
+                                type: string
+                            required:
+                            - topologyKey
+                            type: object
+                          weight:
+                            description: |-
+                              weight associated with matching the corresponding podAffinityTerm,
+                              in the range 1-100.
                             format: int32
                             type: integer
-                          name:
-                            type: string
-                          protocol:
-                            type: string
                         required:
-                        - containerPort
-                        - protocol
+                        - podAffinityTerm
+                        - weight
                         type: object
                       type: array
-                      x-kubernetes-list-map-keys:
-                      - containerPort
-                      - protocol
-                      x-kubernetes-list-type: map
-                    readinessProbe:
-                      properties:
-                        exec:
-                          properties:
-                            command:
-                              items:
-                                type: string
-                              type: array
-                          type: object
-                        failureThreshold:
-                          format: int32
-                          type: integer
-                        httpGet:
-                          properties:
-                            host:
-                              type: string
-                            httpHeaders:
-                              items:
-                                properties:
-                                  name:
-                                    type: string
-                                  value:
-                                    type: string
-                                required:
-                                - name
-                                - value
+                    requiredDuringSchedulingIgnoredDuringExecution:
+                      description: |-
+                        If the anti-affinity requirements specified by this field are not met at
+                        scheduling time, the pod will not be scheduled onto the node.
+                        If the anti-affinity requirements specified by this field cease to be met
+                        at some point during pod execution (e.g. due to a pod label update), the
+                        system may or may not try to eventually evict the pod from its node.
+                        When there are multiple elements, the lists of nodes corresponding to each
+                        podAffinityTerm are intersected, i.e. all terms must be satisfied.
+                      items:
+                        description: |-
+                          Defines a set of pods (namely those matching the labelSelector
+                          relative to the given namespace(s)) that this pod should be
+                          co-located (affinity) or not co-located (anti-affinity) with,
+                          where co-located is defined as running on a node whose value of
+                          the label with key <topologyKey> matches that of any node on which
+                          a pod of the set of pods is running
+                        properties:
+                          labelSelector:
+                            description: |-
+                              A label query over a set of resources, in this case pods.
+                              If it's null, this PodAffinityTerm matches with no Pods.
+                            properties:
+                              matchExpressions:
+                                description: matchExpressions is a list of label
+                                  selector requirements.
The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object - type: array - path: + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. 
+ items: type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - resources: - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - type: object - securityContext: - properties: - seccompProfile: - type: object - properties: - type: - type: string - allowPrivilegeEscalation: - type: boolean - capabilities: - properties: - add: - items: - type: string - type: array - drop: - items: - type: string - type: array - type: object - privileged: - type: boolean - procMount: - type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object - type: object - startupProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - stdin: - type: boolean - stdinOnce: - type: boolean - terminationMessagePath: - type: string - terminationMessagePolicy: - type: string - tty: - type: boolean - volumeDevices: - items: - properties: - devicePath: - type: string - name: - type: string - required: - - devicePath - - name - type: object - type: array - volumeMounts: - items: - properties: - mountPath: - type: string - mountPropagation: - type: string - name: - type: string - readOnly: - type: boolean - subPath: - type: string - subPathExpr: + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. 
type: string required: - - mountPath - - name + - topologyKey type: object type: array - workingDir: - type: string - required: - - name - type: object - type: array - terminationGracePeriodSeconds: - format: int64 - type: integer - tolerations: - items: - properties: - effect: - type: string - key: - type: string - operator: - type: string - tolerationSeconds: - format: int64 - type: integer - value: - type: string - type: object - type: array - volumeMounts: - items: - properties: - mountPath: - type: string - mountPropagation: - type: string - name: - type: string - readOnly: - type: boolean - subPath: - type: string - subPathExpr: - type: string - required: - - mountPath - - name type: object - type: array - type: object - failureRetries: - format: int32 - type: integer - hadoopConf: - additionalProperties: - type: string - type: object - hadoopConfigMap: - type: string - image: - type: string - imagePullPolicy: - type: string - imagePullSecrets: - items: - type: string - type: array - mainApplicationFile: - type: string - mainClass: - type: string - memoryOverheadFactor: - type: string - mode: - enum: - - cluster - - client - type: string - monitoring: - properties: - exposeDriverMetrics: - type: boolean - exposeExecutorMetrics: - type: boolean - metricsProperties: - type: string - metricsPropertiesFile: + type: object + annotations: + additionalProperties: type: string - prometheus: + description: Annotations are the Kubernetes annotations to be + added to the pod. + type: object + configMaps: + description: ConfigMaps carries information of other ConfigMaps + to add to the pod. + items: + description: NamePath is a pair of a name and a path to which + the named objects should be mounted to. properties: - configFile: - type: string - configuration: - type: string - jmxExporterJar: + name: type: string - port: - format: int32 - maximum: 49151 - minimum: 1024 - type: integer - portName: + path: type: string required: - - jmxExporterJar + - name + - path type: object - required: - - exposeDriverMetrics - - exposeExecutorMetrics - type: object - nodeSelector: - additionalProperties: + type: array + coreLimit: + description: |- + CoreLimit specifies a hard limit on CPU cores for the pod. + Optional type: string - type: object - proxyUser: - type: string - pythonVersion: - enum: - - "2" - - "3" - type: string - restartPolicy: - properties: - onFailureRetries: - format: int32 - minimum: 0 - type: integer - onFailureRetryInterval: - format: int64 - minimum: 1 - type: integer - onSubmissionFailureRetries: - format: int32 - minimum: 0 - type: integer - onSubmissionFailureRetryInterval: - format: int64 - minimum: 1 - type: integer - type: - enum: - - Never - - Always - - OnFailure - type: string - type: object - retryInterval: - format: int64 - type: integer - sparkConf: - additionalProperties: + coreRequest: + description: |- + CoreRequest is the physical CPU core request for the executors. + Maps to `spark.kubernetes.executor.request.cores` that is available since Spark 2.4. 
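# Illustrative sketch: a preferred podAntiAffinity term that spreads executors
# across nodes, plus the pod annotations and configMaps fields defined above.
# Label values, the annotation, and the ConfigMap name/path are hypothetical:
#
#   spec:
#     executor:
#       affinity:
#         podAntiAffinity:
#           preferredDuringSchedulingIgnoredDuringExecution:
#           - weight: 100
#             podAffinityTerm:
#               topologyKey: kubernetes.io/hostname
#               labelSelector:
#                 matchLabels:
#                   spark-role: executor
#       annotations:
#         cluster-autoscaler.kubernetes.io/safe-to-evict: "false"
#       configMaps:
#       - name: hadoop-site
#         path: /etc/hadoop/conf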
type: string - type: object - sparkConfigMap: - type: string - sparkUIOptions: - properties: - serviceAnnotations: - additionalProperties: - type: string - type: object - serviceLabels: - additionalProperties: - type: string - type: object - ingressAnnotations: - additionalProperties: - type: string - type: object - ingressTLS: - items: - properties: - hosts: - items: - type: string - type: array - secretName: - type: string - type: object - type: array - servicePort: - format: int32 - type: integer - servicePortName: - type: string - serviceType: - type: string - type: object - driverIngressOptions: - items: + cores: + description: Cores maps to `spark.driver.cores` or `spark.executor.cores` + for the driver and executors, respectively. + format: int32 + minimum: 1 + type: integer + deleteOnTermination: + description: |- + DeleteOnTermination specify whether executor pods should be deleted in case of failure or normal termination. + Maps to `spark.kubernetes.executor.deleteOnTermination` that is available since Spark 3.0. + type: boolean + dnsConfig: + description: DnsConfig dns settings for the pod, following the + Kubernetes specifications. properties: - serviceAnnotations: - additionalProperties: - type: string - type: object - serviceLabels: - additionalProperties: - type: string - type: object - ingressURLFormat: - type: string - ingressAnnotations: - additionalProperties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: type: string - type: object - ingressTLS: + type: array + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. items: + description: PodDNSConfigOption defines DNS resolver options + of a pod. properties: - hosts: - items: - type: string - type: array - secretName: + name: + description: Required. 
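# Illustrative sketch: the executor CPU fields defined above. `coreRequest`
# maps to spark.kubernetes.executor.request.cores (Spark 2.4+) and may differ
# from the integer `cores`; the values here are hypothetical:
#
#   spec:
#     executor:
#       cores: 1
#       coreRequest: "500m"
#       coreLimit: "1200m"
#       deleteOnTermination: false   # keep terminated executor pods for debugging (Spark 3.0+)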
+ type: string + value: type: string type: object type: array - servicePort: - format: int32 - type: integer - servicePortName: - type: string - serviceType: - type: string - type: object - type: array - sparkVersion: - type: string - timeToLiveSeconds: - format: int64 - type: integer - type: - enum: - - Java - - Python - - Scala - - R - type: string - volumes: - items: - properties: - awsElasticBlockStore: - properties: - fsType: - type: string - partition: - format: int32 - type: integer - readOnly: - type: boolean - volumeID: - type: string - required: - - volumeID - type: object - azureDisk: - properties: - cachingMode: - type: string - diskName: - type: string - diskURI: - type: string - fsType: - type: string - kind: - type: string - readOnly: - type: boolean - required: - - diskName - - diskURI - type: object - azureFile: - properties: - readOnly: - type: boolean - secretName: - type: string - shareName: - type: string - required: - - secretName - - shareName - type: object - cephfs: - properties: - monitors: - items: - type: string - type: array - path: - type: string - readOnly: - type: boolean - secretFile: - type: string - secretRef: - properties: - name: - type: string - type: object - user: - type: string - required: - - monitors - type: object - cinder: - properties: - fsType: - type: string - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - volumeID: - type: string - required: - - volumeID - type: object - configMap: - properties: - defaultMode: - format: int32 - type: integer - items: - items: + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + type: object + env: + description: Env carries the environment variables to add to the + pod. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. properties: key: + description: The key to select. type: string - mode: - format: int32 - type: integer - path: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
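# Illustrative sketch: executor dnsConfig following the PodDNSConfig shape
# above; the nameserver, search domain, and resolver option are hypothetical:
#
#   spec:
#     executor:
#       dnsConfig:
#         nameservers:
#         - 1.2.3.4
#         searches:
#         - ns1.svc.cluster.local
#         options:
#         - name: ndots
#           value: "2"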
type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean required: - key - - path type: object - type: array - name: + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: EnvFrom is a list of sources to populate environment + variables in the container. + items: + description: EnvFromSource represents the source of a set of + ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each key + in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + envSecretKeyRefs: + additionalProperties: + description: NameKey represents the name and key of a SecretKeyRef. 
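# Illustrative sketch: env and envFrom per the schema above (preferred over the
# deprecated envVars/envSecretKeyRefs fields that follow); the ConfigMap and
# Secret names are hypothetical:
#
#   spec:
#     executor:
#       env:
#       - name: LOG_LEVEL
#         value: INFO
#       - name: DB_PASSWORD
#         valueFrom:
#           secretKeyRef:
#             name: db-credentials
#             key: password
#       envFrom:
#       - configMapRef:
#           name: spark-env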
+ properties: + key: + type: string + name: + type: string + required: + - key + - name + type: object + description: |- + EnvSecretKeyRefs holds a mapping from environment variable names to SecretKeyRefs. + Deprecated. Consider using `env` instead. + type: object + envVars: + additionalProperties: + type: string + description: |- + EnvVars carries the environment variables to add to the pod. + Deprecated. Consider using `env` instead. + type: object + gpu: + description: GPU specifies GPU requirement for the pod. + properties: + name: + description: 'Name is GPU resource name, such as: nvidia.com/gpu + or amd.com/gpu' + type: string + quantity: + description: Quantity is the number of GPUs to request for + driver or executor. + format: int64 + type: integer + required: + - name + - quantity + type: object + hostAliases: + description: HostAliases settings for the pod, following the Kubernetes + specifications. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: type: string - optional: - type: boolean - type: object - csi: - properties: - driver: + type: array + ip: + description: IP address of the host file entry. + type: string + type: object + type: array + hostNetwork: + description: HostNetwork indicates whether to request host networking + for the pod or not. + type: boolean + image: + description: Image is the container image to use. Overrides Spec.Image + if set. + type: string + initContainers: + description: InitContainers is a list of init-containers that + run to completion before the main Spark container. + items: + description: A single application container that you want to + run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: type: string - fsType: + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: type: string - nodePublishSecretRef: + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. 
properties: name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name type: object - readOnly: - type: boolean - volumeAttributes: - additionalProperties: - type: string - type: object - required: - - driver - type: object - downwardAPI: - properties: - defaultMode: - format: int32 - type: integer + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. items: - items: + description: EnvFromSource represents the source of a + set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks properties: - fieldRef: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. 
To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. properties: - apiVersion: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. type: string - fieldPath: + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - - fieldPath + - port type: object - mode: - format: int32 - type: integer - path: - type: string - resourceFieldRef: + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. properties: - containerName: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' type: string - divisor: + port: anyOf: - type: integer - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true - resource: - type: string required: - - resource + - port type: object - required: - - path type: object - type: array - type: object - emptyDir: - properties: - medium: - type: string - sizeLimit: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - ephemeral: - properties: - volumeClaimTemplate: - properties: - spec: - properties: - accessModes: - items: + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. 
Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. type: string - type: array - resources: - properties: - requests: + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes properties: - storage: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string - type: object - type: object - storageClassName: - type: string - type: object - type: object - type: object - fc: - properties: - fsType: - type: string - lun: - format: int32 - type: integer - readOnly: - type: boolean - targetWWNs: - items: - type: string - type: array - wwids: - items: - type: string - type: array - type: object - flexVolume: - properties: - driver: - type: string - fsType: - type: string - options: - additionalProperties: - type: string - type: object - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - required: - - driver - type: object - flocker: - properties: - datasetName: - type: string - datasetUUID: - type: string - type: object - gcePersistentDisk: - properties: - fsType: - type: string - partition: - format: int32 - type: integer - pdName: - type: string - readOnly: - type: boolean - required: - - pdName - type: object - gitRepo: - properties: - directory: - type: string - repository: - type: string - revision: - type: string - required: - - repository - type: object - glusterfs: - properties: - endpoints: - type: string - path: - type: string - readOnly: - type: boolean - required: - - endpoints - - path - type: object - hostPath: - properties: - path: - type: string - type: - type: string - required: - - path - type: object - iscsi: - properties: - chapAuthDiscovery: - type: boolean - chapAuthSession: - type: boolean - fsType: - type: string - initiatorName: - type: string - iqn: - type: string - iscsiInterface: - type: string - lun: - format: int32 - type: integer - portals: - items: - type: string - type: array - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - targetPortal: - type: string - required: - - iqn - - lun - - targetPortal - type: object - name: - type: string - nfs: - properties: - path: - type: string - readOnly: - type: boolean - server: - type: string - required: 
- - path - - server - type: object - persistentVolumeClaim: - properties: - claimName: - type: string - readOnly: - type: boolean - required: - - claimName - type: object - photonPersistentDisk: - properties: - fsType: - type: string - pdID: - type: string - required: - - pdID - type: object - portworxVolume: - properties: - fsType: - type: string - readOnly: - type: boolean - volumeID: - type: string - required: - - volumeID - type: object - projected: - properties: - defaultMode: - format: int32 - type: integer - sources: - items: - properties: - configMap: - properties: - items: - items: - properties: - key: - type: string - mode: - format: int32 - type: integer - path: - type: string - required: - - key - - path + value: + description: The header field value + type: string + required: + - name + - value type: object type: array - name: + path: + description: Path to access on the HTTP server. type: string - optional: - type: boolean - type: object - downwardAPI: - properties: - items: - items: - properties: - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - mode: - format: int32 - type: integer - path: - type: string - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - required: - - path - type: object - type: array - type: object - secret: - properties: - items: - items: - properties: - key: - type: string - mode: - format: int32 - type: integer - path: - type: string - required: - - key - - path - type: object - type: array - name: + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string - optional: - type: boolean + required: + - port type: object - serviceAccountToken: + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. properties: - audience: - type: string - expirationSeconds: + seconds: + description: Seconds is the number of seconds + to sleep. format: int64 type: integer - path: + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true required: - - path + - port type: object type: object - type: array - required: - - sources - type: object - quobyte: - properties: - group: - type: string - readOnly: - type: boolean - registry: - type: string - tenant: - type: string - user: - type: string - volume: - type: string - required: - - registry - - volume - type: object - rbd: - properties: - fsType: - type: string - image: - type: string - keyring: - type: string - monitors: - items: - type: string - type: array - pool: - type: string - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - user: - type: string - required: - - image - - monitors - type: object - scaleIO: - properties: - fsType: - type: string - gateway: - type: string - protectionDomain: - type: string - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - sslEnabled: - type: boolean - storageMode: - type: string - storagePool: - type: string - system: - type: string - volumeName: - type: string - required: - - gateway - - secretRef - - system - type: object - secret: - properties: - defaultMode: - format: int32 - type: integer - items: - items: + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. properties: - key: - type: string - mode: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. format: int32 type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. type: string required: - - key - - path + - port type: object - type: array - optional: - type: boolean - secretName: - type: string - type: object - storageos: - properties: - fsType: - type: string - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - volumeName: - type: string - volumeNamespace: - type: string - type: object - vsphereVolume: - properties: - fsType: + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. 
Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port in + a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port + to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. 
+ This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. 
+ type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in + PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. 
+ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. 
+ Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. 
+ Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a raw + block device within a container. 
+ properties: + devicePath: + description: devicePath is the path inside of the + container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + instances: + description: Instances is the number of executor instances. + format: int32 + minimum: 1 + type: integer + javaOptions: + description: |- + JavaOptions is a string of extra JVM options to pass to the executors. For instance, + GC settings or other logging. + type: string + labels: + additionalProperties: + type: string + description: Labels are the Kubernetes labels to be added to the + pod. + type: object + lifecycle: + description: Lifecycle for running preStop or postStart commands + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. 
+ properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the container + should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. 
You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the container + should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + memory: + description: Memory is the amount of memory to request for the + pod. + type: string + memoryOverhead: + description: MemoryOverhead is the amount of off-heap memory to + allocate in cluster mode, in MiB unless otherwise specified. + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is the Kubernetes node selector to be added to the driver and executor pods. + This field is mutually exclusive with nodeSelector at SparkApplication level (which will be deprecated). + type: object + podSecurityContext: + description: PodSecurityContext specifies the PodSecurityContext + to apply. + properties: + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. 
+ Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in addition + to the container's primary GID, the fsGroup (if specified), and group memberships + defined in the container image for the uid of the container process. 
If unspecified, + no additional groups are added to any container. Note that group memberships + defined in the container image for the uid of the container process are still effective, + even if they are not included in this list. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + ports: + description: Ports settings for the pods, following the Kubernetes + specifications. + items: + description: Port represents the port definition in the pods + objects. + properties: + containerPort: + format: int32 + type: integer + name: + type: string + protocol: + type: string + required: + - containerPort + - name + - protocol + type: object + type: array + schedulerName: + description: SchedulerName specifies the scheduler that will be + used for scheduling + type: string + secrets: + description: Secrets carries information of secrets to add to + the pod. + items: + description: SecretInfo captures information of a secret. + properties: + name: + type: string + path: + type: string + secretType: + description: SecretType tells the type of a secret. + type: string + required: + - name + - path + - secretType + type: object + type: array + securityContext: + description: SecurityContext specifies the container's SecurityContext + to apply. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. 
This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. 
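+              # For illustration only, a pod-level security context on the executor
+              # using the fields documented above; the numeric IDs are arbitrary
+              # example values:
+              #
+              #   spec:
+              #     executor:
+              #       podSecurityContext:
+              #         runAsUser: 1000
+              #         runAsGroup: 3000
+              #         runAsNonRoot: true
+              #         fsGroup: 2000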
+ type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: ServiceAccount is the name of the custom Kubernetes + service account used by the pod. + type: string + shareProcessNamespace: + description: ShareProcessNamespace settings for the pod, following + the Kubernetes specifications. + type: boolean + sidecars: + description: Sidecars is a list of sidecar containers that run + along side the main Spark container. + items: + description: A single application container that you want to + run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. 
Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
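+              # Hypothetical sketch of a sidecar environment variable (an entry under
+              # spec.executor.sidecars) that exposes the container's own CPU request
+              # through the resourceFieldRef selector described above:
+              #
+              #   env:
+              #     - name: CPU_REQUEST
+              #       valueFrom:
+              #         resourceFieldRef:
+              #           resource: requests.cpu
+              #           divisor: "1"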
+ properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of a + set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. 
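+              # A minimal sidecar entry combining the container fields above; the
+              # image and ConfigMap names are made-up examples:
+              #
+              #   spec:
+              #     executor:
+              #       sidecars:
+              #         - name: log-forwarder
+              #           image: fluent/fluent-bit:latest
+              #           imagePullPolicy: IfNotPresent
+              #           envFrom:
+              #             - configMapRef:
+              #                 name: log-forwarder-config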
+ properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. 
Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. 
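+              # Sketch of a sidecar preStop hook matching the handler schema above,
+              # letting the container drain before the grace period expires; the
+              # script path is a placeholder:
+              #
+              #   lifecycle:
+              #     preStop:
+              #       exec:
+              #         command: ["/bin/sh", "-c", "/opt/scripts/drain.sh"]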
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
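+              # Illustrative liveness probe for a sidecar using the httpGet handler
+              # and the timing fields described above; the path and port are
+              # assumptions, not schema defaults:
+              #
+              #   livenessProbe:
+              #     httpGet:
+              #       path: /healthz
+              #       port: 8080
+              #     initialDelaySeconds: 10
+              #     periodSeconds: 10
+              #     failureThreshold: 3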
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port in + a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port + to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
+ The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in + PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. 
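+              # A hardened sidecar securityContext built only from fields in the
+              # schema above; whether these settings suit a given image is an
+              # assumption:
+              #
+              #   securityContext:
+              #     runAsNonRoot: true
+              #     readOnlyRootFilesystem: true
+              #     allowPrivilegeEscalation: false
+              #     capabilities:
+              #       drop: ["ALL"]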
+ type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
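+              # Sketch of a startup probe for a slow-starting sidecar: with the
+              # illustrative values below the container gets up to 30 x 10s = 300s
+              # to initialize before liveness checks take over; the path and port
+              # are placeholders:
+              #
+              #   startupProbe:
+              #     httpGet:
+              #       path: /ready
+              #       port: 8080
+              #     failureThreshold: 30
+              #     periodSeconds: 10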
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the
+ first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+ at which time stdin is closed and remains closed until the container is restarted. If this
+ flag is false, a container process that reads from stdin will never receive an EOF.
+ Default is false.
+ type: boolean
+ terminationMessagePath:
+ description: |-
+ Optional: Path at which the file to which the container's termination message
+ will be written is mounted into the container's filesystem.
+ Message written is intended to be brief final status, such as an assertion failure message.
+ Will be truncated by the node if greater than 4096 bytes. The total message length across
+ all containers will be limited to 12kb.
+ Defaults to /dev/termination-log.
+ Cannot be updated.
+ type: string
+ terminationMessagePolicy:
+ description: |-
+ Indicate how the termination message should be populated. File will use the contents of
+ terminationMessagePath to populate the container status message on both success and failure.
+ FallbackToLogsOnError will use the last chunk of container log output if the termination
+ message file is empty and the container exited with an error.
+ The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+ Defaults to File.
+ Cannot be updated.
+ type: string
+ tty:
+ description: |-
+ Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+ Default is false.
+ type: boolean
+ volumeDevices:
+ description: volumeDevices is the list of block devices
+ to be used by the container.
+ items:
+ description: volumeDevice describes a mapping of a raw
+ block device within a container.
+ properties:
+ devicePath:
+ description: devicePath is the path inside of the
+ container that the device will be mapped to.
+ type: string
+ name:
+ description: name must match the name of a persistentVolumeClaim
+ in the pod
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ description: |-
+ Pod volumes to mount into the container's filesystem.
+ Cannot be updated.
+ items:
+ description: VolumeMount describes a mounting of a Volume
+ within a container.
+ properties:
+ mountPath:
+ description: |-
+ Path within the container at which the volume should be mounted. Must
+ not contain ':'.
+ type: string
+ mountPropagation:
+ description: |-
+ mountPropagation determines how mounts are propagated from the host
+ to container and the other way around.
+ When not set, MountPropagationNone is used.
+ This field is beta in 1.10.
+ type: string
+ name:
+ description: This must match the Name of a Volume.
+ type: string
+ readOnly:
+ description: |-
+ Mounted read-only if true, read-write otherwise (false or unspecified).
+ Defaults to false.
+ type: boolean
+ subPath:
+ description: |-
+ Path within the volume from which the container's volume should be mounted.
+ Defaults to "" (volume's root).
+ type: string
+ subPathExpr:
+ description: |-
+ Expanded path within the volume from which the container's volume should be mounted.
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+ Defaults to "" (volume's root).
+ SubPathExpr and SubPath are mutually exclusive.
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ description: |-
+ Container's working directory.
+ If not specified, the container runtime's default will be used, which
+ might be configured in the container image.
+ Cannot be updated.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ terminationGracePeriodSeconds:
+ description: Termination grace period seconds for the pod
+ format: int64
+ type: integer
+ tolerations:
+ description: Tolerations specifies the tolerations listed in ".spec.tolerations"
+ to be applied to the pod.
+ items:
+ description: |-
+ The pod this Toleration is attached to tolerates any taint that matches
+ the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: |-
+ Effect indicates the taint effect to match. Empty means match all taint effects.
+ When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: |-
+ Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: |-
+ Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal.
+ Exists is equivalent to wildcard for value, so that a pod can
+ tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: |-
+ TolerationSeconds represents the period of time the toleration (which must be
+ of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do not evict). Zero and
+ negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: |-
+ Value is the taint value the toleration matches to.
+ If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ volumeMounts:
+ description: VolumeMounts specifies the volumes listed in ".spec.volumes"
+ to mount into the main container's filesystem.
+ items:
+ description: VolumeMount describes a mounting of a Volume within
+ a container.
+ properties:
+ mountPath:
+ description: |-
+ Path within the container at which the volume should be mounted. Must
+ not contain ':'.
+ type: string
+ mountPropagation:
+ description: |-
+ mountPropagation determines how mounts are propagated from the host
+ to container and the other way around.
+ When not set, MountPropagationNone is used.
+ This field is beta in 1.10.
+ type: string
+ name:
+ description: This must match the Name of a Volume.
+ type: string
+ readOnly:
+ description: |-
+ Mounted read-only if true, read-write otherwise (false or unspecified).
+ Defaults to false.
+ type: boolean
+ subPath:
+ description: |-
+ Path within the volume from which the container's volume should be mounted.
+ Defaults to "" (volume's root).
+ type: string
+ subPathExpr:
+ description: |-
+ Expanded path within the volume from which the container's volume should be mounted.
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+ Defaults to "" (volume's root).
+ SubPathExpr and SubPath are mutually exclusive.
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ type: object
+ failureRetries:
+ description: |-
+ FailureRetries is the number of times to retry a failed application before giving up.
+ This is best effort and actual retry attempts can be >= the value specified.
+ format: int32
+ type: integer
+ hadoopConf:
+ additionalProperties:
+ type: string
+ description: |-
+ HadoopConf carries user-specified Hadoop configuration properties as they would use the "--conf" option
+ in spark-submit. The SparkApplication controller automatically adds prefix "spark.hadoop." to Hadoop
+ configuration properties.
+ type: object
+ hadoopConfigMap:
+ description: |-
+ HadoopConfigMap carries the name of the ConfigMap containing Hadoop configuration files such as core-site.xml.
+ The controller will add environment variable HADOOP_CONF_DIR to the path where the ConfigMap is mounted to.
+ type: string
+ image:
+ description: |-
+ Image is the container image for the driver, executor, and init-container. Any custom container images for the
+ driver, executor, or init-container takes precedence over this.
+ type: string
+ imagePullPolicy:
+ description: ImagePullPolicy is the image pull policy for the driver,
+ executor, and init-container.
+ type: string
+ imagePullSecrets:
+ description: ImagePullSecrets is the list of image-pull secrets.
+ items:
+ type: string
+ type: array
+ mainApplicationFile:
+ description: MainFile is the path to a bundled JAR, Python, or R file
+ of the application.
+ type: string
+ mainClass:
+ description: |-
+ MainClass is the fully-qualified main class of the Spark application.
+ This only applies to Java/Scala Spark applications.
+ type: string
+ memoryOverheadFactor:
+ description: |-
+ This sets the Memory Overhead Factor that will allocate memory to non-JVM memory.
+ For JVM-based jobs this value will default to 0.10, for non-JVM jobs 0.40. Value of this field will
+ be overridden by `Spec.Driver.MemoryOverhead` and `Spec.Executor.MemoryOverhead` if they are set.
+ type: string
+ mode:
+ description: Mode is the deployment mode of the Spark application.
+ enum:
+ - cluster
+ - client
+ type: string
+ monitoring:
+ description: Monitoring configures how monitoring is handled.
+ properties:
+ exposeDriverMetrics:
+ description: ExposeDriverMetrics specifies whether to expose metrics
+ on the driver.
+ type: boolean
+ exposeExecutorMetrics:
+ description: ExposeExecutorMetrics specifies whether to expose
+ metrics on the executors.
+ type: boolean
+ metricsProperties:
+ description: |-
+ MetricsProperties is the content of a custom metrics.properties for configuring the Spark metric system.
+ If not specified, the content in spark-docker/conf/metrics.properties will be used.
+ type: string
+ metricsPropertiesFile:
+ description: |-
+ MetricsPropertiesFile is the container local path of file metrics.properties for configuring
+ the Spark metric system. If not specified, value /etc/metrics/conf/metrics.properties will be used.
+ type: string
+ prometheus:
+ description: Prometheus is for configuring the Prometheus JMX
+ exporter.
+ properties:
+ configFile:
+ description: |-
+ ConfigFile is the path to the custom Prometheus configuration file provided in the Spark image.
+ ConfigFile takes precedence over Configuration, which is shown below.
+ type: string
+ configuration:
+ description: |-
+ Configuration is the content of the Prometheus configuration needed by the Prometheus JMX exporter.
+ If not specified, the content in spark-docker/conf/prometheus.yaml will be used.
+ Configuration has no effect if ConfigFile is set.
+ type: string
+ jmxExporterJar:
+ description: JmxExporterJar is the path to the Prometheus
+ JMX exporter jar in the container.
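+ # Editorial note (illustrative sketch, comment only): a monitoring section
+ # using this schema might look like the following; the jar path is hypothetical.
+ #   monitoring:
+ #     exposeDriverMetrics: true
+ #     exposeExecutorMetrics: true
+ #     prometheus:
+ #       jmxExporterJar: /prometheus/jmx_prometheus_javaagent.jar
+ #       port: 8090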
+ type: string
+ port:
+ description: |-
+ Port is the port of the HTTP server run by the Prometheus JMX exporter.
+ If not specified, 8090 will be used as the default.
+ format: int32
+ maximum: 49151
+ minimum: 1024
+ type: integer
+ portName:
+ description: |-
+ PortName is the port name of the Prometheus JMX exporter port.
+ If not specified, jmx-exporter will be used as the default.
+ type: string
+ required:
+ - jmxExporterJar
+ type: object
+ required:
+ - exposeDriverMetrics
+ - exposeExecutorMetrics
+ type: object
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: |-
+ NodeSelector is the Kubernetes node selector to be added to the driver and executor pods.
+ This field is mutually exclusive with nodeSelector at podSpec level (driver or executor).
+ This field will be deprecated in future versions (at SparkApplicationSpec level).
+ type: object
+ proxyUser:
+ description: |-
+ ProxyUser specifies the user to impersonate when submitting the application.
+ It maps to the command-line flag "--proxy-user" in spark-submit.
+ type: string
+ pythonVersion:
+ description: |-
+ This sets the major Python version of the docker
+ image used to run the driver and executor containers. Can either be 2 or 3, default 2.
+ enum:
+ - "2"
+ - "3"
+ type: string
+ restartPolicy:
+ description: RestartPolicy defines the policy on if and in which conditions
+ the controller should restart an application.
+ properties:
+ onFailureRetries:
+ description: OnFailureRetries is the number of times to retry running
+ an application before giving up.
+ format: int32
+ minimum: 0
+ type: integer
+ onFailureRetryInterval:
+ description: OnFailureRetryInterval is the interval in seconds
+ between retries on failed runs.
+ format: int64
+ minimum: 1
+ type: integer
+ onSubmissionFailureRetries:
+ description: |-
+ OnSubmissionFailureRetries is the number of times to retry submitting an application before giving up.
+ This is best effort and actual retry attempts can be >= the value specified due to caching.
+ These are required if RestartPolicy is OnFailure.
+ format: int32
+ minimum: 0
+ type: integer
+ onSubmissionFailureRetryInterval:
+ description: OnSubmissionFailureRetryInterval is the interval
+ in seconds between retries on failed submissions.
+ format: int64
+ minimum: 1
+ type: integer
+ type:
+ description: Type specifies the RestartPolicyType.
+ enum:
+ - Never
+ - Always
+ - OnFailure
+ type: string
+ type: object
+ retryInterval:
+ description: RetryInterval is the unit of intervals in seconds between
+ submission retries.
+ format: int64
+ type: integer
+ sparkConf:
+ additionalProperties:
+ type: string
+ description: |-
+ SparkConf carries user-specified Spark configuration properties as they would use the "--conf" option in
+ spark-submit.
+ type: object
+ sparkConfigMap:
+ description: |-
+ SparkConfigMap carries the name of the ConfigMap containing Spark configuration files such as log4j.properties.
+ The controller will add environment variable SPARK_CONF_DIR to the path where the ConfigMap is mounted to.
+ type: string
+ sparkUIOptions:
+ description: SparkUIOptions allows configuring the Service and the
+ Ingress to expose the Spark UI
+ properties:
+ ingressAnnotations:
+ additionalProperties:
+ type: string
+ description: IngressAnnotations is a map of key,value pairs of
+ annotations that might be added to the ingress object. e.g.
+ specify nginx as ingress.class
+ type: object
+ ingressTLS:
+ description: IngressTLS is useful if we need to declare SSL certificates
+ for the ingress object
+ items:
+ description: IngressTLS describes the transport layer security
+ associated with an ingress.
+ properties:
+ hosts:
+ description: |-
+ hosts is a list of hosts included in the TLS certificate. The values in
+ this list must match the name/s used in the tlsSecret. Defaults to the
+ wildcard host setting for the loadbalancer controller fulfilling this
+ Ingress, if left unspecified.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ secretName:
+ description: |-
+ secretName is the name of the secret used to terminate TLS traffic on
+ port 443. Field is left optional to allow TLS routing based on SNI
+ hostname alone. If the SNI host in a listener conflicts with the "Host"
+ header field used by an IngressRule, the SNI host is used for termination
+ and value of the "Host" header is used for routing.
+ type: string
+ type: object
+ type: array
+ serviceAnnotations:
+ additionalProperties:
+ type: string
+ description: ServiceAnnotations is a map of key,value pairs of
+ annotations that might be added to the service object.
+ type: object
+ serviceLabels:
+ additionalProperties:
+ type: string
+ description: ServiceLabels is a map of key,value pairs of labels
+ that might be added to the service object.
+ type: object
+ servicePort:
+ description: |-
+ ServicePort allows configuring the port at service level that might be different from the targetPort.
+ TargetPort should be the same as the one defined in spark.ui.port
+ format: int32
+ type: integer
+ servicePortName:
+ description: |-
+ ServicePortName allows configuring the name of the service port.
+ This may be useful for sidecar proxies like Envoy injected by Istio which require specific port names to treat traffic as proper HTTP.
+ Defaults to spark-driver-ui-port.
+ type: string
+ serviceType:
+ description: ServiceType allows configuring the type of the service.
+ Defaults to ClusterIP.
+ type: string
+ type: object
+ sparkVersion:
+ description: SparkVersion is the version of Spark the application
+ uses.
+ type: string
+ timeToLiveSeconds:
+ description: |-
+ TimeToLiveSeconds defines the Time-To-Live (TTL) duration in seconds for this SparkApplication
+ after its termination.
+ The SparkApplication object will be garbage collected if the current time is more than the
+ TimeToLiveSeconds since its termination.
+ format: int64
+ type: integer
+ type:
+ description: Type tells the type of the Spark application.
+ enum:
+ - Java
+ - Python
+ - Scala
+ - R
+ type: string
+ volumes:
+ description: Volumes is the list of Kubernetes volumes that can be
+ mounted by the driver and/or executors.
+ items:
+ description: Volume represents a named volume in a pod that may
+ be accessed by any container in the pod.
+ properties:
+ awsElasticBlockStore:
+ description: |-
+ awsElasticBlockStore represents an AWS Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk mount on + the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: None, + Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk in the + blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in the blob + storage + type: string + fsType: + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single blob + disk per storage account Managed: azure managed data + disk (only in managed availability set). defaults to shared' + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service mount + on the host and bind mount to the pod. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that contains + Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the host that + shares a pod's lifetime + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + path: + description: 'path is Optional: Used as the mounted root, + rather than the full Ceph tree, default is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should populate + this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. 
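+ # Editorial note (illustrative sketch, comment only): a configMap volume
+ # projecting a single key under this schema might look like the following;
+ # the volume and ConfigMap names are hypothetical.
+ #   volumes:
+ #     - name: spark-config
+ #       configMap:
+ #         name: my-spark-defaults
+ #         items:
+ #           - key: log4j.properties
+ #             path: log4j.properties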
+ items:
+ description: Maps a string key to a path within a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: optional specify whether the ConfigMap or its
+ keys must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ csi:
+ description: csi (Container Storage Interface) represents ephemeral
+ storage that is handled by certain external CSI drivers (Beta
+ feature).
+ properties:
+ driver:
+ description: |-
+ driver is the name of the CSI driver that handles this volume.
+ Consult with your admin for the correct name as registered in the cluster.
+ type: string
+ fsType:
+ description: |-
+ fsType to mount. Ex. "ext4", "xfs", "ntfs".
+ If not provided, the empty value is passed to the associated CSI driver
+ which will determine the default filesystem to apply.
+ type: string
+ nodePublishSecretRef:
+ description: |-
+ nodePublishSecretRef is a reference to the secret object containing
+ sensitive information to pass to the CSI driver to complete the CSI
+ NodePublishVolume and NodeUnpublishVolume calls.
+ This field is optional, and may be empty if no secret is required. If the
+ secret object contains more than one secret, all secret references are passed.
+ properties:
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ readOnly:
+ description: |-
+ readOnly specifies a read-only configuration for the volume.
+ Defaults to false (read/write).
+ type: boolean
+ volumeAttributes:
+ additionalProperties:
+ type: string
+ description: |-
+ volumeAttributes stores driver-specific properties that are passed to the CSI
+ driver. Consult your driver's documentation for supported values.
+ type: object
+ required:
+ - driver
+ type: object
+ downwardAPI:
+ description: downwardAPI represents downward API about the pod
+ that should populate this volume
+ properties:
+ defaultMode:
+ description: |-
+ Optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: + only annotations, labels, name and namespace are + supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative path + name of the file to be created. Must not be absolute + or contain the ''..'' path. Must be utf-8 encoded. + The first item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ ephemeral:
+ description: |-
+ ephemeral represents a volume that is handled by a cluster storage driver.
+ The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,
+ and deleted when the pod is removed.
+
+
+ Use this if:
+ a) the volume is only needed while the pod runs,
+ b) features of normal volumes like restoring from snapshot or capacity
+ tracking are needed,
+ c) the storage driver is specified through a storage class, and
+ d) the storage driver supports dynamic volume provisioning through
+ a PersistentVolumeClaim (see EphemeralVolumeSource for more
+ information on the connection between this volume type
+ and PersistentVolumeClaim).
+
+
+ Use PersistentVolumeClaim or one of the vendor-specific
+ APIs for volumes that persist for longer than the lifecycle
+ of an individual pod.
+
+
+ Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
+ be used that way - see the documentation of the driver for
+ more information.
+
+
+ A pod can use both types of ephemeral volumes and
+ persistent volumes at the same time.
+ properties:
+ volumeClaimTemplate:
+ description: |-
+ Will be used to create a stand-alone PVC to provision the volume.
+ The pod in which this EphemeralVolumeSource is embedded will be the
+ owner of the PVC, i.e. the PVC will be deleted together with the
+ pod. The name of the PVC will be `<pod name>-<volume name>` where
+ `<volume name>` is the name from the `PodSpec.Volumes` array
+ entry. Pod validation will reject the pod if the concatenated name
+ is not valid for a PVC (for example, too long).
+
+
+ An existing PVC with that name that is not owned by the pod
+ will *not* be used for the pod to avoid using an unrelated
+ volume by mistake. Starting the pod is then blocked until
+ the unrelated PVC is removed. If such a pre-created PVC is
+ meant to be used by the pod, the PVC has to be updated with an
+ owner reference to the pod once the pod exists. Normally
+ this should not be necessary, but it may be useful when
+ manually reconstructing a broken cluster.
+
+
+ This field is read-only and no changes will be made by Kubernetes
+ to the PVC after it has been created.
+
+
+ Required, must not be nil.
+ properties:
+ metadata:
+ description: |-
+ May contain labels and annotations that will be copied into the PVC
+ when creating it. No other fields are allowed and will be rejected during
+ validation.
+ type: object
+ spec:
+ description: |-
+ The specification for the PersistentVolumeClaim. The entire content is
+ copied unchanged into the PVC that gets created from this
+ template. The same fields as in a PersistentVolumeClaim
+ are also valid here.
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. 
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass
+ (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference
+ to the PersistentVolume backing this claim.
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ type: object
+ fc:
+ description: fc represents a Fibre Channel resource that is
+ attached to a kubelet's host machine and then exposed to the
+ pod.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ lun:
+ description: 'lun is Optional: FC target lun number'
+ format: int32
+ type: integer
+ readOnly:
+ description: |-
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ targetWWNs:
+ description: 'targetWWNs is Optional: FC target worldwide
+ names (WWNs)'
+ items:
 type: string
- storagePolicyID:
+ type: array
+ wwids:
+ description: |-
+ wwids Optional: FC volume world wide identifiers (wwids)
+ Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
+ items:
 type: string
- storagePolicyName:
+ type: array
+ type: object
+ flexVolume:
+ description: |-
+ flexVolume represents a generic volume resource that is
+ provisioned/attached using an exec based plugin.
+ properties:
+ driver:
+ description: driver is the name of the driver to use for
+ this volume.
+ type: string
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+ type: string
+ options:
+ additionalProperties:
 type: string
- volumePath:
+ description: 'options is Optional: this field holds extra
+ command options if any.'
+ type: object
+ readOnly:
+ description: |-
+ readOnly is Optional: defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is Optional: secretRef is reference to the secret object containing
+ sensitive information to pass to the plugin scripts. This may be
+ empty if no secret object is specified. If the secret object
+ contains more than one secret, all secrets are passed to the plugin
+ scripts.
+ properties:
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached to + a kubelet's host machine. This depends on the Flocker control + service being running + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. This + is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the specified + revision. + type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + More info: https://examples.k8s.io/volumes/glusterfs/README.md + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. 
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ path:
+ description: |-
+ path is the Glusterfs volume path.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: boolean
+ required:
+ - endpoints
+ - path
+ type: object
+ hostPath:
+ description: |-
+ hostPath represents a pre-existing file or directory on the host
+ machine that is directly exposed to the container. This is generally
+ used for system agents or other privileged things that are allowed
+ to see the host machine. Most containers will NOT need this.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ ---
+ TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not
+ mount host directories as read/write.
+ properties:
+ path:
+ description: |-
+ path of the directory on the host.
+ If the path is a symlink, it will follow the link to the real path.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ type:
+ description: |-
+ type for HostPath Volume
+ Defaults to ""
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ required:
+ - path
+ type: object
+ iscsi:
+ description: |-
+ iscsi represents an ISCSI Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://examples.k8s.io/volumes/iscsi/README.md
+ properties:
+ chapAuthDiscovery:
+ description: chapAuthDiscovery defines whether support iSCSI
+ Discovery CHAP authentication
+ type: boolean
+ chapAuthSession:
+ description: chapAuthSession defines whether support iSCSI
+ Session CHAP authentication
+ type: boolean
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ initiatorName:
+ description: |-
+ initiatorName is the custom iSCSI Initiator Name.
+ If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
+ <target portal>:<volume name> will be created for the connection.
+ type: string
+ iqn:
+ description: iqn is the target iSCSI Qualified Name.
+ type: string
+ iscsiInterface:
+ description: |-
+ iscsiInterface is the interface Name that uses an iSCSI transport.
+ Defaults to 'default' (tcp).
+ type: string
+ lun:
+ description: lun represents iSCSI Target Lun number.
+ format: int32
+ type: integer
+ portals:
+ description: |-
+ portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
+ is other than default (typically TCP ports 860 and 3260).
+ items:
 type: string
+ type: array
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
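+ # Editorial note (illustrative sketch, comment only): the
+ # ephemeral.volumeClaimTemplate support this patch adds to the chart CRDs
+ # might be used like the following; the volume name and storage class are
+ # hypothetical.
+ #   volumes:
+ #     - name: scratch
+ #       ephemeral:
+ #         volumeClaimTemplate:
+ #           spec:
+ #             accessModes: ["ReadWriteOnce"]
+ #             storageClassName: standard
+ #             resources:
+ #               requests:
+ #                 storage: 10Gi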
+ type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI target + and initiator authentication + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: |- + targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon Controller + persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume attached + and mounted on kubelets host machine + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
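+ # Editorial note (illustrative sketch, comment only): referencing an existing
+ # PVC via the persistentVolumeClaim schema above might look like the following;
+ # the volume and claim names are hypothetical.
+ #   volumes:
+ #     - name: checkpoint-dir
+ #       persistentVolumeClaim:
+ #         claimName: spark-checkpoints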
+ type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources secrets, + configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected along with + other supported volume types + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. 
If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root + to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap + data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. 
+ If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' + path. Must be utf-8 encoded. The first + item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + secret: + description: secret information about the secret data + to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: optional field specify whether the + Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information about + the serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. 
A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + description: quobyte represents a Quobyte mount on the host + that shares a pod's lifetime + properties: + group: + description: |- + group to map volume access to + Default is no group + type: string + readOnly: + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple entries are separated with commas) + which acts as the central registry for volumes + type: string + tenant: + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: |- + user to map volume access to + Defaults to serivceaccount user + type: string + volume: + description: volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + More info: https://examples.k8s.io/volumes/rbd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + image: + description: |- + image is the rados image name. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + pool: + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. 
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the ScaleIO + API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the ScaleIO + Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL communication + with Gateway, default false + type: boolean + storageMode: + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage Pool associated + with the protection domain. + type: string + system: + description: system is the name of the storage system as + configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. 
+ format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the Secret or + its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. 
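The `secret` source described above supports per-key projection and octal or decimal mode bits. A minimal sketch, with the Secret name and key invented for illustration:

```yaml
# Hypothetical fragment: project a single key of a Secret as a
# read-only file under the volume's mount point.
spec:
  volumes:
    - name: credentials
      secret:
        secretName: spark-secrets     # Secret in the pod's namespace
        defaultMode: 0400             # octal mode bits for created files
        items:
          - key: gcs-key.json         # key to project
            path: keys/gcs-key.json   # relative path under the mount point
```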
+ type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy Based + Management (SPBM) profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy Based + Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies vSphere + volume vmdk + type: string + required: + - volumePath + type: object required: - - state - type: object - driverInfo: - properties: - podName: - type: string - webUIAddress: - type: string - webUIIngressAddress: - type: string - webUIIngressName: - type: string - webUIPort: - format: int32 - type: integer - webUIServiceName: - type: string + - name type: object - executionAttempts: - format: int32 - type: integer - executorState: - additionalProperties: + type: array + required: + - driver + - executor + - sparkVersion + - type + type: object + status: + description: SparkApplicationStatus describes the current status of a + Spark application. + properties: + applicationState: + description: AppState tells the overall application state. + properties: + errorMessage: type: string - type: object - lastSubmissionAttemptTime: - format: date-time - nullable: true - type: string - sparkApplicationId: - type: string - submissionAttempts: - format: int32 - type: integer - submissionID: - type: string - terminationTime: - format: date-time - nullable: true + state: + description: ApplicationStateType represents the type of the current + state of an application. + type: string + required: + - state + type: object + driverInfo: + description: DriverInfo has information about the driver. + properties: + podName: + type: string + webUIAddress: + type: string + webUIIngressAddress: + type: string + webUIIngressName: + description: Ingress Details if an ingress for the UI was created. + type: string + webUIPort: + description: UI Details for the UI created via ClusterIP service + accessible from within the cluster. + format: int32 + type: integer + webUIServiceName: + type: string + type: object + executionAttempts: + description: |- + ExecutionAttempts is the total number of attempts to run a submitted application to completion. + Incremented upon each attempted run of the application and reset upon invalidation. + format: int32 + type: integer + executorState: + additionalProperties: + description: ExecutorState tells the current state of an executor. type: string - required: - - driverInfo - type: object - required: - - metadata - - spec - type: object -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] + description: ExecutorState records the state of executors by executor + Pod names. + type: object + lastSubmissionAttemptTime: + description: LastSubmissionAttemptTime is the time for the last application + submission attempt. 
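These status fields are written by the operator, not by the user. An illustrative shape of a populated status; all values here are invented:

```yaml
# Illustrative SparkApplication status as the operator might report it.
status:
  applicationState:
    state: RUNNING                # ApplicationStateType
  driverInfo:
    podName: spark-pi-driver
    webUIPort: 4040               # ClusterIP service port for the Spark UI
    webUIServiceName: spark-pi-ui-svc
  executionAttempts: 1
  executorState:
    spark-pi-exec-1: RUNNING      # map keyed by executor pod name
```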
+ format: date-time + nullable: true + type: string + sparkApplicationId: + description: SparkApplicationID is set by the spark-distribution(via + spark.app.id config) on the driver and executor pods + type: string + submissionAttempts: + description: |- + SubmissionAttempts is the total number of attempts to submit an application to run. + Incremented upon each attempted submission of the application and reset upon invalidation and rerun. + format: int32 + type: integer + submissionID: + description: SubmissionID is a unique ID of the current submission + of the application. + type: string + terminationTime: + description: CompletionTime is the time when the application runs + to completion if it does. + format: date-time + nullable: true + type: string + required: + - driverInfo + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/sparkoperator.k8s.io_scheduledsparkapplications.yaml b/config/crd/bases/sparkoperator.k8s.io_scheduledsparkapplications.yaml new file mode 100644 index 0000000000..b37b7a0008 --- /dev/null +++ b/config/crd/bases/sparkoperator.k8s.io_scheduledsparkapplications.yaml @@ -0,0 +1,11611 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.kubernetes.io: https://github.com/kubeflow/spark-operator/pull/1298 + controller-gen.kubebuilder.io/version: v0.15.0 + name: scheduledsparkapplications.sparkoperator.k8s.io +spec: + group: sparkoperator.k8s.io + names: + kind: ScheduledSparkApplication + listKind: ScheduledSparkApplicationList + plural: scheduledsparkapplications + shortNames: + - scheduledsparkapp + singular: scheduledsparkapplication + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.schedule + name: Schedule + type: string + - jsonPath: .spec.suspend + name: Suspend + type: string + - jsonPath: .status.lastRun + name: Last Run + type: date + - jsonPath: .status.lastRunName + name: Last Run Name + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + properties: + concurrencyPolicy: + description: ConcurrencyPolicy is the policy governing concurrent + SparkApplication runs. + type: string + failedRunHistoryLimit: + description: |- + FailedRunHistoryLimit is the number of past failed runs of the application to keep. + Defaults to 1. + format: int32 + type: integer + schedule: + description: Schedule is a cron schedule on which the application + should run. + type: string + successfulRunHistoryLimit: + description: |- + SuccessfulRunHistoryLimit is the number of past successful runs of the application to keep. + Defaults to 1. 
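Taken together with the `suspend` and `template` fields that follow, a minimal ScheduledSparkApplication might look like the sketch below. The name, schedule, and jar path are hypothetical; `template` accepts a full SparkApplication spec, including the `deps` block described next:

```yaml
apiVersion: sparkoperator.k8s.io/v1beta2
kind: ScheduledSparkApplication
metadata:
  name: spark-pi-nightly            # hypothetical name
spec:
  schedule: "0 2 * * *"             # cron expression: 02:00 every day
  concurrencyPolicy: Forbid         # don't start a run while one is active
  successfulRunHistoryLimit: 3      # keep the last 3 successful runs
  failedRunHistoryLimit: 1          # keep only the last failed run
  template:                         # regular SparkApplication spec
    type: Scala
    mode: cluster
    sparkVersion: 3.5.0
    deps:
      jars:
        - local:///opt/spark/examples/jars/spark-examples.jar  # hypothetical
```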
+ format: int32 + type: integer + suspend: + description: |- + Suspend is a flag telling the controller to suspend subsequent runs of the application if set to true. + Defaults to false. + type: boolean + template: + description: Template is a template from which SparkApplication instances + can be created. + properties: + arguments: + description: Arguments is a list of arguments to be passed to + the application. + items: + type: string + type: array + batchScheduler: + description: BatchScheduler configures which batch scheduler will + be used for scheduling + type: string + batchSchedulerOptions: + description: BatchSchedulerOptions provides fine-grained control + on how to batch scheduling. + properties: + priorityClassName: + description: PriorityClassName stands for the name of k8s + PriorityClass resource, it's being used in Volcano batch + scheduler. + type: string + queue: + description: Queue stands for the resource queue which the + application belongs to, it's being used in Volcano batch + scheduler. + type: string + resources: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Resources stands for the resource list custom request for. Usually it is used to define the lower-bound limit. + If specified, volcano scheduler will consider it as the resources requested. + type: object + type: object + deps: + description: Deps captures all possible types of dependencies + of a Spark application. + properties: + excludePackages: + description: |- + ExcludePackages is a list of "groupId:artifactId", to exclude while resolving the + dependencies provided in Packages to avoid dependency conflicts. + items: + type: string + type: array + files: + description: Files is a list of files the Spark application + depends on. + items: + type: string + type: array + jars: + description: Jars is a list of JAR files the Spark application + depends on. + items: + type: string + type: array + packages: + description: |- + Packages is a list of maven coordinates of jars to include on the driver and executor + classpaths. This will search the local maven repo, then maven central and any additional + remote repositories given by the "repositories" option. + Each package should be of the form "groupId:artifactId:version". + items: + type: string + type: array + pyFiles: + description: PyFiles is a list of Python files the Spark application + depends on. + items: + type: string + type: array + repositories: + description: |- + Repositories is a list of additional remote repositories to search for the maven coordinate + given with the "packages" option. + items: + type: string + type: array + type: object + driver: + description: Driver is the driver specification. + properties: + affinity: + description: Affinity specifies the affinity/anti-affinity + settings for the pod. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in the + range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. 
+ The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. 
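This affinity schema is an embedding of core/v1 `Affinity`, so standard Kubernetes scheduling constraints apply unchanged. A sketch combining a required node selector with a preferred anti-affinity spread; the `node-pool` label is hypothetical, and the sketch assumes the `spark-role` label the operator applies to driver pods:

```yaml
# Hypothetical driver affinity: require nodes labeled node-pool=spark,
# and prefer not to co-locate with other Spark drivers.
spec:
  driver:
    affinity:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:              # terms are ORed
            - matchExpressions:           # requirements in a term are ANDed
                - key: node-pool          # hypothetical node label
                  operator: In
                  values: ["spark"]
      podAntiAffinity:
        preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100                   # range 1-100
            podAffinityTerm:
              topologyKey: kubernetes.io/hostname   # spread across nodes
              labelSelector:
                matchLabels:
                  spark-role: driver
```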
+ properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. 
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. 
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. 
+ Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. 
This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + annotations: + additionalProperties: + type: string + description: Annotations are the Kubernetes annotations to + be added to the pod. + type: object + configMaps: + description: ConfigMaps carries information of other ConfigMaps + to add to the pod. + items: + description: NamePath is a pair of a name and a path to + which the named objects should be mounted to. + properties: + name: + type: string + path: + type: string + required: + - name + - path + type: object + type: array + coreLimit: + description: |- + CoreLimit specifies a hard limit on CPU cores for the pod. + Optional + type: string + coreRequest: + description: |- + CoreRequest is the physical CPU core request for the driver. + Maps to `spark.kubernetes.driver.request.cores` that is available since Spark 3.0. + type: string + cores: + description: Cores maps to `spark.driver.cores` or `spark.executor.cores` + for the driver and executors, respectively. + format: int32 + minimum: 1 + type: integer + dnsConfig: + description: DnsConfig dns settings for the pod, following + the Kubernetes specifications. + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: + type: string + type: array + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver + options of a pod. + properties: + name: + description: Required. + type: string + value: + type: string + type: object + type: array + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + type: object + env: + description: Env carries the environment variables to add + to the pod. 
+ items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: EnvFrom is a list of sources to populate environment + variables in the container. 
+ items: + description: EnvFromSource represents the source of a set + of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each + key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + envSecretKeyRefs: + additionalProperties: + description: NameKey represents the name and key of a SecretKeyRef. + properties: + key: + type: string + name: + type: string + required: + - key + - name + type: object + description: |- + EnvSecretKeyRefs holds a mapping from environment variable names to SecretKeyRefs. + Deprecated. Consider using `env` instead. + type: object + envVars: + additionalProperties: + type: string + description: |- + EnvVars carries the environment variables to add to the pod. + Deprecated. Consider using `env` instead. + type: object + gpu: + description: GPU specifies GPU requirement for the pod. + properties: + name: + description: 'Name is GPU resource name, such as: nvidia.com/gpu + or amd.com/gpu' + type: string + quantity: + description: Quantity is the number of GPUs to request + for driver or executor. + format: int64 + type: integer + required: + - name + - quantity + type: object + hostAliases: + description: HostAliases settings for the pod, following the + Kubernetes specifications. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + ip: + description: IP address of the host file entry. + type: string + type: object + type: array + hostNetwork: + description: HostNetwork indicates whether to request host + networking for the pod or not. + type: boolean + image: + description: Image is the container image to use. Overrides + Spec.Image if set. + type: string + initContainers: + description: InitContainers is a list of init-containers that + run to completion before the main Spark container. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. 
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. 
+ properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. 
Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
+ The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. 
+ type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. 
+ If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + javaOptions: + description: |- + JavaOptions is a string of extra JVM options to pass to the driver. For instance, + GC settings or other logging. + type: string + kubernetesMaster: + description: |- + KubernetesMaster is the URL of the Kubernetes master used by the driver to manage executor pods and + other Kubernetes resources. Default to https://kubernetes.default.svc. + type: string + labels: + additionalProperties: + type: string + description: Labels are the Kubernetes labels to be added + to the pod. + type: object + lifecycle: + description: Lifecycle for running preStop or postStart commands + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to + perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the + container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. 
+ properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to + perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the + container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' 
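+          # NOTE: as the descriptions above state, tcpSocket is retained only for
+          # backward compatibility and is not a supported LifecycleHandler; prefer
+          # exec or httpGet hooks. A minimal, illustrative driver hook (values are
+          # examples, not defaults):
+          #   driver:
+          #     lifecycle:
+          #       preStop:
+          #         exec:
+          #           command: ["/bin/sh", "-c", "sleep 5"]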
+ type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + memory: + description: Memory is the amount of memory to request for + the pod. + type: string + memoryOverhead: + description: MemoryOverhead is the amount of off-heap memory + to allocate in cluster mode, in MiB unless otherwise specified. + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is the Kubernetes node selector to be added to the driver and executor pods. + This field is mutually exclusive with nodeSelector at SparkApplication level (which will be deprecated). + type: object + podName: + description: |- + PodName is the name of the driver pod that the user creates. This is used for the + in-cluster client mode in which the user creates a client pod where the driver of + the user application runs. It's an error to set this field if Mode is not + in-cluster-client. + pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*' + type: string + podSecurityContext: + description: PodSecurityContext specifies the PodSecurityContext + to apply. + properties: + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in addition + to the container's primary GID, the fsGroup (if specified), and group memberships + defined in the container image for the uid of the container process. If unspecified, + no additional groups are added to any container. Note that group memberships + defined in the container image for the uid of the container process are still effective, + even if they are not included in this list. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be + set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. 
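+          # Illustrative pod-level security settings for the driver; the user and
+          # group IDs below are example values, not defaults:
+          #   driver:
+          #     podSecurityContext:
+          #       runAsUser: 185
+          #       fsGroup: 185
+          #       seccompProfile:
+          #         type: RuntimeDefault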
+ properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + ports: + description: Ports settings for the pods, following the Kubernetes + specifications. + items: + description: Port represents the port definition in the + pods objects. + properties: + containerPort: + format: int32 + type: integer + name: + type: string + protocol: + type: string + required: + - containerPort + - name + - protocol + type: object + type: array + schedulerName: + description: SchedulerName specifies the scheduler that will + be used for scheduling + type: string + secrets: + description: Secrets carries information of secrets to add + to the pod. + items: + description: SecretInfo captures information of a secret. + properties: + name: + type: string + path: + type: string + secretType: + description: SecretType tells the type of a secret. + type: string + required: + - name + - path + - secretType + type: object + type: array + securityContext: + description: SecurityContext specifies the container's SecurityContext + to apply. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. 
+ The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. 
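+          # Illustrative container-level securityContext for the driver; as described
+          # above, fields set here take precedence over the equivalent
+          # podSecurityContext fields (example values):
+          #   driver:
+          #     securityContext:
+          #       runAsNonRoot: true
+          #       allowPrivilegeEscalation: false
+          #       capabilities:
+          #         drop: ["ALL"]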
+ type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: ServiceAccount is the name of the custom Kubernetes + service account used by the pod. + type: string + serviceAnnotations: + additionalProperties: + type: string + description: |- + ServiceAnnotations defines the annotations to be added to the Kubernetes headless service used by + executors to connect to the driver. + type: object + serviceLabels: + additionalProperties: + type: string + description: |- + ServiceLabels defines the labels to be added to the Kubernetes headless service used by + executors to connect to the driver. + type: object + shareProcessNamespace: + description: ShareProcessNamespace settings for the pod, following + the Kubernetes specifications. + type: boolean + sidecars: + description: Sidecars is a list of sidecar containers that + run along side the main Spark container. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. 
If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
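+          # Illustrative sidecar environment variable sourced from the container's
+          # own resource limits via resourceFieldRef (name, image, and divisor are
+          # example values; containerName may be omitted for env vars, per the
+          # description above):
+          #   sidecars:
+          #   - name: log-shipper
+          #     image: fluent/fluent-bit
+          #     env:
+          #     - name: MEM_LIMIT_MIB
+          #       valueFrom:
+          #         resourceFieldRef:
+          #           resource: limits.memory
+          #           divisor: 1Mi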
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. 
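+          # Illustrative postStart hook on a sidecar container (command is an
+          # example, not a default):
+          #   lifecycle:
+          #     postStart:
+          #       exec:
+          #         command: ["/bin/sh", "-c", "echo started > /tmp/ready"]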
+ properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. 
Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. 
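+          # Illustrative exec-based liveness probe for a sidecar (command and
+          # timings are example values):
+          #   livenessProbe:
+          #     exec:
+          #       command: ["pgrep", "fluent-bit"]
+          #     initialDelaySeconds: 10
+          #     periodSeconds: 30
+          #     failureThreshold: 3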
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
+ The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. 
+ type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
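+          # Illustrative httpGet startup probe (path, port, and header are example
+          # values); a generous failureThreshold * periodSeconds window covers a
+          # slow container start-up, as described above:
+          #   startupProbe:
+          #     httpGet:
+          #       path: /healthz
+          #       port: 8080
+          #       httpHeaders:
+          #       - name: X-Probe
+          #         value: startup
+          #     failureThreshold: 30
+          #     periodSeconds: 10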
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. 
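# As a reference point, the startupProbe schema above might be exercised on a
# sidecar container roughly as follows (the path, port, and thresholds are
# hypothetical values, not defaults):
#   startupProbe:
#     httpGet:
#       path: /healthz
#       port: 8080
#     failureThreshold: 30
#     periodSeconds: 10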
If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. 
+ If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + terminationGracePeriodSeconds: + description: Termination grace period seconds for the pod + format: int64 + type: integer + tolerations: + description: Tolerations specifies the tolerations listed + in ".spec.tolerations" to be applied to the pod. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple <key,value,effect> using the matching operator <operator>. + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + volumeMounts: + description: VolumeMounts specifies the volumes listed in + ".spec.volumes" to mount into the main container's filesystem. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive.
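# A minimal sketch tying the terminationGracePeriodSeconds, tolerations, and
# volumeMounts fields above back to a SparkApplication manifest (the volume and
# taint names are hypothetical; the volume itself is declared under .spec.volumes):
#   spec:
#     volumes:
#       - name: scratch
#         emptyDir: {}
#     driver:
#       terminationGracePeriodSeconds: 60
#       tolerations:
#         - key: dedicated
#           operator: Equal
#           value: spark
#           effect: NoSchedule
#       volumeMounts:
#         - name: scratch
#           mountPath: /tmp/scratch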
+ type: string + required: + - mountPath + - name + type: object + type: array + type: object + driverIngressOptions: + description: DriverIngressOptions allows configuring the Service + and the Ingress to expose ports inside the Spark driver + items: + description: DriverIngressConfiguration is for driver ingress + specific configuration parameters. + properties: + ingressAnnotations: + additionalProperties: + type: string + description: IngressAnnotations is a map of key,value pairs + of annotations that might be added to the ingress object. + e.g. specify nginx as ingress.class + type: object + ingressTLS: + description: IngressTLS is useful if we need to declare SSL + certificates for the ingress object + items: + description: IngressTLS describes the transport layer + security associated with an ingress. + properties: + hosts: + description: |- + hosts is a list of hosts included in the TLS certificate. The values in + this list must match the name/s used in the tlsSecret. Defaults to the + wildcard host setting for the loadbalancer controller fulfilling this + Ingress, if left unspecified. + items: + type: string + type: array + x-kubernetes-list-type: atomic + secretName: + description: |- + secretName is the name of the secret used to terminate TLS traffic on + port 443. Field is left optional to allow TLS routing based on SNI + hostname alone. If the SNI host in a listener conflicts with the "Host" + header field used by an IngressRule, the SNI host is used for termination + and value of the "Host" header is used for routing. + type: string + type: object + type: array + ingressURLFormat: + description: IngressURLFormat is the URL for the ingress. + type: string + serviceAnnotations: + additionalProperties: + type: string + description: ServiceAnnotations is a map of key,value pairs + of annotations that might be added to the service object. + type: object + serviceLabels: + additionalProperties: + type: string + description: ServiceLabels is a map of key,value pairs of + labels that might be added to the service object. + type: object + servicePort: + description: ServicePort allows configuring the port at + the service level that might be different from the targetPort. + format: int32 + type: integer + servicePortName: + description: |- + ServicePortName allows configuring the name of the service port. + This may be useful for sidecar proxies like Envoy injected by Istio which require specific port names to treat traffic as proper HTTP. + type: string + serviceType: + description: ServiceType allows configuring the type of + the service. Defaults to ClusterIP. + type: string + required: + - servicePort + - servicePortName + type: object + type: array + dynamicAllocation: + description: |- + DynamicAllocation configures dynamic allocation that becomes available for the Kubernetes + scheduler backend since Spark 3.0. + properties: + enabled: + description: Enabled controls whether dynamic allocation is + enabled or not. + type: boolean + initialExecutors: + description: |- + InitialExecutors is the initial number of executors to request. If .spec.executor.instances + is also set, the initial number of executors is set to the bigger of that and this option. + format: int32 + type: integer + maxExecutors: + description: MaxExecutors is the upper bound for the number + of executors if dynamic allocation is enabled. + format: int32 + type: integer + minExecutors: + description: MinExecutors is the lower bound for the number + of executors if dynamic allocation is enabled.
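# A minimal sketch of the driverIngressOptions and dynamicAllocation blocks
# described above, assuming an nginx ingress controller and Spark 3.x (the host
# name pattern, port, and executor counts are hypothetical):
#   spec:
#     driverIngressOptions:
#       - servicePort: 4040
#         servicePortName: spark-ui
#         ingressURLFormat: "{{$appName}}.example.com"
#         ingressAnnotations:
#           kubernetes.io/ingress.class: nginx
#     dynamicAllocation:
#       enabled: true
#       initialExecutors: 2
#       minExecutors: 2
#       maxExecutors: 10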
+ format: int32 + type: integer + shuffleTrackingTimeout: + description: |- + ShuffleTrackingTimeout controls the timeout in milliseconds for executors that are holding + shuffle data if shuffle tracking is enabled (true by default if dynamic allocation is enabled). + format: int64 + type: integer + type: object + executor: + description: Executor is the executor specification. + properties: + affinity: + description: Affinity specifies the affinity/anti-affinity + settings for the pod. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in the + range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). 
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key <topologyKey> matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration.
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, + etc. as some other pod(s)). 
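# To ground the podAffinity block that closes just above (before the
# podAntiAffinity rules that follow): a hypothetical executor podAffinity that
# co-locates executors with their driver by label (the label key/value pair is
# illustrative only):
#   spec:
#     executor:
#       affinity:
#         podAffinity:
#           requiredDuringSchedulingIgnoredDuringExecution:
#             - labelSelector:
#                 matchLabels:
#                   spark-role: driver
#               topologyKey: kubernetes.io/hostname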
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key <topologyKey> matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration.
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + annotations: + additionalProperties: + type: string + description: Annotations are the Kubernetes annotations to + be added to the pod. + type: object + configMaps: + description: ConfigMaps carries information of other ConfigMaps + to add to the pod. 
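# For reference, the annotations field described above and the configMaps field
# whose item schema follows might be used together like this (the annotation,
# ConfigMap name, and mount path are hypothetical):
#   spec:
#     executor:
#       annotations:
#         prometheus.io/scrape: "true"
#       configMaps:
#         - name: app-config
#           path: /etc/app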
+ items: + description: NamePath is a pair of a name and a path to + which the named objects should be mounted. + properties: + name: + type: string + path: + type: string + required: + - name + - path + type: object + type: array + coreLimit: + description: |- + CoreLimit specifies a hard limit on CPU cores for the pod. + Optional + type: string + coreRequest: + description: |- + CoreRequest is the physical CPU core request for the executors. + Maps to `spark.kubernetes.executor.request.cores` that is available since Spark 2.4. + type: string + cores: + description: Cores maps to `spark.driver.cores` or `spark.executor.cores` + for the driver and executors, respectively. + format: int32 + minimum: 1 + type: integer + deleteOnTermination: + description: |- + DeleteOnTermination specifies whether executor pods should be deleted in case of failure or normal termination. + Maps to `spark.kubernetes.executor.deleteOnTermination` that is available since Spark 3.0. + type: boolean + dnsConfig: + description: DnsConfig specifies the DNS settings for the pod, following + the Kubernetes specifications. + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: + type: string + type: array + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver + options of a pod. + properties: + name: + description: Required. + type: string + value: + type: string + type: object + type: array + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + type: object + env: + description: Env carries the environment variables to add + to the pod. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: EnvFrom is a list of sources to populate environment + variables in the container. + items: + description: EnvFromSource represents the source of a set + of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each + key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + envSecretKeyRefs: + additionalProperties: + description: NameKey represents the name and key of a SecretKeyRef. 
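# A minimal sketch of the env and envFrom fields described above (the Secret and
# ConfigMap names are hypothetical; per the schema, envSecretKeyRefs is
# deprecated in favor of env):
#   spec:
#     executor:
#       env:
#         - name: LOG_LEVEL
#           value: info
#         - name: DB_PASSWORD
#           valueFrom:
#             secretKeyRef:
#               name: db-credentials
#               key: password
#       envFrom:
#         - configMapRef:
#             name: common-env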
+ properties: + key: + type: string + name: + type: string + required: + - key + - name + type: object + description: |- + EnvSecretKeyRefs holds a mapping from environment variable names to SecretKeyRefs. + Deprecated. Consider using `env` instead. + type: object + envVars: + additionalProperties: + type: string + description: |- + EnvVars carries the environment variables to add to the pod. + Deprecated. Consider using `env` instead. + type: object + gpu: + description: GPU specifies GPU requirement for the pod. + properties: + name: + description: 'Name is GPU resource name, such as: nvidia.com/gpu + or amd.com/gpu' + type: string + quantity: + description: Quantity is the number of GPUs to request + for driver or executor. + format: int64 + type: integer + required: + - name + - quantity + type: object + hostAliases: + description: HostAliases settings for the pod, following the + Kubernetes specifications. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + ip: + description: IP address of the host file entry. + type: string + type: object + type: array + hostNetwork: + description: HostNetwork indicates whether to request host + networking for the pod or not. + type: boolean + image: + description: Image is the container image to use. Overrides + Spec.Image if set. + type: string + initContainers: + description: InitContainers is a list of init-containers that + run to completion before the main Spark container. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. 
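# The gpu and hostAliases fields above map onto a manifest roughly as follows
# (the resource name, quantity, IP address, and hostname are hypothetical):
#   spec:
#     executor:
#       gpu:
#         name: nvidia.com/gpu
#         quantity: 1
#       hostAliases:
#         - ip: "10.1.2.3"
#           hostnames:
#             - metadata.internal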
+ type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. 
When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. 
+ This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. 
+ This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. 
+ For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. 
+ Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. 
This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. 
+ type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. 
+ properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + instances: + description: Instances is the number of executor instances. + format: int32 + minimum: 1 + type: integer + javaOptions: + description: |- + JavaOptions is a string of extra JVM options to pass to the executors. For instance, + GC settings or other logging. + type: string + labels: + additionalProperties: + type: string + description: Labels are the Kubernetes labels to be added + to the pod. + type: object + lifecycle: + description: Lifecycle for running preStop or postStart commands + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to + perform. 
+ properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the + container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to + perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. 
You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the + container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + memory: + description: Memory is the amount of memory to request for + the pod. + type: string + memoryOverhead: + description: MemoryOverhead is the amount of off-heap memory + to allocate in cluster mode, in MiB unless otherwise specified. + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is the Kubernetes node selector to be added to the driver and executor pods. + This field is mutually exclusive with nodeSelector at SparkApplication level (which will be deprecated). + type: object + podSecurityContext: + description: PodSecurityContext specifies the PodSecurityContext + to apply. + properties: + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. 
+ Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in addition + to the container's primary GID, the fsGroup (if specified), and group memberships + defined in the container image for the uid of the container process. 
If unspecified, + no additional groups are added to any container. Note that group memberships + defined in the container image for the uid of the container process are still effective, + even if they are not included in this list. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be + set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + ports: + description: Ports settings for the pods, following the Kubernetes + specifications. + items: + description: Port represents the port definition in the + pods objects. + properties: + containerPort: + format: int32 + type: integer + name: + type: string + protocol: + type: string + required: + - containerPort + - name + - protocol + type: object + type: array + schedulerName: + description: SchedulerName specifies the scheduler that will + be used for scheduling + type: string + secrets: + description: Secrets carries information of secrets to add + to the pod. + items: + description: SecretInfo captures information of a secret. + properties: + name: + type: string + path: + type: string + secretType: + description: SecretType tells the type of a secret. + type: string + required: + - name + - path + - secretType + type: object + type: array + securityContext: + description: SecurityContext specifies the container's SecurityContext + to apply. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. 
This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. 
+ type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: ServiceAccount is the name of the custom Kubernetes + service account used by the pod. + type: string + shareProcessNamespace: + description: ShareProcessNamespace settings for the pod, following + the Kubernetes specifications. + type: boolean + sidecars: + description: Sidecars is a list of sidecar containers that + run along side the main Spark container. + items: + description: A single application container that you want + to run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. 
Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. 
+ properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. 
Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the + request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP + server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port + in a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external + port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
+ The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that + applies to the container. + type: string + role: + description: Role is a SELinux role label that + applies to the container. + type: string + type: + description: Type is a SELinux type label that + applies to the container. + type: string + user: + description: User is a SELinux user label that + applies to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. 
+ type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving + a GRPC port. + properties: + port: + description: Port number of the gRPC service. + Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container process that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a + raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of + the container that the device will be mapped + to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a + Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory.
If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + terminationGracePeriodSeconds: + description: Termination grace period seconds for the pod + format: int64 + type: integer + tolerations: + description: Tolerations specifies the tolerations listed + in ".spec.tolerations" to be applied to the pod. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple <key,value,effect> using the matching operator <operator>. + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + volumeMounts: + description: VolumeMounts specifies the volumes listed in + ".spec.volumes" to mount into the main container's filesystem. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + type: object + failureRetries: + description: |- + FailureRetries is the number of times to retry a failed application before giving up.
This is best effort and actual retry attempts can be >= the value specified. + format: int32 + type: integer + hadoopConf: + additionalProperties: + type: string + description: |- + HadoopConf carries user-specified Hadoop configuration properties as they would use the "--conf" option + in spark-submit. The SparkApplication controller automatically adds prefix "spark.hadoop." to Hadoop + configuration properties. + type: object + hadoopConfigMap: + description: |- + HadoopConfigMap carries the name of the ConfigMap containing Hadoop configuration files such as core-site.xml. + The controller will add environment variable HADOOP_CONF_DIR to the path where the ConfigMap is mounted to. + type: string + image: + description: |- + Image is the container image for the driver, executor, and init-container. Any custom container images for the + driver, executor, or init-container take precedence over this. + type: string + imagePullPolicy: + description: ImagePullPolicy is the image pull policy for the + driver, executor, and init-container. + type: string + imagePullSecrets: + description: ImagePullSecrets is the list of image-pull secrets. + items: + type: string + type: array + mainApplicationFile: + description: MainFile is the path to a bundled JAR, Python, or + R file of the application. + type: string + mainClass: + description: |- + MainClass is the fully-qualified main class of the Spark application. + This only applies to Java/Scala Spark applications. + type: string + memoryOverheadFactor: + description: |- + This sets the Memory Overhead Factor that will allocate memory to non-JVM memory. + For JVM-based jobs this value will default to 0.10, for non-JVM jobs 0.40. Value of this field will + be overridden by `Spec.Driver.MemoryOverhead` and `Spec.Executor.MemoryOverhead` if they are set. + type: string + mode: + description: Mode is the deployment mode of the Spark application. + enum: + - cluster + - client + type: string + monitoring: + description: Monitoring configures how monitoring is handled. + properties: + exposeDriverMetrics: + description: ExposeDriverMetrics specifies whether to expose + metrics on the driver. + type: boolean + exposeExecutorMetrics: + description: ExposeExecutorMetrics specifies whether to expose + metrics on the executors. + type: boolean + metricsProperties: + description: |- + MetricsProperties is the content of a custom metrics.properties for configuring the Spark metric system. + If not specified, the content in spark-docker/conf/metrics.properties will be used. + type: string + metricsPropertiesFile: + description: |- + MetricsPropertiesFile is the container local path of file metrics.properties for configuring + the Spark metric system. If not specified, value /etc/metrics/conf/metrics.properties will be used. + type: string + prometheus: + description: Prometheus is for configuring the Prometheus + JMX exporter. + properties: + configFile: + description: |- + ConfigFile is the path to the custom Prometheus configuration file provided in the Spark image. + ConfigFile takes precedence over Configuration, which is shown below. + type: string + configuration: + description: |- + Configuration is the content of the Prometheus configuration needed by the Prometheus JMX exporter. + If not specified, the content in spark-docker/conf/prometheus.yaml will be used. + Configuration has no effect if ConfigFile is set. + type: string + jmxExporterJar: + description: JmxExporterJar is the path to the Prometheus + JMX exporter jar in the container.
+ type: string
+ port:
+ description: |-
+ Port is the port of the HTTP server run by the Prometheus JMX exporter.
+ If not specified, 8090 will be used as the default.
+ format: int32
+ maximum: 49151
+ minimum: 1024
+ type: integer
+ portName:
+ description: |-
+ PortName is the port name of the Prometheus JMX exporter port.
+ If not specified, jmx-exporter will be used as the default.
+ type: string
+ required:
+ - jmxExporterJar
+ type: object
+ required:
+ - exposeDriverMetrics
+ - exposeExecutorMetrics
+ type: object
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: |-
+ NodeSelector is the Kubernetes node selector to be added to the driver and executor pods.
+ This field is mutually exclusive with nodeSelector at podSpec level (driver or executor).
+ This field will be deprecated in future versions (at SparkApplicationSpec level).
+ type: object
+ proxyUser:
+ description: |-
+ ProxyUser specifies the user to impersonate when submitting the application.
+ It maps to the command-line flag "--proxy-user" in spark-submit.
+ type: string
+ pythonVersion:
+ description: |-
+ This sets the major Python version of the docker
+ image used to run the driver and executor containers. Can either be 2 or 3, default 2.
+ enum:
+ - "2"
+ - "3"
+ type: string
+ restartPolicy:
+ description: RestartPolicy defines the policy on whether and under
+ which conditions the controller should restart an application.
+ properties:
+ onFailureRetries:
+ description: OnFailureRetries is the number of times to retry
+ running an application before giving up.
+ format: int32
+ minimum: 0
+ type: integer
+ onFailureRetryInterval:
+ description: OnFailureRetryInterval is the interval in seconds
+ between retries on failed runs.
+ format: int64
+ minimum: 1
+ type: integer
+ onSubmissionFailureRetries:
+ description: |-
+ OnSubmissionFailureRetries is the number of times to retry submitting an application before giving up.
+ This is best effort and actual retry attempts can be >= the value specified due to caching.
+ These are required if RestartPolicy is OnFailure.
+ format: int32
+ minimum: 0
+ type: integer
+ onSubmissionFailureRetryInterval:
+ description: OnSubmissionFailureRetryInterval is the interval
+ in seconds between retries on failed submissions.
+ format: int64
+ minimum: 1
+ type: integer
+ type:
+ description: Type specifies the RestartPolicyType.
+ enum:
+ - Never
+ - Always
+ - OnFailure
+ type: string
+ type: object
+ retryInterval:
+ description: RetryInterval is the unit of intervals in seconds
+ between submission retries.
+ format: int64
+ type: integer
+ sparkConf:
+ additionalProperties:
+ type: string
+ description: |-
+ SparkConf carries user-specified Spark configuration properties as they would use the "--conf" option in
+ spark-submit.
+ type: object
+ sparkConfigMap:
+ description: |-
+ SparkConfigMap carries the name of the ConfigMap containing Spark configuration files such as log4j.properties.
+ The controller will add environment variable SPARK_CONF_DIR to the path where the ConfigMap is mounted to.
+ type: string
+ sparkUIOptions:
+ description: SparkUIOptions allows configuring the Service and
+ the Ingress to expose the sparkUI
+ properties:
+ ingressAnnotations:
+ additionalProperties:
+ type: string
+ description: IngressAnnotations is a map of key,value pairs
+ of annotations that might be added to the ingress object.
+ e.g.
specify nginx as ingress.class
+ type: object
+ ingressTLS:
+ description: TlsHosts is useful if we need to declare SSL
+ certificates for the ingress object
+ items:
+ description: IngressTLS describes the transport layer security
+ associated with an ingress.
+ properties:
+ hosts:
+ description: |-
+ hosts is a list of hosts included in the TLS certificate. The values in
+ this list must match the name/s used in the tlsSecret. Defaults to the
+ wildcard host setting for the loadbalancer controller fulfilling this
+ Ingress, if left unspecified.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ secretName:
+ description: |-
+ secretName is the name of the secret used to terminate TLS traffic on
+ port 443. Field is left optional to allow TLS routing based on SNI
+ hostname alone. If the SNI host in a listener conflicts with the "Host"
+ header field used by an IngressRule, the SNI host is used for termination
+ and value of the "Host" header is used for routing.
+ type: string
+ type: object
+ type: array
+ serviceAnnotations:
+ additionalProperties:
+ type: string
+ description: ServiceAnnotations is a map of key,value pairs
+ of annotations that might be added to the service object.
+ type: object
+ serviceLabels:
+ additionalProperties:
+ type: string
+ description: ServiceLabels is a map of key,value pairs of
+ labels that might be added to the service object.
+ type: object
+ servicePort:
+ description: |-
+ ServicePort allows configuring the port at service level that might be different from the targetPort.
+ TargetPort should be the same as the one defined in spark.ui.port
+ format: int32
+ type: integer
+ servicePortName:
+ description: |-
+ ServicePortName allows configuring the name of the service port.
+ This may be useful for sidecar proxies like Envoy injected by Istio which require specific port names to treat traffic as proper HTTP.
+ Defaults to spark-driver-ui-port.
+ type: string
+ serviceType:
+ description: ServiceType allows configuring the type of the
+ service. Defaults to ClusterIP.
+ type: string
+ type: object
+ sparkVersion:
+ description: SparkVersion is the version of Spark the application
+ uses.
+ type: string
+ timeToLiveSeconds:
+ description: |-
+ TimeToLiveSeconds defines the Time-To-Live (TTL) duration in seconds for this SparkApplication
+ after its termination.
+ The SparkApplication object will be garbage collected if the current time is more than the
+ TimeToLiveSeconds since its termination.
+ format: int64
+ type: integer
+ type:
+ description: Type tells the type of the Spark application.
+ enum:
+ - Java
+ - Python
+ - Scala
+ - R
+ type: string
+ volumes:
+ description: Volumes is the list of Kubernetes volumes that can
+ be mounted by the driver and/or executors.
+ items:
+ description: Volume represents a named volume in a pod that
+ may be accessed by any container in the pod.
+ properties:
+ awsElasticBlockStore:
+ description: |-
+ awsElasticBlockStore represents an AWS Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk mount + on the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk in + the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in the + blob storage + type: string + fsType: + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage account Managed: azure managed + data disk (only in managed availability set). defaults + to shared' + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service + mount on the host and bind mount to the pod. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the host + that shares a pod's lifetime + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. 
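+ # Illustrative sketch (editorial comment, not controller-gen output): a
+ # ConfigMap-backed volume in a SparkApplication spec might look like the
+ # following; the ConfigMap name is hypothetical:
+ #
+ #   volumes:
+ #     - name: config-vol
+ #       configMap:
+ #         name: my-spark-config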
+ items:
+ description: Maps a string key to a path within a
+ volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: optional specify whether the ConfigMap
+ or its keys must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ csi:
+ description: csi (Container Storage Interface) represents
+ ephemeral storage that is handled by certain external
+ CSI drivers (Beta feature).
+ properties:
+ driver:
+ description: |-
+ driver is the name of the CSI driver that handles this volume.
+ Consult with your admin for the correct name as registered in the cluster.
+ type: string
+ fsType:
+ description: |-
+ fsType to mount. Ex. "ext4", "xfs", "ntfs".
+ If not provided, the empty value is passed to the associated CSI driver
+ which will determine the default filesystem to apply.
+ type: string
+ nodePublishSecretRef:
+ description: |-
+ nodePublishSecretRef is a reference to the secret object containing
+ sensitive information to pass to the CSI driver to complete the CSI
+ NodePublishVolume and NodeUnpublishVolume calls.
+ This field is optional, and may be empty if no secret is required. If the
+ secret object contains more than one secret, all secret references are passed.
+ properties:
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ readOnly:
+ description: |-
+ readOnly specifies a read-only configuration for the volume.
+ Defaults to false (read/write).
+ type: boolean
+ volumeAttributes:
+ additionalProperties:
+ type: string
+ description: |-
+ volumeAttributes stores driver-specific properties that are passed to the CSI
+ driver. Consult your driver's documentation for supported values.
+ type: object
+ required:
+ - driver
+ type: object
+ downwardAPI:
+ description: downwardAPI represents downward API about the
+ pod that should populate this volume
+ properties:
+ defaultMode:
+ description: |-
+ Optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the + pod: only annotations, labels, name and namespace + are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must not + be absolute or contain the ''..'' path. Must + be utf-8 encoded. The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: |- + ephemeral represents a volume that is handled by a cluster storage driver. + The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, + and deleted when the pod is removed. + + + Use this if: + a) the volume is only needed while the pod runs, + b) features of normal volumes like restoring from snapshot or capacity + tracking are needed, + c) the storage driver is specified through a storage class, and + d) the storage driver supports dynamic volume provisioning through + a PersistentVolumeClaim (see EphemeralVolumeSource for more + information on the connection between this volume type + and PersistentVolumeClaim). + + + Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. + + + Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to + be used that way - see the documentation of the driver for + more information. + + + A pod can use both types of ephemeral volumes and + persistent volumes at the same time. + properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. + The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `-` where + `` is the name from the `PodSpec.Volumes` array + entry. Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). + + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. 
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. 
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass
+ (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference
+ to the PersistentVolume backing this claim.
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ type: object
+ fc:
+ description: fc represents a Fibre Channel resource that
+ is attached to a kubelet's host machine and then exposed
+ to the pod.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ lun:
+ description: 'lun is Optional: FC target lun number'
+ format: int32
+ type: integer
+ readOnly:
+ description: |-
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ targetWWNs:
+ description: 'targetWWNs is Optional: FC target worldwide
+ names (WWNs)'
+ items:
+ type: string
+ type: array
+ wwids:
+ description: |-
+ wwids Optional: FC volume world wide identifiers (wwids)
+ Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
+ items:
+ type: string
+ type: array
+ type: object
+ flexVolume:
+ description: |-
+ flexVolume represents a generic volume resource that is
+ provisioned/attached using an exec based plugin.
+ properties:
+ driver:
+ description: driver is the name of the driver to use
+ for this volume.
+ type: string
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+ type: string
+ options:
+ additionalProperties:
+ type: string
+ description: 'options is Optional: this field holds
+ extra command options if any.'
+ type: object
+ readOnly:
+ description: |-
+ readOnly is Optional: defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is Optional: secretRef is reference to the secret object containing
+ sensitive information to pass to the plugin scripts. This may be
+ empty if no secret object is specified. If the secret object
+ contains more than one secret, all secrets are passed to the plugin
+ scripts.
+ properties:
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
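+ # Illustrative sketch (editorial comment, not controller-gen output): the
+ # ephemeral.volumeClaimTemplate schema above lets a SparkApplication request
+ # a per-pod PVC; the storage class name and size below are assumptions:
+ #
+ #   volumes:
+ #     - name: spark-scratch
+ #       ephemeral:
+ #         volumeClaimTemplate:
+ #           spec:
+ #             accessModes: ["ReadWriteOnce"]
+ #             storageClassName: standard
+ #             resources:
+ #               requests:
+ #                 storage: 10Gi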
+ type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached + to a kubelet's host machine. This depends on the Flocker + control service being running + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. + This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the specified + revision. + type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + More info: https://examples.k8s.io/volumes/glusterfs/README.md + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + path: + description: |- + path is the Glusterfs volume path. 
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. This is generally + used for system agents or other privileged things that are allowed + to see the host machine. Most containers will NOT need this. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- + TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not + mount host directories as read/write. + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + type: + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + required: + - path + type: object + iscsi: + description: |- + iscsi represents an ISCSI Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://examples.k8s.io/volumes/iscsi/README.md + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support + iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support + iSCSI Session CHAP authentication + type: boolean + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + initiatorName: + description: |- + initiatorName is the custom iSCSI Initiator Name. + If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: |- + portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI + target and initiator authentication + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
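+ # Illustrative sketch (editorial comment, not controller-gen output): a
+ # hostPath volume, as described above, might be declared like this; the
+ # name and path are hypothetical:
+ #
+ #   volumes:
+ #     - name: spark-local-dir-1
+ #       hostPath:
+ #         path: /tmp/spark-local-dir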
+ type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: |- + targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host + machine + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon Controller + persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume + attached and mounted on kubelets host machine + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources secrets, + configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected along + with other supported volume types + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume + root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. 
The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap + data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path + within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: optional specify whether the + ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the + downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema + the FieldPath is written in terms + of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to + select in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file to + be created. Must not be absolute or + contain the ''..'' path. Must be utf-8 + encoded. 
The first item of the relative + path must not start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env + vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + secret: + description: secret information about the secret + data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path + within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. 
The kubelet will
+ start trying to rotate the token if the token is older than 80 percent of
+ its time to live or if the token is older than 24 hours. Defaults to 1 hour
+ and must be at least 10 minutes.
+ format: int64
+ type: integer
+ path:
+ description: |-
+ path is the path relative to the mount point of the file to project the
+ token into.
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ type: object
+ quobyte:
+ description: quobyte represents a Quobyte mount on the host
+ that shares a pod's lifetime
+ properties:
+ group:
+ description: |-
+ group to map volume access to
+ Default is no group
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Quobyte volume to be mounted with read-only permissions.
+ Defaults to false.
+ type: boolean
+ registry:
+ description: |-
+ registry represents a single or multiple Quobyte Registry services
+ specified as a string as host:port pair (multiple entries are separated with commas)
+ which acts as the central registry for volumes
+ type: string
+ tenant:
+ description: |-
+ tenant owning the given Quobyte volume in the Backend
+ Used with dynamically provisioned Quobyte volumes, value is set by the plugin
+ type: string
+ user:
+ description: |-
+ user to map volume access to
+ Defaults to serviceaccount user
+ type: string
+ volume:
+ description: volume is a string that references an already
+ created Quobyte volume by name.
+ type: string
+ required:
+ - registry
+ - volume
+ type: object
+ rbd:
+ description: |-
+ rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ More info: https://examples.k8s.io/volumes/rbd/README.md
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ image:
+ description: |-
+ image is the rados image name.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ keyring:
+ description: |-
+ keyring is the path to key ring for RBDUser.
+ Default is /etc/ceph/keyring.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ monitors:
+ description: |-
+ monitors is a collection of Ceph monitors.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ items:
+ type: string
+ type: array
+ pool:
+ description: |-
+ pool is the rados pool name.
+ Default is rbd.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is name of the authentication secret for RBDUser. If provided
+ overrides keyring.
+ Default is nil.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ properties:
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
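+ # Illustrative sketch (editorial comment, not controller-gen output): a volume
+ # declared under spec.volumes only takes effect once referenced from the
+ # driver's or executor's volumeMounts; the name and path are hypothetical:
+ #
+ #   driver:
+ #     volumeMounts:
+ #       - name: spark-scratch
+ #         mountPath: /tmp/scratch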
+ type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the ScaleIO + API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the ScaleIO + Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL communication + with Gateway, default false + type: boolean + storageMode: + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage Pool + associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. 
+ properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the Secret + or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy Based + Management (SPBM) profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. 
+ type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + required: + - driver + - executor + - sparkVersion + - type + type: object + required: + - schedule + - template + type: object + status: + properties: + lastRun: + description: LastRun is the time when the last run of the application + started. + format: date-time + nullable: true + type: string + lastRunName: + description: LastRunName is the name of the SparkApplication for the + most recent run of the application. + type: string + nextRun: + description: NextRun is the time when the next run of the application + will start. + format: date-time + nullable: true + type: string + pastFailedRunNames: + description: PastFailedRunNames keeps the names of SparkApplications + for past failed runs. + items: + type: string + type: array + pastSuccessfulRunNames: + description: PastSuccessfulRunNames keeps the names of SparkApplications + for past successful runs. + items: + type: string + type: array + reason: + description: Reason tells why the ScheduledSparkApplication is in + the particular ScheduleState. + type: string + scheduleState: + description: ScheduleState is the current scheduling state of the + application. + type: string + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/sparkoperator.k8s.io_sparkapplications.yaml b/config/crd/bases/sparkoperator.k8s.io_sparkapplications.yaml new file mode 100644 index 0000000000..c23d69264a --- /dev/null +++ b/config/crd/bases/sparkoperator.k8s.io_sparkapplications.yaml @@ -0,0 +1,11553 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.kubernetes.io: https://github.com/kubeflow/spark-operator/pull/1298 + controller-gen.kubebuilder.io/version: v0.15.0 + name: sparkapplications.sparkoperator.k8s.io +spec: + group: sparkoperator.k8s.io + names: + kind: SparkApplication + listKind: SparkApplicationList + plural: sparkapplications + shortNames: + - sparkapp + singular: sparkapplication + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.applicationState.state + name: Status + type: string + - jsonPath: .status.executionAttempts + name: Attempts + type: string + - jsonPath: .status.lastSubmissionAttemptTime + name: Start + type: string + - jsonPath: .status.terminationTime + name: Finish + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: SparkApplication represents a Spark application running on and + using Kubernetes as a cluster manager. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: |-
+              SparkApplicationSpec describes the specification of a Spark application using Kubernetes as a cluster manager.
+              It carries every piece of information a spark-submit command takes and recognizes.
+            properties:
+              arguments:
+                description: Arguments is a list of arguments to be passed to the
+                  application.
+                items:
+                  type: string
+                type: array
+              batchScheduler:
+                description: BatchScheduler configures which batch scheduler will
+                  be used for scheduling
+                type: string
+              batchSchedulerOptions:
+                description: BatchSchedulerOptions provides fine-grained control
+                  over how batch scheduling is performed.
+                properties:
+                  priorityClassName:
+                    description: PriorityClassName stands for the name of k8s PriorityClass
+                      resource; it's being used in Volcano batch scheduler.
+                    type: string
+                  queue:
+                    description: Queue stands for the resource queue which the application
+                      belongs to; it's being used in Volcano batch scheduler.
+                    type: string
+                  resources:
+                    additionalProperties:
+                      anyOf:
+                      - type: integer
+                      - type: string
+                      pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                      x-kubernetes-int-or-string: true
+                    description: |-
+                      Resources stands for the custom resource list to request. Usually it is used to define the lower-bound limit.
+                      If specified, volcano scheduler will consider it as the resources requested.
+                    type: object
+                type: object
+              deps:
+                description: Deps captures all possible types of dependencies of a
+                  Spark application.
+                properties:
+                  excludePackages:
+                    description: |-
+                      ExcludePackages is a list of "groupId:artifactId", to exclude while resolving the
+                      dependencies provided in Packages to avoid dependency conflicts.
+                    items:
+                      type: string
+                    type: array
+                  files:
+                    description: Files is a list of files the Spark application depends
+                      on.
+                    items:
+                      type: string
+                    type: array
+                  jars:
+                    description: Jars is a list of JAR files the Spark application
+                      depends on.
+                    items:
+                      type: string
+                    type: array
+                  packages:
+                    description: |-
+                      Packages is a list of maven coordinates of jars to include on the driver and executor
+                      classpaths. This will search the local maven repo, then maven central and any additional
+                      remote repositories given by the "repositories" option.
+                      Each package should be of the form "groupId:artifactId:version".
+                    items:
+                      type: string
+                    type: array
+                  pyFiles:
+                    description: PyFiles is a list of Python files the Spark application
+                      depends on.
+                    items:
+                      type: string
+                    type: array
+                  repositories:
+                    description: |-
+                      Repositories is a list of additional remote repositories to search for the maven coordinate
+                      given with the "packages" option.
+                    items:
+                      type: string
+                    type: array
+                type: object
+              driver:
+                description: Driver is the driver specification.
+                properties:
+                  affinity:
+                    description: Affinity specifies the affinity/anti-affinity settings
+                      for the pod.
+                    properties:
+                      nodeAffinity:
+                        description: Describes node affinity scheduling rules for
+                          the pod.
+                        properties:
+                          preferredDuringSchedulingIgnoredDuringExecution:
+                            description: |-
+                              The scheduler will prefer to schedule pods to nodes that satisfy
+                              the affinity expressions specified by this field, but it may choose
+                              a node that violates one or more of the expressions. The node that is
+                              most preferred is the one with the greatest sum of weights, i.e.
for each node that meets all of the scheduling requirements (resource
+                              request, requiredDuringScheduling affinity expressions, etc.),
+                              compute a sum by iterating through the elements of this field and adding
+                              "weight" to the sum if the node matches the corresponding matchExpressions; the
+                              node(s) with the highest sum are the most preferred.
+                            items:
+                              description: |-
+                                An empty preferred scheduling term matches all objects with implicit weight 0
+                                (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+                              properties:
+                                preference:
+                                  description: A node selector term, associated with
+                                    the corresponding weight.
+                                  properties:
+                                    matchExpressions:
+                                      description: A list of node selector requirements
+                                        by node's labels.
+                                      items:
+                                        description: |-
+                                          A node selector requirement is a selector that contains values, a key, and an operator
+                                          that relates the key and values.
+                                        properties:
+                                          key:
+                                            description: The label key that the selector
+                                              applies to.
+                                            type: string
+                                          operator:
+                                            description: |-
+                                              Represents a key's relationship to a set of values.
+                                              Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                            type: string
+                                          values:
+                                            description: |-
+                                              An array of string values. If the operator is In or NotIn,
+                                              the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                              the values array must be empty. If the operator is Gt or Lt, the values
+                                              array must have a single element, which will be interpreted as an integer.
+                                              This array is replaced during a strategic merge patch.
+                                            items:
+                                              type: string
+                                            type: array
+                                        required:
+                                        - key
+                                        - operator
+                                        type: object
+                                      type: array
+                                    matchFields:
+                                      description: A list of node selector requirements
+                                        by node's fields.
+                                      items:
+                                        description: |-
+                                          A node selector requirement is a selector that contains values, a key, and an operator
+                                          that relates the key and values.
+                                        properties:
+                                          key:
+                                            description: The label key that the selector
+                                              applies to.
+                                            type: string
+                                          operator:
+                                            description: |-
+                                              Represents a key's relationship to a set of values.
+                                              Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                            type: string
+                                          values:
+                                            description: |-
+                                              An array of string values. If the operator is In or NotIn,
+                                              the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                              the values array must be empty. If the operator is Gt or Lt, the values
+                                              array must have a single element, which will be interpreted as an integer.
+                                              This array is replaced during a strategic merge patch.
+                                            items:
+                                              type: string
+                                            type: array
+                                        required:
+                                        - key
+                                        - operator
+                                        type: object
+                                      type: array
+                                  type: object
+                                  x-kubernetes-map-type: atomic
+                                weight:
+                                  description: Weight associated with matching the
+                                    corresponding nodeSelectorTerm, in the range 1-100.
+                                  format: int32
+                                  type: integer
+                              required:
+                              - preference
+                              - weight
+                              type: object
+                            type: array
+                          requiredDuringSchedulingIgnoredDuringExecution:
+                            description: |-
+                              If the affinity requirements specified by this field are not met at
+                              scheduling time, the pod will not be scheduled onto the node.
+                              If the affinity requirements specified by this field cease to be met
+                              at some point during pod execution (e.g. due to an update), the system
+                              may or may not try to eventually evict the pod from its node.
+                            properties:
+                              nodeSelectorTerms:
+                                description: Required. A list of node selector terms.
+                                  The terms are ORed.
+                                items:
+                                  description: |-
+                                    A null or empty node selector term matches no objects. The requirements of
+                                    them are ANDed.
The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+                                  properties:
+                                    matchExpressions:
+                                      description: A list of node selector requirements
+                                        by node's labels.
+                                      items:
+                                        description: |-
+                                          A node selector requirement is a selector that contains values, a key, and an operator
+                                          that relates the key and values.
+                                        properties:
+                                          key:
+                                            description: The label key that the selector
+                                              applies to.
+                                            type: string
+                                          operator:
+                                            description: |-
+                                              Represents a key's relationship to a set of values.
+                                              Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                            type: string
+                                          values:
+                                            description: |-
+                                              An array of string values. If the operator is In or NotIn,
+                                              the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                              the values array must be empty. If the operator is Gt or Lt, the values
+                                              array must have a single element, which will be interpreted as an integer.
+                                              This array is replaced during a strategic merge patch.
+                                            items:
+                                              type: string
+                                            type: array
+                                        required:
+                                        - key
+                                        - operator
+                                        type: object
+                                      type: array
+                                    matchFields:
+                                      description: A list of node selector requirements
+                                        by node's fields.
+                                      items:
+                                        description: |-
+                                          A node selector requirement is a selector that contains values, a key, and an operator
+                                          that relates the key and values.
+                                        properties:
+                                          key:
+                                            description: The label key that the selector
+                                              applies to.
+                                            type: string
+                                          operator:
+                                            description: |-
+                                              Represents a key's relationship to a set of values.
+                                              Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                            type: string
+                                          values:
+                                            description: |-
+                                              An array of string values. If the operator is In or NotIn,
+                                              the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                              the values array must be empty. If the operator is Gt or Lt, the values
+                                              array must have a single element, which will be interpreted as an integer.
+                                              This array is replaced during a strategic merge patch.
+                                            items:
+                                              type: string
+                                            type: array
+                                        required:
+                                        - key
+                                        - operator
+                                        type: object
+                                      type: array
+                                  type: object
+                                  x-kubernetes-map-type: atomic
+                                type: array
+                              required:
+                              - nodeSelectorTerms
+                              type: object
+                              x-kubernetes-map-type: atomic
+                        type: object
+                      podAffinity:
+                        description: Describes pod affinity scheduling rules (e.g.
+                          co-locate this pod in the same node, zone, etc. as some
+                          other pod(s)).
+                        properties:
+                          preferredDuringSchedulingIgnoredDuringExecution:
+                            description: |-
+                              The scheduler will prefer to schedule pods to nodes that satisfy
+                              the affinity expressions specified by this field, but it may choose
+                              a node that violates one or more of the expressions. The node that is
+                              most preferred is the one with the greatest sum of weights, i.e.
+                              for each node that meets all of the scheduling requirements (resource
+                              request, requiredDuringScheduling affinity expressions, etc.),
+                              compute a sum by iterating through the elements of this field and adding
+                              "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+                              node(s) with the highest sum are the most preferred.
+                            items:
+                              description: The weights of all of the matched WeightedPodAffinityTerm
+                                fields are added per-node to find the most preferred
+                                node(s)
+                              properties:
+                                podAffinityTerm:
+                                  description: Required. A pod affinity term, associated
+                                    with the corresponding weight.
+                                  properties:
+                                    labelSelector:
+                                      description: |-
+                                        A label query over a set of resources, in this case pods.
+                                        If it's null, this PodAffinityTerm matches with no Pods.
+ properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. 
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. 
+ items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, etc. + as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. 
+ Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. 
This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + annotations: + additionalProperties: + type: string + description: Annotations are the Kubernetes annotations to be + added to the pod. + type: object + configMaps: + description: ConfigMaps carries information of other ConfigMaps + to add to the pod. + items: + description: NamePath is a pair of a name and a path to which + the named objects should be mounted to. + properties: + name: + type: string + path: + type: string + required: + - name + - path + type: object + type: array + coreLimit: + description: |- + CoreLimit specifies a hard limit on CPU cores for the pod. + Optional + type: string + coreRequest: + description: |- + CoreRequest is the physical CPU core request for the driver. + Maps to `spark.kubernetes.driver.request.cores` that is available since Spark 3.0. + type: string + cores: + description: Cores maps to `spark.driver.cores` or `spark.executor.cores` + for the driver and executors, respectively. + format: int32 + minimum: 1 + type: integer + dnsConfig: + description: DnsConfig dns settings for the pod, following the + Kubernetes specifications. + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: + type: string + type: array + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver options + of a pod. + properties: + name: + description: Required. + type: string + value: + type: string + type: object + type: array + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + type: object + env: + description: Env carries the environment variables to add to the + pod. 
+ items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: EnvFrom is a list of sources to populate environment + variables in the container. 
+ items: + description: EnvFromSource represents the source of a set of + ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each key + in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + envSecretKeyRefs: + additionalProperties: + description: NameKey represents the name and key of a SecretKeyRef. + properties: + key: + type: string + name: + type: string + required: + - key + - name + type: object + description: |- + EnvSecretKeyRefs holds a mapping from environment variable names to SecretKeyRefs. + Deprecated. Consider using `env` instead. + type: object + envVars: + additionalProperties: + type: string + description: |- + EnvVars carries the environment variables to add to the pod. + Deprecated. Consider using `env` instead. + type: object + gpu: + description: GPU specifies GPU requirement for the pod. + properties: + name: + description: 'Name is GPU resource name, such as: nvidia.com/gpu + or amd.com/gpu' + type: string + quantity: + description: Quantity is the number of GPUs to request for + driver or executor. + format: int64 + type: integer + required: + - name + - quantity + type: object + hostAliases: + description: HostAliases settings for the pod, following the Kubernetes + specifications. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + ip: + description: IP address of the host file entry. + type: string + type: object + type: array + hostNetwork: + description: HostNetwork indicates whether to request host networking + for the pod or not. + type: boolean + image: + description: Image is the container image to use. Overrides Spec.Image + if set. + type: string + initContainers: + description: InitContainers is a list of init-containers that + run to completion before the main Spark container. + items: + description: A single application container that you want to + run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. 
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
+ properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of a + set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. 
+ properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the HTTP request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when a TCP handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed.
Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the HTTP request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when a TCP handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take.
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port in + a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port + to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
+ The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in + PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. 
+ type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container process that reads from stdin will never receive an EOF. + Default is false. + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a raw + block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the + container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory.
+ If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + javaOptions: + description: |- + JavaOptions is a string of extra JVM options to pass to the driver. For instance, + GC settings or other logging. + type: string + kubernetesMaster: + description: |- + KubernetesMaster is the URL of the Kubernetes master used by the driver to manage executor pods and + other Kubernetes resources. Defaults to https://kubernetes.default.svc. + type: string + labels: + additionalProperties: + type: string + description: Labels are the Kubernetes labels to be added to the + pod. + type: object + lifecycle: + description: Lifecycle for running preStop or postStart commands + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the HTTP request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the container + should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when a TCP handler is specified.
+ properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the HTTP request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the container + should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept + for backward compatibility. There is no validation of this field and + lifecycle hooks will fail at runtime when a TCP handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.'
+ type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + memory: + description: Memory is the amount of memory to request for the + pod. + type: string + memoryOverhead: + description: MemoryOverhead is the amount of off-heap memory to + allocate in cluster mode, in MiB unless otherwise specified. + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is the Kubernetes node selector to be added to the driver and executor pods. + This field is mutually exclusive with nodeSelector at SparkApplication level (which will be deprecated). + type: object + podName: + description: |- + PodName is the name of the driver pod that the user creates. This is used for the + in-cluster client mode in which the user creates a client pod where the driver of + the user application runs. It's an error to set this field if Mode is not + in-cluster-client. + pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*' + type: string + podSecurityContext: + description: PodSecurityContext specifies the PodSecurityContext + to apply. + properties: + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. + Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in addition + to the container's primary GID, the fsGroup (if specified), and group memberships + defined in the container image for the uid of the container process. If unspecified, + no additional groups are added to any container. Note that group memberships + defined in the container image for the uid of the container process are still effective, + even if they are not included in this list. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. 
+ properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + ports: + description: Ports settings for the pods, following the Kubernetes + specifications. + items: + description: Port represents the port definition in the pods + objects. + properties: + containerPort: + format: int32 + type: integer + name: + type: string + protocol: + type: string + required: + - containerPort + - name + - protocol + type: object + type: array + schedulerName: + description: SchedulerName specifies the scheduler that will be + used for scheduling + type: string + secrets: + description: Secrets carries information of secrets to add to + the pod. + items: + description: SecretInfo captures information of a secret. + properties: + name: + type: string + path: + type: string + secretType: + description: SecretType tells the type of a secret. + type: string + required: + - name + - path + - secretType + type: object + type: array + securityContext: + description: SecurityContext specifies the container's SecurityContext + to apply. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. 
+ The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. 
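+          # Illustrative sketch only (hypothetical values): a hardened container
+          # securityContext for the driver could look like:
+          #   spec:
+          #     driver:
+          #       securityContext:
+          #         runAsNonRoot: true
+          #         allowPrivilegeEscalation: false
+          #         capabilities:
+          #           drop: ["ALL"]
+          #         seccompProfile:
+          #           type: RuntimeDefault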
+                          type: string
+                      required:
+                      - type
+                      type: object
+                    windowsOptions:
+                      description: |-
+                        The Windows specific settings applied to all containers.
+                        If unspecified, the options from the PodSecurityContext will be used.
+                        If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
+                        Note that this field cannot be set when spec.os.name is linux.
+                      properties:
+                        gmsaCredentialSpec:
+                          description: |-
+                            GMSACredentialSpec is where the GMSA admission webhook
+                            (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
+                            GMSA credential spec named by the GMSACredentialSpecName field.
+                          type: string
+                        gmsaCredentialSpecName:
+                          description: GMSACredentialSpecName is the name of the
+                            GMSA credential spec to use.
+                          type: string
+                        hostProcess:
+                          description: |-
+                            HostProcess determines if a container should be run as a 'Host Process' container.
+                            All of a Pod's containers must have the same effective HostProcess value
+                            (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
+                            In addition, if HostProcess is true then HostNetwork must also be set to true.
+                          type: boolean
+                        runAsUserName:
+                          description: |-
+                            The UserName in Windows to run the entrypoint of the container process.
+                            Defaults to the user specified in image metadata if unspecified.
+                            May also be set in PodSecurityContext. If set in both SecurityContext and
+                            PodSecurityContext, the value specified in SecurityContext takes precedence.
+                          type: string
+                      type: object
+                  type: object
+                serviceAccount:
+                  description: ServiceAccount is the name of the custom Kubernetes
+                    service account used by the pod.
+                  type: string
+                serviceAnnotations:
+                  additionalProperties:
+                    type: string
+                  description: |-
+                    ServiceAnnotations defines the annotations to be added to the Kubernetes headless service used by
+                    executors to connect to the driver.
+                  type: object
+                serviceLabels:
+                  additionalProperties:
+                    type: string
+                  description: |-
+                    ServiceLabels defines the labels to be added to the Kubernetes headless service used by
+                    executors to connect to the driver.
+                  type: object
+                shareProcessNamespace:
+                  description: ShareProcessNamespace settings for the pod, following
+                    the Kubernetes specifications.
+                  type: boolean
+                sidecars:
+                  description: Sidecars is a list of sidecar containers that run
+                    alongside the main Spark container.
+                  items:
+                    description: A single application container that you want to
+                      run within a pod.
+                    properties:
+                      args:
+                        description: |-
+                          Arguments to the entrypoint.
+                          The container image's CMD is used if this is not provided.
+                          Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+                          cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+                          to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+                          produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+                          of whether the variable exists or not. Cannot be updated.
+                          More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+                        items:
+                          type: string
+                        type: array
+                      command:
+                        description: |-
+                          Entrypoint array. Not executed within a shell.
+                          The container image's ENTRYPOINT is used if this is not provided.
+                          Variable references $(VAR_NAME) are expanded using the container's environment.
If a variable
+                          cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+                          to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+                          produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+                          of whether the variable exists or not. Cannot be updated.
+                          More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+                        items:
+                          type: string
+                        type: array
+                      env:
+                        description: |-
+                          List of environment variables to set in the container.
+                          Cannot be updated.
+                        items:
+                          description: EnvVar represents an environment variable
+                            present in a Container.
+                          properties:
+                            name:
+                              description: Name of the environment variable. Must
+                                be a C_IDENTIFIER.
+                              type: string
+                            value:
+                              description: |-
+                                Variable references $(VAR_NAME) are expanded
+                                using the previously defined environment variables in the container and
+                                any service environment variables. If a variable cannot be resolved,
+                                the reference in the input string will be unchanged. Double $$ are reduced
+                                to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+                                "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+                                Escaped references will never be expanded, regardless of whether the variable
+                                exists or not.
+                                Defaults to "".
+                              type: string
+                            valueFrom:
+                              description: Source for the environment variable's
+                                value. Cannot be used if value is not empty.
+                              properties:
+                                configMapKeyRef:
+                                  description: Selects a key of a ConfigMap.
+                                  properties:
+                                    key:
+                                      description: The key to select.
+                                      type: string
+                                    name:
+                                      description: |-
+                                        Name of the referent.
+                                        More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                                        TODO: Add other useful fields. apiVersion, kind, uid?
+                                      type: string
+                                    optional:
+                                      description: Specify whether the ConfigMap
+                                        or its key must be defined
+                                      type: boolean
+                                  required:
+                                  - key
+                                  type: object
+                                  x-kubernetes-map-type: atomic
+                                fieldRef:
+                                  description: |-
+                                    Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
+                                    spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+                                  properties:
+                                    apiVersion:
+                                      description: Version of the schema the FieldPath
+                                        is written in terms of, defaults to "v1".
+                                      type: string
+                                    fieldPath:
+                                      description: Path of the field to select in
+                                        the specified API version.
+                                      type: string
+                                  required:
+                                  - fieldPath
+                                  type: object
+                                  x-kubernetes-map-type: atomic
+                                resourceFieldRef:
+                                  description: |-
+                                    Selects a resource of the container: only resources limits and requests
+                                    (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
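+          # Illustrative sketch only (hypothetical variable names): a sidecar can
+          # pull pod metadata and its own resource limits via valueFrom:
+          #   env:
+          #   - name: POD_NAME
+          #     valueFrom:
+          #       fieldRef:
+          #         fieldPath: metadata.name
+          #   - name: MEM_LIMIT
+          #     valueFrom:
+          #       resourceFieldRef:
+          #         resource: limits.memory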
+ properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of a + set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. 
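+          # Illustrative sketch only (hypothetical command): a sidecar preStop hook
+          # that delays shutdown briefly so in-flight work can drain:
+          #   lifecycle:
+          #     preStop:
+          #       exec:
+          #         command: ["/bin/sh", "-c", "sleep 5"]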
+                        properties:
+                          postStart:
+                            description: |-
+                              PostStart is called immediately after a container is created. If the handler fails,
+                              the container is terminated and restarted according to its restart policy.
+                              Other management of the container blocks until the hook completes.
+                              More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+                            properties:
+                              exec:
+                                description: Exec specifies the action to take.
+                                properties:
+                                  command:
+                                    description: |-
+                                      Command is the command line to execute inside the container, the working directory for the
+                                      command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+                                      not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+                                      a shell, you need to explicitly call out to that shell.
+                                      Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+                                    items:
+                                      type: string
+                                    type: array
+                                type: object
+                              httpGet:
+                                description: HTTPGet specifies the http request
+                                  to perform.
+                                properties:
+                                  host:
+                                    description: |-
+                                      Host name to connect to, defaults to the pod IP. You probably want to set
+                                      "Host" in httpHeaders instead.
+                                    type: string
+                                  httpHeaders:
+                                    description: Custom headers to set in the request.
+                                      HTTP allows repeated headers.
+                                    items:
+                                      description: HTTPHeader describes a custom
+                                        header to be used in HTTP probes
+                                      properties:
+                                        name:
+                                          description: |-
+                                            The header field name.
+                                            This will be canonicalized upon output, so case-variant names will be understood as the same header.
+                                          type: string
+                                        value:
+                                          description: The header field value
+                                          type: string
+                                      required:
+                                      - name
+                                      - value
+                                      type: object
+                                    type: array
+                                  path:
+                                    description: Path to access on the HTTP server.
+                                    type: string
+                                  port:
+                                    anyOf:
+                                    - type: integer
+                                    - type: string
+                                    description: |-
+                                      Name or number of the port to access on the container.
+                                      Number must be in the range 1 to 65535.
+                                      Name must be an IANA_SVC_NAME.
+                                    x-kubernetes-int-or-string: true
+                                  scheme:
+                                    description: |-
+                                      Scheme to use for connecting to the host.
+                                      Defaults to HTTP.
+                                    type: string
+                                required:
+                                - port
+                                type: object
+                              sleep:
+                                description: Sleep represents the duration that
+                                  the container should sleep before being terminated.
+                                properties:
+                                  seconds:
+                                    description: Seconds is the number of seconds
+                                      to sleep.
+                                    format: int64
+                                    type: integer
+                                required:
+                                - seconds
+                                type: object
+                              tcpSocket:
+                                description: |-
+                                  Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
+                                  for backward compatibility. There is no validation of this field, and
+                                  lifecycle hooks will fail at runtime when a tcp handler is specified.
+                                properties:
+                                  host:
+                                    description: 'Optional: Host name to connect
+                                      to, defaults to the pod IP.'
+                                    type: string
+                                  port:
+                                    anyOf:
+                                    - type: integer
+                                    - type: string
+                                    description: |-
+                                      Number or name of the port to access on the container.
+                                      Number must be in the range 1 to 65535.
+                                      Name must be an IANA_SVC_NAME.
+                                    x-kubernetes-int-or-string: true
+                                required:
+                                - port
+                                type: object
+                            type: object
+                          preStop:
+                            description: |-
+                              PreStop is called immediately before a container is terminated due to an
+                              API request or management event such as liveness/startup probe failure,
+                              preemption, resource contention, etc. The handler is not called if the
+                              container crashes or exits. The Pod's termination grace period countdown begins before the
+                              PreStop hook is executed. Regardless of the outcome of the handler, the
+                              container will eventually terminate within the Pod's termination grace
+                              period (unless delayed by finalizers). Other management of the container blocks until the hook completes
+                              or until the termination grace period is reached.
+                              More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+                            properties:
+                              exec:
+                                description: Exec specifies the action to take.
+                                properties:
+                                  command:
+                                    description: |-
+                                      Command is the command line to execute inside the container, the working directory for the
+                                      command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+                                      not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+                                      a shell, you need to explicitly call out to that shell.
+                                      Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+                                    items:
+                                      type: string
+                                    type: array
+                                type: object
+                              httpGet:
+                                description: HTTPGet specifies the http request
+                                  to perform.
+                                properties:
+                                  host:
+                                    description: |-
+                                      Host name to connect to, defaults to the pod IP. You probably want to set
+                                      "Host" in httpHeaders instead.
+                                    type: string
+                                  httpHeaders:
+                                    description: Custom headers to set in the request.
+                                      HTTP allows repeated headers.
+                                    items:
+                                      description: HTTPHeader describes a custom
+                                        header to be used in HTTP probes
+                                      properties:
+                                        name:
+                                          description: |-
+                                            The header field name.
+                                            This will be canonicalized upon output, so case-variant names will be understood as the same header.
+                                          type: string
+                                        value:
+                                          description: The header field value
+                                          type: string
+                                      required:
+                                      - name
+                                      - value
+                                      type: object
+                                    type: array
+                                  path:
+                                    description: Path to access on the HTTP server.
+                                    type: string
+                                  port:
+                                    anyOf:
+                                    - type: integer
+                                    - type: string
+                                    description: |-
+                                      Name or number of the port to access on the container.
+                                      Number must be in the range 1 to 65535.
+                                      Name must be an IANA_SVC_NAME.
+                                    x-kubernetes-int-or-string: true
+                                  scheme:
+                                    description: |-
+                                      Scheme to use for connecting to the host.
+                                      Defaults to HTTP.
+                                    type: string
+                                required:
+                                - port
+                                type: object
+                              sleep:
+                                description: Sleep represents the duration that
+                                  the container should sleep before being terminated.
+                                properties:
+                                  seconds:
+                                    description: Seconds is the number of seconds
+                                      to sleep.
+                                    format: int64
+                                    type: integer
+                                required:
+                                - seconds
+                                type: object
+                              tcpSocket:
+                                description: |-
+                                  Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
+                                  for backward compatibility. There is no validation of this field, and
+                                  lifecycle hooks will fail at runtime when a tcp handler is specified.
+                                properties:
+                                  host:
+                                    description: 'Optional: Host name to connect
+                                      to, defaults to the pod IP.'
+                                    type: string
+                                  port:
+                                    anyOf:
+                                    - type: integer
+                                    - type: string
+                                    description: |-
+                                      Number or name of the port to access on the container.
+                                      Number must be in the range 1 to 65535.
+                                      Name must be an IANA_SVC_NAME.
+                                    x-kubernetes-int-or-string: true
+                                required:
+                                - port
+                                type: object
+                            type: object
+                        type: object
+                      livenessProbe:
+                        description: |-
+                          Periodic probe of container liveness.
+                          Container will be restarted if the probe fails.
+                          Cannot be updated.
+                          More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+                        properties:
+                          exec:
+                            description: Exec specifies the action to take.
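+          # Illustrative sketch only (hypothetical endpoint and timings): an HTTP
+          # liveness probe for a sidecar container:
+          #   livenessProbe:
+          #     httpGet:
+          #       path: /healthz
+          #       port: 8080
+          #     initialDelaySeconds: 10
+          #     periodSeconds: 30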
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port in + a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port + to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
+ The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in + PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. 
+ type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
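+          # Illustrative sketch only (hypothetical endpoint): a startupProbe lets a
+          # slow-starting sidecar come up before liveness checks begin:
+          #   startupProbe:
+          #     httpGet:
+          #       path: /ready
+          #       port: 8080
+          #     failureThreshold: 30
+          #     periodSeconds: 10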
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the
+                          first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+                          at which time stdin is closed and remains closed until the container is restarted. If this
+                          flag is false, a container process that reads from stdin will never receive an EOF.
+                          Default is false.
+                        type: boolean
+                      terminationMessagePath:
+                        description: |-
+                          Optional: Path at which the file to which the container's termination message
+                          will be written is mounted into the container's filesystem.
+                          Message written is intended to be brief final status, such as an assertion failure message.
+                          Will be truncated by the node if greater than 4096 bytes. The total message length across
+                          all containers will be limited to 12kb.
+                          Defaults to /dev/termination-log.
+                          Cannot be updated.
+                        type: string
+                      terminationMessagePolicy:
+                        description: |-
+                          Indicate how the termination message should be populated. File will use the contents of
+                          terminationMessagePath to populate the container status message on both success and failure.
+                          FallbackToLogsOnError will use the last chunk of container log output if the termination
+                          message file is empty and the container exited with an error.
+                          The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+                          Defaults to File.
+                          Cannot be updated.
+                        type: string
+                      tty:
+                        description: |-
+                          Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+                          Default is false.
+                        type: boolean
+                      volumeDevices:
+                        description: volumeDevices is the list of block devices
+                          to be used by the container.
+                        items:
+                          description: volumeDevice describes a mapping of a raw
+                            block device within a container.
+                          properties:
+                            devicePath:
+                              description: devicePath is the path inside of the
+                                container that the device will be mapped to.
+                              type: string
+                            name:
+                              description: name must match the name of a persistentVolumeClaim
+                                in the pod
+                              type: string
+                          required:
+                          - devicePath
+                          - name
+                          type: object
+                        type: array
+                      volumeMounts:
+                        description: |-
+                          Pod volumes to mount into the container's filesystem.
+                          Cannot be updated.
+                        items:
+                          description: VolumeMount describes a mounting of a Volume
+                            within a container.
+                          properties:
+                            mountPath:
+                              description: |-
+                                Path within the container at which the volume should be mounted. Must
+                                not contain ':'.
+                              type: string
+                            mountPropagation:
+                              description: |-
+                                mountPropagation determines how mounts are propagated from the host
+                                to container and the other way around.
+                                When not set, MountPropagationNone is used.
+                                This field is beta in 1.10.
+                              type: string
+                            name:
+                              description: This must match the Name of a Volume.
+                              type: string
+                            readOnly:
+                              description: |-
+                                Mounted read-only if true, read-write otherwise (false or unspecified).
+                                Defaults to false.
+                              type: boolean
+                            subPath:
+                              description: |-
+                                Path within the volume from which the container's volume should be mounted.
+                                Defaults to "" (volume's root).
+                              type: string
+                            subPathExpr:
+                              description: |-
+                                Expanded path within the volume from which the container's volume should be mounted.
+                                Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+                                Defaults to "" (volume's root).
+                                SubPathExpr and SubPath are mutually exclusive.
+                              type: string
+                          required:
+                          - mountPath
+                          - name
+                          type: object
+                        type: array
+                      workingDir:
+                        description: |-
+                          Container's working directory.
+                          If not specified, the container runtime's default will be used, which
+                          might be configured in the container image.
+                          Cannot be updated.
+                        type: string
+                    required:
+                    - name
+                    type: object
+                  type: array
+                terminationGracePeriodSeconds:
+                  description: Termination grace period seconds for the pod
+                  format: int64
+                  type: integer
+                tolerations:
+                  description: Tolerations specifies the tolerations listed in ".spec.tolerations"
+                    to be applied to the pod.
+                  items:
+                    description: |-
+                      The pod this Toleration is attached to tolerates any taint that matches
+                      the triple <key,value,effect> using the matching operator <operator>.
+                    properties:
+                      effect:
+                        description: |-
+                          Effect indicates the taint effect to match. Empty means match all taint effects.
+                          When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+                        type: string
+                      key:
+                        description: |-
+                          Key is the taint key that the toleration applies to. Empty means match all taint keys.
+                          If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+                        type: string
+                      operator:
+                        description: |-
+                          Operator represents a key's relationship to the value.
+                          Valid operators are Exists and Equal. Defaults to Equal.
+                          Exists is equivalent to wildcard for value, so that a pod can
+                          tolerate all taints of a particular category.
+                        type: string
+                      tolerationSeconds:
+                        description: |-
+                          TolerationSeconds represents the period of time the toleration (which must be
+                          of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+                          it is not set, which means tolerate the taint forever (do not evict). Zero and
+                          negative values will be treated as 0 (evict immediately) by the system.
+                        format: int64
+                        type: integer
+                      value:
+                        description: |-
+                          Value is the taint value the toleration matches to.
+                          If the operator is Exists, the value should be empty, otherwise just a regular string.
+                        type: string
+                    type: object
+                  type: array
+                volumeMounts:
+                  description: VolumeMounts specifies the volumes listed in ".spec.volumes"
+                    to mount into the main container's filesystem.
+                  items:
+                    description: VolumeMount describes a mounting of a Volume within
+                      a container.
+                    properties:
+                      mountPath:
+                        description: |-
+                          Path within the container at which the volume should be mounted. Must
+                          not contain ':'.
+                        type: string
+                      mountPropagation:
+                        description: |-
+                          mountPropagation determines how mounts are propagated from the host
+                          to container and the other way around.
+                          When not set, MountPropagationNone is used.
+                          This field is beta in 1.10.
+                        type: string
+                      name:
+                        description: This must match the Name of a Volume.
+                        type: string
+                      readOnly:
+                        description: |-
+                          Mounted read-only if true, read-write otherwise (false or unspecified).
+                          Defaults to false.
+                        type: boolean
+                      subPath:
+                        description: |-
+                          Path within the volume from which the container's volume should be mounted.
+                          Defaults to "" (volume's root).
+                        type: string
+                      subPathExpr:
+                        description: |-
+                          Expanded path within the volume from which the container's volume should be mounted.
+                          Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+                          Defaults to "" (volume's root).
+                          SubPathExpr and SubPath are mutually exclusive.
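+          # Illustrative sketch only (hypothetical taint and volume names): driver
+          # tolerations and volumeMounts; the mount must reference a volume declared
+          # under .spec.volumes:
+          #   spec:
+          #     driver:
+          #       tolerations:
+          #       - key: dedicated
+          #         operator: Equal
+          #         value: spark
+          #         effect: NoSchedule
+          #       volumeMounts:
+          #       - name: spark-local-dir
+          #         mountPath: /tmp/spark-local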
+                        type: string
+                    required:
+                    - mountPath
+                    - name
+                    type: object
+                  type: array
+              type: object
+            driverIngressOptions:
+              description: DriverIngressOptions allows configuring the Service and
+                the Ingress to expose ports inside the Spark driver
+              items:
+                description: DriverIngressConfiguration is for driver ingress specific
+                  configuration parameters.
+                properties:
+                  ingressAnnotations:
+                    additionalProperties:
+                      type: string
+                    description: IngressAnnotations is a map of key,value pairs
+                      of annotations that might be added to the ingress object,
+                      e.g. to specify nginx as the ingress.class.
+                    type: object
+                  ingressTLS:
+                    description: TlsHosts is useful if we need to declare SSL certificates
+                      for the ingress object
+                    items:
+                      description: IngressTLS describes the transport layer security
+                        associated with an ingress.
+                      properties:
+                        hosts:
+                          description: |-
+                            hosts is a list of hosts included in the TLS certificate. The values in
+                            this list must match the name/s used in the tlsSecret. Defaults to the
+                            wildcard host setting for the loadbalancer controller fulfilling this
+                            Ingress, if left unspecified.
+                          items:
+                            type: string
+                          type: array
+                          x-kubernetes-list-type: atomic
+                        secretName:
+                          description: |-
+                            secretName is the name of the secret used to terminate TLS traffic on
+                            port 443. Field is left optional to allow TLS routing based on SNI
+                            hostname alone. If the SNI host in a listener conflicts with the "Host"
+                            header field used by an IngressRule, the SNI host is used for termination
+                            and value of the "Host" header is used for routing.
+                          type: string
+                      type: object
+                    type: array
+                  ingressURLFormat:
+                    description: IngressURLFormat is the URL for the ingress.
+                    type: string
+                  serviceAnnotations:
+                    additionalProperties:
+                      type: string
+                    description: ServiceAnnotations is a map of key,value pairs
+                      of annotations that might be added to the service object.
+                    type: object
+                  serviceLabels:
+                    additionalProperties:
+                      type: string
+                    description: ServiceLabels is a map of key,value pairs of labels
+                      that might be added to the service object.
+                    type: object
+                  servicePort:
+                    description: ServicePort allows configuring the port at service
+                      level that might be different from the targetPort.
+                    format: int32
+                    type: integer
+                  servicePortName:
+                    description: |-
+                      ServicePortName allows configuring the name of the service port.
+                      This may be useful for sidecar proxies like Envoy injected by Istio which require specific ports names to treat traffic as proper HTTP.
+                    type: string
+                  serviceType:
+                    description: ServiceType allows configuring the type of the
+                      service. Defaults to ClusterIP.
+                    type: string
+                required:
+                - servicePort
+                - servicePortName
+                type: object
+              type: array
+            dynamicAllocation:
+              description: |-
+                DynamicAllocation configures dynamic allocation that becomes available for the Kubernetes
+                scheduler backend since Spark 3.0.
+              properties:
+                enabled:
+                  description: Enabled controls whether dynamic allocation is enabled
+                    or not.
+                  type: boolean
+                initialExecutors:
+                  description: |-
+                    InitialExecutors is the initial number of executors to request. If .spec.executor.instances
+                    is also set, the initial number of executors is set to the bigger of that and this option.
+                  format: int32
+                  type: integer
+                maxExecutors:
+                  description: MaxExecutors is the upper bound for the number of
+                    executors if dynamic allocation is enabled.
+                  format: int32
+                  type: integer
+                minExecutors:
+                  description: MinExecutors is the lower bound for the number of
+                    executors if dynamic allocation is enabled.
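+          # Illustrative sketch only (hypothetical sizing; shuffleTrackingTimeout is
+          # in milliseconds): enabling dynamic allocation for an application:
+          #   spec:
+          #     dynamicAllocation:
+          #       enabled: true
+          #       initialExecutors: 2
+          #       minExecutors: 2
+          #       maxExecutors: 10
+          #       shuffleTrackingTimeout: 120000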
+ format: int32 + type: integer + shuffleTrackingTimeout: + description: |- + ShuffleTrackingTimeout controls the timeout in milliseconds for executors that are holding + shuffle data if shuffle tracking is enabled (true by default if dynamic allocation is enabled). + format: int64 + type: integer + type: object + executor: + description: Executor is the executor specification. + properties: + affinity: + description: Affinity specifies the affinity/anti-affinity settings + for the pod. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for + the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with + the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the + corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). 
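+ # Sketch (labels and values hypothetical): co-locating executors with pods
+ # labeled app=spark-cache on the same node via podAffinity in a
+ # SparkApplication manifest:
+ #   spec:
+ #     executor:
+ #       affinity:
+ #         podAffinity:
+ #           requiredDuringSchedulingIgnoredDuringExecution:
+ #           - labelSelector:
+ #               matchLabels:
+ #                 app: spark-cache
+ #             topologyKey: kubernetes.io/hostname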
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. 
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+ the label with key <topologyKey> matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.
+ Also, MatchLabelKeys cannot be set when LabelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration.
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, etc. + as some other pod(s)). 
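+ # Sketch (weight and labels hypothetical): preferring to spread executors
+ # across nodes by repelling pods that carry the spark-role=executor label:
+ #   spec:
+ #     executor:
+ #       affinity:
+ #         podAntiAffinity:
+ #           preferredDuringSchedulingIgnoredDuringExecution:
+ #           - weight: 100
+ #             podAffinityTerm:
+ #               labelSelector:
+ #                 matchLabels:
+ #                   spark-role: executor
+ #               topologyKey: kubernetes.io/hostname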
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + Also, MatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. 
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the anti-affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the anti-affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+ the label with key <topologyKey> matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.
+ Also, MatchLabelKeys cannot be set when LabelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration.
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. + Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. + This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate. + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + annotations: + additionalProperties: + type: string + description: Annotations are the Kubernetes annotations to be + added to the pod. + type: object + configMaps: + description: ConfigMaps carries information of other ConfigMaps + to add to the pod. 
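+ # Sketch (names hypothetical): each NamePath item below mounts the named
+ # ConfigMap at the given path in the executor pods:
+ #   spec:
+ #     executor:
+ #       configMaps:
+ #       - name: my-spark-config
+ #         path: /etc/spark/extra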
+ items: + description: NamePath is a pair of a name and a path to which + the named objects should be mounted to. + properties: + name: + type: string + path: + type: string + required: + - name + - path + type: object + type: array + coreLimit: + description: |- + CoreLimit specifies a hard limit on CPU cores for the pod. + Optional + type: string + coreRequest: + description: |- + CoreRequest is the physical CPU core request for the executors. + Maps to `spark.kubernetes.executor.request.cores` that is available since Spark 2.4. + type: string + cores: + description: Cores maps to `spark.driver.cores` or `spark.executor.cores` + for the driver and executors, respectively. + format: int32 + minimum: 1 + type: integer + deleteOnTermination: + description: |- + DeleteOnTermination specify whether executor pods should be deleted in case of failure or normal termination. + Maps to `spark.kubernetes.executor.deleteOnTermination` that is available since Spark 3.0. + type: boolean + dnsConfig: + description: DnsConfig dns settings for the pod, following the + Kubernetes specifications. + properties: + nameservers: + description: |- + A list of DNS name server IP addresses. + This will be appended to the base nameservers generated from DNSPolicy. + Duplicated nameservers will be removed. + items: + type: string + type: array + options: + description: |- + A list of DNS resolver options. + This will be merged with the base options generated from DNSPolicy. + Duplicated entries will be removed. Resolution options given in Options + will override those that appear in the base DNSPolicy. + items: + description: PodDNSConfigOption defines DNS resolver options + of a pod. + properties: + name: + description: Required. + type: string + value: + type: string + type: object + type: array + searches: + description: |- + A list of DNS search domains for host-name lookup. + This will be appended to the base search paths generated from DNSPolicy. + Duplicated search paths will be removed. + items: + type: string + type: array + type: object + env: + description: Env carries the environment variables to add to the + pod. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: EnvFrom is a list of sources to populate environment + variables in the container. + items: + description: EnvFromSource represents the source of a set of + ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each key + in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + envSecretKeyRefs: + additionalProperties: + description: NameKey represents the name and key of a SecretKeyRef. 
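+ # Sketch (secret name and key hypothetical): the deprecated envSecretKeyRefs
+ # field maps an environment variable name to a Secret name/key pair; `env`
+ # with valueFrom.secretKeyRef is the preferred replacement:
+ #   spec:
+ #     executor:
+ #       envSecretKeyRefs:
+ #         DB_PASSWORD:
+ #           name: db-credentials
+ #           key: password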
+ properties: + key: + type: string + name: + type: string + required: + - key + - name + type: object + description: |- + EnvSecretKeyRefs holds a mapping from environment variable names to SecretKeyRefs. + Deprecated. Consider using `env` instead. + type: object + envVars: + additionalProperties: + type: string + description: |- + EnvVars carries the environment variables to add to the pod. + Deprecated. Consider using `env` instead. + type: object + gpu: + description: GPU specifies GPU requirement for the pod. + properties: + name: + description: 'Name is GPU resource name, such as: nvidia.com/gpu + or amd.com/gpu' + type: string + quantity: + description: Quantity is the number of GPUs to request for + driver or executor. + format: int64 + type: integer + required: + - name + - quantity + type: object + hostAliases: + description: HostAliases settings for the pod, following the Kubernetes + specifications. + items: + description: |- + HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the + pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + ip: + description: IP address of the host file entry. + type: string + type: object + type: array + hostNetwork: + description: HostNetwork indicates whether to request host networking + for the pod or not. + type: boolean + image: + description: Image is the container image to use. Overrides Spec.Image + if set. + type: string + initContainers: + description: InitContainers is a list of init-containers that + run to completion before the main Spark container. + items: + description: A single application container that you want to + run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. 
+ type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. 
When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of a + set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. 
+ This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. 
+ This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. 
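This is the standard core/v1 probe schema, so an entry in this container list can declare liveness the same way a plain pod container would. A hypothetical fragment (name, image, and endpoint are placeholders):

```yaml
- name: config-fetcher               # hypothetical container in this list
  image: example.com/fetcher:1.0     # placeholder image
  livenessProbe:
    httpGet:
      path: /healthz
      port: 8080
    initialDelaySeconds: 5
    periodSeconds: 10
    failureThreshold: 3
```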
+ For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port in + a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port + to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. 
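Requests and limits in this block follow the usual Kubernetes quantity syntax. A sketch (the amounts are arbitrary):

```yaml
resources:
  requests:
    cpu: 100m
    memory: 128Mi
  limits:
    cpu: 500m
    memory: 256Mi
```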
+ Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in + PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. 
This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. 
+ type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. 
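Taken together, these container-level fields support the familiar hardening pattern. A sketch (the UID is illustrative; many Spark images run as 185, but check your image):

```yaml
securityContext:
  runAsNonRoot: true
  runAsUser: 185                   # illustrative; match your image's user
  allowPrivilegeEscalation: false
  readOnlyRootFilesystem: true
  capabilities:
    drop: ["ALL"]
  seccompProfile:
    type: RuntimeDefault
```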
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container processes that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a raw + block device within a container. 
+ properties: + devicePath: + description: devicePath is the path inside of the + container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory. + If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + instances: + description: Instances is the number of executor instances. + format: int32 + minimum: 1 + type: integer + javaOptions: + description: |- + JavaOptions is a string of extra JVM options to pass to the executors. For instance, + GC settings or other logging. + type: string + labels: + additionalProperties: + type: string + description: Labels are the Kubernetes labels to be added to the + pod. + type: object + lifecycle: + description: Lifecycle for running preStop or postStart commands + properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. 
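These executor-level fields are simple scalar knobs plus the `lifecycle` block this series adds to the executor spec. A combined sketch (all values are illustrative):

```yaml
spec:
  executor:
    instances: 2
    javaOptions: "-XX:+UseG1GC -verbose:gc"
    labels:
      team: data-platform          # illustrative label
    lifecycle:
      postStart:
        exec:
          command: ["/bin/sh", "-c", "echo started > /tmp/executor-ready"]
```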
+ properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the container + should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. 
You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that the container + should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + memory: + description: Memory is the amount of memory to request for the + pod. + type: string + memoryOverhead: + description: MemoryOverhead is the amount of off-heap memory to + allocate in cluster mode, in MiB unless otherwise specified. + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is the Kubernetes node selector to be added to the driver and executor pods. + This field is mutually exclusive with nodeSelector at SparkApplication level (which will be deprecated). + type: object + podSecurityContext: + description: PodSecurityContext specifies the PodSecurityContext + to apply. + properties: + fsGroup: + description: |- + A special supplemental group that applies to all containers in a pod. + Some volume types allow the Kubelet to change the ownership of that volume + to be owned by the pod: + + + 1. The owning GID will be the FSGroup + 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + 3. The permission bits are OR'd with rw-rw---- + + + If unset, the Kubelet will not modify the ownership and permissions of any volume. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. This field will only apply to + volume types which support fsGroup based ownership(and permissions). + It will have no effect on ephemeral volume types such as: secret, configmaps + and emptydir. 
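A sketch of these pod-level settings (the instance type and group ID are placeholders; `memoryOverhead` is interpreted in MiB unless a unit suffix is given):

```yaml
spec:
  executor:
    memory: "4g"
    memoryOverhead: "512m"
    nodeSelector:
      node.kubernetes.io/instance-type: m5.xlarge   # placeholder
    podSecurityContext:
      fsGroup: 185                                  # placeholder GID
```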
+ Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. + Note that this field cannot be set when spec.os.name is windows. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in SecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence + for that container. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to all containers. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in SecurityContext. If set in + both SecurityContext and PodSecurityContext, the value specified in SecurityContext + takes precedence for that container. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in addition + to the container's primary GID, the fsGroup (if specified), and group memberships + defined in the container image for the uid of the container process. 
If unspecified, + no additional groups are added to any container. Note that group memberships + defined in the container image for the uid of the container process are still effective, + even if they are not included in this list. + Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + sysctls: + description: |- + Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + sysctls (by the container runtime) might fail to launch. + Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options within a container's SecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + ports: + description: Ports settings for the pods, following the Kubernetes + specifications. + items: + description: Port represents the port definition in the pods + objects. + properties: + containerPort: + format: int32 + type: integer + name: + type: string + protocol: + type: string + required: + - containerPort + - name + - protocol + type: object + type: array + schedulerName: + description: SchedulerName specifies the scheduler that will be + used for scheduling + type: string + secrets: + description: Secrets carries information of secrets to add to + the pod. + items: + description: SecretInfo captures information of a secret. + properties: + name: + type: string + path: + type: string + secretType: + description: SecretType tells the type of a secret. + type: string + required: + - name + - path + - secretType + type: object + type: array + securityContext: + description: SecurityContext specifies the container's SecurityContext + to apply. + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. 
This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. 
+ type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccount: + description: ServiceAccount is the name of the custom Kubernetes + service account used by the pod. + type: string + shareProcessNamespace: + description: ShareProcessNamespace settings for the pod, following + the Kubernetes specifications. + type: boolean + sidecars: + description: Sidecars is a list of sidecar containers that run + along side the main Spark container. + items: + description: A single application container that you want to + run within a pod. + properties: + args: + description: |- + Arguments to the entrypoint. + The container image's CMD is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. 
Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + command: + description: |- + Entrypoint array. Not executed within a shell. + The container image's ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + of whether the variable exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + items: + type: string + type: array + env: + description: |- + List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
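The downward-API sources shown here work in a sidecar exactly as in a plain container. A hypothetical fragment (the container name is an assumption based on the operator's executor container naming):

```yaml
env:
  - name: POD_NAME
    valueFrom:
      fieldRef:
        fieldPath: metadata.name
  - name: MEM_LIMIT
    valueFrom:
      resourceFieldRef:
        containerName: spark-kubernetes-executor   # assumption: executor container name
        resource: limits.memory
```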
+ properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: |- + List of sources to populate environment variables in the container. + The keys defined within a source must be a C_IDENTIFIER. All invalid keys + will be reported as an event when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Cannot be updated. + items: + description: EnvFromSource represents the source of a + set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: |- + Container image name. + More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management to default or override + container images in workload controllers like Deployments and StatefulSets. + type: string + imagePullPolicy: + description: |- + Image pull policy. + One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + type: string + lifecycle: + description: |- + Actions that the management system should take in response to container lifecycle events. + Cannot be updated. 
+ properties: + postStart: + description: |- + PostStart is called immediately after a container is created. If the handler fails, + the container is terminated and restarted according to its restart policy. + Other management of the container blocks until the hook completes. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: |- + PreStop is called immediately before a container is terminated due to an + API request or management event such as liveness/startup probe failure, + preemption, resource contention, etc. The handler is not called if the + container crashes or exits. The Pod's termination grace period countdown begins before the + PreStop hook is executed. 
Regardless of the outcome of the handler, the + container will eventually terminate within the Pod's termination grace + period (unless delayed by finalizers). Other management of the container blocks until the hook completes + or until the termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request + to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom + header to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + sleep: + description: Sleep represents the duration that + the container should sleep before being terminated. + properties: + seconds: + description: Seconds is the number of seconds + to sleep. + format: int64 + type: integer + required: + - seconds + type: object + tcpSocket: + description: |- + Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept + for the backward compatibility. There are no validation of this field and + lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect + to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: |- + Periodic probe of container liveness. + Container will be restarted if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. 
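# --- Editor's illustrative sketch (not part of the generated schema) ---
# A minimal lifecycle block matching the postStart/preStop handlers described
# above; the command, path, and port are hypothetical.
# lifecycle:
#   postStart:
#     exec:
#       command: ["/bin/sh", "-c", "echo started >> /tmp/lifecycle.log"]
#   preStop:
#     httpGet:
#       path: /shutdown
#       port: 8080
#       scheme: HTTP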
+ properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + name: + description: |- + Name of the container specified as a DNS_LABEL. + Each container in a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: |- + List of ports to expose from the container. Not specifying a port here + DOES NOT prevent that port from being exposed. Any port which is + listening on the default "0.0.0.0" address inside a container will be + accessible from the network. + Modifying this array with strategic merge patch may corrupt the data. + For more information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port in + a single container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port + to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. + type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: |- + Periodic probe of container service readiness. + Container will be removed from service endpoints if the probe fails. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. 
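# --- Editor's illustrative sketch (not part of the generated schema) ---
# A named container port plus an httpGet liveness probe using the fields
# described above; the port number and path are hypothetical.
# ports:
# - name: metrics
#   containerPort: 8090
#   protocol: TCP
# livenessProbe:
#   httpGet:
#     path: /healthz
#     port: metrics               # refers to the named port above
#   initialDelaySeconds: 10
#   periodSeconds: 30
#   failureThreshold: 3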
The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
+ The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource + resize policy for the container. + properties: + resourceName: + description: |- + Name of the resource to which this resource resize policy applies. + Supported values: cpu, memory. + type: string + restartPolicy: + description: |- + Restart policy to apply when specified resource is resized. + If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: |- + Compute Resources required by this container. + Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in + PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + restartPolicy: + description: |- + RestartPolicy defines the restart behavior of individual containers in a pod. + This field may only be set for init containers, and the only allowed value is "Always". + For non-init containers or when this field is not specified, + the restart behavior is defined by the Pod's restart policy and the container type. + Setting the RestartPolicy as "Always" for the init container will have the following effect: + this init container will be continually restarted on + exit until all regular containers have terminated. Once all regular + containers have completed, all init containers with restartPolicy "Always" + will be shut down. This lifecycle differs from normal init containers and + is often referred to as a "sidecar" container. Although this init + container still starts in the init container sequence, it does not wait + for the container to complete before proceeding to the next init + container. Instead, the next init container starts immediately after this + init container is started, or after any startupProbe has successfully + completed. + type: string + securityContext: + description: |- + SecurityContext defines the security options the container should be run with. + If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + properties: + allowPrivilegeEscalation: + description: |- + AllowPrivilegeEscalation controls whether a process can gain more + privileges than its parent process. This bool directly controls if + the no_new_privs flag will be set on the container process. + AllowPrivilegeEscalation is true always when the container is: + 1) run as Privileged + 2) has CAP_SYS_ADMIN + Note that this field cannot be set when spec.os.name is windows. + type: boolean + capabilities: + description: |- + The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the container runtime. + Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: |- + Run container in privileged mode. + Processes in privileged containers are essentially equivalent to root on the host. + Defaults to false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: |- + procMount denotes the type of proc mount to use for the containers. + The default is DefaultProcMount which uses the container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType feature flag to be enabled. + Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: |- + Whether this container has a read-only root filesystem. + Default is false. + Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: |- + Indicates that the container must run as a non-root user. + If true, the Kubelet will validate the image at runtime to ensure that it + does not run as UID 0 (root) and fail to start the container if it does. + If unset or false, no such validation will be performed. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: |- + The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random SELinux context for each + container. May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by this container. If seccomp options are + provided at both the pod & container level, the container options + override the pod options. + Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: |- + localhostProfile indicates a profile defined in a file on the node should be used. + The profile must be preconfigured on the node to work. + Must be a descending path, relative to the kubelet's configured seccomp profile location. + Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: |- + type indicates which kind of seccomp profile will be applied. + Valid options are: + + + Localhost - a profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile should be used. + Unconfined - no profile should be applied. + type: string + required: + - type + type: object + windowsOptions: + description: |- + The Windows specific settings applied to all containers. + If unspecified, the options from the PodSecurityContext will be used. + If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + GMSA credential spec named by the GMSACredentialSpecName field. 
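# --- Editor's illustrative sketch (not part of the generated schema) ---
# A restrictive container securityContext combining the Linux-oriented fields
# described above; the UID is hypothetical.
# securityContext:
#   runAsNonRoot: true
#   runAsUser: 185
#   allowPrivilegeEscalation: false
#   readOnlyRootFilesystem: true
#   capabilities:
#     drop: ["ALL"]
#   seccompProfile:
#     type: RuntimeDefault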
+ type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name + of the GMSA credential spec to use. + type: string + hostProcess: + description: |- + HostProcess determines if a container should be run as a 'Host Process' container. + All of a Pod's containers must have the same effective HostProcess value + (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). + In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: |- + The UserName in Windows to run the entrypoint of the container process. + Defaults to the user specified in image metadata if unspecified. + May also be set in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: |- + StartupProbe indicates that the Pod has successfully initialized. + If specified, no other probes are executed until this completes successfully. + If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + when it might take a long time to load data or warm a cache, than during steady-state operation. + This cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: |- + Command is the command line to execute inside the container, the working directory for the + command is root ('/') in the container's filesystem. The command is simply exec'd, it is + not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + a shell, you need to explicitly call out to that shell. + Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. + Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC + port. + properties: + port: + description: Port number of the gRPC service. Number + must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: |- + Service is the name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + + + If this is not specified, the default behavior is defined by gRPC. + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: |- + Host name to connect to, defaults to the pod IP. You probably want to set + "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: |- + The header field name. + This will be canonicalized upon output, so case-variant names will be understood as the same header. 
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Name or number of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: |- + Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: |- + Number of seconds after the container has started before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + periodSeconds: + description: |- + How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: |- + Minimum consecutive successes for the probe to be considered successful after having failed. + Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving + a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, + defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: |- + Number or name of the port to access on the container. + Number must be in the range 1 to 65535. + Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: |- + Optional duration in seconds the pod needs to terminate gracefully upon probe failure. + The grace period is the duration in seconds after the processes running in the pod are sent + a termination signal and the time when the processes are forcibly halted with a kill signal. + Set this value longer than the expected cleanup time for your process. + If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this + value overrides the value provided by the pod spec. + Value must be non-negative integer. The value zero indicates stop immediately via + the kill signal (no opportunity to shut down). + This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: |- + Number of seconds after which the probe times out. + Defaults to 1 second. Minimum value is 1. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + format: int32 + type: integer + type: object + stdin: + description: |- + Whether this container should allocate a buffer for stdin in the container runtime. If this + is not set, reads from stdin in the container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: |- + Whether the container runtime should close the stdin channel after it has been opened by + a single attach. When stdin is true the stdin stream will remain open across multiple attach + sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the + first client attaches to stdin, and then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container is restarted. If this + flag is false, a container process that reads from stdin will never receive an EOF. + Default is false + type: boolean + terminationMessagePath: + description: |- + Optional: Path at which the file to which the container's termination message + will be written is mounted into the container's filesystem. + Message written is intended to be brief final status, such as an assertion failure message. + Will be truncated by the node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated. + type: string + terminationMessagePolicy: + description: |- + Indicate how the termination message should be populated. File will use the contents of + terminationMessagePath to populate the container status message on both success and failure. + FallbackToLogsOnError will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + Defaults to File. + Cannot be updated. + type: string + tty: + description: |- + Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices + to be used by the container. + items: + description: volumeDevice describes a mapping of a raw + block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the + container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: |- + Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: |- + Container's working directory.
+ If not specified, the container runtime's default will be used, which + might be configured in the container image. + Cannot be updated. + type: string + required: + - name + type: object + type: array + terminationGracePeriodSeconds: + description: Termination grace period seconds for the pod + format: int64 + type: integer + tolerations: + description: Tolerations specifies the tolerations listed in ".spec.tolerations" + to be applied to the pod. + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple <key,value,effect> using the matching operator <operator>. + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + volumeMounts: + description: VolumeMounts specifies the volumes listed in ".spec.volumes" + to mount into the main container's filesystem. + items: + description: VolumeMount describes a mounting of a Volume within + a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + type: object + failureRetries: + description: |- + FailureRetries is the number of times to retry a failed application before giving up.
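# --- Editor's illustrative sketch (not part of the generated schema) ---
# Tolerations and volume mounts as they might appear under a driver or
# executor spec; the taint key and volume name are hypothetical.
# tolerations:
# - key: dedicated
#   operator: Equal
#   value: spark
#   effect: NoSchedule
# volumeMounts:
# - name: spark-data          # must match an entry in .spec.volumes
#   mountPath: /mnt/data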
+ This is best effort and actual retry attempts can be >= the value specified. + format: int32 + type: integer + hadoopConf: + additionalProperties: + type: string + description: |- + HadoopConf carries user-specified Hadoop configuration properties as they would use the "--conf" option + in spark-submit. The SparkApplication controller automatically adds prefix "spark.hadoop." to Hadoop + configuration properties. + type: object + hadoopConfigMap: + description: |- + HadoopConfigMap carries the name of the ConfigMap containing Hadoop configuration files such as core-site.xml. + The controller will add environment variable HADOOP_CONF_DIR to the path where the ConfigMap is mounted to. + type: string + image: + description: |- + Image is the container image for the driver, executor, and init-container. Any custom container images for the + driver, executor, or init-container takes precedence over this. + type: string + imagePullPolicy: + description: ImagePullPolicy is the image pull policy for the driver, + executor, and init-container. + type: string + imagePullSecrets: + description: ImagePullSecrets is the list of image-pull secrets. + items: + type: string + type: array + mainApplicationFile: + description: MainFile is the path to a bundled JAR, Python, or R file + of the application. + type: string + mainClass: + description: |- + MainClass is the fully-qualified main class of the Spark application. + This only applies to Java/Scala Spark applications. + type: string + memoryOverheadFactor: + description: |- + This sets the Memory Overhead Factor that will allocate memory to non-JVM memory. + For JVM-based jobs this value will default to 0.10, for non-JVM jobs 0.40. Value of this field will + be overridden by `Spec.Driver.MemoryOverhead` and `Spec.Executor.MemoryOverhead` if they are set. + type: string + mode: + description: Mode is the deployment mode of the Spark application. + enum: + - cluster + - client + type: string + monitoring: + description: Monitoring configures how monitoring is handled. + properties: + exposeDriverMetrics: + description: ExposeDriverMetrics specifies whether to expose metrics + on the driver. + type: boolean + exposeExecutorMetrics: + description: ExposeExecutorMetrics specifies whether to expose + metrics on the executors. + type: boolean + metricsProperties: + description: |- + MetricsProperties is the content of a custom metrics.properties for configuring the Spark metric system. + If not specified, the content in spark-docker/conf/metrics.properties will be used. + type: string + metricsPropertiesFile: + description: |- + MetricsPropertiesFile is the container local path of file metrics.properties for configuring + the Spark metric system. If not specified, value /etc/metrics/conf/metrics.properties will be used. + type: string + prometheus: + description: Prometheus is for configuring the Prometheus JMX + exporter. + properties: + configFile: + description: |- + ConfigFile is the path to the custom Prometheus configuration file provided in the Spark image. + ConfigFile takes precedence over Configuration, which is shown below. + type: string + configuration: + description: |- + Configuration is the content of the Prometheus configuration needed by the Prometheus JMX exporter. + If not specified, the content in spark-docker/conf/prometheus.yaml will be used. + Configuration has no effect if ConfigFile is set. + type: string + jmxExporterJar: + description: JmxExporterJar is the path to the Prometheus + JMX exporter jar in the container.
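# --- Editor's illustrative sketch (not part of the generated schema) ---
# A monitoring block wired to the Prometheus JMX exporter fields described
# above (the port/portName fields follow below); the jar path is hypothetical.
# monitoring:
#   exposeDriverMetrics: true
#   exposeExecutorMetrics: true
#   prometheus:
#     jmxExporterJar: /prometheus/jmx_prometheus_javaagent.jar
#     port: 8090              # the schema's documented default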
+ type: string + port: + description: |- + Port is the port of the HTTP server run by the Prometheus JMX exporter. + If not specified, 8090 will be used as the default. + format: int32 + maximum: 49151 + minimum: 1024 + type: integer + portName: + description: |- + PortName is the port name of prometheus JMX exporter port. + If not specified, jmx-exporter will be used as the default. + type: string + required: + - jmxExporterJar + type: object + required: + - exposeDriverMetrics + - exposeExecutorMetrics + type: object + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector is the Kubernetes node selector to be added to the driver and executor pods. + This field is mutually exclusive with nodeSelector at podSpec level (driver or executor). + This field will be deprecated in future versions (at SparkApplicationSpec level). + type: object + proxyUser: + description: |- + ProxyUser specifies the user to impersonate when submitting the application. + It maps to the command-line flag "--proxy-user" in spark-submit. + type: string + pythonVersion: + description: |- + This sets the major Python version of the docker + image used to run the driver and executor containers. Can either be 2 or 3, default 2. + enum: + - "2" + - "3" + type: string + restartPolicy: + description: RestartPolicy defines the policy on if and in which conditions + the controller should restart an application. + properties: + onFailureRetries: + description: OnFailureRetries the number of times to retry running + an application before giving up. + format: int32 + minimum: 0 + type: integer + onFailureRetryInterval: + description: OnFailureRetryInterval is the interval in seconds + between retries on failed runs. + format: int64 + minimum: 1 + type: integer + onSubmissionFailureRetries: + description: |- + OnSubmissionFailureRetries is the number of times to retry submitting an application before giving up. + This is best effort and actual retry attempts can be >= the value specified due to caching. + These are required if RestartPolicy is OnFailure. + format: int32 + minimum: 0 + type: integer + onSubmissionFailureRetryInterval: + description: OnSubmissionFailureRetryInterval is the interval + in seconds between retries on failed submissions. + format: int64 + minimum: 1 + type: integer + type: + description: Type specifies the RestartPolicyType. + enum: + - Never + - Always + - OnFailure + type: string + type: object + retryInterval: + description: RetryInterval is the unit of intervals in seconds between + submission retries. + format: int64 + type: integer + sparkConf: + additionalProperties: + type: string + description: |- + SparkConf carries user-specified Spark configuration properties as they would use the "--conf" option in + spark-submit. + type: object + sparkConfigMap: + description: |- + SparkConfigMap carries the name of the ConfigMap containing Spark configuration files such as log4j.properties. + The controller will add environment variable SPARK_CONF_DIR to the path where the ConfigMap is mounted to. + type: string + sparkUIOptions: + description: SparkUIOptions allows configuring the Service and the + Ingress to expose the sparkUI + properties: + ingressAnnotations: + additionalProperties: + type: string + description: IngressAnnotations is a map of key,value pairs of + annotations that might be added to the ingress object. i.e. 
+ specify nginx as ingress.class + type: object + ingressTLS: + description: TlsHosts is useful if we need to declare SSL certificates + to the ingress object + items: + description: IngressTLS describes the transport layer security + associated with an ingress. + properties: + hosts: + description: |- + hosts is a list of hosts included in the TLS certificate. The values in + this list must match the name/s used in the tlsSecret. Defaults to the + wildcard host setting for the loadbalancer controller fulfilling this + Ingress, if left unspecified. + items: + type: string + type: array + x-kubernetes-list-type: atomic + secretName: + description: |- + secretName is the name of the secret used to terminate TLS traffic on + port 443. Field is left optional to allow TLS routing based on SNI + hostname alone. If the SNI host in a listener conflicts with the "Host" + header field used by an IngressRule, the SNI host is used for termination + and value of the "Host" header is used for routing. + type: string + type: object + type: array + serviceAnnotations: + additionalProperties: + type: string + description: ServiceAnnotations is a map of key,value pairs of + annotations that might be added to the service object. + type: object + serviceLabels: + additionalProperties: + type: string + description: ServiceLabels is a map of key,value pairs of labels + that might be added to the service object. + type: object + servicePort: + description: |- + ServicePort allows configuring the port at service level that might be different from the targetPort. + TargetPort should be the same as the one defined in spark.ui.port + format: int32 + type: integer + servicePortName: + description: |- + ServicePortName allows configuring the name of the service port. + This may be useful for sidecar proxies like Envoy injected by Istio which require specific port names to treat traffic as proper HTTP. + Defaults to spark-driver-ui-port. + type: string + serviceType: + description: ServiceType allows configuring the type of the service. + Defaults to ClusterIP. + type: string + type: object + sparkVersion: + description: SparkVersion is the version of Spark the application + uses. + type: string + timeToLiveSeconds: + description: |- + TimeToLiveSeconds defines the Time-To-Live (TTL) duration in seconds for this SparkApplication + after its termination. + The SparkApplication object will be garbage collected if the current time is more than the + TimeToLiveSeconds since its termination. + format: int64 + type: integer + type: + description: Type tells the type of the Spark application. + enum: + - Java + - Python + - Scala + - R + type: string + volumes: + description: Volumes is the list of Kubernetes volumes that can be + mounted by the driver and/or executors. + items: + description: Volume represents a named volume in a pod that may + be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
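# --- Editor's illustrative sketch (not part of the generated schema) ---
# Exposing the Spark UI through the sparkUIOptions fields described above;
# the host, secret, and annotation values are hypothetical.
# sparkUIOptions:
#   servicePort: 4040         # should match spark.ui.port, per the schema note
#   serviceType: ClusterIP
#   ingressAnnotations:
#     kubernetes.io/ingress.class: nginx
#   ingressTLS:
#   - hosts: ["spark.example.com"]
#     secretName: spark-ui-tls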
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk mount on + the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: None, + Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk in the + blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in the blob + storage + type: string + fsType: + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single blob + disk per storage account Managed: azure managed data + disk (only in managed availability set). defaults to shared' + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service mount + on the host and bind mount to the pod. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that contains + Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the host that + shares a pod's lifetime + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + items: + type: string + type: array + path: + description: 'path is Optional: Used as the mounted root, + rather than the full Ceph tree, default is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. 
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: boolean + secretFile: + description: |- + secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + secretRef: + description: |- + secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should populate + this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. 
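# --- Editor's illustrative sketch (not part of the generated schema) ---
# A configMap volume as described above; the names are hypothetical, and
# defaultMode is given in decimal (420 == 0644), as the schema note explains.
# volumes:
# - name: spark-config
#   configMap:
#     name: my-spark-configmap
#     defaultMode: 420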
+ items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: optional specify whether the ConfigMap or its + keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents ephemeral + storage that is handled by certain external CSI drivers (Beta + feature). + properties: + driver: + description: |- + driver is the name of the CSI driver that handles this volume. + Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: |- + fsType to mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value is passed to the associated CSI driver + which will determine the default filesystem to apply. + type: string + nodePublishSecretRef: + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to complete the CSI + NodePublishVolume and NodeUnpublishVolume calls. + This field is optional, and may be empty if no secret is required. If the + secret object contains more than one secret, all secret references are passed. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: |- + readOnly specifies a read-only configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: |- + volumeAttributes stores driver-specific properties that are passed to the CSI + driver. Consult your driver's documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about the pod + that should populate this volume + properties: + defaultMode: + description: |- + Optional: mode bits to use on created files by default. Must be a + Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. 
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: + only annotations, labels, name and namespace are + supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative path + name of the file to be created. Must not be absolute + or contain the ''..'' path. Must be utf-8 encoded. + The first item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + properties: + medium: + description: |- + medium represents what type of storage medium should back this directory. + The default is "" which means to use the node's default medium. + Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: |- + sizeLimit is the total amount of local storage required for this EmptyDir volume. + The size limit is also applicable for memory medium. + The maximum usage on memory medium EmptyDir would be the minimum value between + the SizeLimit specified here and the sum of memory limits of all containers in a pod. + The default is nil which means that the limit is undefined. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: |- + ephemeral represents a volume that is handled by a cluster storage driver. + The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, + and deleted when the pod is removed. + + + Use this if: + a) the volume is only needed while the pod runs, + b) features of normal volumes like restoring from snapshot or capacity + tracking are needed, + c) the storage driver is specified through a storage class, and + d) the storage driver supports dynamic volume provisioning through + a PersistentVolumeClaim (see EphemeralVolumeSource for more + information on the connection between this volume type + and PersistentVolumeClaim). + + + Use PersistentVolumeClaim or one of the vendor-specific + APIs for volumes that persist for longer than the lifecycle + of an individual pod. + + + Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to + be used that way - see the documentation of the driver for + more information. + + + A pod can use both types of ephemeral volumes and + persistent volumes at the same time. + properties: + volumeClaimTemplate: + description: |- + Will be used to create a stand-alone PVC to provision the volume. + The pod in which this EphemeralVolumeSource is embedded will be the + owner of the PVC, i.e. the PVC will be deleted together with the + pod. The name of the PVC will be `-` where + `` is the name from the `PodSpec.Volumes` array + entry. Pod validation will reject the pod if the concatenated name + is not valid for a PVC (for example, too long). + + + An existing PVC with that name that is not owned by the pod + will *not* be used for the pod to avoid using an unrelated + volume by mistake. Starting the pod is then blocked until + the unrelated PVC is removed. If such a pre-created PVC is + meant to be used by the pod, the PVC has to updated with an + owner reference to the pod once the pod exists. Normally + this should not be necessary, but it may be useful when + manually reconstructing a broken cluster. + + + This field is read-only and no changes will be made by Kubernetes + to the PVC after it has been created. + + + Required, must not be nil. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. 
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. + * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being + referenced + type: string + name: + description: Name is the name of resource being + referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. 
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over volumes + to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass + (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource that is + attached to a kubelet's host machine and then exposed to the + pod. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target worldwide + names (WWNs)' + items: + type: string + type: array + wwids: + description: |- + wwids Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. + items: + type: string + type: array + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to use for + this volume. + type: string + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds extra + command options if any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugin scripts. This may be + empty if no secret object is specified. If the secret object + contains more than one secret, all secrets are passed to the plugin + scripts. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached to + a kubelet's host machine. This depends on the Flocker control + service being running + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. This + is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the specified + revision. + type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + More info: https://examples.k8s.io/volumes/glusterfs/README.md + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + path: + description: |- + path is the Glusterfs volume path. 
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. This is generally + used for system agents or other privileged things that are allowed + to see the host machine. Most containers will NOT need this. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- + TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not + mount host directories as read/write. + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + type: + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + required: + - path + type: object + iscsi: + description: |- + iscsi represents an ISCSI Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://examples.k8s.io/volumes/iscsi/README.md + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support iSCSI + Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support iSCSI + Session CHAP authentication + type: boolean + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + initiatorName: + description: |- + initiatorName is the custom iSCSI Initiator Name. + If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: |- + portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI target + and initiator authentication + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: |- + targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon Controller + persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume attached + and mounted on kubelets host machine + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources secrets, + configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected along with + other supported volume types + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a list of + label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume root + to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. 
The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap + data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the downwardAPI + data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' + path. Must be utf-8 encoded. 
The first + item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + secret: + description: secret information about the secret data + to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: optional field specify whether the + Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information about + the serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. 
The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + description: quobyte represents a Quobyte mount on the host + that shares a pod's lifetime + properties: + group: + description: |- + group to map volume access to + Default is no group + type: string + readOnly: + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple entries are separated with commas) + which acts as the central registry for volumes + type: string + tenant: + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: |- + user to map volume access to + Defaults to serivceaccount user + type: string + volume: + description: volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + More info: https://examples.k8s.io/volumes/rbd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from compromising the machine + type: string + image: + description: |- + image is the rados image name. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + pool: + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the ScaleIO + API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the ScaleIO + Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL communication + with Gateway, default false + type: boolean + storageMode: + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage Pool associated + with the protection domain. + type: string + system: + description: system is the name of the storage system as + configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. 
+ properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the Secret or + its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. + properties: + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy Based + Management (SPBM) profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy Based + Management (SPBM) profile name. 
+ type: string + volumePath: + description: volumePath is the path that identifies vSphere + volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + required: + - driver + - executor + - sparkVersion + - type + type: object + status: + description: SparkApplicationStatus describes the current status of a + Spark application. + properties: + applicationState: + description: AppState tells the overall application state. + properties: + errorMessage: + type: string + state: + description: ApplicationStateType represents the type of the current + state of an application. + type: string + required: + - state + type: object + driverInfo: + description: DriverInfo has information about the driver. + properties: + podName: + type: string + webUIAddress: + type: string + webUIIngressAddress: + type: string + webUIIngressName: + description: Ingress Details if an ingress for the UI was created. + type: string + webUIPort: + description: UI Details for the UI created via ClusterIP service + accessible from within the cluster. + format: int32 + type: integer + webUIServiceName: + type: string + type: object + executionAttempts: + description: |- + ExecutionAttempts is the total number of attempts to run a submitted application to completion. + Incremented upon each attempted run of the application and reset upon invalidation. + format: int32 + type: integer + executorState: + additionalProperties: + description: ExecutorState tells the current state of an executor. + type: string + description: ExecutorState records the state of executors by executor + Pod names. + type: object + lastSubmissionAttemptTime: + description: LastSubmissionAttemptTime is the time for the last application + submission attempt. + format: date-time + nullable: true + type: string + sparkApplicationId: + description: SparkApplicationID is set by the spark-distribution(via + spark.app.id config) on the driver and executor pods + type: string + submissionAttempts: + description: |- + SubmissionAttempts is the total number of attempts to submit an application to run. + Incremented upon each attempted submission of the application and reset upon invalidation and rerun. + format: int32 + type: integer + submissionID: + description: SubmissionID is a unique ID of the current submission + of the application. + type: string + terminationTime: + description: CompletionTime is the time when the application runs + to completion if it does. + format: date-time + nullable: true + type: string + required: + - driverInfo + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml new file mode 100644 index 0000000000..44fe0ace56 --- /dev/null +++ b/config/crd/kustomization.yaml @@ -0,0 +1,24 @@ +# This kustomization.yaml is not intended to be run by itself, +# since it depends on service name and namespace that are out of this kustomize package. +# It should be run by config/default +resources: +- bases/sparkoperator.k8s.io_sparkapplications.yaml +- bases/sparkoperator.k8s.io_scheduledsparkapplications.yaml +# +kubebuilder:scaffold:crdkustomizeresource + +patches: +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. 
+# patches here are for enabling the conversion webhook for each CRD
+# +kubebuilder:scaffold:crdkustomizewebhookpatch
+
+# [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix.
+# patches here are for enabling the CA injection for each CRD
+#- path: patches/cainjection_in_sparkapplications.yaml
+#- path: patches/cainjection_in_scheduledsparkapplications.yaml
+# +kubebuilder:scaffold:crdkustomizecainjectionpatch
+
+# [WEBHOOK] To enable webhook, uncomment the following section
+# the following config is for teaching kustomize how to do kustomization for CRDs.
+
+#configurations:
+#- kustomizeconfig.yaml
diff --git a/config/crd/kustomizeconfig.yaml b/config/crd/kustomizeconfig.yaml
new file mode 100644
index 0000000000..ec5c150a9d
--- /dev/null
+++ b/config/crd/kustomizeconfig.yaml
@@ -0,0 +1,19 @@
+# This file is for teaching kustomize how to substitute name and namespace reference in CRD
+nameReference:
+- kind: Service
+  version: v1
+  fieldSpecs:
+  - kind: CustomResourceDefinition
+    version: v1
+    group: apiextensions.k8s.io
+    path: spec/conversion/webhook/clientConfig/service/name
+
+namespace:
+- kind: CustomResourceDefinition
+  version: v1
+  group: apiextensions.k8s.io
+  path: spec/conversion/webhook/clientConfig/service/namespace
+  create: false
+
+varReference:
+- path: metadata/annotations
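For orientation, the two `path` entries in kustomizeconfig.yaml point into the conversion-webhook client config of a CRD. Below is a minimal sketch of the stanza that kustomize would rewrite; the service name and namespace are the usual kubebuilder placeholders, assumed here rather than taken from this chart:

```yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: sparkapplications.sparkoperator.k8s.io
spec:
  conversion:
    strategy: Webhook
    webhook:
      conversionReviewVersions: ["v1"]
      clientConfig:
        service:
          name: webhook-service   # rewritten via the nameReference fieldSpec above
          namespace: system       # rewritten via the namespace entry (create: false)
          path: /convert
```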
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#objectmeta-v1-meta">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#objectmeta-v1-meta">
Kubernetes meta/v1.ObjectMeta
@@ -197,7 +197,7 @@
string
metadata
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#objectmeta-v1-meta">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#objectmeta-v1-meta">
Kubernetes meta/v1.ObjectMeta
@@ -401,7 +401,7 @@
The controller will add environment variable HADOOP_CONF_DIR to the path where t
volumes
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#volume-v1-core">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#volume-v1-core">
[]Kubernetes core/v1.Volume
@@ -768,7 +768,7 @@
string
resources
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#resourcelist-v1-core">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#resourcelist-v1-core">
Kubernetes core/v1.ResourceList
@@ -1048,7 +1048,7 @@
This may be useful for sidecar proxies like Envoy injected by Istio which requir
serviceType
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#servicetype-v1-core">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#servicetype-v1-core">
Kubernetes core/v1.ServiceType
@@ -1109,7 +1109,7 @@
map[string]string
ingressTLS
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#ingresstls-v1-networking">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#ingresstls-v1-networking">
[]Kubernetes networking/v1.IngressTLS
@@ -1197,7 +1197,7 @@
GC settings or other logging.

lifecycle
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#lifecycle-v1-core">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#lifecycle-v1-core">
Kubernetes core/v1.Lifecycle
@@ -1438,7 +1438,7 @@
GC settings or other logging.

lifecycle
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#lifecycle-v1-core">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#lifecycle-v1-core">
Kubernetes core/v1.Lifecycle
@@ -2066,7 +2066,7 @@
Defaults to 1.

lastRun
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#time-v1-meta">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#time-v1-meta">
Kubernetes meta/v1.Time
@@ -2079,7 +2079,7 @@
Kubernetes meta/v1.Time
nextRun
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#time-v1-meta">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#time-v1-meta">
Kubernetes meta/v1.Time
@@ -2423,7 +2423,7 @@
The controller will add environment variable HADOOP_CONF_DIR to the path where t
volumes
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#volume-v1-core">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#volume-v1-core">
[]Kubernetes core/v1.Volume
@@ -2694,7 +2694,7 @@
string
lastSubmissionAttemptTime
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#time-v1-meta">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#time-v1-meta">
Kubernetes meta/v1.Time
@@ -2707,7 +2707,7 @@
Kubernetes meta/v1.Time
terminationTime
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#time-v1-meta">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#time-v1-meta">
Kubernetes meta/v1.Time
@@ -2929,7 +2929,7 @@
string
env
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#envvar-v1-core">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#envvar-v1-core">
[]Kubernetes core/v1.EnvVar
@@ -2956,7 +2956,7 @@
Deprecated. Consider using env instead.

envFrom
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#envfromsource-v1-core">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#envfromsource-v1-core">
[]Kubernetes core/v1.EnvFromSource
@@ -3009,7 +3009,7 @@
map[string]string
volumeMounts
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#volumemount-v1-core">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#volumemount-v1-core">
[]Kubernetes core/v1.VolumeMount
@@ -3023,7 +3023,7 @@
map[string]string
affinity
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#affinity-v1-core">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#affinity-v1-core">
Kubernetes core/v1.Affinity
@@ -3037,7 +3037,7 @@
Kubernetes core/v1.Affinity
tolerations
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#toleration-v1-core">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#toleration-v1-core">
[]Kubernetes core/v1.Toleration
@@ -3051,7 +3051,7 @@
Kubernetes core/v1.Affinity
podSecurityContext
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#podsecuritycontext-v1-core">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#podsecuritycontext-v1-core">
Kubernetes core/v1.PodSecurityContext
@@ -3065,7 +3065,7 @@
Kubernetes core/v1.PodSecurityContext
securityContext
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#securitycontext-v1-core">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#securitycontext-v1-core">
Kubernetes core/v1.SecurityContext
@@ -3091,7 +3091,7 @@
string
sidecars
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#container-v1-core">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#container-v1-core">
[]Kubernetes core/v1.Container
@@ -3105,7 +3105,7 @@
string
initContainers
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#container-v1-core">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#container-v1-core">
[]Kubernetes core/v1.Container
@@ -3144,7 +3144,7 @@
This field is mutually exclusive with nodeSelector at SparkApplication level (wh
dnsConfig
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#poddnsconfig-v1-core">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#poddnsconfig-v1-core">
Kubernetes core/v1.PodDNSConfig
@@ -3182,7 +3182,7 @@
string
hostAliases
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#hostalias-v1-core">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#hostalias-v1-core">
[]Kubernetes core/v1.HostAlias
@@ -3253,7 +3253,7 @@
Defaults to spark-driver-ui-port.

serviceType
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#servicetype-v1-core">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#servicetype-v1-core">
Kubernetes core/v1.ServiceType
@@ -3303,7 +3303,7 @@
map[string]string
ingressTLS
-<a href="https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#ingresstls-v1-networking">
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#ingresstls-v1-networking">
[]Kubernetes networking/v1.IngressTLS
@@ -3317,5 +3317,5 @@
map[string]string

-Generated with https://github.com/ahmetb/gen-crd-api-reference-docs.git on git commit ccf856504caaeac38151b57a950d3f8a7942b9db.
+Generated with gen-crd-api-reference-docs.
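The kustomizeconfig.yaml introduced earlier in this patch is only exercised once a CRD carries a conversion-webhook stanza for kustomize to rewrite. A minimal sketch of the stanza its nameReference and namespace fieldSpecs target (not part of this patch; `webhook-service` and `system` are assumed placeholder names from the standard kubebuilder scaffold):

```yaml
# Sketch only: a CRD conversion stanza as kubebuilder scaffolds it.
# kustomize rewrites clientConfig.service.name via the nameReference entry
# and clientConfig.service.namespace via the namespace entry; create: false
# means an existing namespace is rewritten but never injected.
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: sparkapplications.sparkoperator.k8s.io
spec:
  conversion:
    strategy: Webhook
    webhook:
      conversionReviewVersions: ["v1"]
      clientConfig:
        service:
          name: webhook-service   # placeholder; substituted by kustomize
          namespace: system       # placeholder; substituted by kustomize
          path: /convert
```

As the header comment in config/crd/kustomization.yaml notes, this package is not applied on its own: a parent kustomization such as config/default is expected to include `../crd` in its resources and supply the real Service name and namespace.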

diff --git a/hack/api-docs/Dockerfile b/hack/api-docs/Dockerfile deleted file mode 100644 index 15de4a02fd..0000000000 --- a/hack/api-docs/Dockerfile +++ /dev/null @@ -1,18 +0,0 @@ -# -# Copyright 2021 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -FROM golang:1.22.2-alpine -RUN go install github.com/ahmetb/gen-crd-api-reference-docs@latest diff --git a/hack/api-docs/api-docs-config.json b/hack/api-docs/api-docs-config.json deleted file mode 100644 index e8dafceab8..0000000000 --- a/hack/api-docs/api-docs-config.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "hideMemberFields": [ - "TypeMeta" - ], - "hideTypePatterns": [ - "ParseError$", - "List$" - ], - "externalPackages": [ - { - "typeMatchPrefix": "^k8s\\.io/apimachinery/pkg/apis/meta/v1\\.Duration$", - "docsURLTemplate": "https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Duration" - }, - { - "typeMatchPrefix": "^k8s\\.io/(api|apimachinery/pkg/apis)/", - "docsURLTemplate": "https://v1-18.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#{{lower .TypeIdentifier}}-{{arrIndex .PackageSegments -1}}-{{arrIndex .PackageSegments -2}}" - }, - { - "typeMatchPrefix": "^github\\.com/knative/pkg/apis/duck/", - "docsURLTemplate": "https://pkg.go.dev/github.com/knative/pkg/apis/duck/{{arrIndex .PackageSegments -1}}#{{.TypeIdentifier}}" - } - ], - "typeDisplayNamePrefixOverrides": { - "k8s.io/api/": "Kubernetes ", - "k8s.io/apimachinery/pkg/apis/": "Kubernetes " - }, - "markdownDisabled": false -} \ No newline at end of file diff --git a/hack/api-docs/config.json b/hack/api-docs/config.json new file mode 100644 index 0000000000..bdeefef440 --- /dev/null +++ b/hack/api-docs/config.json @@ -0,0 +1,28 @@ +{ + "hideMemberFields": [ + "TypeMeta" + ], + "hideTypePatterns": [ + "ParseError$", + "List$" + ], + "externalPackages": [ + { + "typeMatchPrefix": "^k8s\\.io/apimachinery/pkg/apis/meta/v1\\.Duration$", + "docsURLTemplate": "https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Duration" + }, + { + "typeMatchPrefix": "^k8s\\.io/(api|apimachinery/pkg/apis)/", + "docsURLTemplate": "https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#{{lower .TypeIdentifier}}-{{arrIndex .PackageSegments -1}}-{{arrIndex .PackageSegments -2}}" + }, + { + "typeMatchPrefix": "^github\\.com/knative/pkg/apis/duck/", + "docsURLTemplate": "https://pkg.go.dev/github.com/knative/pkg/apis/duck/{{arrIndex .PackageSegments -1}}#{{.TypeIdentifier}}" + } + ], + "typeDisplayNamePrefixOverrides": { + "k8s.io/api/": "Kubernetes ", + "k8s.io/apimachinery/pkg/apis/": "Kubernetes " + }, + "markdownDisabled": false +} \ No newline at end of file diff --git a/hack/api-docs/api-docs-template/members.tpl b/hack/api-docs/template/members.tpl similarity index 100% rename from hack/api-docs/api-docs-template/members.tpl rename to hack/api-docs/template/members.tpl diff --git a/hack/api-docs/api-docs-template/pkg.tpl b/hack/api-docs/template/pkg.tpl similarity index 85% rename from hack/api-docs/api-docs-template/pkg.tpl rename to 
hack/api-docs/template/pkg.tpl
index b038856024..45afec04c0 100644
--- a/hack/api-docs/api-docs-template/pkg.tpl
+++ b/hack/api-docs/template/pkg.tpl
@@ -42,7 +42,7 @@
{{ end }}

- Generated with https://github.com/ahmetb/gen-crd-api-reference-docs.git on git commit ccf856504caaeac38151b57a950d3f8a7942b9db.
+ Generated with gen-crd-api-reference-docs.

{{ end }}
\ No newline at end of file
diff --git a/hack/api-docs/api-docs-template/placeholder.go b/hack/api-docs/template/placeholder.go
similarity index 100%
rename from hack/api-docs/api-docs-template/placeholder.go
rename to hack/api-docs/template/placeholder.go
diff --git a/hack/api-docs/api-docs-template/type.tpl b/hack/api-docs/template/type.tpl
similarity index 100%
rename from hack/api-docs/api-docs-template/type.tpl
rename to hack/api-docs/template/type.tpl
diff --git a/hack/custom-boilerplate.go.txt b/hack/boilerplate.go.txt
similarity index 86%
rename from hack/custom-boilerplate.go.txt
rename to hack/boilerplate.go.txt
index 3f8b10a226..c4bd8c3931 100644
--- a/hack/custom-boilerplate.go.txt
+++ b/hack/boilerplate.go.txt
@@ -1,7 +1,5 @@
-// Code generated by k8s code-generator DO NOT EDIT.
-
 /*
-Copyright 2018 Google LLC
+Copyright 2024 The Kubeflow authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -14,4 +12,4 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
-*/
+*/
\ No newline at end of file
diff --git a/manifest/crds/kustomization.yaml b/manifest/crds/kustomization.yaml
deleted file mode 100644
index ed4efc5a82..0000000000
--- a/manifest/crds/kustomization.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-# Copyright 2018 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# - -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -resources: - - sparkoperator.k8s.io_sparkapplications.yaml - - sparkoperator.k8s.io_scheduledsparkapplications.yaml diff --git a/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml b/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml deleted file mode 100644 index 60e836b083..0000000000 --- a/manifest/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml +++ /dev/null @@ -1,4496 +0,0 @@ - ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: (unknown) - api-approved.kubernetes.io: https://github.com/kubeflow/spark-operator/pull/1298 - name: scheduledsparkapplications.sparkoperator.k8s.io -spec: - group: sparkoperator.k8s.io - names: - kind: ScheduledSparkApplication - listKind: ScheduledSparkApplicationList - plural: scheduledsparkapplications - shortNames: - - scheduledsparkapp - singular: scheduledsparkapplication - scope: Namespaced - versions: - - name: v1beta2 - served: true - storage: true - subresources: - status: {} - additionalPrinterColumns: - - jsonPath: .spec.schedule - name: Schedule - type: string - - jsonPath: .spec.suspend - name: Suspend - type: boolean - - jsonPath: .status.lastRun - name: Last Run - type: date - - jsonPath: .status.lastRunName - name: Last Run Name - type: string - - jsonPath: .metadata.creationTimestamp - name: Age - type: date - schema: - openAPIV3Schema: - properties: - apiVersion: - type: string - kind: - type: string - metadata: - type: object - spec: - properties: - concurrencyPolicy: - type: string - failedRunHistoryLimit: - format: int32 - type: integer - schedule: - type: string - successfulRunHistoryLimit: - format: int32 - type: integer - suspend: - type: boolean - template: - properties: - arguments: - items: - type: string - type: array - batchScheduler: - type: string - batchSchedulerOptions: - properties: - priorityClassName: - type: string - queue: - type: string - resources: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - type: object - deps: - properties: - excludePackages: - items: - type: string - type: array - files: - items: - type: string - type: array - jars: - items: - type: string - type: array - packages: - items: - type: string - type: array - pyFiles: - items: - type: string - type: array - repositories: - items: - type: string - type: array - type: object - driver: - properties: - affinity: - properties: - nodeAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: - properties: - preference: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - properties: - nodeSelectorTerms: - items: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: 
string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: - properties: - podAffinityTerm: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: - type: string - required: - - topologyKey - type: object - weight: - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - items: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: - properties: - podAffinityTerm: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: - type: string - required: - - topologyKey - type: object - weight: - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - items: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: - type: string - required: - - topologyKey - type: object - type: array - type: object - type: object - annotations: - additionalProperties: - type: string - type: object - configMaps: - items: - properties: - name: - type: string - path: - type: string - required: - - name - - path - type: object - type: array - coreLimit: - type: string - coreRequest: - type: string - cores: - format: int32 - minimum: 1 - type: integer - dnsConfig: - properties: - nameservers: - items: - type: string - type: array - options: - items: - properties: - name: - type: string - value: - type: string - type: object - type: array - searches: - items: - type: string - type: array - type: object - env: - items: - properties: - name: - type: string - value: - type: string - 
valueFrom: - properties: - configMapKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - secretKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - type: object - required: - - name - type: object - type: array - envFrom: - items: - properties: - configMapRef: - properties: - name: - type: string - optional: - type: boolean - type: object - prefix: - type: string - secretRef: - properties: - name: - type: string - optional: - type: boolean - type: object - type: object - type: array - envSecretKeyRefs: - additionalProperties: - properties: - key: - type: string - name: - type: string - required: - - key - - name - type: object - type: object - envVars: - additionalProperties: - type: string - type: object - gpu: - properties: - name: - type: string - quantity: - format: int64 - type: integer - required: - - name - - quantity - type: object - hostAliases: - items: - properties: - hostnames: - items: - type: string - type: array - ip: - type: string - type: object - type: array - hostNetwork: - type: boolean - image: - type: string - initContainers: - items: - properties: - args: - items: - type: string - type: array - command: - items: - type: string - type: array - env: - items: - properties: - name: - type: string - value: - type: string - valueFrom: - properties: - configMapKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - secretKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - type: object - required: - - name - type: object - type: array - envFrom: - items: - properties: - configMapRef: - properties: - name: - type: string - optional: - type: boolean - type: object - prefix: - type: string - secretRef: - properties: - name: - type: string - optional: - type: boolean - type: object - type: object - type: array - image: - type: string - imagePullPolicy: - type: string - lifecycle: - properties: - postStart: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - 
port - type: object - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - livenessProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - name: - type: string - ports: - items: - properties: - containerPort: - format: int32 - type: integer - hostIP: - type: string - hostPort: - format: int32 - type: integer - name: - type: string - protocol: - type: string - required: - - containerPort - - protocol - type: object - type: array - x-kubernetes-list-map-keys: - - containerPort - - protocol - x-kubernetes-list-type: map - readinessProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - resources: - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: 
^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - type: object - restartPolicy: - type: string - securityContext: - properties: - seccompProfile: - type: object - properties: - type: - type: string - allowPrivilegeEscalation: - type: boolean - capabilities: - properties: - add: - items: - type: string - type: array - drop: - items: - type: string - type: array - type: object - privileged: - type: boolean - procMount: - type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object - type: object - startupProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - stdin: - type: boolean - stdinOnce: - type: boolean - terminationMessagePath: - type: string - terminationMessagePolicy: - type: string - tty: - type: boolean - volumeDevices: - items: - properties: - devicePath: - type: string - name: - type: string - required: - - devicePath - - name - type: object - type: array - volumeMounts: - items: - properties: - mountPath: - type: string - mountPropagation: - type: string - name: - type: string - readOnly: - type: boolean - subPath: - type: string - subPathExpr: - type: string - required: - - mountPath - - name - type: object - type: array - workingDir: - type: string - required: - - name - type: object - type: array - javaOptions: - type: string - kubernetesMaster: - type: string - labels: - additionalProperties: - type: string - type: object - lifecycle: - properties: - postStart: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: 
object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - memory: - type: string - memoryOverhead: - type: string - nodeSelector: - additionalProperties: - type: string - type: object - podName: - pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*' - type: string - podSecurityContext: - properties: - fsGroup: - format: int64 - type: integer - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - supplementalGroups: - items: - format: int64 - type: integer - type: array - sysctls: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object - type: object - schedulerName: - type: string - secrets: - items: - properties: - name: - type: string - path: - type: string - secretType: - type: string - required: - - name - - path - - secretType - type: object - type: array - securityContext: - properties: - seccompProfile: - type: object - properties: - type: - type: string - allowPrivilegeEscalation: - type: boolean - capabilities: - properties: - add: - items: - type: string - type: array - drop: - items: - type: string - type: array - type: object - privileged: - type: boolean - procMount: - type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object - type: object - serviceAccount: - type: string - serviceAnnotations: - additionalProperties: - type: string - type: object - serviceLabels: - additionalProperties: - type: string - type: object - shareProcessNamespace: - type: boolean - sidecars: - items: - properties: - args: - items: - type: string - type: array - command: - items: - type: string - type: array - env: - items: - properties: - name: - type: string - value: - type: string - valueFrom: - properties: - configMapKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - 
x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - secretKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - type: object - required: - - name - type: object - type: array - envFrom: - items: - properties: - configMapRef: - properties: - name: - type: string - optional: - type: boolean - type: object - prefix: - type: string - secretRef: - properties: - name: - type: string - optional: - type: boolean - type: object - type: object - type: array - image: - type: string - imagePullPolicy: - type: string - lifecycle: - properties: - postStart: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - livenessProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - name: - type: string - ports: - items: - properties: - containerPort: - format: int32 - type: integer - hostIP: - type: string - hostPort: - format: int32 - type: integer - name: - type: string - protocol: - type: string - required: - - containerPort - - protocol - type: object - type: array - x-kubernetes-list-map-keys: - - containerPort - - protocol - x-kubernetes-list-type: map - readinessProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: 
- items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - resources: - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - type: object - securityContext: - properties: - seccompProfile: - type: object - properties: - type: - type: string - allowPrivilegeEscalation: - type: boolean - capabilities: - properties: - add: - items: - type: string - type: array - drop: - items: - type: string - type: array - type: object - privileged: - type: boolean - procMount: - type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object - type: object - startupProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - stdin: - type: boolean - stdinOnce: - type: boolean - terminationMessagePath: - type: string - terminationMessagePolicy: - type: string - tty: - type: boolean - volumeDevices: - items: - properties: - devicePath: - type: string - name: - type: string - required: - - devicePath - - name - type: object - type: array - volumeMounts: - items: - properties: - mountPath: - type: string - mountPropagation: - type: string - name: - type: string - readOnly: - type: boolean - subPath: - type: string - subPathExpr: - type: string - required: - - mountPath - - name - 
type: object - type: array - workingDir: - type: string - required: - - name - type: object - type: array - terminationGracePeriodSeconds: - format: int64 - type: integer - tolerations: - items: - properties: - effect: - type: string - key: - type: string - operator: - type: string - tolerationSeconds: - format: int64 - type: integer - value: - type: string - type: object - type: array - volumeMounts: - items: - properties: - mountPath: - type: string - mountPropagation: - type: string - name: - type: string - readOnly: - type: boolean - subPath: - type: string - subPathExpr: - type: string - required: - - mountPath - - name - type: object - type: array - type: object - dynamicAllocation: - properties: - enabled: - type: boolean - initialExecutors: - format: int32 - type: integer - maxExecutors: - format: int32 - type: integer - minExecutors: - format: int32 - type: integer - shuffleTrackingTimeout: - format: int64 - type: integer - type: object - executor: - properties: - affinity: - properties: - nodeAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: - properties: - preference: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - properties: - nodeSelectorTerms: - items: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - properties: - preferredDuringSchedulingIgnoredDuringExecution: - items: - properties: - podAffinityTerm: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: - type: string - required: - - topologyKey - type: object - weight: - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - items: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - properties: - 
preferredDuringSchedulingIgnoredDuringExecution: - items: - properties: - podAffinityTerm: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: - type: string - required: - - topologyKey - type: object - weight: - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - items: - properties: - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - namespaces: - items: - type: string - type: array - topologyKey: - type: string - required: - - topologyKey - type: object - type: array - type: object - type: object - annotations: - additionalProperties: - type: string - type: object - configMaps: - items: - properties: - name: - type: string - path: - type: string - required: - - name - - path - type: object - type: array - coreLimit: - type: string - coreRequest: - type: string - cores: - format: int32 - minimum: 1 - type: integer - deleteOnTermination: - type: boolean - dnsConfig: - properties: - nameservers: - items: - type: string - type: array - options: - items: - properties: - name: - type: string - value: - type: string - type: object - type: array - searches: - items: - type: string - type: array - type: object - env: - items: - properties: - name: - type: string - value: - type: string - valueFrom: - properties: - configMapKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - secretKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - type: object - required: - - name - type: object - type: array - envFrom: - items: - properties: - configMapRef: - properties: - name: - type: string - optional: - type: boolean - type: object - prefix: - type: string - secretRef: - properties: - name: - type: string - optional: - type: boolean - type: object - type: object - type: array - envSecretKeyRefs: - additionalProperties: - properties: - key: - type: string - name: - type: string - required: - - key - - name - type: object - type: object - envVars: - additionalProperties: - type: string - type: object - gpu: - properties: - name: - type: string - quantity: - format: int64 - type: integer - required: - - name - - quantity - type: object - hostAliases: - items: - properties: - hostnames: - items: - type: string - type: array - ip: - type: string - type: object - type: array - hostNetwork: - 
type: boolean - image: - type: string - initContainers: - items: - properties: - args: - items: - type: string - type: array - command: - items: - type: string - type: array - env: - items: - properties: - name: - type: string - value: - type: string - valueFrom: - properties: - configMapKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - secretKeyRef: - properties: - key: - type: string - name: - type: string - optional: - type: boolean - required: - - key - type: object - type: object - required: - - name - type: object - type: array - envFrom: - items: - properties: - configMapRef: - properties: - name: - type: string - optional: - type: boolean - type: object - prefix: - type: string - secretRef: - properties: - name: - type: string - optional: - type: boolean - type: object - type: object - type: array - image: - type: string - imagePullPolicy: - type: string - lifecycle: - properties: - postStart: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - livenessProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: 
- - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - name: - type: string - ports: - items: - properties: - containerPort: - format: int32 - type: integer - hostIP: - type: string - hostPort: - format: int32 - type: integer - name: - type: string - protocol: - type: string - required: - - containerPort - - protocol - type: object - type: array - x-kubernetes-list-map-keys: - - containerPort - - protocol - x-kubernetes-list-type: map - readinessProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - resources: - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - type: object - restartPolicy: - type: string - securityContext: - properties: - seccompProfile: - type: object - properties: - type: - type: string - allowPrivilegeEscalation: - type: boolean - capabilities: - properties: - add: - items: - type: string - type: array - drop: - items: - type: string - type: array - type: object - privileged: - type: boolean - procMount: - type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object - type: object - startupProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: 
[the rest of the generated ScheduledSparkApplication openAPIV3Schema deletion is elided here: the executor container, pod security context, sidecar and init-container fields, the application-level settings (failureRetries, hadoopConf, monitoring, restartPolicy, sparkConf, sparkUIOptions, driverIngressOptions, sparkVersion, timeToLiveSeconds, type), and the per-type volume definitions]
-                required:
-                - driver
-                - executor
-                - sparkVersion
-                - type
-                type: object
-            required:
-            - schedule
-            - template
-            type: object
-          status:
-            properties:
-              lastRun:
-                format: date-time
-                nullable: true
-                type: string
-              lastRunName:
-                type: string
-              nextRun:
-                format: date-time
-                nullable: true
-                type: string
-              pastFailedRunNames:
-                items:
-                  type: string
-                type: array
-              pastSuccessfulRunNames:
-                items:
-                  type: string
-                type: array
-              reason:
-                type: string
-              scheduleState:
-                type: string
-            type: object
-        required:
-        - metadata
-        - spec
-        type: object
-
-status:
-  acceptedNames:
-    kind: ""
-    plural: ""
-  conditions: []
-  storedVersions: []
diff --git a/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml b/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml
deleted file mode 100644
index c67bb2afaa..0000000000
--- a/manifest/crds/sparkoperator.k8s.io_sparkapplications.yaml
+++ /dev/null
@@ -1,4506 +0,0 @@
-
----
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
-  annotations:
-    controller-gen.kubebuilder.io/version: (unknown)
-    api-approved.kubernetes.io: https://github.com/kubeflow/spark-operator/pull/1298
-  name: sparkapplications.sparkoperator.k8s.io
-spec:
-  group: sparkoperator.k8s.io
-  names:
-    kind: SparkApplication
-    listKind: SparkApplicationList
-    plural: sparkapplications
-    shortNames:
-    - sparkapp
-    singular: sparkapplication
-  scope: Namespaced
-  versions:
-  - name: v1beta2
-    served: true
-    storage: true
-    subresources:
-      status: { }
-    additionalPrinterColumns:
-    - jsonPath: .status.applicationState.state
-      name: Status
-      type: string
-    - jsonPath: .status.executionAttempts
-      name: Attempts
-      type: string
-    - jsonPath: .status.lastSubmissionAttemptTime
-      name: Start
-      type: string
-    - jsonPath: .status.terminationTime
-      name: Finish
-      type: string
-    - jsonPath: .metadata.creationTimestamp
-      name: Age
-      type: date
-    schema:
-      openAPIV3Schema:
-        properties:
-          apiVersion:
-            type: string
-          kind:
-            type: string
-          metadata:
-            type: object
-          spec:
-            properties:
[generated per-field schema entries elided: arguments, batchScheduler and batchSchedulerOptions, deps, the driver pod template (affinity, configMaps, cores, dnsConfig, env, initContainers, lifecycle, podName, pod and container security contexts, serviceAnnotations, serviceLabels, sidecars, tolerations, volumeMounts), dynamicAllocation, and the executor pod template up to its sidecar securityContext; the schema continues below]
allowPrivilegeEscalation: - type: boolean - capabilities: - properties: - add: - items: - type: string - type: array - drop: - items: - type: string - type: array - type: object - privileged: - type: boolean - procMount: - type: string - readOnlyRootFilesystem: - type: boolean - runAsGroup: - format: int64 - type: integer - runAsNonRoot: - type: boolean - runAsUser: - format: int64 - type: integer - seLinuxOptions: - properties: - level: - type: string - role: - type: string - type: - type: string - user: - type: string - type: object - windowsOptions: - properties: - gmsaCredentialSpec: - type: string - gmsaCredentialSpecName: - type: string - runAsUserName: - type: string - type: object - type: object - startupProbe: - properties: - exec: - properties: - command: - items: - type: string - type: array - type: object - failureThreshold: - format: int32 - type: integer - httpGet: - properties: - host: - type: string - httpHeaders: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - path: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - scheme: - type: string - required: - - port - type: object - initialDelaySeconds: - format: int32 - type: integer - periodSeconds: - format: int32 - type: integer - successThreshold: - format: int32 - type: integer - tcpSocket: - properties: - host: - type: string - port: - anyOf: - - type: integer - - type: string - x-kubernetes-int-or-string: true - required: - - port - type: object - timeoutSeconds: - format: int32 - type: integer - type: object - stdin: - type: boolean - stdinOnce: - type: boolean - terminationMessagePath: - type: string - terminationMessagePolicy: - type: string - tty: - type: boolean - volumeDevices: - items: - properties: - devicePath: - type: string - name: - type: string - required: - - devicePath - - name - type: object - type: array - volumeMounts: - items: - properties: - mountPath: - type: string - mountPropagation: - type: string - name: - type: string - readOnly: - type: boolean - subPath: - type: string - subPathExpr: - type: string - required: - - mountPath - - name - type: object - type: array - workingDir: - type: string - required: - - name - type: object - type: array - terminationGracePeriodSeconds: - format: int64 - type: integer - tolerations: - items: - properties: - effect: - type: string - key: - type: string - operator: - type: string - tolerationSeconds: - format: int64 - type: integer - value: - type: string - type: object - type: array - volumeMounts: - items: - properties: - mountPath: - type: string - mountPropagation: - type: string - name: - type: string - readOnly: - type: boolean - subPath: - type: string - subPathExpr: - type: string - required: - - mountPath - - name - type: object - type: array - type: object - failureRetries: - format: int32 - type: integer - hadoopConf: - additionalProperties: - type: string - type: object - hadoopConfigMap: - type: string - image: - type: string - imagePullPolicy: - type: string - imagePullSecrets: - items: - type: string - type: array - mainApplicationFile: - type: string - mainClass: - type: string - memoryOverheadFactor: - type: string - mode: - enum: - - cluster - - client - type: string - monitoring: - properties: - exposeDriverMetrics: - type: boolean - exposeExecutorMetrics: - type: boolean - metricsProperties: - type: string - metricsPropertiesFile: - type: string - prometheus: - properties: - configFile: - type: string - 
configuration: - type: string - jmxExporterJar: - type: string - port: - format: int32 - maximum: 49151 - minimum: 1024 - type: integer - portName: - type: string - required: - - jmxExporterJar - type: object - required: - - exposeDriverMetrics - - exposeExecutorMetrics - type: object - nodeSelector: - additionalProperties: - type: string - type: object - proxyUser: - type: string - pythonVersion: - enum: - - "2" - - "3" - type: string - restartPolicy: - properties: - onFailureRetries: - format: int32 - minimum: 0 - type: integer - onFailureRetryInterval: - format: int64 - minimum: 1 - type: integer - onSubmissionFailureRetries: - format: int32 - minimum: 0 - type: integer - onSubmissionFailureRetryInterval: - format: int64 - minimum: 1 - type: integer - type: - enum: - - Never - - Always - - OnFailure - type: string - type: object - retryInterval: - format: int64 - type: integer - sparkConf: - additionalProperties: - type: string - type: object - sparkConfigMap: - type: string - sparkUIOptions: - properties: - serviceAnnotations: - additionalProperties: - type: string - type: object - serviceLabels: - additionalProperties: - type: string - type: object - ingressAnnotations: - additionalProperties: - type: string - type: object - ingressTLS: - items: - properties: - hosts: - items: - type: string - type: array - secretName: - type: string - type: object - type: array - servicePort: - format: int32 - type: integer - servicePortName: - type: string - serviceType: - type: string - type: object - driverIngressOptions: - items: - properties: - serviceAnnotations: - additionalProperties: - type: string - type: object - serviceLabels: - additionalProperties: - type: string - type: object - ingressURLFormat: - type: string - ingressAnnotations: - additionalProperties: - type: string - type: object - ingressTLS: - items: - properties: - hosts: - items: - type: string - type: array - secretName: - type: string - type: object - type: array - servicePort: - format: int32 - type: integer - servicePortName: - type: string - serviceType: - type: string - type: object - type: array - sparkVersion: - type: string - timeToLiveSeconds: - format: int64 - type: integer - type: - enum: - - Java - - Python - - Scala - - R - type: string - volumes: - items: - properties: - awsElasticBlockStore: - properties: - fsType: - type: string - partition: - format: int32 - type: integer - readOnly: - type: boolean - volumeID: - type: string - required: - - volumeID - type: object - azureDisk: - properties: - cachingMode: - type: string - diskName: - type: string - diskURI: - type: string - fsType: - type: string - kind: - type: string - readOnly: - type: boolean - required: - - diskName - - diskURI - type: object - azureFile: - properties: - readOnly: - type: boolean - secretName: - type: string - shareName: - type: string - required: - - secretName - - shareName - type: object - cephfs: - properties: - monitors: - items: - type: string - type: array - path: - type: string - readOnly: - type: boolean - secretFile: - type: string - secretRef: - properties: - name: - type: string - type: object - user: - type: string - required: - - monitors - type: object - cinder: - properties: - fsType: - type: string - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - volumeID: - type: string - required: - - volumeID - type: object - configMap: - properties: - defaultMode: - format: int32 - type: integer - items: - items: - properties: - key: - type: string - mode: - format: int32 - type: 
integer - path: - type: string - required: - - key - - path - type: object - type: array - name: - type: string - optional: - type: boolean - type: object - csi: - properties: - driver: - type: string - fsType: - type: string - nodePublishSecretRef: - properties: - name: - type: string - type: object - readOnly: - type: boolean - volumeAttributes: - additionalProperties: - type: string - type: object - required: - - driver - type: object - downwardAPI: - properties: - defaultMode: - format: int32 - type: integer - items: - items: - properties: - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - mode: - format: int32 - type: integer - path: - type: string - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - required: - - path - type: object - type: array - type: object - emptyDir: - properties: - medium: - type: string - sizeLimit: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - type: object - ephemeral: - properties: - volumeClaimTemplate: - properties: - spec: - properties: - accessModes: - items: - type: string - type: array - resources: - properties: - requests: - properties: - storage: - type: string - type: object - type: object - storageClassName: - type: string - type: object - type: object - type: object - fc: - properties: - fsType: - type: string - lun: - format: int32 - type: integer - readOnly: - type: boolean - targetWWNs: - items: - type: string - type: array - wwids: - items: - type: string - type: array - type: object - flexVolume: - properties: - driver: - type: string - fsType: - type: string - options: - additionalProperties: - type: string - type: object - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - required: - - driver - type: object - flocker: - properties: - datasetName: - type: string - datasetUUID: - type: string - type: object - gcePersistentDisk: - properties: - fsType: - type: string - partition: - format: int32 - type: integer - pdName: - type: string - readOnly: - type: boolean - required: - - pdName - type: object - gitRepo: - properties: - directory: - type: string - repository: - type: string - revision: - type: string - required: - - repository - type: object - glusterfs: - properties: - endpoints: - type: string - path: - type: string - readOnly: - type: boolean - required: - - endpoints - - path - type: object - hostPath: - properties: - path: - type: string - type: - type: string - required: - - path - type: object - iscsi: - properties: - chapAuthDiscovery: - type: boolean - chapAuthSession: - type: boolean - fsType: - type: string - initiatorName: - type: string - iqn: - type: string - iscsiInterface: - type: string - lun: - format: int32 - type: integer - portals: - items: - type: string - type: array - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - targetPortal: - type: string - required: - - iqn - - lun - - targetPortal - type: object - name: - type: string - nfs: - properties: - path: - type: string - readOnly: - type: boolean - 
server: - type: string - required: - - path - - server - type: object - persistentVolumeClaim: - properties: - claimName: - type: string - readOnly: - type: boolean - required: - - claimName - type: object - photonPersistentDisk: - properties: - fsType: - type: string - pdID: - type: string - required: - - pdID - type: object - portworxVolume: - properties: - fsType: - type: string - readOnly: - type: boolean - volumeID: - type: string - required: - - volumeID - type: object - projected: - properties: - defaultMode: - format: int32 - type: integer - sources: - items: - properties: - configMap: - properties: - items: - items: - properties: - key: - type: string - mode: - format: int32 - type: integer - path: - type: string - required: - - key - - path - type: object - type: array - name: - type: string - optional: - type: boolean - type: object - downwardAPI: - properties: - items: - items: - properties: - fieldRef: - properties: - apiVersion: - type: string - fieldPath: - type: string - required: - - fieldPath - type: object - mode: - format: int32 - type: integer - path: - type: string - resourceFieldRef: - properties: - containerName: - type: string - divisor: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - type: string - required: - - resource - type: object - required: - - path - type: object - type: array - type: object - secret: - properties: - items: - items: - properties: - key: - type: string - mode: - format: int32 - type: integer - path: - type: string - required: - - key - - path - type: object - type: array - name: - type: string - optional: - type: boolean - type: object - serviceAccountToken: - properties: - audience: - type: string - expirationSeconds: - format: int64 - type: integer - path: - type: string - required: - - path - type: object - type: object - type: array - required: - - sources - type: object - quobyte: - properties: - group: - type: string - readOnly: - type: boolean - registry: - type: string - tenant: - type: string - user: - type: string - volume: - type: string - required: - - registry - - volume - type: object - rbd: - properties: - fsType: - type: string - image: - type: string - keyring: - type: string - monitors: - items: - type: string - type: array - pool: - type: string - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - user: - type: string - required: - - image - - monitors - type: object - scaleIO: - properties: - fsType: - type: string - gateway: - type: string - protectionDomain: - type: string - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - sslEnabled: - type: boolean - storageMode: - type: string - storagePool: - type: string - system: - type: string - volumeName: - type: string - required: - - gateway - - secretRef - - system - type: object - secret: - properties: - defaultMode: - format: int32 - type: integer - items: - items: - properties: - key: - type: string - mode: - format: int32 - type: integer - path: - type: string - required: - - key - - path - type: object - type: array - optional: - type: boolean - secretName: - type: string - type: object - storageos: - properties: - fsType: - type: string - readOnly: - type: boolean - secretRef: - properties: - name: - type: string - type: object - volumeName: - type: string - volumeNamespace: - type: string - type: object - 
vsphereVolume: - properties: - fsType: - type: string - storagePolicyID: - type: string - storagePolicyName: - type: string - volumePath: - type: string - required: - - volumePath - type: object - required: - - name - type: object - type: array - required: - - driver - - executor - - sparkVersion - - type - type: object - status: - properties: - applicationState: - properties: - errorMessage: - type: string - state: - type: string - required: - - state - type: object - driverInfo: - properties: - podName: - type: string - webUIAddress: - type: string - webUIIngressAddress: - type: string - webUIIngressName: - type: string - webUIPort: - format: int32 - type: integer - webUIServiceName: - type: string - type: object - executionAttempts: - format: int32 - type: integer - executorState: - additionalProperties: - type: string - type: object - lastSubmissionAttemptTime: - format: date-time - nullable: true - type: string - sparkApplicationId: - type: string - submissionAttempts: - format: int32 - type: integer - submissionID: - type: string - terminationTime: - format: date-time - nullable: true - type: string - required: - - driverInfo - type: object - required: - - metadata - - spec - type: object -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta1/types.go b/pkg/apis/sparkoperator.k8s.io/v1beta1/types.go index e2d697d73c..84654927d8 100644 --- a/pkg/apis/sparkoperator.k8s.io/v1beta1/types.go +++ b/pkg/apis/sparkoperator.k8s.io/v1beta1/types.go @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ +// +kubebuilder:skip + package v1beta1 import ( @@ -409,7 +411,7 @@ type SparkPodSpec struct { // DriverSpec is specification of the driver. type DriverSpec struct { - SparkPodSpec + SparkPodSpec `json:",inline"` // PodName is the name of the driver pod that the user creates. This is used for the // in-cluster client mode in which the user creates a client pod where the driver of // the user application runs. It's an error to set this field if Mode is not @@ -426,7 +428,7 @@ type DriverSpec struct { // ExecutorSpec is specification of the executor. type ExecutorSpec struct { - SparkPodSpec + SparkPodSpec `json:",inline"` // Instances is the number of executor instances. // Optional. Instances *int32 `json:"instances,omitempty"` diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta1/zz_generated.deepcopy.go b/pkg/apis/sparkoperator.k8s.io/v1beta1/zz_generated.deepcopy.go index 7d7c09d03c..4bd7d6ed69 100644 --- a/pkg/apis/sparkoperator.k8s.io/v1beta1/zz_generated.deepcopy.go +++ b/pkg/apis/sparkoperator.k8s.io/v1beta1/zz_generated.deepcopy.go @@ -1,10 +1,7 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated - -// Code generated by k8s code-generator DO NOT EDIT. /* -Copyright 2018 Google LLC +Copyright 2024 The Kubeflow authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,19 +16,18 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by controller-gen. DO NOT EDIT. package v1beta1 import ( - v1 "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. func (in *ApplicationState) DeepCopyInto(out *ApplicationState) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationState. @@ -82,7 +78,6 @@ func (in *Dependencies) DeepCopyInto(out *Dependencies) { *out = new(int32) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Dependencies. @@ -98,7 +93,6 @@ func (in *Dependencies) DeepCopy() *Dependencies { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DriverInfo) DeepCopyInto(out *DriverInfo) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverInfo. @@ -130,7 +124,6 @@ func (in *DriverSpec) DeepCopyInto(out *DriverSpec) { *out = new(string) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverSpec. @@ -162,7 +155,6 @@ func (in *ExecutorSpec) DeepCopyInto(out *ExecutorSpec) { *out = new(string) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecutorSpec. @@ -178,7 +170,6 @@ func (in *ExecutorSpec) DeepCopy() *ExecutorSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GPUSpec) DeepCopyInto(out *GPUSpec) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GPUSpec. @@ -204,7 +195,6 @@ func (in *MonitoringSpec) DeepCopyInto(out *MonitoringSpec) { *out = new(PrometheusSpec) (*in).DeepCopyInto(*out) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringSpec. @@ -220,7 +210,6 @@ func (in *MonitoringSpec) DeepCopy() *MonitoringSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NameKey) DeepCopyInto(out *NameKey) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NameKey. @@ -236,7 +225,6 @@ func (in *NameKey) DeepCopy() *NameKey { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NamePath) DeepCopyInto(out *NamePath) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamePath. @@ -267,7 +255,6 @@ func (in *PrometheusSpec) DeepCopyInto(out *PrometheusSpec) { *out = new(string) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusSpec. @@ -303,7 +290,6 @@ func (in *RestartPolicy) DeepCopyInto(out *RestartPolicy) { *out = new(int64) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestartPolicy. @@ -323,7 +309,6 @@ func (in *ScheduledSparkApplication) DeepCopyInto(out *ScheduledSparkApplication in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplication. 
@@ -356,7 +341,6 @@ func (in *ScheduledSparkApplicationList) DeepCopyInto(out *ScheduledSparkApplica (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplicationList. @@ -396,7 +380,6 @@ func (in *ScheduledSparkApplicationSpec) DeepCopyInto(out *ScheduledSparkApplica *out = new(int32) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplicationSpec. @@ -424,7 +407,6 @@ func (in *ScheduledSparkApplicationStatus) DeepCopyInto(out *ScheduledSparkAppli *out = make([]string, len(*in)) copy(*out, *in) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplicationStatus. @@ -440,7 +422,6 @@ func (in *ScheduledSparkApplicationStatus) DeepCopy() *ScheduledSparkApplication // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SecretInfo) DeepCopyInto(out *SecretInfo) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretInfo. @@ -460,7 +441,6 @@ func (in *SparkApplication) DeepCopyInto(out *SparkApplication) { in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkApplication. @@ -493,7 +473,6 @@ func (in *SparkApplicationList) DeepCopyInto(out *SparkApplicationList) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkApplicationList. @@ -624,7 +603,6 @@ func (in *SparkApplicationSpec) DeepCopyInto(out *SparkApplicationSpec) { *out = new(string) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkApplicationSpec. @@ -651,7 +629,6 @@ func (in *SparkApplicationStatus) DeepCopyInto(out *SparkApplicationStatus) { (*out)[key] = val } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkApplicationStatus. @@ -788,7 +765,6 @@ func (in *SparkPodSpec) DeepCopyInto(out *SparkPodSpec) { *out = new(v1.PodDNSConfig) (*in).DeepCopyInto(*out) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkPodSpec. 
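Note (illustrative): the v1beta1 `types.go` hunk earlier in this patch adds `json:",inline"` to the embedded `SparkPodSpec`. In `encoding/json`, an anonymous embedded struct is already flattened into the parent object; the `,inline` tag makes that flattening explicit for Kubernetes code generators such as controller-gen. A minimal, standalone sketch with simplified stand-in types, not the operator's real definitions:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Simplified stand-ins for the v1beta1 types; only Image and PodName are kept.
type SparkPodSpec struct {
	Image string `json:"image,omitempty"`
}

type DriverSpec struct {
	SparkPodSpec `json:",inline"` // embedded: fields marshal at the top level
	PodName      string           `json:"podName,omitempty"`
}

func main() {
	d := DriverSpec{SparkPodSpec: SparkPodSpec{Image: "spark:3.5.0"}, PodName: "my-driver"}
	out, err := json.Marshal(d)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"image":"spark:3.5.0","podName":"my-driver"}
}
```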
diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go b/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go
index ca009e739e..3fe9e30622 100644
--- a/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go
+++ b/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go
@@ -84,8 +84,14 @@ const (
 // +genclient
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 // +k8s:defaulter-gen=true
-// +kubebuilder:subresource:status
+// +kubebuilder:metadata:annotations="api-approved.kubernetes.io=https://github.com/kubeflow/spark-operator/pull/1298"
 // +kubebuilder:resource:scope=Namespaced,shortName=scheduledsparkapp,singular=scheduledsparkapplication
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:JSONPath=.spec.schedule,name=Schedule,type=string
+// +kubebuilder:printcolumn:JSONPath=.spec.suspend,name=Suspend,type=string
+// +kubebuilder:printcolumn:JSONPath=.status.lastRun,name=Last Run,type=date
+// +kubebuilder:printcolumn:JSONPath=.status.lastRunName,name=Last Run Name,type=string
+// +kubebuilder:printcolumn:JSONPath=.metadata.creationTimestamp,name=Age,type=date
 type ScheduledSparkApplication struct {
 	metav1.TypeMeta `json:",inline"`
 
@@ -165,8 +171,14 @@ type ScheduledSparkApplicationList struct {
 // +genclient
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 // +k8s:defaulter-gen=true
-// +kubebuilder:subresource:status
+// +kubebuilder:metadata:annotations="api-approved.kubernetes.io=https://github.com/kubeflow/spark-operator/pull/1298"
 // +kubebuilder:resource:scope=Namespaced,shortName=sparkapp,singular=sparkapplication
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:JSONPath=.status.applicationState.state,name=Status,type=string
+// +kubebuilder:printcolumn:JSONPath=.status.executionAttempts,name=Attempts,type=string
+// +kubebuilder:printcolumn:JSONPath=.status.lastSubmissionAttemptTime,name=Start,type=string
+// +kubebuilder:printcolumn:JSONPath=.status.terminationTime,name=Finish,type=string
+// +kubebuilder:printcolumn:JSONPath=.metadata.creationTimestamp,name=Age,type=date
 
 // SparkApplication represents a Spark application running on and using Kubernetes as a cluster manager.
 type SparkApplication struct {
diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta2/zz_generated.deepcopy.go b/pkg/apis/sparkoperator.k8s.io/v1beta2/zz_generated.deepcopy.go
index 0b15feb0a0..ffe6107d50 100644
--- a/pkg/apis/sparkoperator.k8s.io/v1beta2/zz_generated.deepcopy.go
+++ b/pkg/apis/sparkoperator.k8s.io/v1beta2/zz_generated.deepcopy.go
@@ -1,10 +1,7 @@
 //go:build !ignore_autogenerated
-// +build !ignore_autogenerated
-
-// Code generated by k8s code-generator DO NOT EDIT.
 
 /*
-Copyright 2018 Google LLC
+Copyright 2024 The Kubeflow authors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -19,20 +16,19 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-// Code generated by deepcopy-gen. DO NOT EDIT.
+// Code generated by controller-gen. DO NOT EDIT.
 
 package v1beta2
 
 import (
-	v1 "k8s.io/api/core/v1"
+	"k8s.io/api/core/v1"
 	networkingv1 "k8s.io/api/networking/v1"
-	runtime "k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime"
 )
 
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ApplicationState) DeepCopyInto(out *ApplicationState) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationState. @@ -65,7 +61,6 @@ func (in *BatchSchedulerConfiguration) DeepCopyInto(out *BatchSchedulerConfigura (*out)[key] = val.DeepCopy() } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BatchSchedulerConfiguration. @@ -111,7 +106,6 @@ func (in *Dependencies) DeepCopyInto(out *Dependencies) { *out = make([]string, len(*in)) copy(*out, *in) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Dependencies. @@ -127,7 +121,6 @@ func (in *Dependencies) DeepCopy() *Dependencies { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DriverInfo) DeepCopyInto(out *DriverInfo) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverInfo. @@ -140,6 +133,64 @@ func (in *DriverInfo) DeepCopy() *DriverInfo { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DriverIngressConfiguration) DeepCopyInto(out *DriverIngressConfiguration) { + *out = *in + if in.ServicePort != nil { + in, out := &in.ServicePort, &out.ServicePort + *out = new(int32) + **out = **in + } + if in.ServicePortName != nil { + in, out := &in.ServicePortName, &out.ServicePortName + *out = new(string) + **out = **in + } + if in.ServiceType != nil { + in, out := &in.ServiceType, &out.ServiceType + *out = new(v1.ServiceType) + **out = **in + } + if in.ServiceAnnotations != nil { + in, out := &in.ServiceAnnotations, &out.ServiceAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ServiceLabels != nil { + in, out := &in.ServiceLabels, &out.ServiceLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.IngressAnnotations != nil { + in, out := &in.IngressAnnotations, &out.IngressAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.IngressTLS != nil { + in, out := &in.IngressTLS, &out.IngressTLS + *out = make([]networkingv1.IngressTLS, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverIngressConfiguration. +func (in *DriverIngressConfiguration) DeepCopy() *DriverIngressConfiguration { + if in == nil { + return nil + } + out := new(DriverIngressConfiguration) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DriverSpec) DeepCopyInto(out *DriverSpec) { *out = *in @@ -176,12 +227,18 @@ func (in *DriverSpec) DeepCopyInto(out *DriverSpec) { (*out)[key] = val } } + if in.ServiceLabels != nil { + in, out := &in.ServiceLabels, &out.ServiceLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } if in.Ports != nil { in, out := &in.Ports, &out.Ports *out = make([]Port, len(*in)) copy(*out, *in) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverSpec. 
@@ -217,7 +274,6 @@ func (in *DynamicAllocation) DeepCopyInto(out *DynamicAllocation) { *out = new(int64) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DynamicAllocation. @@ -249,6 +305,11 @@ func (in *ExecutorSpec) DeepCopyInto(out *ExecutorSpec) { *out = new(string) **out = **in } + if in.Lifecycle != nil { + in, out := &in.Lifecycle, &out.Lifecycle + *out = new(v1.Lifecycle) + (*in).DeepCopyInto(*out) + } if in.DeleteOnTermination != nil { in, out := &in.DeleteOnTermination, &out.DeleteOnTermination *out = new(bool) @@ -259,7 +320,6 @@ func (in *ExecutorSpec) DeepCopyInto(out *ExecutorSpec) { *out = make([]Port, len(*in)) copy(*out, *in) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecutorSpec. @@ -275,7 +335,6 @@ func (in *ExecutorSpec) DeepCopy() *ExecutorSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GPUSpec) DeepCopyInto(out *GPUSpec) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GPUSpec. @@ -306,7 +365,6 @@ func (in *MonitoringSpec) DeepCopyInto(out *MonitoringSpec) { *out = new(PrometheusSpec) (*in).DeepCopyInto(*out) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringSpec. @@ -322,7 +380,6 @@ func (in *MonitoringSpec) DeepCopy() *MonitoringSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NameKey) DeepCopyInto(out *NameKey) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NameKey. @@ -338,7 +395,6 @@ func (in *NameKey) DeepCopy() *NameKey { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NamePath) DeepCopyInto(out *NamePath) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamePath. @@ -354,7 +410,6 @@ func (in *NamePath) DeepCopy() *NamePath { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Port) DeepCopyInto(out *Port) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Port. @@ -390,7 +445,6 @@ func (in *PrometheusSpec) DeepCopyInto(out *PrometheusSpec) { *out = new(string) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusSpec. @@ -426,7 +480,6 @@ func (in *RestartPolicy) DeepCopyInto(out *RestartPolicy) { *out = new(int64) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestartPolicy. @@ -446,7 +499,6 @@ func (in *ScheduledSparkApplication) DeepCopyInto(out *ScheduledSparkApplication in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplication. @@ -479,7 +531,6 @@ func (in *ScheduledSparkApplicationList) DeepCopyInto(out *ScheduledSparkApplica (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplicationList. 
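Note (illustrative): the generated hunks above repeat the `in, out := &in.X, &out.X; *out = new(T); **out = **in` idiom for optional pointer fields such as `ExecutorSpec.Lifecycle`. The point of the extra allocation is that the clone owns its own memory, so mutating the copy never reaches the original. A standalone sketch with an assumed one-field `Spec` type, not the generated file itself:

```go
package main

import "fmt"

// Spec is an assumed one-field stand-in for the operator's generated types.
type Spec struct {
	Instances *int32
}

// DeepCopyInto mirrors the controller-gen idiom from the hunks above.
func (in *Spec) DeepCopyInto(out *Spec) {
	*out = *in // shallow copy: both now share the same Instances pointer
	if in.Instances != nil {
		in, out := &in.Instances, &out.Instances
		*out = new(int32) // give the clone its own allocation
		**out = **in      // then copy the value across
	}
}

func main() {
	n := int32(2)
	orig := Spec{Instances: &n}
	var clone Spec
	orig.DeepCopyInto(&clone)
	*clone.Instances = 10
	fmt.Println(*orig.Instances, *clone.Instances) // 2 10: the original is untouched
}
```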
@@ -519,7 +570,6 @@ func (in *ScheduledSparkApplicationSpec) DeepCopyInto(out *ScheduledSparkApplica *out = new(int32) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplicationSpec. @@ -547,7 +597,6 @@ func (in *ScheduledSparkApplicationStatus) DeepCopyInto(out *ScheduledSparkAppli *out = make([]string, len(*in)) copy(*out, *in) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledSparkApplicationStatus. @@ -563,7 +612,6 @@ func (in *ScheduledSparkApplicationStatus) DeepCopy() *ScheduledSparkApplication // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SecretInfo) DeepCopyInto(out *SecretInfo) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretInfo. @@ -583,7 +631,6 @@ func (in *SparkApplication) DeepCopyInto(out *SparkApplication) { in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkApplication. @@ -616,7 +663,6 @@ func (in *SparkApplicationList) DeepCopyInto(out *SparkApplicationList) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkApplicationList. @@ -774,7 +820,6 @@ func (in *SparkApplicationSpec) DeepCopyInto(out *SparkApplicationSpec) { *out = new(DynamicAllocation) (*in).DeepCopyInto(*out) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkApplicationSpec. @@ -801,7 +846,6 @@ func (in *SparkApplicationStatus) DeepCopyInto(out *SparkApplicationStatus) { (*out)[key] = val } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkApplicationStatus. @@ -986,7 +1030,6 @@ func (in *SparkPodSpec) DeepCopyInto(out *SparkPodSpec) { *out = new(bool) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkPodSpec. @@ -1024,59 +1067,13 @@ func (in *SparkUIConfiguration) DeepCopyInto(out *SparkUIConfiguration) { (*out)[key] = val } } - if in.IngressAnnotations != nil { - in, out := &in.IngressAnnotations, &out.IngressAnnotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.IngressTLS != nil { - in, out := &in.IngressTLS, &out.IngressTLS - *out = make([]networkingv1.IngressTLS, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkUIConfiguration. -func (in *SparkUIConfiguration) DeepCopy() *SparkUIConfiguration { - if in == nil { - return nil - } - out := new(SparkUIConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *DriverIngressConfiguration) DeepCopyInto(out *DriverIngressConfiguration) {
-	*out = *in
-	if in.ServicePort != nil {
-		in, out := &in.ServicePort, &out.ServicePort
-		*out = new(int32)
-		**out = **in
-	}
-	if in.ServicePortName != nil {
-		in, out := &in.ServicePortName, &out.ServicePortName
-		*out = new(string)
-		**out = **in
-	}
-	if in.ServiceType != nil {
-		in, out := &in.ServiceType, &out.ServiceType
-		*out = new(v1.ServiceType)
-		**out = **in
-	}
-	if in.ServiceAnnotations != nil {
-		in, out := &in.ServiceAnnotations, &out.ServiceAnnotations
+	if in.ServiceLabels != nil {
+		in, out := &in.ServiceLabels, &out.ServiceLabels
 		*out = make(map[string]string, len(*in))
 		for key, val := range *in {
 			(*out)[key] = val
 		}
 	}
-	out.IngressURLFormat = in.IngressURLFormat
 	if in.IngressAnnotations != nil {
 		in, out := &in.IngressAnnotations, &out.IngressAnnotations
 		*out = make(map[string]string, len(*in))
@@ -1091,15 +1088,14 @@ func (in *DriverIngressConfiguration) DeepCopyInto(out *DriverIngressConfigurati
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
-	return
 }
 
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverIngressConfiguration.
-func (in *DriverIngressConfiguration) DeepCopy() *DriverIngressConfiguration {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SparkUIConfiguration.
+func (in *SparkUIConfiguration) DeepCopy() *SparkUIConfiguration {
 	if in == nil {
 		return nil
 	}
-	out := new(DriverIngressConfiguration)
+	out := new(SparkUIConfiguration)
 	in.DeepCopyInto(out)
 	return out
-}
\ No newline at end of file
+}

From bc9dcc2dfb6ced0611f7198b795670873b700c20 Mon Sep 17 00:00:00 2001
From: Yi Chen
Date: Mon, 22 Jul 2024 13:18:01 +0800
Subject: [PATCH 78/87] Add CHANGELOG.md file and use python script to
 generate it automatically (#2087)

Signed-off-by: Yi Chen

---
 CHANGELOG.md               | 424 +++++++++++++++++++++++++++++++++++++
 hack/generate-changelog.py |  72 +++++++
 2 files changed, 496 insertions(+)
 create mode 100644 CHANGELOG.md
 create mode 100644 hack/generate-changelog.py

diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000000..40568efac5
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,424 @@
+# Changelog
+
+## [spark-operator-chart-1.4.5](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.4.5) (2024-07-22)
+
+- Update the process to build api-docs, generate CRD manifests and code ([#2046](https://github.com/kubeflow/spark-operator/pull/2046) by [@ChenYi015](https://github.com/ChenYi015))
+- Add workflow for closing stale issues and PRs ([#2073](https://github.com/kubeflow/spark-operator/pull/2073) by [@ChenYi015](https://github.com/ChenYi015))
+
+[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.4.4...spark-operator-chart-1.4.5)
+
+## [spark-operator-chart-1.4.4](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.4.4) (2024-07-22)
+
+- Update helm docs ([#2081](https://github.com/kubeflow/spark-operator/pull/2081) by [@csp33](https://github.com/csp33))
+
+[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.4.3...spark-operator-chart-1.4.4)
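Note (illustrative): patch 78 creates `hack/generate-changelog.py`, whose body lies outside this excerpt (the real script is Python). A rough sketch of the rendering step it has to perform, written in Go for consistency with the other notes here; the function name, input shape, and data source are all assumptions, not the script's actual implementation:

```go
package main

import "fmt"

const repo = "https://github.com/kubeflow/spark-operator"

// pr holds the fields the changelog needs for one merged pull request.
type pr struct {
	number int
	title  string
	author string
}

// renderRelease emits one release section in the CHANGELOG.md format above.
func renderRelease(tag, prevTag, date string, prs []pr) string {
	s := fmt.Sprintf("## [%s](%s/tree/%s) (%s)\n\n", tag, repo, tag, date)
	for _, p := range prs {
		s += fmt.Sprintf("- %s ([#%d](%s/pull/%d) by [@%s](https://github.com/%s))\n",
			p.title, p.number, repo, p.number, p.author, p.author)
	}
	return s + fmt.Sprintf("\n[Full Changelog](%s/compare/%s...%s)\n", repo, prevTag, tag)
}

func main() {
	fmt.Print(renderRelease("spark-operator-chart-1.4.4", "spark-operator-chart-1.4.3",
		"2024-07-22", []pr{{2081, "Update helm docs", "csp33"}}))
}
```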
+
+## [spark-operator-chart-1.4.3](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.4.3) (2024-07-03)
+
+- Add PodDisruptionBudget to chart ([#2078](https://github.com/kubeflow/spark-operator/pull/2078) by [@csp33](https://github.com/csp33))
+- Update README and documentation ([#2047](https://github.com/kubeflow/spark-operator/pull/2047) by [@ChenYi015](https://github.com/ChenYi015))
+- Add code of conduct and update contributor guide ([#2074](https://github.com/kubeflow/spark-operator/pull/2074) by [@ChenYi015](https://github.com/ChenYi015))
+- Remove .gitlab-ci.yml ([#2069](https://github.com/kubeflow/spark-operator/pull/2069) by [@jacobsalway](https://github.com/jacobsalway))
+- Modified README.MD as per changes discussed on ([#2066](https://github.com/kubeflow/spark-operator/pull/2066) by [@vikas-saxena02](https://github.com/vikas-saxena02))
+
+[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.4.2...spark-operator-chart-1.4.3)
+
+## [spark-operator-chart-1.4.2](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.4.2) (2024-06-17)
+
+- Support objectSelector on mutating webhook ([#2058](https://github.com/kubeflow/spark-operator/pull/2058) by [@Cian911](https://github.com/Cian911))
+
+[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.4.1...spark-operator-chart-1.4.2)
+
+## [spark-operator-chart-1.4.1](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.4.1) (2024-06-15)
+
+- Adding an option to set the priority class for spark-operator pod ([#2043](https://github.com/kubeflow/spark-operator/pull/2043) by [@pkgajulapalli](https://github.com/pkgajulapalli))
+- Update minikube version in CI ([#2059](https://github.com/kubeflow/spark-operator/pull/2059) by [@Cian911](https://github.com/Cian911))
+
+[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.4.0...spark-operator-chart-1.4.1)
+
+## [spark-operator-chart-1.4.0](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.4.0) (2024-06-05)
+
+- Certificates are generated by the operator rather than gencerts.sh ([#2016](https://github.com/kubeflow/spark-operator/pull/2016) by [@ChenYi015](https://github.com/ChenYi015))
+- Add ChenYi015 as spark-operator reviewer ([#2045](https://github.com/kubeflow/spark-operator/pull/2045) by [@ChenYi015](https://github.com/ChenYi015))
+
+[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.3.2...spark-operator-chart-1.4.0)
+
+## [spark-operator-chart-1.3.2](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.3.2) (2024-06-05)
+
+- Bump appVersion to v1beta2-1.5.0-3.5.0 ([#2044](https://github.com/kubeflow/spark-operator/pull/2044) by [@ChenYi015](https://github.com/ChenYi015))
+- Add restartPolicy field to SparkApplication Driver/Executor initContainers CRDs ([#2022](https://github.com/kubeflow/spark-operator/pull/2022) by [@mschroering](https://github.com/mschroering))
+- :memo: Add Inter&Co to who-is-using.md ([#2040](https://github.com/kubeflow/spark-operator/pull/2040) by [@ignitz](https://github.com/ignitz))
+
+[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.3.1...spark-operator-chart-1.3.2)
+
+## [spark-operator-chart-1.3.1](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.3.1) (2024-05-31)
+
+- Chart: add POD_NAME env for leader election ([#2039](https://github.com/kubeflow/spark-operator/pull/2039) by [@Aakcht](https://github.com/Aakcht))
+
+[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.3.0...spark-operator-chart-1.3.1)
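Note (illustrative): the 1.4.2 entry above (#2058) adds `objectSelector` support to the operator's mutating webhook. In terms of the upstream `admissionregistration/v1` types, a selector-scoped webhook looks roughly like this; the webhook name and label key are assumed examples, not the chart's actual values:

```go
package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Only objects matching ObjectSelector are sent to the webhook, so
	// unrelated pods skip admission entirely.
	webhook := admissionregistrationv1.MutatingWebhook{
		Name: "webhook.sparkoperator.k8s.io",
		ObjectSelector: &metav1.LabelSelector{
			MatchLabels: map[string]string{"sparkoperator.k8s.io/launched-by-spark-operator": "true"},
		},
	}
	fmt.Println(webhook.Name, webhook.ObjectSelector.MatchLabels)
}
```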
+
+## [spark-operator-chart-1.3.0](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.3.0) (2024-05-20)
+
+- Support exposing extra TCP ports in Spark Driver via K8s Ingress ([#1998](https://github.com/kubeflow/spark-operator/pull/1998) by [@hiboyang](https://github.com/hiboyang))
+- Fixes a bug with dynamic allocation forcing the executor count to be 1 even when minExecutors is set to 0 ([#1979](https://github.com/kubeflow/spark-operator/pull/1979) by [@peter-mcclonski](https://github.com/peter-mcclonski))
+- Remove outdated PySpark experimental warning in example ([#2014](https://github.com/kubeflow/spark-operator/pull/2014) by [@andrejpk](https://github.com/andrejpk))
+- Update Spark Job Namespace docs ([#2000](https://github.com/kubeflow/spark-operator/pull/2000) by [@matthewrossi](https://github.com/matthewrossi))
+
+[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.2.15...spark-operator-chart-1.3.0)
+
+## [spark-operator-chart-1.2.15](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.2.15) (2024-05-07)
+
+- Fix examples ([#2010](https://github.com/kubeflow/spark-operator/pull/2010) by [@peter-mcclonski](https://github.com/peter-mcclonski))
+
+[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.2.14...spark-operator-chart-1.2.15)
+
+## [spark-operator-chart-1.2.14](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.2.14) (2024-04-26)
+
+- feat: add support for service labels on driver-svc ([#1985](https://github.com/kubeflow/spark-operator/pull/1985) by [@Cian911](https://github.com/Cian911))
+
+[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.2.13...spark-operator-chart-1.2.14)
+
+## [spark-operator-chart-1.2.13](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.2.13) (2024-04-24)
+
+- fix(chart): remove operator namespace default for job namespaces value ([#1989](https://github.com/kubeflow/spark-operator/pull/1989) by [@t3mi](https://github.com/t3mi))
+- Fix Docker Hub Credentials in CI ([#2003](https://github.com/kubeflow/spark-operator/pull/2003) by [@andreyvelich](https://github.com/andreyvelich))
+
+[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.2.12...spark-operator-chart-1.2.13)
+
+## [spark-operator-chart-1.2.12](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.2.12) (2024-04-19)
+
+- Add emptyDir sizeLimit support for local dirs ([#1993](https://github.com/kubeflow/spark-operator/pull/1993) by [@jacobsalway](https://github.com/jacobsalway))
+- fix: Removed `publish-image` dependency on publishing the helm chart ([#1995](https://github.com/kubeflow/spark-operator/pull/1995) by [@vara-bonthu](https://github.com/vara-bonthu))
+
+[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.2.11...spark-operator-chart-1.2.12)
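Note (illustrative): the 1.2.12 entry above (#1993) adds `emptyDir` `sizeLimit` support for Spark local directories. Built with the upstream `core/v1` and `apimachinery` types, a capped scratch volume looks like this; the volume name and the 2Gi limit are assumed examples:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	sizeLimit := resource.MustParse("2Gi")
	vol := corev1.Volume{
		Name: "spark-local-dir-1",
		VolumeSource: corev1.VolumeSource{
			EmptyDir: &corev1.EmptyDirVolumeSource{
				// Kubernetes evicts the pod if usage exceeds this limit.
				SizeLimit: &sizeLimit,
			},
		},
	}
	fmt.Println(vol.Name, vol.VolumeSource.EmptyDir.SizeLimit.String())
}
```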
+
+## [spark-operator-chart-1.2.11](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.2.11) (2024-04-19)
+
+- fix: Update Github workflow to publish Helm charts on chart changes, irrespective of image change ([#1992](https://github.com/kubeflow/spark-operator/pull/1992) by [@vara-bonthu](https://github.com/vara-bonthu))
+- chore: Add Timo to user list ([#1615](https://github.com/kubeflow/spark-operator/pull/1615) by [@vanducng](https://github.com/vanducng))
+- Update spark operator permissions for CRD ([#1973](https://github.com/kubeflow/spark-operator/pull/1973) by [@ChenYi015](https://github.com/ChenYi015))
+- fix spark-rbac ([#1986](https://github.com/kubeflow/spark-operator/pull/1986) by [@Aransh](https://github.com/Aransh))
+- Use Kubeflow Docker Hub for Spark Operator Image ([#1974](https://github.com/kubeflow/spark-operator/pull/1974) by [@andreyvelich](https://github.com/andreyvelich))
+- fix: fixed serviceaccount annotations ([#1972](https://github.com/kubeflow/spark-operator/pull/1972) by [@AndrewChubatiuk](https://github.com/AndrewChubatiuk))
+
+[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.2.7...spark-operator-chart-1.2.11)
+
+## [spark-operator-chart-1.2.7](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.2.7) (2024-04-16)
+
+- fix: upgraded k8s deps ([#1983](https://github.com/kubeflow/spark-operator/pull/1983) by [@AndrewChubatiuk](https://github.com/AndrewChubatiuk))
+- chore: remove k8s.io/kubernetes replaces and adapt to v1.29.3 apis ([#1968](https://github.com/kubeflow/spark-operator/pull/1968) by [@ajayk](https://github.com/ajayk))
+- Add some helm chart unit tests and fix spark service account render failure when extra annotations are specified ([#1967](https://github.com/kubeflow/spark-operator/pull/1967) by [@ChenYi015](https://github.com/ChenYi015))
+- feat: Doc updates, Issue and PR templates are added ([#1970](https://github.com/kubeflow/spark-operator/pull/1970) by [@vara-bonthu](https://github.com/vara-bonthu))
+
+[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.2.5...spark-operator-chart-1.2.7)
+
+## [spark-operator-chart-1.2.5](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.2.5) (2024-04-14)
+
+- fixed docker image tag and updated chart docs ([#1969](https://github.com/kubeflow/spark-operator/pull/1969) by [@AndrewChubatiuk](https://github.com/AndrewChubatiuk))
+
+[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.2.4...spark-operator-chart-1.2.5)
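Note (illustrative): the 1.2.11 entry above (#1973) updates the operator's RBAC permissions for CRDs. The general shape of such a rule, using the upstream `rbac/v1` types; the verbs listed are assumptions for the example, not a copy of the chart's actual rule:

```go
package main

import (
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
)

func main() {
	// A rule letting the operator read CRD objects in the cluster.
	rule := rbacv1.PolicyRule{
		APIGroups: []string{"apiextensions.k8s.io"},
		Resources: []string{"customresourcedefinitions"},
		Verbs:     []string{"get", "list", "watch"},
	}
	fmt.Printf("%+v\n", rule)
}
```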
([#1952](https://github.com/kubeflow/spark-operator/pull/1952) by [@Cian911](https://github.com/Cian911)) +- upgraded golang and dependencies ([#1954](https://github.com/kubeflow/spark-operator/pull/1954) by [@AndrewChubatiuk](https://github.com/AndrewChubatiuk)) +- README for installing operator using kustomize with custom namespace and service name ([#1778](https://github.com/kubeflow/spark-operator/pull/1778) by [@shahsiddharth08](https://github.com/shahsiddharth08)) +- BUGFIX: Added cancel method to fix context leak ([#1917](https://github.com/kubeflow/spark-operator/pull/1917) by [@fazledyn-or](https://github.com/fazledyn-or)) +- remove unmatched quotes from user-guide.md ([#1584](https://github.com/kubeflow/spark-operator/pull/1584) by [@taeyeopkim1](https://github.com/taeyeopkim1)) +- Add PVC permission to Operator role ([#1889](https://github.com/kubeflow/spark-operator/pull/1889) by [@wyangsun](https://github.com/wyangsun)) +- Allow to set webhook job resource limits (#1429,#1300) ([#1946](https://github.com/kubeflow/spark-operator/pull/1946) by [@karbyshevds](https://github.com/karbyshevds)) +- Create OWNERS ([#1927](https://github.com/kubeflow/spark-operator/pull/1927) by [@zijianjoy](https://github.com/zijianjoy)) +- fix: fix issue #1723 about spark-operator not working with volcano on OCP ([#1724](https://github.com/kubeflow/spark-operator/pull/1724) by [@disaster37](https://github.com/disaster37)) +- Add Rokt to who-is-using.md ([#1867](https://github.com/kubeflow/spark-operator/pull/1867) by [@jacobsalway](https://github.com/jacobsalway)) +- Handle invalid API resources in discovery ([#1758](https://github.com/kubeflow/spark-operator/pull/1758) by [@wiltonsr](https://github.com/wiltonsr)) +- Fix docs for Volcano integration ([#1719](https://github.com/kubeflow/spark-operator/pull/1719) by [@VVKot](https://github.com/VVKot)) +- Added qualytics to who is using ([#1736](https://github.com/kubeflow/spark-operator/pull/1736) by [@josecsotomorales](https://github.com/josecsotomorales)) +- Allowing optional annotation on rbac ([#1770](https://github.com/kubeflow/spark-operator/pull/1770) by [@cxfcxf](https://github.com/cxfcxf)) +- Support `seccompProfile` in Spark application CRD and fix pre-commit jobs ([#1768](https://github.com/kubeflow/spark-operator/pull/1768) by [@ordukhanian](https://github.com/ordukhanian)) +- Updating webhook docs to also mention eks ([#1763](https://github.com/kubeflow/spark-operator/pull/1763) by [@JunaidChaudry](https://github.com/JunaidChaudry)) +- Link to helm docs fixed ([#1783](https://github.com/kubeflow/spark-operator/pull/1783) by [@haron](https://github.com/haron)) +- Improve getMasterURL() to add [] to IPv6 if needed ([#1825](https://github.com/kubeflow/spark-operator/pull/1825) by [@LittleWat](https://github.com/LittleWat)) +- Add envFrom to operator deployment ([#1785](https://github.com/kubeflow/spark-operator/pull/1785) by [@matschaffer-roblox](https://github.com/matschaffer-roblox)) +- Expand ingress docs a bit ([#1806](https://github.com/kubeflow/spark-operator/pull/1806) by [@matschaffer-roblox](https://github.com/matschaffer-roblox)) +- Optional sidecars for operator pod ([#1754](https://github.com/kubeflow/spark-operator/pull/1754) by [@qq157755587](https://github.com/qq157755587)) +- Add Roblox to who-is ([#1784](https://github.com/kubeflow/spark-operator/pull/1784) by [@matschaffer-roblox](https://github.com/matschaffer-roblox)) +- Molex started using spark K8 operator. 
([#1714](https://github.com/kubeflow/spark-operator/pull/1714) by [@AshishPushpSingh](https://github.com/AshishPushpSingh)) +- Extra helm chart labels ([#1669](https://github.com/kubeflow/spark-operator/pull/1669) by [@kvanzuijlen](https://github.com/kvanzuijlen)) + +[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.27...spark-operator-chart-1.2.4) + +## [spark-operator-chart-1.1.27](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.27) (2023-03-17) + +- Added permissions for leader election #1635 ([#1647](https://github.com/kubeflow/spark-operator/pull/1647) by [@ordukhanian](https://github.com/ordukhanian)) +- Fix #1393 : fix tolerations block in wrong segment for webhook jobs ([#1633](https://github.com/kubeflow/spark-operator/pull/1633) by [@zhiminglim](https://github.com/zhiminglim)) +- add dependabot ([#1629](https://github.com/kubeflow/spark-operator/pull/1629) by [@monotek](https://github.com/monotek)) +- Add support for `ephemeral.volumeClaimTemplate` in helm chart CRDs ([#1661](https://github.com/kubeflow/spark-operator/pull/1661) by [@ArshiAAkhavan](https://github.com/ArshiAAkhavan)) +- Add Kognita to "Who is using" ([#1637](https://github.com/kubeflow/spark-operator/pull/1637) by [@claudino-kognita](https://github.com/claudino-kognita)) +- add lifecycle to executor ([#1674](https://github.com/kubeflow/spark-operator/pull/1674) by [@tiechengsu](https://github.com/tiechengsu)) +- Fix signal handling for non-leader processes ([#1680](https://github.com/kubeflow/spark-operator/pull/1680) by [@antonipp](https://github.com/antonipp)) + +[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.26...spark-operator-chart-1.1.27) + +## [spark-operator-chart-1.1.26](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.26) (2022-10-25) + +- update go to 1.19 + k8s.io libs to v0.25.3 ([#1630](https://github.com/kubeflow/spark-operator/pull/1630) by [@ImpSy](https://github.com/ImpSy)) +- Update README - secrets and sidecars need mutating webhooks ([#1550](https://github.com/kubeflow/spark-operator/pull/1550) by [@djdillon](https://github.com/djdillon)) + +[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.25...spark-operator-chart-1.1.26) + +## [spark-operator-chart-1.1.25](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.25) (2022-06-08) + +- Webhook init and cleanup should respect nodeSelector ([#1545](https://github.com/kubeflow/spark-operator/pull/1545) by [@erikcw](https://github.com/erikcw)) +- rename unit tests to integration tests in Makefile#integration-test ([#1539](https://github.com/kubeflow/spark-operator/pull/1539) by [@dcoliversun](https://github.com/dcoliversun)) + +[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.24...spark-operator-chart-1.1.25) + +## [spark-operator-chart-1.1.24](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.24) (2022-06-01) + +- Fix: use V1 api for CRDs for volcano integration ([#1540](https://github.com/kubeflow/spark-operator/pull/1540) by [@Aakcht](https://github.com/Aakcht)) + +[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.23...spark-operator-chart-1.1.24) + +## [spark-operator-chart-1.1.23](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.23) (2022-05-18) + +- fix: add pre-upgrade hook to rbac resources 
([#1511](https://github.com/kubeflow/spark-operator/pull/1511) by [@cwyl02](https://github.com/cwyl02)) + +[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.22...spark-operator-chart-1.1.23) + +## [spark-operator-chart-1.1.22](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.22) (2022-05-16) + +- Fixes issue #1467 (issue when deleting SparkApplication without metrics server) ([#1530](https://github.com/kubeflow/spark-operator/pull/1530) by [@aneagoe](https://github.com/aneagoe)) +- Implement --logs and --delete flags on 'sparkctl create' and a timeout on 'sparkctl log' to wait a pod startup ([#1506](https://github.com/kubeflow/spark-operator/pull/1506) by [@alaurentinoofficial](https://github.com/alaurentinoofficial)) +- Fix Spark UI URL in app status ([#1518](https://github.com/kubeflow/spark-operator/pull/1518) by [@gtopper](https://github.com/gtopper)) +- remove quotes from yaml file ([#1524](https://github.com/kubeflow/spark-operator/pull/1524) by [@zencircle](https://github.com/zencircle)) +- Added missing manifest yaml, point the manifest to the right direction ([#1504](https://github.com/kubeflow/spark-operator/pull/1504) by [@RonZhang724](https://github.com/RonZhang724)) + +[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.21...spark-operator-chart-1.1.22) + +## [spark-operator-chart-1.1.21](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.21) (2022-05-12) + +- Ensure that driver is deleted prior to sparkapplication resubmission ([#1521](https://github.com/kubeflow/spark-operator/pull/1521) by [@khorshuheng](https://github.com/khorshuheng)) + +[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.20...spark-operator-chart-1.1.21) + +## [spark-operator-chart-1.1.20](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.20) (2022-04-11) + +- Add ingress-class-name controller flag ([#1482](https://github.com/kubeflow/spark-operator/pull/1482) by [@voyvodov](https://github.com/voyvodov)) + +[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.19...spark-operator-chart-1.1.20) + +## [spark-operator-chart-1.1.19](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.19) (2022-02-14) + +- Add Operator volumes and volumeMounts in chart ([#1475](https://github.com/kubeflow/spark-operator/pull/1475) by [@ocworld](https://github.com/ocworld)) + +[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.18...spark-operator-chart-1.1.19) + +## [spark-operator-chart-1.1.18](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.18) (2022-02-13) + +- Updated default registry to ghcr.io ([#1454](https://github.com/kubeflow/spark-operator/pull/1454) by [@aneagoe](https://github.com/aneagoe)) +- Github actions workflow fix for Helm chart deployment ([#1456](https://github.com/kubeflow/spark-operator/pull/1456) by [@vara-bonthu](https://github.com/vara-bonthu)) +- Kubernetes v1.22 extensions/v1beta1 API removal ([#1427](https://github.com/kubeflow/spark-operator/pull/1427) by [@aneagoe](https://github.com/aneagoe)) +- Fixes an issue with github action in job build-spark-operator ([#1452](https://github.com/kubeflow/spark-operator/pull/1452) by [@aneagoe](https://github.com/aneagoe)) +- use github container registry instead of gcr.io for releases 
([#1422](https://github.com/kubeflow/spark-operator/pull/1422) by [@TomHellier](https://github.com/TomHellier)) +- Fixes an error that was preventing the pods from being mutated ([#1421](https://github.com/kubeflow/spark-operator/pull/1421) by [@ssullivan](https://github.com/ssullivan)) +- Make github actions more feature complete ([#1418](https://github.com/kubeflow/spark-operator/pull/1418) by [@TomHellier](https://github.com/TomHellier)) +- Resolves an error when deploying the webhook where the k8s api indica… ([#1413](https://github.com/kubeflow/spark-operator/pull/1413) by [@ssullivan](https://github.com/ssullivan)) + +[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.15...spark-operator-chart-1.1.18) + +## [spark-operator-chart-1.1.15](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.15) (2021-12-02) + +- Add docker build to github action ([#1415](https://github.com/kubeflow/spark-operator/pull/1415) by [@TomHellier](https://github.com/TomHellier)) + +[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.14...spark-operator-chart-1.1.15) + +## [spark-operator-chart-1.1.14](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.14) (2021-11-30) + +- Updating API version of admissionregistration.k8s.io ([#1401](https://github.com/kubeflow/spark-operator/pull/1401) by [@sairamankumar2](https://github.com/sairamankumar2)) +- Add C2FO to who is using ([#1391](https://github.com/kubeflow/spark-operator/pull/1391) by [@vanhoale](https://github.com/vanhoale)) + +[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.13...spark-operator-chart-1.1.14) + +## [spark-operator-chart-1.1.13](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.13) (2021-11-18) + +- delete-service-accounts-and-roles-before-creation ([#1384](https://github.com/kubeflow/spark-operator/pull/1384) by [@TiansuYu](https://github.com/TiansuYu)) + +[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.12...spark-operator-chart-1.1.13) + +## [spark-operator-chart-1.1.12](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.12) (2021-11-14) + +- webhook timeout variable ([#1387](https://github.com/kubeflow/spark-operator/pull/1387) by [@sairamankumar2](https://github.com/sairamankumar2)) + +[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.11...spark-operator-chart-1.1.12) + +## [spark-operator-chart-1.1.11](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.11) (2021-11-12) + +- [FIX] add service account access to persistentvolumeclaims ([#1390](https://github.com/kubeflow/spark-operator/pull/1390) by [@mschroering](https://github.com/mschroering)) +- Add DeepCure to who is using ([#1389](https://github.com/kubeflow/spark-operator/pull/1389) by [@mschroering](https://github.com/mschroering)) + +[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.10...spark-operator-chart-1.1.11) + +## [spark-operator-chart-1.1.10](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.10) (2021-11-09) + +- Add custom toleration support for webhook jobs ([#1383](https://github.com/kubeflow/spark-operator/pull/1383) by [@korjek](https://github.com/korjek)) +- fix container name in addsecuritycontext patch ([#1377](https://github.com/kubeflow/spark-operator/pull/1377) by 
[@lybavsky](https://github.com/lybavsky)) + +[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.9...spark-operator-chart-1.1.10) + +## [spark-operator-chart-1.1.9](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.9) (2021-11-01) + +- `Role` and `RoleBinding` not installed for `webhook-init` in Helm `pre-hook` ([#1379](https://github.com/kubeflow/spark-operator/pull/1379) by [@zzvara](https://github.com/zzvara)) + +[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.8...spark-operator-chart-1.1.9) + +## [spark-operator-chart-1.1.8](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.8) (2021-10-26) + +- Regenerate deleted cert after upgrade ([#1373](https://github.com/kubeflow/spark-operator/pull/1373) by [@simplylizz](https://github.com/simplylizz)) +- Make manifests usable by Kustomize ([#1367](https://github.com/kubeflow/spark-operator/pull/1367) by [@karpoftea](https://github.com/karpoftea)) +- #1329 update the operator to allow subpaths to be used with the spark ui ingress. ([#1330](https://github.com/kubeflow/spark-operator/pull/1330) by [@TomHellier](https://github.com/TomHellier)) + +[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.7...spark-operator-chart-1.1.8) + +## [spark-operator-chart-1.1.7](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.7) (2021-10-21) + +- serviceAccount annotations ([#1350](https://github.com/kubeflow/spark-operator/pull/1350) by [@moskitone](https://github.com/moskitone)) +- Update Dockerfile ([#1369](https://github.com/kubeflow/spark-operator/pull/1369) by [@Sadagopan88](https://github.com/Sadagopan88)) +- [FIX] tolerations are not directly present in Driver(/Executor)Spec ([#1365](https://github.com/kubeflow/spark-operator/pull/1365) by [@s-pedamallu](https://github.com/s-pedamallu)) +- fix running metrics for application deletion ([#1358](https://github.com/kubeflow/spark-operator/pull/1358) by [@Aakcht](https://github.com/Aakcht)) +- Update who-is-using.md ([#1338](https://github.com/kubeflow/spark-operator/pull/1338) by [@Juandavi1](https://github.com/Juandavi1)) +- Update who-is-using.md ([#1082](https://github.com/kubeflow/spark-operator/pull/1082) by [@Juandavi1](https://github.com/Juandavi1)) +- Add support for executor service account ([#1322](https://github.com/kubeflow/spark-operator/pull/1322) by [@bbenzikry](https://github.com/bbenzikry)) +- fix NPE introduce on #1280 ([#1325](https://github.com/kubeflow/spark-operator/pull/1325) by [@ImpSy](https://github.com/ImpSy)) + +[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.6...spark-operator-chart-1.1.7) + +## [spark-operator-chart-1.1.6](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.6) (2021-08-04) + +- Add hook deletion policy for spark-operator service account ([#1313](https://github.com/kubeflow/spark-operator/pull/1313) by [@pdrastil](https://github.com/pdrastil)) + +[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.5...spark-operator-chart-1.1.6) + +## [spark-operator-chart-1.1.5](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.5) (2021-07-28) + +- Add user defined pod labels ([#1288](https://github.com/kubeflow/spark-operator/pull/1288) by [@pdrastil](https://github.com/pdrastil)) + +[Full 
Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.4...spark-operator-chart-1.1.5) + +## [spark-operator-chart-1.1.4](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.4) (2021-07-25) + +- Migrate CRDs from v1beta1 to v1. Add additionalPrinterColumns ([#1298](https://github.com/kubeflow/spark-operator/pull/1298) by [@drazul](https://github.com/drazul)) +- Explain "signal: kill" errors during submission ([#1292](https://github.com/kubeflow/spark-operator/pull/1292) by [@zzvara](https://github.com/zzvara)) +- fix the invalid repo address ([#1291](https://github.com/kubeflow/spark-operator/pull/1291) by [@william-wang](https://github.com/william-wang)) +- add failure context to recordExecutorEvent ([#1280](https://github.com/kubeflow/spark-operator/pull/1280) by [@ImpSy](https://github.com/ImpSy)) +- Update pythonVersion to fix example ([#1284](https://github.com/kubeflow/spark-operator/pull/1284) by [@stratus](https://github.com/stratus)) +- add crds drift check between chart/ and manifest/ ([#1272](https://github.com/kubeflow/spark-operator/pull/1272) by [@ImpSy](https://github.com/ImpSy)) + +[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.3...spark-operator-chart-1.1.4) + +## [spark-operator-chart-1.1.3](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.3) (2021-05-25) + +- Allow user to specify service annotation on Spark UI service ([#1264](https://github.com/kubeflow/spark-operator/pull/1264) by [@khorshuheng](https://github.com/khorshuheng)) + +[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.2...spark-operator-chart-1.1.3) + +## [spark-operator-chart-1.1.2](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.2) (2021-05-25) + +- implement shareProcessNamespace in SparkPodSpec ([#1262](https://github.com/kubeflow/spark-operator/pull/1262) by [@ImpSy](https://github.com/ImpSy)) + +[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.1...spark-operator-chart-1.1.2) + +## [spark-operator-chart-1.1.1](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.1) (2021-05-19) + +- Enable UI service flag for disabling UI service ([#1261](https://github.com/kubeflow/spark-operator/pull/1261) by [@sairamankumar2](https://github.com/sairamankumar2)) +- Add DiDi to who-is-using.md ([#1255](https://github.com/kubeflow/spark-operator/pull/1255) by [@Run-Lin](https://github.com/Run-Lin)) +- doc: update who is using page ([#1251](https://github.com/kubeflow/spark-operator/pull/1251) by [@luizm](https://github.com/luizm)) +- Add Tongdun under who-is-using ([#1249](https://github.com/kubeflow/spark-operator/pull/1249) by [@lomoJG](https://github.com/lomoJG)) +- [#1239] Custom service port name for spark application UI ([#1240](https://github.com/kubeflow/spark-operator/pull/1240) by [@marcozov](https://github.com/marcozov)) +- fix: do not remove preemptionPolicy in patcher when not present ([#1246](https://github.com/kubeflow/spark-operator/pull/1246) by [@HHK1](https://github.com/HHK1)) + +[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.1.0...spark-operator-chart-1.1.1) + +## [spark-operator-chart-1.1.0](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.1.0) (2021-04-28) + +- Updating Spark version from 3.0 to 3.1.1 ([#1153](https://github.com/kubeflow/spark-operator/pull/1153) by 
[@chethanuk](https://github.com/chethanuk)) + +[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.0.10...spark-operator-chart-1.1.0) + +## [spark-operator-chart-1.0.10](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.0.10) (2021-04-28) + +- Add support for blue/green deployments ([#1230](https://github.com/kubeflow/spark-operator/pull/1230) by [@flupke](https://github.com/flupke)) +- Update who-is-using.md: Fossil is using Spark Operator for Production ([#1244](https://github.com/kubeflow/spark-operator/pull/1244) by [@duyet](https://github.com/duyet)) + +[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.0.9...spark-operator-chart-1.0.10) + +## [spark-operator-chart-1.0.9](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.0.9) (2021-04-23) + +- Link to Kubernetes Slack ([#1234](https://github.com/kubeflow/spark-operator/pull/1234) by [@jsoref](https://github.com/jsoref)) +- fix: remove preemptionPolicy when priority class name is used ([#1236](https://github.com/kubeflow/spark-operator/pull/1236) by [@HHK1](https://github.com/HHK1)) +- Spelling ([#1231](https://github.com/kubeflow/spark-operator/pull/1231) by [@jsoref](https://github.com/jsoref)) +- Add support to expose custom ports ([#1205](https://github.com/kubeflow/spark-operator/pull/1205) by [@luizm](https://github.com/luizm)) +- Fix the error of hostAliases when there are more than 2 hostnames ([#1209](https://github.com/kubeflow/spark-operator/pull/1209) by [@cdmikechen](https://github.com/cdmikechen)) +- remove multiple prefixes for 'p' ([#1210](https://github.com/kubeflow/spark-operator/pull/1210) by [@chaudhryfaisal](https://github.com/chaudhryfaisal)) +- added --s3-force-path-style to force path style URLs for S3 objects ([#1206](https://github.com/kubeflow/spark-operator/pull/1206) by [@chaudhryfaisal](https://github.com/chaudhryfaisal)) +- Allow custom bucket path ([#1207](https://github.com/kubeflow/spark-operator/pull/1207) by [@bribroder](https://github.com/bribroder)) +- fix: Remove priority from the spec when using priority class ([#1203](https://github.com/kubeflow/spark-operator/pull/1203) by [@HHK1](https://github.com/HHK1)) +- Fix go get issue with "unknown revision v0.0.0" ([#1198](https://github.com/kubeflow/spark-operator/pull/1198) by [@hongshaoyang](https://github.com/hongshaoyang)) + +[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.0.8...spark-operator-chart-1.0.9) + +## [spark-operator-chart-1.0.8](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.0.8) (2021-03-07) + +- Helm: Put service account into pre-install hook. 
([#1155](https://github.com/kubeflow/spark-operator/pull/1155) by [@tandrup](https://github.com/tandrup)) +- correct hook annotation for webhook job ([#1193](https://github.com/kubeflow/spark-operator/pull/1193) by [@chaudhryfaisal](https://github.com/chaudhryfaisal)) +- Update who-is-using.md ([#1174](https://github.com/kubeflow/spark-operator/pull/1174) by [@tarek-izemrane](https://github.com/tarek-izemrane)) +- add Carrefour as adopter and contributor ([#1156](https://github.com/kubeflow/spark-operator/pull/1156) by [@AliGouta](https://github.com/AliGouta)) + +[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.0.7...spark-operator-chart-1.0.8) + +## [spark-operator-chart-1.0.7](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.0.7) (2021-02-05) + +- fix issue #1131 ([#1142](https://github.com/kubeflow/spark-operator/pull/1142) by [@kz33](https://github.com/kz33)) + +[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.0.6...spark-operator-chart-1.0.7) + +## [spark-operator-chart-1.0.6](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.0.6) (2021-02-04) + +- Add Fossil to who-is-using.md ([#1152](https://github.com/kubeflow/spark-operator/pull/1152) by [@duyet](https://github.com/duyet)) +- #1143 Helm issues while deploying using argocd ([#1145](https://github.com/kubeflow/spark-operator/pull/1145) by [@TomHellier](https://github.com/TomHellier)) +- Include Gojek in who-is-using.md ([#1146](https://github.com/kubeflow/spark-operator/pull/1146) by [@pradithya](https://github.com/pradithya)) +- add hostAliases for SparkPodSpec ([#1133](https://github.com/kubeflow/spark-operator/pull/1133) by [@ImpSy](https://github.com/ImpSy)) +- Adding MavenCode ([#1128](https://github.com/kubeflow/spark-operator/pull/1128) by [@charlesa101](https://github.com/charlesa101)) +- Add MongoDB to who-is-using.md ([#1123](https://github.com/kubeflow/spark-operator/pull/1123) by [@chickenPopcorn](https://github.com/chickenPopcorn)) +- update go version to 1.15 and k8s deps to v0.19.6 ([#1119](https://github.com/kubeflow/spark-operator/pull/1119) by [@stpabhi](https://github.com/stpabhi)) + +[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.0.5...spark-operator-chart-1.0.6) + +## [spark-operator-chart-1.0.5](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.0.5) (2020-12-15) + +- Add prometheus containr port name ([#1099](https://github.com/kubeflow/spark-operator/pull/1099) by [@nicholas-fwang](https://github.com/nicholas-fwang)) + +[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.0.4...spark-operator-chart-1.0.5) + +## [spark-operator-chart-1.0.4](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.0.4) (2020-12-12) + +- Upgrade the Chart version to 1.0.4 ([#1113](https://github.com/kubeflow/spark-operator/pull/1113) by [@ordukhanian](https://github.com/ordukhanian)) +- Support Prometheus PodMonitor Deployment (#1106) ([#1112](https://github.com/kubeflow/spark-operator/pull/1112) by [@ordukhanian](https://github.com/ordukhanian)) +- update executor status if pod is lost while app is still running ([#1111](https://github.com/kubeflow/spark-operator/pull/1111) by [@ImpSy](https://github.com/ImpSy)) +- Add scheduler func for clearing batch scheduling on completed ([#1079](https://github.com/kubeflow/spark-operator/pull/1079) by 
[@nicholas-fwang](https://github.com/nicholas-fwang)) +- Add configuration for SparkUI service type ([#1100](https://github.com/kubeflow/spark-operator/pull/1100) by [@jutley](https://github.com/jutley)) + +[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.0.3...spark-operator-chart-1.0.4) + +## [spark-operator-chart-1.0.3](https://github.com/kubeflow/spark-operator/tree/spark-operator-chart-1.0.3) (2020-12-07) + +- Update docs with new helm instructions ([#1105](https://github.com/kubeflow/spark-operator/pull/1105) by [@hagaibarel](https://github.com/hagaibarel)) + +[Full Changelog](https://github.com/kubeflow/spark-operator/compare/spark-operator-chart-1.0.2...spark-operator-chart-1.0.3) diff --git a/hack/generate-changelog.py b/hack/generate-changelog.py new file mode 100644 index 0000000000..bf5810b0a2 --- /dev/null +++ b/hack/generate-changelog.py @@ -0,0 +1,72 @@ +import argparse + +from github import Github + +REPO_NAME = "kubeflow/spark-operator" +CHANGELOG_FILE = "CHANGELOG.md" + +parser = argparse.ArgumentParser() +parser.add_argument("--token", type=str, help="GitHub Access Token") +parser.add_argument( + "--range", type=str, help="Changelog is generated for this release range" +) +args = parser.parse_args() + +if args.token is None: + raise Exception("GitHub Token must be set") +try: + previous_release = args.range.split("..")[0] + current_release = args.range.split("..")[1] +except Exception: + raise Exception("Release range must be set in this format: v1.7.0..v1.8.0") + +# Get list of commits from the range. +github_repo = Github(args.token).get_repo(REPO_NAME) +comparison = github_repo.compare(previous_release, current_release) +commits = comparison.commits + +# The latest commit contains the release date. +release_date = str(commits[-1].commit.author.date).split(" ")[0] +release_url = "https://github.com/{}/tree/{}".format(REPO_NAME, current_release) + +# Get all PRs in reverse chronological order from the commits. +pr_list = "" +pr_set = set() +for commit in commits.reversed: + # Only add commits with PRs. + for pr in commit.get_pulls(): + # Each PR is added only one time to the list. + if pr.number in pr_set: + continue + if not pr.merged: + continue + pr_set.add(pr.number) + + new_pr = "- {title} ([#{id}]({pr_link}) by [@{user_id}]({user_url}))\n".format( + title=pr.title, + id=pr.number, + pr_link=pr.html_url, + user_id=pr.user.login, + user_url=pr.user.html_url, + ) + pr_list += new_pr + +change_log = [ + "\n", + "## [{}]({}) ({})\n".format(current_release, release_url, release_date), + "\n", + pr_list, + "\n", + "[Full Changelog]({})\n".format(comparison.html_url), +] + +# Update Changelog with the new changes. 
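+# lines[:1] preserves the file's title line; the new release block is spliced
+# in right after it, ahead of all older release entries.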
+with open(CHANGELOG_FILE, "r+") as f: + lines = f.readlines() + f.seek(0) + lines = lines[:1] + change_log + lines[1:] + f.writelines(lines) + +print("Changelog has been updated\n") +print("Group PRs in the Changelog into Features, Bug fixes, Misc, etc.\n") +print("After that, submit a PR with the updated Changelog") From b27717d95d6614d902df3a657b23e3282b2cdaa4 Mon Sep 17 00:00:00 2001 From: Yi Chen Date: Wed, 24 Jul 2024 23:38:53 +0800 Subject: [PATCH 79/87] Add @ChenYi015 to approvers (#2096) Signed-off-by: Yi Chen --- OWNERS | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/OWNERS b/OWNERS index 17604590b2..0f5f887f2e 100644 --- a/OWNERS +++ b/OWNERS @@ -1,7 +1,6 @@ approvers: - andreyvelich + - ChenYi015 - mwielgus - yuchaoran2011 - vara-bonthu -reviewers: - - ChenYi015 From 461ddc906e91a2d4050bc37579c997879547558a Mon Sep 17 00:00:00 2001 From: Andrey Velichkevich Date: Wed, 24 Jul 2024 16:39:53 +0100 Subject: [PATCH 80/87] Update Stale bot settings (#2095) Signed-off-by: Andrey Velichkevich --- .github/workflows/stale.yaml | 35 +++++++++++++++++++---------------- 1 file changed, 19 insertions(+), 16 deletions(-) diff --git a/.github/workflows/stale.yaml b/.github/workflows/stale.yaml index 61d10d9748..7a0ea9e674 100644 --- a/.github/workflows/stale.yaml +++ b/.github/workflows/stale.yaml @@ -1,8 +1,8 @@ -name: Close stale issues and PRs +name: Mark stale issues and pull requests on: schedule: - - cron: "0 1 * * *" + - cron: "0 */5 * * *" jobs: stale: @@ -15,21 +15,24 @@ jobs: steps: - uses: actions/stale@v9 with: - days-before-issue-stale: 60 - days-before-issue-close: 30 - days-before-pr-stale: 60 - days-before-pr-close: 30 + repo-token: ${{ secrets.GITHUB_TOKEN }} + days-before-stale: 90 + days-before-close: 20 stale-issue-message: > - This issue has been automatically marked as stale because it has been open 60 days with no activity. - Remove stale label or comment or this will be closed in 30 days. - Thank you for your contributions. + This issue has been automatically marked as stale because it has not had + recent activity. It will be closed if no further activity occurs. Thank you + for your contributions. close-issue-message: > - This issue has been automatically closed because it has been stalled for 30 days with no activity. - Please comment "/reopen" to reopen it. + This issue has been automatically closed because it has not had recent + activity. Please comment "/reopen" to reopen it. + stale-issue-label: lifecycle/stale + exempt-issue-labels: lifecycle/frozen stale-pr-message: > - This pull request has been automatically marked as stale because it has been open 60 days with no activity. - Remove stale label or comment or this will be closed in 30 days. - Thank you for your contributions. + This pull request has been automatically marked as stale because it has not had + recent activity. It will be closed if no further activity occurs. Thank you + for your contributions. close-pr-message: > - This pull request has been automatically closed because it has been stalled for 30 days with no activity. - Please comment "/reopen" to reopen it. + This pull request has been automatically closed because it has not had recent + activity. Please comment "/reopen" to reopen it. 
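+          # As with issues above, labeling a PR "lifecycle/frozen" exempts it from staleness.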
+ stale-pr-label: lifecycle/stale + exempt-pr-labels: lifecycle/frozen From 51e488695301f6b3a95ec9f0fd80bd8845279d8c Mon Sep 17 00:00:00 2001 From: Yi Chen Date: Wed, 24 Jul 2024 23:40:53 +0800 Subject: [PATCH 81/87] Add Alibaba Cloud to adopters (#2097) Signed-off-by: Yi Chen --- ADOPTERS.md | 1 + 1 file changed, 1 insertion(+) diff --git a/ADOPTERS.md b/ADOPTERS.md index bf7df2a030..99d55f3c7c 100644 --- a/ADOPTERS.md +++ b/ADOPTERS.md @@ -4,6 +4,7 @@ Below are the adopters of project Spark Operator. If you are using Spark Operato | Organization | Contact (GitHub User Name) | Environment | Description of Use | | ------------- | ------------- | ------------- | ------------- | +| [Alibaba Cloud](https://www.alibabacloud.com) | [@ChenYi015](https://github.com/ChenYi015) | Production | AI & Data Infrastructure | | [Beeline](https://beeline.ru) | @spestua | Evaluation | ML & Data Infrastructure | | Bringg | @EladDolev | Production | ML & Analytics Data Platform | | [Caicloud](https://intl.caicloud.io/) | @gaocegege | Production | Cloud-Native AI Platform | From 4108f5493706f463cc2ceb823b66b5e6ac8fb1ca Mon Sep 17 00:00:00 2001 From: jbhalodia-slack Date: Fri, 26 Jul 2024 04:39:55 -0400 Subject: [PATCH 82/87] Add topologySpreadConstraints (#2091) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update README and documentation (#2047) * Update docs Signed-off-by: Yi Chen * Remove docs and update README Signed-off-by: Yi Chen * Add link to monthly community meeting Signed-off-by: Yi Chen --------- Signed-off-by: Yi Chen Signed-off-by: jbhalodia-slack * Add PodDisruptionBudget to chart (#2078) * Add PodDisruptionBudget to chart Signed-off-by: Carlos Sánchez Páez Signed-off-by: Carlos Sánchez Páez Signed-off-by: Carlos Sánchez Páez * PR comments Signed-off-by: Carlos Sánchez Páez --------- Signed-off-by: Carlos Sánchez Páez Signed-off-by: Carlos Sánchez Páez Signed-off-by: jbhalodia-slack * Set topologySpreadConstraints Signed-off-by: jbhalodia-slack * Update README and increase patch version Signed-off-by: jbhalodia-slack * Revert replicaCount change Signed-off-by: jbhalodia-slack * Update README after master merger Signed-off-by: jbhalodia-slack * Update README Signed-off-by: jbhalodia-slack --------- Signed-off-by: Yi Chen Signed-off-by: jbhalodia-slack Signed-off-by: Carlos Sánchez Páez Signed-off-by: Carlos Sánchez Páez Co-authored-by: Yi Chen Co-authored-by: Carlos Sánchez Páez --- charts/spark-operator-chart/Chart.yaml | 2 +- charts/spark-operator-chart/README.md | 3 +- .../templates/deployment.yaml | 10 ++++ .../tests/deployment_test.yaml | 51 +++++++++++++++++++ .../tests/poddisruptionbudget_test.yaml | 7 +-- charts/spark-operator-chart/values.yaml | 12 +++++ 6 files changed, 80 insertions(+), 5 deletions(-) diff --git a/charts/spark-operator-chart/Chart.yaml b/charts/spark-operator-chart/Chart.yaml index 8e78d3f4ad..6068bba17f 100644 --- a/charts/spark-operator-chart/Chart.yaml +++ b/charts/spark-operator-chart/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: spark-operator description: A Helm chart for Spark on Kubernetes operator -version: 1.4.5 +version: 1.4.6 appVersion: v1beta2-1.6.2-3.5.0 keywords: - spark diff --git a/charts/spark-operator-chart/README.md b/charts/spark-operator-chart/README.md index cab86da27c..5f9a6a2ad8 100644 --- a/charts/spark-operator-chart/README.md +++ b/charts/spark-operator-chart/README.md @@ -1,6 +1,6 @@ # spark-operator -![Version: 
1.4.5](https://img.shields.io/badge/Version-1.4.5-informational?style=flat-square) ![AppVersion: v1beta2-1.6.2-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.6.2--3.5.0-informational?style=flat-square) +![Version: 1.4.6](https://img.shields.io/badge/Version-1.4.6-informational?style=flat-square) ![AppVersion: v1beta2-1.6.2-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.6.2--3.5.0-informational?style=flat-square) A Helm chart for Spark on Kubernetes operator @@ -132,6 +132,7 @@ See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall) for command docum | sidecars | list | `[]` | Sidecar containers | | sparkJobNamespaces | list | `[""]` | List of namespaces where to run spark jobs | | tolerations | list | `[]` | List of node taints to tolerate | +| topologySpreadConstraints | list | `[]` | Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. Ref: [Pod Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/) Specify topologySpreadConstraints without the labelSelector field, the labelSelector field will be set to "spark-operator.selectorLabels" subtemplate in the deployment.yaml file. | | uiService.enable | bool | `true` | Enable UI service creation for Spark application | | volumeMounts | list | `[]` | | | volumes | list | `[]` | | diff --git a/charts/spark-operator-chart/templates/deployment.yaml b/charts/spark-operator-chart/templates/deployment.yaml index cf12fb2e89..396f8ae019 100644 --- a/charts/spark-operator-chart/templates/deployment.yaml +++ b/charts/spark-operator-chart/templates/deployment.yaml @@ -138,3 +138,13 @@ spec: tolerations: {{- toYaml . | nindent 8 }} {{- end }} + {{- if and .Values.topologySpreadConstraints (gt (int .Values.replicaCount) 1) }} + {{- $selectorLabels := include "spark-operator.selectorLabels" . | fromYaml -}} + {{- $labelSelectorDict := dict "labelSelector" ( dict "matchLabels" $selectorLabels ) }} + topologySpreadConstraints: + {{- range .Values.topologySpreadConstraints }} + - {{ mergeOverwrite . 
$labelSelectorDict | toYaml | nindent 8 | trim }} + {{- end }} + {{ else if and .Values.topologySpreadConstraints (eq (int .Values.replicaCount) 1) }} + {{ fail "replicaCount must be greater than 1 to enable topologySpreadConstraints."}} + {{- end }} diff --git a/charts/spark-operator-chart/tests/deployment_test.yaml b/charts/spark-operator-chart/tests/deployment_test.yaml index 5debda1932..055d3b25fa 100644 --- a/charts/spark-operator-chart/tests/deployment_test.yaml +++ b/charts/spark-operator-chart/tests/deployment_test.yaml @@ -299,3 +299,54 @@ tests: - key: key2 operator: Exists effect: NoSchedule + + - it: Should not contain topologySpreadConstraints if topologySpreadConstraints is not set + set: + topologySpreadConstraints: [] + asserts: + - notExists: + path: spec.template.spec.topologySpreadConstraints + + - it: Should add topologySpreadConstraints if topologySpreadConstraints is set and replicaCount is greater than 1 + set: + replicaCount: 2 + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + asserts: + - equal: + path: spec.template.spec.topologySpreadConstraints + value: + - labelSelector: + matchLabels: + app.kubernetes.io/instance: spark-operator + app.kubernetes.io/name: spark-operator + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + - labelSelector: + matchLabels: + app.kubernetes.io/instance: spark-operator + app.kubernetes.io/name: spark-operator + maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + + - it: Should fail if topologySpreadConstraints is set and replicaCount is not greater than 1 + set: + replicaCount: 1 + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + asserts: + - failedTemplate: + errorMessage: "replicaCount must be greater than 1 to enable topologySpreadConstraints." + \ No newline at end of file diff --git a/charts/spark-operator-chart/tests/poddisruptionbudget_test.yaml b/charts/spark-operator-chart/tests/poddisruptionbudget_test.yaml index 3f702fd105..56b9e4fe3d 100644 --- a/charts/spark-operator-chart/tests/poddisruptionbudget_test.yaml +++ b/charts/spark-operator-chart/tests/poddisruptionbudget_test.yaml @@ -17,21 +17,22 @@ tests: - it: Should render spark operator podDisruptionBudget if podDisruptionBudget.enable is true set: + replicaCount: 2 podDisruptionBudget: enable: true - documentIndex: 0 asserts: - containsDocument: apiVersion: policy/v1 kind: PodDisruptionBudget - name: spark-operator-podDisruptionBudget + name: spark-operator-pdb - it: Should set minAvailable from values set: + replicaCount: 2 podDisruptionBudget: enable: true minAvailable: 3 asserts: - equal: - path: spec.template.minAvailable + path: spec.minAvailable value: 3 diff --git a/charts/spark-operator-chart/values.yaml b/charts/spark-operator-chart/values.yaml index 6eefe666bf..bcb3a100a1 100644 --- a/charts/spark-operator-chart/values.yaml +++ b/charts/spark-operator-chart/values.yaml @@ -143,6 +143,18 @@ podDisruptionBudget: # Require `replicaCount` to be greater than 1 minAvailable: 1 +# -- Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. 
+# Ref: [Pod Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/) +# Specify topologySpreadConstraints without the labelSelector field, the labelSelector field will be set +# to "spark-operator.selectorLabels" subtemplate in the deployment.yaml file. +topologySpreadConstraints: [] +# - maxSkew: 1 +# topologyKey: topology.kubernetes.io/zone +# whenUnsatisfiable: ScheduleAnyway +# - maxSkew: 1 +# topologyKey: kubernetes.io/hostname +# whenUnsatisfiable: DoNotSchedule + # nodeSelector -- Node labels for pod assignment nodeSelector: {} From a3ec8f193fe426fc4a6449b85ff94ba89ddb4cbc Mon Sep 17 00:00:00 2001 From: Yi Chen Date: Thu, 1 Aug 2024 20:26:06 +0800 Subject: [PATCH 83/87] Update workflow and docs for releasing Spark operator (#2089) * Update .helmignore Signed-off-by: Yi Chen * Add release docs Signed-off-by: Yi Chen * Update release workflow Signed-off-by: Yi Chen * Update integration test workflow Signed-off-by: Yi Chen * Add workflow for pushing tag when VERSION file changes Signed-off-by: Yi Chen * Update Signed-off-by: Yi Chen * Remove the leading 'v' from chart version Signed-off-by: Yi Chen * Update docker image tags Signed-off-by: Yi Chen --------- Signed-off-by: Yi Chen --- .../workflows/{main.yaml => integration.yaml} | 45 +++-- .github/workflows/push-tag.yaml | 44 +++++ .github/workflows/release-charts.yaml | 58 ++++++ .github/workflows/release-docker.yaml | 120 ++++++++++++ .github/workflows/release.yaml | 183 +++--------------- VERSION | 1 + charts/spark-operator-chart/.helmignore | 5 +- docs/release.md | 119 ++++++++++++ 8 files changed, 406 insertions(+), 169 deletions(-) rename .github/workflows/{main.yaml => integration.yaml} (82%) create mode 100644 .github/workflows/push-tag.yaml create mode 100644 .github/workflows/release-charts.yaml create mode 100644 .github/workflows/release-docker.yaml create mode 100644 VERSION create mode 100644 docs/release.md diff --git a/.github/workflows/main.yaml b/.github/workflows/integration.yaml similarity index 82% rename from .github/workflows/main.yaml rename to .github/workflows/integration.yaml index ba0ee2a657..be5200f9e2 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/integration.yaml @@ -1,13 +1,19 @@ -name: Pre-commit checks +name: Integration Test on: pull_request: branches: - master + - release-* push: branches: - master + - release-* + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }}-${{ github.actor }} + cancel-in-progress: true jobs: build-api-docs: @@ -15,8 +21,6 @@ jobs: steps: - name: Checkout source code uses: actions/checkout@v4 - with: - fetch-depth: "0" - name: Set up Go uses: actions/setup-go@v5 @@ -37,17 +41,14 @@ jobs: steps: - name: Checkout source code uses: actions/checkout@v4 - with: - fetch-depth: "0" - name: Set up Go uses: actions/setup-go@v5 with: - go-version-file: "go.mod" + go-version-file: go.mod - name: build sparkctl - run: | - make build-sparkctl + run: make build-sparkctl build-spark-operator: runs-on: ubuntu-latest @@ -55,17 +56,17 @@ jobs: - name: Checkout source code uses: actions/checkout@v4 with: - fetch-depth: "0" + fetch-depth: 0 - name: Set up Go uses: actions/setup-go@v5 with: - go-version-file: "go.mod" + go-version-file: go.mod - name: Run go fmt check run: make go-fmt - - name: Run go vet + - name: Run go vet check run: make go-vet - name: Run unit tests @@ -92,10 +93,22 @@ jobs: build-helm-chart: runs-on: ubuntu-20.04 steps: + - name: Determine branch name + id: get_branch + run: | + 
BRANCH="" + if [ "${{ github.event_name }}" == "push" ]; then + BRANCH=${{ github.ref }} + elif [ "${{ github.event_name }}" == "pull_request" ]; then + BRANCH=${{ github.base_ref }} + fi + echo "Branch name: $BRANCH" + echo "BRANCH=$BRANCH" >> "$GITHUB_OUTPUT" + - name: Checkout source code uses: actions/checkout@v4 with: - fetch-depth: "0" + fetch-depth: 0 - name: Install Helm uses: azure/setup-helm@v4 @@ -117,14 +130,16 @@ jobs: run: ct version - name: Run chart-testing (lint) - run: ct lint + run: ct lint --check-version-increment=false - name: Run chart-testing (list-changed) id: list-changed + env: + BRANCH: ${{ steps.get_branch.outputs.BRANCH }} run: | - changed=$(ct list-changed) + changed=$(ct list-changed --target-branch $BRANCH) if [[ -n "$changed" ]]; then - echo "::set-output name=changed::true" + echo "changed=true" >> "$GITHUB_OUTPUT" fi - name: Detect CRDs drift between chart and manifest diff --git a/.github/workflows/push-tag.yaml b/.github/workflows/push-tag.yaml new file mode 100644 index 0000000000..f9329f080f --- /dev/null +++ b/.github/workflows/push-tag.yaml @@ -0,0 +1,44 @@ +name: Push Tag on VERSION change + +on: + push: + branches: + - master + - release-* + paths: + - VERSION + +jobs: + push_tag: + runs-on: ubuntu-latest + + steps: + - name: Checkout source code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set up Git + run: | + git config user.name "$GITHUB_ACTOR" + git config user.email "$GITHUB_ACTOR@users.noreply.github.com" + + - name: Read version from VERSION file + run: | + VERSION=$(cat VERSION) + echo "VERSION=$VERSION" >> $GITHUB_ENV + + - name: Check if tag exists + run: | + git fetch --tags + if git tag -l | grep -q "^${VERSION}$"; then + echo "TAG_EXISTS=true" >> $GITHUB_ENV + else + echo "TAG_EXISTS=false" >> $GITHUB_ENV + fi + + - name: Create and push tag + if: env.TAG_EXISTS == 'false' + run: | + git tag -a "$VERSION" -m "Release $VERSION" + git push origin "$VERSION" diff --git a/.github/workflows/release-charts.yaml b/.github/workflows/release-charts.yaml new file mode 100644 index 0000000000..874696f091 --- /dev/null +++ b/.github/workflows/release-charts.yaml @@ -0,0 +1,58 @@ +name: Release Helm charts + +on: + release: + types: [published] + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + build: + permissions: + contents: write + runs-on: ubuntu-latest + steps: + - name: Checkout source code + uses: actions/checkout@v4 + + - name: Configure Git + run: | + git config user.name "$GITHUB_ACTOR" + git config user.email "$GITHUB_ACTOR@users.noreply.github.com" + + - name: Set up Helm + uses: azure/setup-helm@v4.2.0 + with: + version: v3.14.4 + + - name: Package Helm charts + run: | + for chart in $(ls charts); do + helm package charts/$chart + done + + - name: Save packaged charts to temp directory + run: | + mkdir -p /tmp/charts + cp *.tgz /tmp/charts + + - name: Checkout to branch gh-pages + uses: actions/checkout@v4 + with: + ref: gh-pages + fetch-depth: 0 + + - name: Copy packages charts + run: | + cp /tmp/charts/*.tgz . + + - name: Update Helm charts repo index + env: + CHART_URL: https://github.com/${{ github.repository }}/releases/download/${{ github.ref_name }} + run: | + helm repo index --merge index.yaml --url $CHART_URL . 
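+          # Commit the regenerated index back to gh-pages; "|| exit 0" keeps the job green when the index is unchanged.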
+ git add index.yaml + git commit -s -m "Update index.yaml" || exit 0 + git push diff --git a/.github/workflows/release-docker.yaml b/.github/workflows/release-docker.yaml new file mode 100644 index 0000000000..849a0e1097 --- /dev/null +++ b/.github/workflows/release-docker.yaml @@ -0,0 +1,120 @@ +name: Release Docker images + +on: + push: + tags: + - v*.*.* + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +env: + IMAGE_REGISTRY: docker.io + IMAGE_REPOSITORY: kubeflow/spark-operator + +# Ref: https://docs.docker.com/build/ci/github-actions/multi-platform/#distribute-build-across-multiple-runners. +jobs: + build: + runs-on: ubuntu-latest + + strategy: + fail-fast: false + matrix: + platform: + - linux/amd64 + - linux/arm64 + + steps: + - name: Prepare + run: | + platform=${{ matrix.platform }} + echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV + + - name: Checkout source code + uses: actions/checkout@v4 + + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.IMAGE_REGISTRY }}/${{ env.IMAGE_REPOSITORY }} + tags: | + type=ref,event=branch + type=semver,pattern={{version}} + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker buildx + uses: docker/setup-buildx-action@v3 + + - name: Login to container registry + uses: docker/login-action@v3 + with: + registry: ${{ env.IMAGE_REGISTRY }} + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Build and push by digest + id: build + uses: docker/build-push-action@v6 + with: + platforms: ${{ matrix.platform }} + labels: ${{ steps.meta.outputs.labels }} + outputs: type=image,name=${{ env.IMAGE_REGISTRY }}/${{ env.IMAGE_REPOSITORY }},push-by-digest=true,name-canonical=true,push=true + + - name: Export digest + run: | + mkdir -p /tmp/digests + digest="${{ steps.build.outputs.digest }}" + touch "/tmp/digests/${digest#sha256:}" + + - name: Upload digest + uses: actions/upload-artifact@v4 + with: + name: digests-${{ env.PLATFORM_PAIR }} + path: /tmp/digests/* + if-no-files-found: error + retention-days: 1 + + merge: + runs-on: ubuntu-latest + needs: + - build + steps: + - name: Download digests + uses: actions/download-artifact@v4 + with: + path: /tmp/digests + pattern: digests-* + merge-multiple: true + + - name: Set up Docker buildx + uses: docker/setup-buildx-action@v3 + + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.IMAGE_REGISTRY }}/${{ env.IMAGE_REPOSITORY }} + tags: | + type=ref,event=branch + type=semver,pattern={{version}} + + - name: Login to container registry + uses: docker/login-action@v3 + with: + registry: ${{ env.IMAGE_REGISTRY }} + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Create manifest list and push + working-directory: /tmp/digests + run: | + docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) 
| join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \ + $(printf '${{ env.IMAGE_REGISTRY }}/${{ env.IMAGE_REPOSITORY }}@sha256:%s ' *) + + - name: Inspect image + run: | + docker buildx imagetools inspect ${{ env.IMAGE_REGISTRY }}/${{ env.IMAGE_REPOSITORY }}:${{ steps.meta.outputs.version }} diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 417ffb267b..ebd0e62a56 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -1,168 +1,47 @@ -name: Release Charts +name: Create draft release on: push: - branches: - - master -env: - REGISTRY_IMAGE: docker.io/kubeflow/spark-operator + tags: + - v*.*.* -jobs: - build-skip-check: - runs-on: ubuntu-latest - outputs: - image_changed: ${{ steps.skip-check.outputs.image_changed }} - chart_changed: ${{ steps.skip-check.outputs.chart_changed }} - app_version_tag: ${{ steps.skip-check.outputs.app_version_tag }} - chart_version_tag: ${{ steps.skip-check.outputs.chart_version_tag }} - steps: - - name: Checkout source code - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - name: Check if build should be skipped - id: skip-check - run: | - app_version_tag=$(cat charts/spark-operator-chart/Chart.yaml | grep "appVersion: .*" | cut -c13-) - chart_version_tag=$(cat charts/spark-operator-chart/Chart.yaml | grep "version: .*" | cut -c10-) - - # Initialize flags - image_changed=false - chart_changed=false - - if ! git rev-parse -q --verify "refs/tags/$app_version_tag"; then - image_changed=true - git tag $app_version_tag - git push origin $app_version_tag - echo "Spark-Operator Docker Image new tag: $app_version_tag released" - fi - - if ! git rev-parse -q --verify "refs/tags/spark-operator-chart-$chart_version_tag"; then - chart_changed=true - git tag spark-operator-chart-$chart_version_tag - git push origin spark-operator-chart-$chart_version_tag - echo "Spark-Operator Helm Chart new tag: spark-operator-chart-$chart_version_tag released" - fi +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true - echo "image_changed=${image_changed}" >> "$GITHUB_OUTPUT" - echo "chart_changed=${chart_changed}" >> "$GITHUB_OUTPUT" - echo "app_version_tag=${app_version_tag}" >> "$GITHUB_OUTPUT" - echo "chart_version_tag=${chart_version_tag}" >> "$GITHUB_OUTPUT" +jobs: release: + permissions: + contents: write runs-on: ubuntu-latest - needs: - - build-skip-check - if: needs.build-skip-check.outputs.image_changed == 'true' - strategy: - fail-fast: false - matrix: - platform: - - linux/amd64 - - linux/arm64 steps: - name: Checkout uses: actions/checkout@v4 - with: - fetch-depth: 1 + - name: Configure Git run: | git config user.name "$GITHUB_ACTOR" git config user.email "$GITHUB_ACTOR@users.noreply.github.com" - platform=${{ matrix.platform }} - echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV - echo "SCOPE=${platform//\//-}" >> $GITHUB_ENV - - name: Set up QEMU - timeout-minutes: 1 - uses: docker/setup-qemu-action@v3 - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - - name: Install Helm - uses: azure/setup-helm@v4 - with: - version: v3.14.3 - - name: Login to Packages Container registry - uses: docker/login-action@v3 - with: - registry: docker.io - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Build and Push Spark-Operator Docker Image to Docker Hub - id: build - uses: docker/build-push-action@v5 - with: - context: . 
- platforms: ${{ matrix.platform }} - cache-to: type=gha,mode=max,scope=${{ env.SCOPE }} - cache-from: type=gha,scope=${{ env.SCOPE }} - push: true - outputs: type=image,name=${{ env.REGISTRY_IMAGE }},push-by-digest=true,name-canonical=true,push=true - - name: Export digest - run: | - mkdir -p /tmp/digests - digest="${{ steps.build.outputs.digest }}" - touch "/tmp/digests/${digest#sha256:}" - - name: Upload digest - uses: actions/upload-artifact@v4 - with: - name: digests-${{ env.PLATFORM_PAIR }} - path: /tmp/digests/* - if-no-files-found: error - retention-days: 1 - publish-image: - runs-on: ubuntu-latest - needs: - - release - - build-skip-check - if: needs.build-skip-check.outputs.image_changed == 'true' - steps: - - name: Download digests - uses: actions/download-artifact@v4 - with: - pattern: digests-* - path: /tmp/digests - merge-multiple: true - - name: Setup Docker Buildx - uses: docker/setup-buildx-action@v3 - - name: Docker meta - id: meta - uses: docker/metadata-action@v5 - with: - images: ${{ env.REGISTRY_IMAGE }} - tags: ${{ needs.build-skip-check.outputs.app_version_tag }} - - name: Login to Docker Hub - uses: docker/login-action@v3 - with: - registry: docker.io - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Create manifest list and push - working-directory: /tmp/digests - run: | - docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \ - $(printf '${{ env.REGISTRY_IMAGE }}@sha256:%s ' *) - - name: Inspect image - run: | - docker buildx imagetools inspect ${{ env.REGISTRY_IMAGE }}:${{ steps.meta.outputs.version }} - publish-chart: - runs-on: ubuntu-latest - if: needs.build-skip-check.outputs.chart_changed == 'true' - needs: - - build-skip-check - steps: - - name: Checkout - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - name: Install Helm - uses: azure/setup-helm@v4 + + - name: Set up Helm + uses: azure/setup-helm@v4.2.0 with: - version: v3.14.3 - - name: Configure Git + version: v3.14.4 + + - name: Package Helm charts run: | - git config user.name "$GITHUB_ACTOR" - git config user.email "$GITHUB_ACTOR@users.noreply.github.com" - - name: Release Spark-Operator Helm Chart - uses: helm/chart-releaser-action@v1.6.0 - env: - CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}" - CR_RELEASE_NAME_TEMPLATE: "spark-operator-chart-{{ .Version }}" + for chart in $(ls charts); do + helm package charts/$chart + done + + - name: Release + id: release + uses: softprops/action-gh-release@v2 + with: + token: ${{ secrets.GITHUB_TOKEN }} + draft: true + prerelease: ${{ contains(github.ref, 'rc') }} + target_commitish: ${{ github.sha }} + files: | + *.tgz + diff --git a/VERSION b/VERSION new file mode 100644 index 0000000000..17da53b58b --- /dev/null +++ b/VERSION @@ -0,0 +1 @@ +v1beta2-1.6.2-3.5.0 \ No newline at end of file diff --git a/charts/spark-operator-chart/.helmignore b/charts/spark-operator-chart/.helmignore index 4fbbbf5df1..8cefbf465e 100644 --- a/charts/spark-operator-chart/.helmignore +++ b/charts/spark-operator-chart/.helmignore @@ -3,6 +3,7 @@ # negation (prefixed with !). Only one pattern per line. 
ci/
+.helmignore

# Common VCS dirs
.git/
@@ -21,16 +22,16 @@ ci/
*~

# Various IDEs
+*.tmproj
.project
.idea/
-*.tmproj
.vscode/

# MacOS
.DS_Store

# helm-unittest
-./tests
+tests
.debug
__snapshot__
diff --git a/docs/release.md b/docs/release.md
new file mode 100644
index 0000000000..d3a385b602
--- /dev/null
+++ b/docs/release.md
@@ -0,0 +1,125 @@
+# Releasing the Spark operator
+
+## Prerequisites
+
+- [Write](https://docs.github.com/organizations/managing-access-to-your-organizations-repositories/repository-permission-levels-for-an-organization#permission-levels-for-repositories-owned-by-an-organization) permission for the Spark operator repository.
+
+- Create a [GitHub Token](https://docs.github.com/github/authenticating-to-github/keeping-your-account-and-data-secure/creating-a-personal-access-token).
+
+- Install `PyGithub` to generate the [Changelog](../CHANGELOG.md):
+
+  ```bash
+  pip install PyGithub==2.3.0
+  ```
+
+## Versioning policy
+
+The Spark Operator version format follows [Semantic Versioning](https://semver.org/). Spark Operator versions are in the format of `vX.Y.Z`, where `X` is the major version, `Y` is the minor version, and `Z` is the patch version. The patch version contains only bug fixes.
+
+Additionally, Spark Operator does pre-releases in this format: `vX.Y.Z-rc.N`, where `N` is the number of the `Nth` release candidate (RC) before an upcoming public release named `vX.Y.Z`.
+
+## Release branches and tags
+
+Spark Operator releases are tagged with tags like `vX.Y.Z`, for example `v1.7.2`.
+
+Release branches are in the format of `release-X.Y`, where `X.Y` stands for the minor release.
+
+`vX.Y.Z` releases are released from the `release-X.Y` branch. For example, the `v1.7.2` release should be on the `release-1.7` branch.
+
+If you want to push changes to the `release-X.Y` release branch, you have to cherry-pick your changes from the `master` branch and submit a PR.
+
+## Create a new release
+
+### Create release branch
+
+1. Depending on which version you want to release:
+
+   - Major or minor version - Use the GitHub UI to create a release branch from `master` and name the release branch `release-X.Y`.
+   - Patch version - You don't need to create a new release branch.
+
+2. Fetch the upstream changes into your local directory:
+
+   ```bash
+   git fetch upstream
+   ```
+
+3. Check out the release branch:
+
+   ```bash
+   git checkout release-X.Y
+   git rebase upstream/release-X.Y
+   ```
+
+### Create GitHub tag
+
+1. Modify the `VERSION` file in the root directory of the project:
+
+   - For the RC tag as follows:
+
+     ```bash
+     vX.Y.Z-rc.N
+     ```
+
+   - For the official release tag as follows:
+
+     ```bash
+     vX.Y.Z
+     ```
+
+2. Modify `version` and `appVersion` in `Chart.yaml`:
+
+   ```bash
+   # Get version and remove the leading 'v'
+   VERSION=$(cat VERSION | sed "s/^v//")
+   sed -i "s/^version.*/version: ${VERSION}/" charts/spark-operator-chart/Chart.yaml
+   sed -i "s/^appVersion.*/appVersion: ${VERSION}/" charts/spark-operator-chart/Chart.yaml
+   ```
+
+3. Commit the changes:
+
+   ```bash
+   git add VERSION
+   git add charts/spark-operator-chart/Chart.yaml
+   git commit -s -m "Release $VERSION"
+   git push
+   ```
+
+4. Submit a PR to the release branch. After the PR is merged, a new tag will be automatically created if the `VERSION` file has changed.
+
+### Release Spark Operator Image
+
+After a pre-release/release tag is pushed, a release workflow will be triggered to build and push the Spark operator Docker image to Docker Hub.
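+
+Once the workflow finishes, you can sanity-check the published multi-arch image with `docker buildx imagetools inspect`. A minimal sketch, assuming the image tag defaults to the `VERSION` file contents with the leading `v` stripped, as in the Makefile:
+
+```bash
+docker buildx imagetools inspect docker.io/kubeflow/spark-operator:$(cat VERSION | sed "s/^v//")
+```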
+
+### Publish release
+
+After a pre-release/release tag is pushed, a release workflow will be triggered to create a new draft release.
+
+### Release Spark Operator Helm Chart
+
+After the draft release is published, a release workflow will be triggered to update the Helm chart repo index and publish it to the Helm repository.
+
+## Update Changelog
+
+Update the `CHANGELOG.md` file by running:
+
+```bash
+python hack/generate-changelog.py \
+    --token=<github-token> \
+    --range=<previous-release>..<current-release>
+```
+
+If you are creating the **first minor pre-release** or the **minor** release (`X.Y`), your `previous-release` is equal to the latest release on the previous minor release branch.
+For example: `--range=v1.7.1..v1.8.0`.
+
+Otherwise, your `previous-release` is equal to the latest release on the `release-X.Y` branch.
+For example: `--range=v1.8.0-rc.0..v1.8.0-rc.1`.
+
+Group PRs in the Changelog into Features, Bug fixes, Documentation, etc.
+
+Finally, submit a PR with the updated Changelog.

From 0dc641bd1d7d262e4a8c5564c926fc4716bcf722 Mon Sep 17 00:00:00 2001
From: Yi Chen
Date: Thu, 1 Aug 2024 20:29:06 +0800
Subject: [PATCH 84/87] Use controller-runtime to reconstruct spark operator
 (#2072)

* Use controller-runtime to reconstruct spark operator

Signed-off-by: Yi Chen

* Update helm charts

Signed-off-by: Yi Chen

* Update examples

Signed-off-by: Yi Chen

---------

Signed-off-by: Yi Chen
---
 .golangci.yaml | 39 +
 .pre-commit-config.yaml | 1 +
 Dockerfile | 25 +-
 Makefile | 107 +-
 PROJECT | 47 +
 .../v1beta1/defaults.go | 0
 .../v1beta1/doc.go | 0
 api/v1beta1/groupversion_info.go | 36 +
 .../v1beta1/register.go | 28 +-
 .../scheduledsparkapplication_types.go | 104 +
 .../v1beta1/sparkapplication_types.go | 299 ++-
 .../v1beta1/zz_generated.deepcopy.go | 2 +-
 .../v1beta2/defaults.go | 10 +-
 .../v1beta2/defaults_test.go | 18 +-
 .../v1beta2/doc.go | 0
 api/v1beta2/groupversion_info.go | 36 +
 .../register.go => api/v1beta2/pod_webhook.go | 8 +-
 .../v1beta2/register.go | 28 +-
 .../scheduledsparkapplication_types.go | 125 ++
 .../v1beta2/sparkapplication_types.go | 457 ++--
 .../v1beta2/zz_generated.deepcopy.go | 2 +-
 charts/spark-operator-chart/Chart.yaml | 40 +-
 charts/spark-operator-chart/README.md | 161 +-
 charts/spark-operator-chart/README.md.gotmpl | 10 +-
 charts/spark-operator-chart/ci/ci-values.yaml | 2 +-
 ...tor.k8s.io_scheduledsparkapplications.yaml | 13 +-
 ...parkoperator.k8s.io_sparkapplications.yaml | 19 +-
 .../templates/_helpers.tpl | 45 +-
 .../templates/controller/_helpers.tpl | 70 +
 .../templates/controller/deployment.yaml | 162 ++
 .../controller/poddisruptionbudget.yaml | 34 +
 .../templates/controller/rbac.yaml | 201 ++
 .../templates/controller/serviceaccount.yaml | 28 +
 .../templates/deployment.yaml | 150 --
 .../templates/poddisruptionbudget.yaml | 17 -
 .../templates/prometheus-podmonitor.yaml | 19 -
 .../templates/prometheus/_helpers.tpl | 22 +
 .../templates/prometheus/podmonitor.yaml | 44 +
 .../spark-operator-chart/templates/rbac.yaml | 148 --
 .../templates/serviceaccount.yaml | 12 -
 .../templates/spark-rbac.yaml | 39 -
 .../templates/spark-serviceaccount.yaml | 14 -
 .../templates/spark/_helpers.tpl | 47 +
 .../templates/spark/rbac.yaml | 73 +
 .../templates/spark/serviceaccount.yaml | 30 +
 .../templates/webhook/_helpers.tpl | 107 +-
 .../templates/webhook/deployment.yaml | 155 ++
 .../webhook/mutatingwebhookconfiguration.yaml | 116 +
 .../webhook/poddisruptionbudget.yaml | 34 +
 .../templates/webhook/rbac.yaml | 171 ++
 .../templates/webhook/secret.yaml | 13 -
 .../templates/webhook/service.yaml | 26 +-
.../templates/webhook/serviceaccount.yaml | 28 + .../validatingwebhookconfiguration.yaml | 83 + .../tests/controller/deployment_test.yaml | 537 +++++ .../controller/poddisruptionbudget_test.yaml | 68 + .../tests/controller/rbac_test.yaml | 79 + .../tests/controller/serviceaccount_test.yaml | 67 + .../tests/deployment_test.yaml | 352 --- .../tests/poddisruptionbudget_test.yaml | 38 - .../tests/prometheus/podmonitor_test.yaml | 102 + .../spark-operator-chart/tests/rbac_test.yaml | 90 - .../tests/serviceaccount_test.yaml | 54 - .../tests/spark-rbac_test.yaml | 133 -- .../tests/spark-serviceaccount_test.yaml | 112 - .../tests/spark/rbac_test.yaml | 123 ++ .../tests/spark/serviceaccount_test.yaml | 124 ++ .../tests/webhook/deployment_test.yaml | 504 +++++ .../mutatingwebhookconfiguration_test.yaml | 78 + .../webhook/poddisruptionbudget_test.yaml | 68 + .../tests/webhook/secret_test.yaml | 31 - .../tests/webhook/service_test.yaml | 32 +- .../validatingwebhookconfiguration_test.yaml | 77 + charts/spark-operator-chart/values.yaml | 484 +++-- cmd/main.go | 31 + .../operator/controller/root.go | 22 +- cmd/operator/controller/start.go | 364 ++++ cmd/operator/root.go | 39 + .../operator/version/root.go | 34 +- cmd/operator/webhook/root.go | 33 + cmd/operator/webhook/start.go | 410 ++++ codecov.yaml | 10 + config/certmanager/certificate.yaml | 35 + config/certmanager/kustomization.yaml | 5 + config/certmanager/kustomizeconfig.yaml | 8 + ...tor.k8s.io_scheduledsparkapplications.yaml | 13 +- ...parkoperator.k8s.io_sparkapplications.yaml | 19 +- config/crd/kustomization.yaml | 10 +- .../cainjection_in_sparkapplications.yaml | 7 + .../patches/webhook_in_sparkapplications.yaml | 16 + config/default/manager_webhook_patch.yaml | 23 + config/default/webhookcainjection_patch.yaml | 25 + config/rbac/role.yaml | 130 ++ ...scheduledsparkapplication_editor_role.yaml | 27 + ...scheduledsparkapplication_viewer_role.yaml | 23 + config/rbac/sparkapplication_editor_role.yaml | 27 + config/rbac/sparkapplication_viewer_role.yaml | 23 + config/samples/kustomization.yaml | 7 + .../v1beta1_scheduledsparkapplication.yaml | 9 + config/samples/v1beta1_sparkapplication.yaml | 23 + .../v1beta2_scheduledsparkapplication.yaml | 34 + config/samples/v1beta2_sparkapplication.yaml | 23 + config/webhook/kustomization.yaml | 6 + config/webhook/kustomizeconfig.yaml | 22 + config/webhook/manifests.yaml | 119 + config/webhook/service.yaml | 15 + docs/api-docs.md | 1905 ++++++++--------- entrypoint.sh | 13 +- examples/spark-operator-with-metrics.yaml | 53 - examples/spark-operator-with-webhook.yaml | 94 - examples/spark-pi-configmap.yaml | 38 +- examples/spark-pi-custom-resource.yaml | 42 +- examples/spark-pi-dynamic-allocation.yaml | 49 + examples/spark-pi-prometheus.yaml | 18 +- ...{spark-py-pi.yaml => spark-pi-python.yaml} | 29 +- ...-schedule.yaml => spark-pi-scheduled.yaml} | 25 +- examples/spark-pi-volcano.yaml | 43 + examples/spark-pi.yaml | 39 +- go.mod | 237 +- go.sum | 582 +++-- {pkg => internal}/controller/doc.go | 4 +- .../controller.go | 99 + .../event_filter.go | 56 + .../event_handler.go | 102 + .../scheduledsparkapplication/controller.go | 377 ++++ .../controller_test.go | 90 + .../scheduledsparkapplication/event_filter.go | 81 + .../event_handler.go | 85 + .../scheduledsparkapplication/suite_test.go | 94 + .../controller/sparkapplication/controller.go | 1217 +++++++++++ .../sparkapplication/controller_test.go | 290 +++ .../sparkapplication/driveringress.go | 158 +- .../sparkapplication/driveringress_test.go | 713 ++++++ 
.../sparkapplication/event_filter.go | 207 ++ .../sparkapplication/event_handler.go | 220 ++ .../sparkapplication/monitoring_config.go | 89 +- .../monitoring_config_test.go | 255 +++ .../controller/sparkapplication/submission.go | 1023 +++++++++ .../sparkapplication/submission_test.go | 696 ++++++ .../controller/sparkapplication/suite_test.go | 94 + .../controller/sparkapplication/validator.go | 44 + .../controller/sparkapplication/web_ui.go | 92 + .../sparkapplication/web_ui_test.go | 655 ++++++ .../controller.go | 100 + .../event_filter.go | 56 + .../event_handler.go | 102 + internal/metrics/metrcis.go | 23 + internal/metrics/sparkapplication_metrics.go | 386 ++++ internal/metrics/sparkpod_metrics.go | 191 ++ internal/scheduler/registry.go | 75 + internal/scheduler/scheduler.go | 41 + internal/scheduler/volcano/scheduler.go | 229 ++ .../scheduler/volcano/scheduler_test.go | 44 +- .../scheduler/volcano/util.go | 19 +- {pkg => internal}/webhook/doc.go | 0 internal/webhook/resourcequota.go | 259 +++ internal/webhook/resourcequota_test.go | 41 + .../scheduledsparkapplication_defaulter.go | 49 + .../scheduledsparkapplication_validator.go | 80 + .../webhook/sparkapplication_defaulter.go | 125 ++ .../webhook/sparkapplication_validator.go | 173 ++ internal/webhook/sparkpod_defaulter.go | 732 +++++++ .../webhook/sparkpod_defaulter_test.go | 518 +++-- internal/webhook/suite_test.go | 150 ++ internal/webhook/webhook.go | 37 + main.go | 312 --- .../spark-application-rbac/kustomization.yaml | 23 - .../spark-application-rbac.yaml | 52 - .../spark-operator-install/kustomization.yaml | 25 - .../spark-operator-rbac.yaml | 97 - .../spark-operator.yaml | 45 - .../kustomization.yaml | 27 - .../spark-operator-patch.yaml | 40 - .../spark-operator-webhook.yaml | 53 - .../spark-operator-with-webhook.yaml | 96 - pkg/batchscheduler/scheduler_manager.go | 80 - .../volcano/volcano_scheduler.go | 307 --- pkg/certificate/certificate.go | 307 +++ pkg/certificate/certificate_test.go | 175 ++ pkg/certificate/doc.go | 17 + pkg/certificate/suite_test.go | 94 + pkg/{util/cert.go => certificate/util.go} | 24 +- pkg/certificate/util_test.go | 58 + .../clientset/versioned/fake/register.go | 4 +- .../clientset/versioned/scheme/register.go | 4 +- .../fake/fake_scheduledsparkapplication.go | 2 +- .../v1beta1/fake/fake_sparkapplication.go | 2 +- .../v1beta1/scheduledsparkapplication.go | 2 +- .../v1beta1/sparkapplication.go | 2 +- .../v1beta1/sparkoperator.k8s.io_client.go | 4 +- .../fake/fake_scheduledsparkapplication.go | 2 +- .../v1beta2/fake/fake_sparkapplication.go | 2 +- .../v1beta2/scheduledsparkapplication.go | 2 +- .../v1beta2/sparkapplication.go | 2 +- .../v1beta2/sparkoperator.k8s.io_client.go | 2 +- .../informers/externalversions/generic.go | 12 +- .../v1beta1/scheduledsparkapplication.go | 2 +- .../v1beta1/sparkapplication.go | 2 +- .../v1beta2/scheduledsparkapplication.go | 2 +- .../v1beta2/sparkapplication.go | 2 +- .../v1beta1/scheduledsparkapplication.go | 2 +- .../v1beta1/sparkapplication.go | 2 +- .../v1beta2/scheduledsparkapplication.go | 2 +- .../v1beta2/sparkapplication.go | 2 +- pkg/common/constants.go | 50 + pkg/{config => common}/doc.go | 4 +- pkg/common/event.go | 58 + pkg/common/metrics.go | 49 + pkg/common/prometheus.go | 139 ++ pkg/common/spark.go | 370 ++++ pkg/common/volcano.go | 23 + pkg/config/config.go | 58 - pkg/config/config_test.go | 74 - pkg/config/constants.go | 317 --- pkg/config/secret.go | 74 - pkg/config/secret_test.go | 107 - .../scheduledsparkapplication/controller.go | 425 ---- 
.../controller_test.go | 552 ----- pkg/controller/sparkapplication/controller.go | 1130 ---------- .../sparkapplication/controller_test.go | 1674 --------------- .../sparkapplication/driveringress_test.go | 730 ------- .../monitoring_config_test.go | 267 --- .../spark_pod_eventhandler.go | 97 - .../spark_pod_eventhandler_test.go | 288 --- .../sparkapplication/sparkapp_metrics.go | 336 --- .../sparkapplication/sparkapp_metrics_test.go | 74 - .../sparkapplication/sparkapp_util.go | 218 -- .../sparkapplication/sparkapp_util_test.go | 59 - pkg/controller/sparkapplication/sparkui.go | 96 - .../sparkapplication/sparkui_test.go | 673 ------ pkg/controller/sparkapplication/submission.go | 532 ----- .../sparkapplication/submission_test.go | 695 ------ pkg/util/capabilities.go | 25 +- pkg/util/cert_test.go | 39 - pkg/util/histogram_buckets.go | 43 - pkg/util/metrics.go | 191 -- pkg/util/metrics_test.go | 68 - .../scheme.go => util/resourcequota.go} | 33 +- pkg/util/sparkapplication.go | 430 ++++ pkg/util/sparkapplication_test.go | 330 +++ pkg/util/sparkpod.go | 48 + pkg/util/sparkpod_test.go | 301 +++ pkg/util/suite_test.go | 37 + pkg/util/util.go | 76 +- pkg/util/util_test.go | 131 ++ pkg/webhook/certs.go | 170 -- pkg/webhook/certs_test.go | 118 - pkg/webhook/patch.go | 856 -------- pkg/webhook/resourceusage/enforcer.go | 95 - pkg/webhook/resourceusage/handlers.go | 119 - pkg/webhook/resourceusage/util.go | 241 --- pkg/webhook/resourceusage/util_test.go | 25 - pkg/webhook/resourceusage/watcher.go | 157 -- pkg/webhook/webhook.go | 657 ------ pkg/webhook/webhook_test.go | 310 --- sparkctl/README.md | 92 +- sparkctl/build.sh | 18 +- sparkctl/cmd/client.go | 2 +- sparkctl/cmd/create.go | 21 +- sparkctl/cmd/create_test.go | 24 +- sparkctl/cmd/delete.go | 3 +- sparkctl/cmd/event.go | 5 +- sparkctl/cmd/forward.go | 6 +- sparkctl/cmd/gcs.go | 8 +- sparkctl/cmd/list.go | 5 +- sparkctl/cmd/log.go | 22 +- sparkctl/cmd/s3.go | 2 +- sparkctl/cmd/status.go | 4 +- test/e2e/README.md | 33 - test/e2e/basic_test.go | 118 - test/e2e/framework/cluster_role.go | 101 - test/e2e/framework/cluster_role_binding.go | 103 - test/e2e/framework/config_map.go | 62 - test/e2e/framework/context.go | 75 - test/e2e/framework/deployment.go | 86 - test/e2e/framework/framework.go | 216 -- test/e2e/framework/helpers.go | 144 -- test/e2e/framework/job.go | 106 - test/e2e/framework/namespace.go | 64 - test/e2e/framework/operator.go | 47 - test/e2e/framework/role.go | 100 - test/e2e/framework/role_binding.go | 108 - test/e2e/framework/service.go | 122 -- test/e2e/framework/service_account.go | 89 - test/e2e/framework/sparkapplication.go | 75 - test/e2e/lifecycle_test.go | 103 - test/e2e/main_test.go | 84 - test/e2e/sparkapplication_test.go | 267 +++ test/e2e/suit_test.go | 159 ++ test/e2e/volume_mount_test.go | 92 - version.go | 90 + 291 files changed, 20893 insertions(+), 18910 deletions(-) create mode 100644 .golangci.yaml create mode 100644 PROJECT rename {pkg/apis/sparkoperator.k8s.io => api}/v1beta1/defaults.go (100%) rename {pkg/apis/sparkoperator.k8s.io => api}/v1beta1/doc.go (100%) create mode 100644 api/v1beta1/groupversion_info.go rename {pkg/apis/sparkoperator.k8s.io => api}/v1beta1/register.go (53%) create mode 100644 api/v1beta1/scheduledsparkapplication_types.go rename pkg/apis/sparkoperator.k8s.io/v1beta1/types.go => api/v1beta1/sparkapplication_types.go (83%) rename {pkg/apis/sparkoperator.k8s.io => api}/v1beta1/zz_generated.deepcopy.go (99%) rename {pkg/apis/sparkoperator.k8s.io => api}/v1beta2/defaults.go (92%) rename 
{pkg/apis/sparkoperator.k8s.io => api}/v1beta2/defaults_test.go (92%) rename {pkg/apis/sparkoperator.k8s.io => api}/v1beta2/doc.go (100%) create mode 100644 api/v1beta2/groupversion_info.go rename pkg/apis/sparkoperator.k8s.io/register.go => api/v1beta2/pod_webhook.go (84%) rename {pkg/apis/sparkoperator.k8s.io => api}/v1beta2/register.go (53%) create mode 100644 api/v1beta2/scheduledsparkapplication_types.go rename pkg/apis/sparkoperator.k8s.io/v1beta2/types.go => api/v1beta2/sparkapplication_types.go (75%) rename {pkg/apis/sparkoperator.k8s.io => api}/v1beta2/zz_generated.deepcopy.go (99%) create mode 100644 charts/spark-operator-chart/templates/controller/_helpers.tpl create mode 100644 charts/spark-operator-chart/templates/controller/deployment.yaml create mode 100644 charts/spark-operator-chart/templates/controller/poddisruptionbudget.yaml create mode 100644 charts/spark-operator-chart/templates/controller/rbac.yaml create mode 100644 charts/spark-operator-chart/templates/controller/serviceaccount.yaml delete mode 100644 charts/spark-operator-chart/templates/deployment.yaml delete mode 100644 charts/spark-operator-chart/templates/poddisruptionbudget.yaml delete mode 100644 charts/spark-operator-chart/templates/prometheus-podmonitor.yaml create mode 100644 charts/spark-operator-chart/templates/prometheus/_helpers.tpl create mode 100644 charts/spark-operator-chart/templates/prometheus/podmonitor.yaml delete mode 100644 charts/spark-operator-chart/templates/rbac.yaml delete mode 100644 charts/spark-operator-chart/templates/serviceaccount.yaml delete mode 100644 charts/spark-operator-chart/templates/spark-rbac.yaml delete mode 100644 charts/spark-operator-chart/templates/spark-serviceaccount.yaml create mode 100644 charts/spark-operator-chart/templates/spark/_helpers.tpl create mode 100644 charts/spark-operator-chart/templates/spark/rbac.yaml create mode 100644 charts/spark-operator-chart/templates/spark/serviceaccount.yaml create mode 100644 charts/spark-operator-chart/templates/webhook/deployment.yaml create mode 100644 charts/spark-operator-chart/templates/webhook/mutatingwebhookconfiguration.yaml create mode 100644 charts/spark-operator-chart/templates/webhook/poddisruptionbudget.yaml create mode 100644 charts/spark-operator-chart/templates/webhook/rbac.yaml delete mode 100644 charts/spark-operator-chart/templates/webhook/secret.yaml create mode 100644 charts/spark-operator-chart/templates/webhook/serviceaccount.yaml create mode 100644 charts/spark-operator-chart/templates/webhook/validatingwebhookconfiguration.yaml create mode 100644 charts/spark-operator-chart/tests/controller/deployment_test.yaml create mode 100644 charts/spark-operator-chart/tests/controller/poddisruptionbudget_test.yaml create mode 100644 charts/spark-operator-chart/tests/controller/rbac_test.yaml create mode 100644 charts/spark-operator-chart/tests/controller/serviceaccount_test.yaml delete mode 100644 charts/spark-operator-chart/tests/deployment_test.yaml delete mode 100644 charts/spark-operator-chart/tests/poddisruptionbudget_test.yaml create mode 100644 charts/spark-operator-chart/tests/prometheus/podmonitor_test.yaml delete mode 100644 charts/spark-operator-chart/tests/rbac_test.yaml delete mode 100644 charts/spark-operator-chart/tests/serviceaccount_test.yaml delete mode 100644 charts/spark-operator-chart/tests/spark-rbac_test.yaml delete mode 100644 charts/spark-operator-chart/tests/spark-serviceaccount_test.yaml create mode 100644 charts/spark-operator-chart/tests/spark/rbac_test.yaml create mode 100644 
charts/spark-operator-chart/tests/spark/serviceaccount_test.yaml create mode 100644 charts/spark-operator-chart/tests/webhook/deployment_test.yaml create mode 100644 charts/spark-operator-chart/tests/webhook/mutatingwebhookconfiguration_test.yaml create mode 100644 charts/spark-operator-chart/tests/webhook/poddisruptionbudget_test.yaml delete mode 100644 charts/spark-operator-chart/tests/webhook/secret_test.yaml create mode 100644 charts/spark-operator-chart/tests/webhook/validatingwebhookconfiguration_test.yaml create mode 100644 cmd/main.go rename pkg/batchscheduler/interface/interface.go => cmd/operator/controller/root.go (60%) create mode 100644 cmd/operator/controller/start.go create mode 100644 cmd/operator/root.go rename pkg/controller/scheduledsparkapplication/controller_util.go => cmd/operator/version/root.go (53%) create mode 100644 cmd/operator/webhook/root.go create mode 100644 cmd/operator/webhook/start.go create mode 100644 codecov.yaml create mode 100644 config/certmanager/certificate.yaml create mode 100644 config/certmanager/kustomization.yaml create mode 100644 config/certmanager/kustomizeconfig.yaml create mode 100644 config/crd/patches/cainjection_in_sparkapplications.yaml create mode 100644 config/crd/patches/webhook_in_sparkapplications.yaml create mode 100644 config/default/manager_webhook_patch.yaml create mode 100644 config/default/webhookcainjection_patch.yaml create mode 100644 config/rbac/role.yaml create mode 100644 config/rbac/scheduledsparkapplication_editor_role.yaml create mode 100644 config/rbac/scheduledsparkapplication_viewer_role.yaml create mode 100644 config/rbac/sparkapplication_editor_role.yaml create mode 100644 config/rbac/sparkapplication_viewer_role.yaml create mode 100644 config/samples/kustomization.yaml create mode 100644 config/samples/v1beta1_scheduledsparkapplication.yaml create mode 100644 config/samples/v1beta1_sparkapplication.yaml create mode 100644 config/samples/v1beta2_scheduledsparkapplication.yaml create mode 100644 config/samples/v1beta2_sparkapplication.yaml create mode 100644 config/webhook/kustomization.yaml create mode 100644 config/webhook/kustomizeconfig.yaml create mode 100644 config/webhook/manifests.yaml create mode 100644 config/webhook/service.yaml delete mode 100644 examples/spark-operator-with-metrics.yaml delete mode 100644 examples/spark-operator-with-webhook.yaml create mode 100644 examples/spark-pi-dynamic-allocation.yaml rename examples/{spark-py-pi.yaml => spark-pi-python.yaml} (72%) rename examples/{spark-pi-schedule.yaml => spark-pi-scheduled.yaml} (76%) create mode 100644 examples/spark-pi-volcano.yaml rename {pkg => internal}/controller/doc.go (95%) create mode 100644 internal/controller/mutatingwebhookconfiguration/controller.go create mode 100644 internal/controller/mutatingwebhookconfiguration/event_filter.go create mode 100644 internal/controller/mutatingwebhookconfiguration/event_handler.go create mode 100644 internal/controller/scheduledsparkapplication/controller.go create mode 100644 internal/controller/scheduledsparkapplication/controller_test.go create mode 100644 internal/controller/scheduledsparkapplication/event_filter.go create mode 100644 internal/controller/scheduledsparkapplication/event_handler.go create mode 100644 internal/controller/scheduledsparkapplication/suite_test.go create mode 100644 internal/controller/sparkapplication/controller.go create mode 100644 internal/controller/sparkapplication/controller_test.go rename {pkg => internal}/controller/sparkapplication/driveringress.go 
(65%) create mode 100644 internal/controller/sparkapplication/driveringress_test.go create mode 100644 internal/controller/sparkapplication/event_filter.go create mode 100644 internal/controller/sparkapplication/event_handler.go rename {pkg => internal}/controller/sparkapplication/monitoring_config.go (55%) create mode 100644 internal/controller/sparkapplication/monitoring_config_test.go create mode 100644 internal/controller/sparkapplication/submission.go create mode 100644 internal/controller/sparkapplication/submission_test.go create mode 100644 internal/controller/sparkapplication/suite_test.go create mode 100644 internal/controller/sparkapplication/validator.go create mode 100644 internal/controller/sparkapplication/web_ui.go create mode 100644 internal/controller/sparkapplication/web_ui_test.go create mode 100644 internal/controller/validatingwebhookconfiguration/controller.go create mode 100644 internal/controller/validatingwebhookconfiguration/event_filter.go create mode 100644 internal/controller/validatingwebhookconfiguration/event_handler.go create mode 100644 internal/metrics/metrcis.go create mode 100644 internal/metrics/sparkapplication_metrics.go create mode 100644 internal/metrics/sparkpod_metrics.go create mode 100644 internal/scheduler/registry.go create mode 100644 internal/scheduler/scheduler.go create mode 100644 internal/scheduler/volcano/scheduler.go rename pkg/batchscheduler/volcano/volcano_scheduler_test.go => internal/scheduler/volcano/scheduler_test.go (80%) rename pkg/util/array_flag.go => internal/scheduler/volcano/util.go (70%) rename {pkg => internal}/webhook/doc.go (100%) create mode 100644 internal/webhook/resourcequota.go create mode 100644 internal/webhook/resourcequota_test.go create mode 100644 internal/webhook/scheduledsparkapplication_defaulter.go create mode 100644 internal/webhook/scheduledsparkapplication_validator.go create mode 100644 internal/webhook/sparkapplication_defaulter.go create mode 100644 internal/webhook/sparkapplication_validator.go create mode 100644 internal/webhook/sparkpod_defaulter.go rename pkg/webhook/patch_test.go => internal/webhook/sparkpod_defaulter_test.go (76%) create mode 100644 internal/webhook/suite_test.go create mode 100644 internal/webhook/webhook.go delete mode 100644 main.go delete mode 100644 manifest/spark-application-rbac/kustomization.yaml delete mode 100644 manifest/spark-application-rbac/spark-application-rbac.yaml delete mode 100644 manifest/spark-operator-install/kustomization.yaml delete mode 100644 manifest/spark-operator-install/spark-operator-rbac.yaml delete mode 100644 manifest/spark-operator-install/spark-operator.yaml delete mode 100644 manifest/spark-operator-with-webhook-install/kustomization.yaml delete mode 100644 manifest/spark-operator-with-webhook-install/spark-operator-patch.yaml delete mode 100644 manifest/spark-operator-with-webhook-install/spark-operator-webhook.yaml delete mode 100644 manifest/spark-operator-with-webhook-install/spark-operator-with-webhook.yaml delete mode 100644 pkg/batchscheduler/scheduler_manager.go delete mode 100644 pkg/batchscheduler/volcano/volcano_scheduler.go create mode 100644 pkg/certificate/certificate.go create mode 100644 pkg/certificate/certificate_test.go create mode 100644 pkg/certificate/doc.go create mode 100644 pkg/certificate/suite_test.go rename pkg/{util/cert.go => certificate/util.go} (72%) create mode 100644 pkg/certificate/util_test.go create mode 100644 pkg/common/constants.go rename pkg/{config => common}/doc.go (89%) create mode 100644 
pkg/common/event.go create mode 100644 pkg/common/metrics.go create mode 100644 pkg/common/prometheus.go create mode 100644 pkg/common/spark.go create mode 100644 pkg/common/volcano.go delete mode 100644 pkg/config/config.go delete mode 100644 pkg/config/config_test.go delete mode 100644 pkg/config/constants.go delete mode 100644 pkg/config/secret.go delete mode 100644 pkg/config/secret_test.go delete mode 100644 pkg/controller/scheduledsparkapplication/controller.go delete mode 100644 pkg/controller/scheduledsparkapplication/controller_test.go delete mode 100644 pkg/controller/sparkapplication/controller.go delete mode 100644 pkg/controller/sparkapplication/controller_test.go delete mode 100644 pkg/controller/sparkapplication/driveringress_test.go delete mode 100644 pkg/controller/sparkapplication/monitoring_config_test.go delete mode 100644 pkg/controller/sparkapplication/spark_pod_eventhandler.go delete mode 100644 pkg/controller/sparkapplication/spark_pod_eventhandler_test.go delete mode 100644 pkg/controller/sparkapplication/sparkapp_metrics.go delete mode 100644 pkg/controller/sparkapplication/sparkapp_metrics_test.go delete mode 100644 pkg/controller/sparkapplication/sparkapp_util.go delete mode 100644 pkg/controller/sparkapplication/sparkapp_util_test.go delete mode 100644 pkg/controller/sparkapplication/sparkui.go delete mode 100644 pkg/controller/sparkapplication/sparkui_test.go delete mode 100644 pkg/controller/sparkapplication/submission.go delete mode 100644 pkg/controller/sparkapplication/submission_test.go delete mode 100644 pkg/util/cert_test.go delete mode 100644 pkg/util/histogram_buckets.go delete mode 100644 pkg/util/metrics_test.go rename pkg/{webhook/scheme.go => util/resourcequota.go} (57%) create mode 100644 pkg/util/sparkapplication.go create mode 100644 pkg/util/sparkapplication_test.go create mode 100644 pkg/util/sparkpod.go create mode 100644 pkg/util/sparkpod_test.go create mode 100644 pkg/util/suite_test.go create mode 100644 pkg/util/util_test.go delete mode 100644 pkg/webhook/certs.go delete mode 100644 pkg/webhook/certs_test.go delete mode 100644 pkg/webhook/patch.go delete mode 100644 pkg/webhook/resourceusage/enforcer.go delete mode 100644 pkg/webhook/resourceusage/handlers.go delete mode 100644 pkg/webhook/resourceusage/util.go delete mode 100644 pkg/webhook/resourceusage/util_test.go delete mode 100644 pkg/webhook/resourceusage/watcher.go delete mode 100644 pkg/webhook/webhook.go delete mode 100644 pkg/webhook/webhook_test.go delete mode 100644 test/e2e/README.md delete mode 100644 test/e2e/basic_test.go delete mode 100644 test/e2e/framework/cluster_role.go delete mode 100644 test/e2e/framework/cluster_role_binding.go delete mode 100644 test/e2e/framework/config_map.go delete mode 100644 test/e2e/framework/context.go delete mode 100644 test/e2e/framework/deployment.go delete mode 100644 test/e2e/framework/framework.go delete mode 100644 test/e2e/framework/helpers.go delete mode 100644 test/e2e/framework/job.go delete mode 100644 test/e2e/framework/namespace.go delete mode 100644 test/e2e/framework/operator.go delete mode 100644 test/e2e/framework/role.go delete mode 100644 test/e2e/framework/role_binding.go delete mode 100644 test/e2e/framework/service.go delete mode 100644 test/e2e/framework/service_account.go delete mode 100644 test/e2e/framework/sparkapplication.go delete mode 100644 test/e2e/lifecycle_test.go delete mode 100644 test/e2e/main_test.go create mode 100644 test/e2e/sparkapplication_test.go create mode 100644 test/e2e/suit_test.go delete 
mode 100644 test/e2e/volume_mount_test.go create mode 100644 version.go diff --git a/.golangci.yaml b/.golangci.yaml new file mode 100644 index 0000000000..01f5a3e582 --- /dev/null +++ b/.golangci.yaml @@ -0,0 +1,39 @@ +run: + deadline: 5m + +linters: + enable: + - revive + - gci + - depguard + - godot + - testifylint + - unconvert + +issues: + exclude-rules: + # Disable errcheck linter for test files. + - path: _test.go + linters: + - errcheck + +linters-settings: + gci: + sections: + - standard + - default + - prefix(github.com/kubeflow/spark-operator) + depguard: + Main: + files: + - $all + - "!$test" + listMode: Lax + deny: + reflect: Please don't use reflect package + Test: + files: + - $test + listMode: Lax + deny: + reflect: Please don't use reflect package diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0385ecba56..7086ba78d4 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -7,3 +7,4 @@ repos: # Make the tool search for charts only under the `charts` directory - --chart-search-root=charts - --template-files=README.md.gotmpl + - --sort-values-order=file diff --git a/Dockerfile b/Dockerfile index c126cca210..61815e195b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -16,33 +16,26 @@ ARG SPARK_IMAGE=spark:3.5.0 -FROM golang:1.22-alpine as builder +FROM golang:1.22.5 AS builder WORKDIR /workspace -# Copy the Go Modules manifests -COPY go.mod go.mod -COPY go.sum go.sum -# Cache deps before building and copying source so that we don't need to re-download as much -# and so that source changes don't invalidate our downloaded layer -RUN go mod download +COPY . . -# Copy the go source code -COPY main.go main.go -COPY pkg/ pkg/ - -# Build ARG TARGETARCH -RUN CGO_ENABLED=0 GOOS=linux GOARCH=${TARGETARCH} GO111MODULE=on go build -a -o /usr/bin/spark-operator main.go + +RUN CGO_ENABLED=0 GOOS=linux GOARCH=${TARGETARCH} GO111MODULE=on make build-operator FROM ${SPARK_IMAGE} + USER root -COPY --from=builder /usr/bin/spark-operator /usr/bin/ -RUN apt-get update --allow-releaseinfo-change \ - && apt-get update \ + +RUN apt-get update \ && apt-get install -y tini \ && rm -rf /var/lib/apt/lists/* +COPY --from=builder /workspace/bin/spark-operator /usr/bin/spark-operator + COPY entrypoint.sh /usr/bin/ ENTRYPOINT ["/usr/bin/entrypoint.sh"] diff --git a/Makefile b/Makefile index 966e027ddd..30ba67c7c4 100644 --- a/Makefile +++ b/Makefile @@ -12,10 +12,18 @@ endif SHELL = /usr/bin/env bash -o pipefail .SHELLFLAGS = -ec +# Version information. 
+VERSION=$(shell cat VERSION | sed "s/^v//") +BUILD_DATE = $(shell date -u +"%Y-%m-%dT%H:%M:%S%:z") +GIT_COMMIT = $(shell git rev-parse HEAD) +GIT_TAG = $(shell if [ -z "`git status --porcelain`" ]; then git describe --exact-match --tags HEAD 2>/dev/null; fi) +GIT_TREE_STATE = $(shell if [ -z "`git status --porcelain`" ]; then echo "clean" ; else echo "dirty"; fi) +GIT_SHA = $(shell git rev-parse --short HEAD || echo "HEAD") +GIT_VERSION = ${VERSION}-${GIT_SHA} + REPO=github.com/kubeflow/spark-operator SPARK_OPERATOR_GOPATH=/go/src/github.com/kubeflow/spark-operator SPARK_OPERATOR_CHART_PATH=charts/spark-operator-chart -OPERATOR_VERSION ?= $$(grep appVersion $(SPARK_OPERATOR_CHART_PATH)/Chart.yaml | awk '{print $$2}') DEP_VERSION:=`grep DEP_VERSION= Dockerfile | awk -F\" '{print $$2}'` BUILDER=`grep "FROM golang:" Dockerfile | awk '{print $$2}'` UNAME:=`uname | tr '[:upper:]' '[:lower:]'` @@ -27,9 +35,18 @@ UNAME:=`uname | tr '[:upper:]' '[:lower:]'` CONTAINER_TOOL ?= docker # Image URL to use all building/pushing image targets -IMAGE_REPOSITORY ?= docker.io/kubeflow/spark-operator -IMAGE_TAG ?= $(OPERATOR_VERSION) -OPERATOR_IMAGE ?= $(IMAGE_REPOSITORY):$(IMAGE_TAG) +IMAGE_REGISTRY ?= docker.io +IMAGE_REPOSITORY ?= kubeflow/spark-operator +IMAGE_TAG ?= $(VERSION) +IMAGE ?= $(IMAGE_REGISTRY)/$(IMAGE_REPOSITORY):$(IMAGE_TAG) + +# Kind cluster +KIND_CLUSTER_NAME ?= spark-operator +KIND_CONFIG_FILE ?= charts/spark-operator-chart/ci/kind-config.yaml +KIND_KUBE_CONFIG ?= $(HOME)/.kube/config + +# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary. +ENVTEST_K8S_VERSION = 1.29.3 ##@ General @@ -46,7 +63,11 @@ OPERATOR_IMAGE ?= $(IMAGE_REPOSITORY):$(IMAGE_TAG) .PHONY: help help: ## Display this help. - @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-30s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +.PHONY: version +version: ## Print version information. + @echo "Version: ${VERSION}" ##@ Development @@ -94,20 +115,28 @@ lint-fix: golangci-lint ## Run golangci-lint linter and perform fixes. $(GOLANGCI_LINT) run --fix .PHONY: unit-test -unit-test: clean ## Run go unit tests. - @echo "running unit tests" +unit-test: envtest ## Run unit tests. + @echo "Running unit tests..." + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test $(shell go list ./... | grep -v /e2e) -coverprofile cover.out .PHONY: e2e-test -e2e-test: clean ## Run go integration tests. - @echo "running integration tests" - go test -v ./test/e2e/ --kubeconfig "$(HOME)/.kube/config" --operator-image=docker.io/spark-operator/spark-operator:local +e2e-test: envtest ## Run the e2e tests against a Kind k8s instance that is spun up. + @echo "Running e2e tests..." + go test ./test/e2e/ -v -ginkgo.v -timeout 30m ##@ Build +override LDFLAGS += \ + -X ${REPO}.version=v${VERSION} \ + -X ${REPO}.buildDate=${BUILD_DATE} \ + -X ${REPO}.gitCommit=${GIT_COMMIT} \ + -X ${REPO}.gitTreeState=${GIT_TREE_STATE} \ + -extldflags "-static" + .PHONY: build-operator -build-operator: ## Build spark-operator binary. 
- go build -o bin/spark-operator main.go +build-operator: ## Build Spark operator + go build -o bin/spark-operator -ldflags '${LDFLAGS}' cmd/main.go .PHONY: build-sparkctl build-sparkctl: ## Build sparkctl binary. @@ -117,7 +146,7 @@ build-sparkctl: ## Build sparkctl binary. -v $$(pwd):$(SPARK_OPERATOR_GOPATH) $(BUILDER) sh -c \ "apk add --no-cache bash git && \ cd sparkctl && \ - ./build.sh" || true + bash build.sh" || true .PHONY: install-sparkctl install-sparkctl: | sparkctl/sparkctl-darwin-amd64 sparkctl/sparkctl-linux-amd64 ## Install sparkctl binary. @@ -141,7 +170,7 @@ clean-sparkctl: ## Clean sparkctl binary. build-api-docs: gen-crd-api-reference-docs ## Build api documentaion. $(GEN_CRD_API_REFERENCE_DOCS) \ -config hack/api-docs/config.json \ - -api-dir github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2 \ + -api-dir github.com/kubeflow/spark-operator/api/v1beta2 \ -template-dir hack/api-docs/template \ -out-file docs/api-docs.md @@ -150,11 +179,11 @@ build-api-docs: gen-crd-api-reference-docs ## Build api documentaion. # More info: https://docs.docker.com/develop/develop-images/build_enhancements/ .PHONY: docker-build docker-build: ## Build docker image with the operator. - $(CONTAINER_TOOL) build -t ${IMAGE_REPOSITORY}:${IMAGE_TAG} . + $(CONTAINER_TOOL) build -t ${IMAGE} . .PHONY: docker-push docker-push: ## Push docker image with the operator. - $(CONTAINER_TOOL) push ${IMAGE_REPOSITORY}:${IMAGE_TAG} + $(CONTAINER_TOOL) push ${IMAGE} # PLATFORMS defines the target platforms for the operator image be built to provide support to multiple # architectures. (i.e. make docker-buildx IMG=myregistry/mypoperator:0.0.1). To use this option you need to: @@ -164,14 +193,11 @@ docker-push: ## Push docker image with the operator. # To adequately provide solutions that are compatible with multiple platforms, you should consider using this option. PLATFORMS ?= linux/amd64,linux/arm64 .PHONY: docker-buildx -docker-buildx: ## Build and push docker image for the operator for cross-platform support. - # copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile - sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross +docker-buildx: ## Build and push docker image for the operator for cross-platform support - $(CONTAINER_TOOL) buildx create --name spark-operator-builder $(CONTAINER_TOOL) buildx use spark-operator-builder - - $(CONTAINER_TOOL) buildx build --push --platform=$(PLATFORMS) --tag ${IMAGE_REPOSITORY}:${IMAGE_TAG} -f Dockerfile.cross . + - $(CONTAINER_TOOL) buildx build --push --platform=$(PLATFORMS) --tag ${IMAGE} -f Dockerfile . - $(CONTAINER_TOOL) buildx rm spark-operator-builder - rm Dockerfile.cross ##@ Helm @@ -185,11 +211,11 @@ helm-unittest: helm-unittest-plugin ## Run Helm chart unittests. .PHONY: helm-lint helm-lint: ## Run Helm chart lint test. - docker run --rm --workdir /workspace --volume "$$(pwd):/workspace" quay.io/helmpack/chart-testing:latest ct lint --target-branch master + docker run --rm --workdir /workspace --volume "$$(pwd):/workspace" quay.io/helmpack/chart-testing:latest ct lint --target-branch master --validate-maintainers=false .PHONY: helm-docs -helm-docs: ## Generates markdown documentation for helm charts from requirements and values files. 
- docker run --rm --volume "$$(pwd):/helm-docs" -u "$(id -u)" jnorwood/helm-docs:latest
+helm-docs: helm-docs-plugin ## Generates markdown documentation for helm charts from requirements and values files.
+	$(HELM_DOCS) --sort-values-order=file

##@ Deployment

@@ -197,12 +223,27 @@ ifndef ignore-not-found
 ignore-not-found = false
endif

-.PHONY: install-crds
-install-crds: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config.
-	$(KUSTOMIZE) build config/crd | $(KUBECTL) create -f -
+.PHONY: kind-create-cluster
+kind-create-cluster: kind ## Create a kind cluster for integration tests.
+	if ! $(KIND) get clusters 2>/dev/null | grep -q "^$(KIND_CLUSTER_NAME)$$"; then \
+		kind create cluster --name $(KIND_CLUSTER_NAME) --config $(KIND_CONFIG_FILE) --kubeconfig $(KIND_KUBE_CONFIG); \
+	fi
+
+.PHONY: kind-load-image
+kind-load-image: kind-create-cluster docker-build ## Load the image into the kind cluster.
+	kind load docker-image --name $(KIND_CLUSTER_NAME) $(IMAGE)
+
+.PHONY: kind-delete-cluster
+kind-delete-cluster: kind ## Delete the created kind cluster.
+	$(KIND) delete cluster --name $(KIND_CLUSTER_NAME) && \
+	rm -f $(KIND_KUBE_CONFIG)

-.PHONY: uninstall-crds
-uninstall-crds: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
+.PHONY: install-crd
+install-crd: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config.
+	$(KUSTOMIZE) build config/crd | $(KUBECTL) apply -f -
+
+.PHONY: uninstall-crd
+uninstall-crd: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
 	$(KUSTOMIZE) build config/crd | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f -

.PHONY: deploy
@@ -231,6 +272,7 @@ GOLANGCI_LINT = $(LOCALBIN)/golangci-lint-$(GOLANGCI_LINT_VERSION)
 GEN_CRD_API_REFERENCE_DOCS ?= $(LOCALBIN)/gen-crd-api-reference-docs-$(GEN_CRD_API_REFERENCE_DOCS_VERSION)
 HELM ?= helm
 HELM_UNITTEST ?= unittest
+HELM_DOCS ?= $(LOCALBIN)/helm-docs-$(HELM_DOCS_VERSION)

## Tool Versions
KUSTOMIZE_VERSION ?= v5.4.1
@@ -240,6 +282,7 @@ ENVTEST_VERSION ?= release-0.18
 GOLANGCI_LINT_VERSION ?= v1.57.2
 GEN_CRD_API_REFERENCE_DOCS_VERSION ?= v0.3.0
 HELM_UNITTEST_VERSION ?= 0.5.1
+HELM_DOCS_VERSION ?= v1.14.2

.PHONY: kustomize
kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary.
@@ -274,10 +317,14 @@ $(GEN_CRD_API_REFERENCE_DOCS): $(LOCALBIN)
.PHONY: helm-unittest-plugin
helm-unittest-plugin: ## Download helm unittest plugin locally if necessary.
	if [ -z "$(shell helm plugin list | grep unittest)" ]; then \
-		echo "Installing helm unittest plugin..."; \
+		echo "Installing helm unittest plugin"; \
		helm plugin install https://github.com/helm-unittest/helm-unittest.git --version $(HELM_UNITTEST_VERSION); \
	fi

+.PHONY: helm-docs-plugin
+helm-docs-plugin: ## Download helm-docs plugin locally if necessary.
+	$(call go-install-tool,$(HELM_DOCS),github.com/norwoodj/helm-docs/cmd/helm-docs,$(HELM_DOCS_VERSION))
+
# go-install-tool will 'go install' any package with custom target and name of binary, if it doesn't exist
# $1 - target path with name of binary (ideally with version)
# $2 - package url which can be installed
diff --git a/PROJECT b/PROJECT
new file mode 100644
index 0000000000..d71e616246
--- /dev/null
+++ b/PROJECT
@@ -0,0 +1,47 @@
+# Code generated by tool. DO NOT EDIT.
+# This file is used to track the info used to scaffold your project +# and allow the plugins properly work. +# More info: https://book.kubebuilder.io/reference/project-config.html +domain: sparkoperator.k8s.io +layout: + - go.kubebuilder.io/v4 +projectName: spark-operator +repo: github.com/kubeflow/spark-operator +resources: + - api: + crdVersion: v1 + namespaced: true + controller: true + domain: sparkoperator.k8s.io + kind: SparkApplication + path: github.com/kubeflow/spark-operator/api/v1beta1 + version: v1beta1 + - api: + crdVersion: v1 + namespaced: true + controller: true + domain: sparkoperator.k8s.io + kind: ScheduledSparkApplication + path: github.com/kubeflow/spark-operator/api/v1beta1 + version: v1beta1 + - api: + crdVersion: v1 + namespaced: true + controller: true + domain: sparkoperator.k8s.io + kind: SparkApplication + path: github.com/kubeflow/spark-operator/api/v1beta2 + version: v1beta2 + webhooks: + defaulting: true + validation: true + webhookVersion: v1 + - api: + crdVersion: v1 + namespaced: true + controller: true + domain: sparkoperator.k8s.io + kind: ScheduledSparkApplication + path: github.com/kubeflow/spark-operator/api/v1beta2 + version: v1beta2 +version: "3" diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta1/defaults.go b/api/v1beta1/defaults.go similarity index 100% rename from pkg/apis/sparkoperator.k8s.io/v1beta1/defaults.go rename to api/v1beta1/defaults.go diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta1/doc.go b/api/v1beta1/doc.go similarity index 100% rename from pkg/apis/sparkoperator.k8s.io/v1beta1/doc.go rename to api/v1beta1/doc.go diff --git a/api/v1beta1/groupversion_info.go b/api/v1beta1/groupversion_info.go new file mode 100644 index 0000000000..05b48fe1d3 --- /dev/null +++ b/api/v1beta1/groupversion_info.go @@ -0,0 +1,36 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1beta1 contains API Schema definitions for the v1beta1 API group +// +kubebuilder:object:generate=true +// +groupName=sparkoperator.k8s.io +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects. + GroupVersion = schema.GroupVersion{Group: "sparkoperator.k8s.io", Version: "v1beta1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta1/register.go b/api/v1beta1/register.go similarity index 53% rename from pkg/apis/sparkoperator.k8s.io/v1beta1/register.go rename to api/v1beta1/register.go index 0280f01cd5..df08671c2a 100644 --- a/pkg/apis/sparkoperator.k8s.io/v1beta1/register.go +++ b/api/v1beta1/register.go @@ -1,5 +1,5 @@ /* -Copyright 2017 Google LLC +Copyright 2024 The Kubeflow authors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,36 +17,18 @@ limitations under the License. package v1beta1 import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io" ) -const Version = "v1beta1" - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme +const ( + Group = "sparkoperator.k8s.io" + Version = "v1beta1" ) // SchemeGroupVersion is the group version used to register these objects. -var SchemeGroupVersion = schema.GroupVersion{Group: sparkoperator.GroupName, Version: Version} +var SchemeGroupVersion = schema.GroupVersion{Group: Group, Version: Version} // Resource takes an unqualified resource and returns a Group-qualified GroupResource. func Resource(resource string) schema.GroupResource { return SchemeGroupVersion.WithResource(resource).GroupResource() } - -// addKnownTypes adds the set of types defined in this package to the supplied scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &SparkApplication{}, - &SparkApplicationList{}, - &ScheduledSparkApplication{}, - &ScheduledSparkApplicationList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/api/v1beta1/scheduledsparkapplication_types.go b/api/v1beta1/scheduledsparkapplication_types.go new file mode 100644 index 0000000000..fd489bacbf --- /dev/null +++ b/api/v1beta1/scheduledsparkapplication_types.go @@ -0,0 +1,104 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// +kubebuilder:skip + +func init() { + SchemeBuilder.Register(&ScheduledSparkApplication{}, &ScheduledSparkApplicationList{}) +} + +// ScheduledSparkApplicationSpec defines the desired state of ScheduledSparkApplication +type ScheduledSparkApplicationSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make generate" to regenerate code after modifying this file + + // Schedule is a cron schedule on which the application should run. + Schedule string `json:"schedule"` + // Template is a template from which SparkApplication instances can be created. + Template SparkApplicationSpec `json:"template"` + // Suspend is a flag telling the controller to suspend subsequent runs of the application if set to true. + // Optional. + // Defaults to false. + Suspend *bool `json:"suspend,omitempty"` + // ConcurrencyPolicy is the policy governing concurrent SparkApplication runs. 
+ ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty"` + // SuccessfulRunHistoryLimit is the number of past successful runs of the application to keep. + // Optional. + // Defaults to 1. + SuccessfulRunHistoryLimit *int32 `json:"successfulRunHistoryLimit,omitempty"` + // FailedRunHistoryLimit is the number of past failed runs of the application to keep. + // Optional. + // Defaults to 1. + FailedRunHistoryLimit *int32 `json:"failedRunHistoryLimit,omitempty"` +} + +// ScheduledSparkApplicationStatus defines the observed state of ScheduledSparkApplication +type ScheduledSparkApplicationStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make generate" to regenerate code after modifying this file + + // LastRun is the time when the last run of the application started. + LastRun metav1.Time `json:"lastRun,omitempty"` + // NextRun is the time when the next run of the application will start. + NextRun metav1.Time `json:"nextRun,omitempty"` + // LastRunName is the name of the SparkApplication for the most recent run of the application. + LastRunName string `json:"lastRunName,omitempty"` + // PastSuccessfulRunNames keeps the names of SparkApplications for past successful runs. + PastSuccessfulRunNames []string `json:"pastSuccessfulRunNames,omitempty"` + // PastFailedRunNames keeps the names of SparkApplications for past failed runs. + PastFailedRunNames []string `json:"pastFailedRunNames,omitempty"` + // ScheduleState is the current scheduling state of the application. + ScheduleState ScheduleState `json:"scheduleState,omitempty"` + // Reason tells why the ScheduledSparkApplication is in the particular ScheduleState. + Reason string `json:"reason,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ScheduledSparkApplication is the Schema for the scheduledsparkapplications API +type ScheduledSparkApplication struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ScheduledSparkApplicationSpec `json:"spec,omitempty"` + Status ScheduledSparkApplicationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ScheduledSparkApplicationList contains a list of ScheduledSparkApplication +type ScheduledSparkApplicationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ScheduledSparkApplication `json:"items"` +} + +type ScheduleState string + +const ( + FailedValidationState ScheduleState = "FailedValidation" + ScheduledState ScheduleState = "Scheduled" +) diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta1/types.go b/api/v1beta1/sparkapplication_types.go similarity index 83% rename from pkg/apis/sparkoperator.k8s.io/v1beta1/types.go rename to api/v1beta1/sparkapplication_types.go index 84654927d8..88f5533b9b 100644 --- a/pkg/apis/sparkoperator.k8s.io/v1beta1/types.go +++ b/api/v1beta1/sparkapplication_types.go @@ -1,5 +1,5 @@ /* -Copyright 2017 Google LLC +Copyright 2024 The Kubeflow authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,149 +19,24 @@ limitations under the License. package v1beta1 import ( - apiv1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// SparkApplicationType describes the type of a Spark application. -type SparkApplicationType string - -// Different types of Spark applications. 
-const ( - JavaApplicationType SparkApplicationType = "Java" - ScalaApplicationType SparkApplicationType = "Scala" - PythonApplicationType SparkApplicationType = "Python" - RApplicationType SparkApplicationType = "R" -) - -// DeployMode describes the type of deployment of a Spark application. -type DeployMode string - -// Different types of deployments. -const ( - ClusterMode DeployMode = "cluster" - ClientMode DeployMode = "client" - InClusterClientMode DeployMode = "in-cluster-client" -) - -// RestartPolicy is the policy of if and in which conditions the controller should restart a terminated application. -// This completely defines actions to be taken on any kind of Failures during an application run. -type RestartPolicy struct { - Type RestartPolicyType `json:"type,omitempty"` - - // FailureRetries are the number of times to retry a failed application before giving up in a particular case. - // This is best effort and actual retry attempts can be >= the value specified due to caching. - // These are required if RestartPolicy is OnFailure. - OnSubmissionFailureRetries *int32 `json:"onSubmissionFailureRetries,omitempty"` - OnFailureRetries *int32 `json:"onFailureRetries,omitempty"` - - // Interval to wait between successive retries of a failed application. - OnSubmissionFailureRetryInterval *int64 `json:"onSubmissionFailureRetryInterval,omitempty"` - OnFailureRetryInterval *int64 `json:"onFailureRetryInterval,omitempty"` -} - -type RestartPolicyType string - -const ( - Never RestartPolicyType = "Never" - OnFailure RestartPolicyType = "OnFailure" - Always RestartPolicyType = "Always" -) - -// +genclient -// +genclient:noStatus -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:defaulter-gen=true - -type ScheduledSparkApplication struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata"` - Spec ScheduledSparkApplicationSpec `json:"spec"` - Status ScheduledSparkApplicationStatus `json:"status,omitempty"` -} - -type ConcurrencyPolicy string +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. -const ( - // ConcurrencyAllow allows SparkApplications to run concurrently. - ConcurrencyAllow ConcurrencyPolicy = "Allow" - // ConcurrencyForbid forbids concurrent runs of SparkApplications, skipping the next run if the previous - // one hasn't finished yet. - ConcurrencyForbid ConcurrencyPolicy = "Forbid" - // ConcurrencyReplace kills the currently running SparkApplication instance and replaces it with a new one. - ConcurrencyReplace ConcurrencyPolicy = "Replace" -) - -type ScheduledSparkApplicationSpec struct { - // Schedule is a cron schedule on which the application should run. - Schedule string `json:"schedule"` - // Template is a template from which SparkApplication instances can be created. - Template SparkApplicationSpec `json:"template"` - // Suspend is a flag telling the controller to suspend subsequent runs of the application if set to true. - // Optional. - // Defaults to false. - Suspend *bool `json:"suspend,omitempty"` - // ConcurrencyPolicy is the policy governing concurrent SparkApplication runs. - ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty"` - // SuccessfulRunHistoryLimit is the number of past successful runs of the application to keep. - // Optional. - // Defaults to 1. 
- SuccessfulRunHistoryLimit *int32 `json:"successfulRunHistoryLimit,omitempty"` - // FailedRunHistoryLimit is the number of past failed runs of the application to keep. - // Optional. - // Defaults to 1. - FailedRunHistoryLimit *int32 `json:"failedRunHistoryLimit,omitempty"` -} - -type ScheduleState string - -const ( - FailedValidationState ScheduleState = "FailedValidation" - ScheduledState ScheduleState = "Scheduled" -) - -type ScheduledSparkApplicationStatus struct { - // LastRun is the time when the last run of the application started. - LastRun metav1.Time `json:"lastRun,omitempty"` - // NextRun is the time when the next run of the application will start. - NextRun metav1.Time `json:"nextRun,omitempty"` - // LastRunName is the name of the SparkApplication for the most recent run of the application. - LastRunName string `json:"lastRunName,omitempty"` - // PastSuccessfulRunNames keeps the names of SparkApplications for past successful runs. - PastSuccessfulRunNames []string `json:"pastSuccessfulRunNames,omitempty"` - // PastFailedRunNames keeps the names of SparkApplications for past failed runs. - PastFailedRunNames []string `json:"pastFailedRunNames,omitempty"` - // ScheduleState is the current scheduling state of the application. - ScheduleState ScheduleState `json:"scheduleState,omitempty"` - // Reason tells why the ScheduledSparkApplication is in the particular ScheduleState. - Reason string `json:"reason,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ScheduledSparkApplicationList carries a list of ScheduledSparkApplication objects. -type ScheduledSparkApplicationList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []ScheduledSparkApplication `json:"items,omitempty"` -} - -// +genclient -// +genclient:noStatus -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:defaulter-gen=true +// +kubebuilder:skip -// SparkApplication represents a Spark application running on and using Kubernetes as a cluster manager. -type SparkApplication struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata"` - Spec SparkApplicationSpec `json:"spec"` - Status SparkApplicationStatus `json:"status,omitempty"` +func init() { + SchemeBuilder.Register(&SparkApplication{}, &SparkApplicationList{}) } -// SparkApplicationSpec describes the specification of a Spark application using Kubernetes as a cluster manager. -// It carries every pieces of information a spark-submit command takes and recognizes. +// SparkApplicationSpec defines the desired state of SparkApplication type SparkApplicationSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make generate" to regenerate code after modifying this file + // Type tells the type of the Spark application. Type SparkApplicationType `json:"type"` // SparkVersion is the version of Spark the application uses. @@ -210,7 +85,7 @@ type SparkApplicationSpec struct { HadoopConfigMap *string `json:"hadoopConfigMap,omitempty"` // Volumes is the list of Kubernetes volumes that can be mounted by the driver and/or executors. // Optional. - Volumes []apiv1.Volume `json:"volumes,omitempty"` + Volumes []corev1.Volume `json:"volumes,omitempty"` // Driver is the driver specification. Driver DriverSpec `json:"driver"` // Executor is the executor specification. 
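[Editor's note] The hunk above only swaps the `apiv1` import alias for `corev1`; the fields still hold plain `k8s.io/api/core/v1` values, so consumer code is unaffected beyond the alias. A minimal sketch of constructing a v1beta1 spec against the relocated package — the `github.com/kubeflow/spark-operator/api/v1beta1` import path is assumed from the renames in this patch, and the constant names are taken from the surrounding diff:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"

	v1beta1 "github.com/kubeflow/spark-operator/api/v1beta1"
)

func main() {
	// The alias rename is cosmetic: Volumes still carries core/v1 types.
	spec := v1beta1.SparkApplicationSpec{
		Type: v1beta1.ScalaApplicationType,
		Volumes: []corev1.Volume{{
			Name: "scratch",
			VolumeSource: corev1.VolumeSource{
				EmptyDir: &corev1.EmptyDirVolumeSource{},
			},
		}},
	}
	fmt.Println(spec.Type, spec.Volumes[0].Name)
}
```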
@@ -248,6 +123,111 @@ type SparkApplicationSpec struct {
 	BatchScheduler *string `json:"batchScheduler,omitempty"`
 }
 
+// SparkApplicationStatus defines the observed state of SparkApplication
+type SparkApplicationStatus struct {
+	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
+	// Important: Run "make generate" to regenerate code after modifying this file
+
+	// SparkApplicationID is set by the spark-distribution (via the spark.app.id config) on the driver and executor pods
+	SparkApplicationID string `json:"sparkApplicationId,omitempty"`
+	// SubmissionID is a unique ID of the current submission of the application.
+	SubmissionID string `json:"submissionID,omitempty"`
+	// LastSubmissionAttemptTime is the time for the last application submission attempt.
+	LastSubmissionAttemptTime metav1.Time `json:"lastSubmissionAttemptTime,omitempty"`
+	// TerminationTime is the time when the application runs to completion, if it does.
+	TerminationTime metav1.Time `json:"terminationTime,omitempty"`
+	// DriverInfo has information about the driver.
+	DriverInfo DriverInfo `json:"driverInfo"`
+	// AppState tells the overall application state.
+	AppState ApplicationState `json:"applicationState,omitempty"`
+	// ExecutorState records the state of executors by executor Pod names.
+	ExecutorState map[string]ExecutorState `json:"executorState,omitempty"`
+	// ExecutionAttempts is the total number of attempts to run a submitted application to completion.
+	// Incremented upon each attempted run of the application and reset upon invalidation.
+	ExecutionAttempts int32 `json:"executionAttempts,omitempty"`
+	// SubmissionAttempts is the total number of attempts to submit an application to run.
+	// Incremented upon each attempted submission of the application and reset upon invalidation and rerun.
+	SubmissionAttempts int32 `json:"submissionAttempts,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+
+// SparkApplication is the Schema for the sparkapplications API
+type SparkApplication struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   SparkApplicationSpec   `json:"spec,omitempty"`
+	Status SparkApplicationStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// SparkApplicationList contains a list of SparkApplication
+type SparkApplicationList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []SparkApplication `json:"items"`
+}
+
+// SparkApplicationType describes the type of a Spark application.
+type SparkApplicationType string
+
+// Different types of Spark applications.
+const (
+	JavaApplicationType   SparkApplicationType = "Java"
+	ScalaApplicationType  SparkApplicationType = "Scala"
+	PythonApplicationType SparkApplicationType = "Python"
+	RApplicationType      SparkApplicationType = "R"
+)
+
+// DeployMode describes the type of deployment of a Spark application.
+type DeployMode string
+
+// Different types of deployments.
+const (
+	ClusterMode         DeployMode = "cluster"
+	ClientMode          DeployMode = "client"
+	InClusterClientMode DeployMode = "in-cluster-client"
+)
+
+// RestartPolicy is the policy for whether, and under which conditions, the controller should restart a terminated application.
+// This completely defines the actions to be taken on any kind of failure during an application run.
+type RestartPolicy struct { + Type RestartPolicyType `json:"type,omitempty"` + + // FailureRetries are the number of times to retry a failed application before giving up in a particular case. + // This is best effort and actual retry attempts can be >= the value specified due to caching. + // These are required if RestartPolicy is OnFailure. + OnSubmissionFailureRetries *int32 `json:"onSubmissionFailureRetries,omitempty"` + OnFailureRetries *int32 `json:"onFailureRetries,omitempty"` + + // Interval to wait between successive retries of a failed application. + OnSubmissionFailureRetryInterval *int64 `json:"onSubmissionFailureRetryInterval,omitempty"` + OnFailureRetryInterval *int64 `json:"onFailureRetryInterval,omitempty"` +} + +type RestartPolicyType string + +const ( + Never RestartPolicyType = "Never" + OnFailure RestartPolicyType = "OnFailure" + Always RestartPolicyType = "Always" +) + +type ConcurrencyPolicy string + +const ( + // ConcurrencyAllow allows SparkApplications to run concurrently. + ConcurrencyAllow ConcurrencyPolicy = "Allow" + // ConcurrencyForbid forbids concurrent runs of SparkApplications, skipping the next run if the previous + // one hasn't finished yet. + ConcurrencyForbid ConcurrencyPolicy = "Forbid" + // ConcurrencyReplace kills the currently running SparkApplication instance and replaces it with a new one. + ConcurrencyReplace ConcurrencyPolicy = "Replace" +) + // ApplicationStateType represents the type of the current state of an application. type ApplicationStateType string @@ -284,39 +264,6 @@ const ( ExecutorUnknownState ExecutorState = "UNKNOWN" ) -// SparkApplicationStatus describes the current status of a Spark application. -type SparkApplicationStatus struct { - // SparkApplicationID is set by the spark-distribution(via spark.app.id config) on the driver and executor pods - SparkApplicationID string `json:"sparkApplicationId,omitempty"` - // SubmissionID is a unique ID of the current submission of the application. - SubmissionID string `json:"submissionID,omitempty"` - // LastSubmissionAttemptTime is the time for the last application submission attempt. - LastSubmissionAttemptTime metav1.Time `json:"lastSubmissionAttemptTime,omitempty"` - // CompletionTime is the time when the application runs to completion if it does. - TerminationTime metav1.Time `json:"terminationTime,omitempty"` - // DriverInfo has information about the driver. - DriverInfo DriverInfo `json:"driverInfo"` - // AppState tells the overall application state. - AppState ApplicationState `json:"applicationState,omitempty"` - // ExecutorState records the state of executors by executor Pod names. - ExecutorState map[string]ExecutorState `json:"executorState,omitempty"` - // ExecutionAttempts is the total number of attempts to run a submitted application to completion. - // Incremented upon each attempted run of the application and reset upon invalidation. - ExecutionAttempts int32 `json:"executionAttempts,omitempty"` - // SubmissionAttempts is the total number of attempts to submit an application to run. - // Incremented upon each attempted submission of the application and reset upon invalidation and rerun. - SubmissionAttempts int32 `json:"submissionAttempts,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// SparkApplicationList carries a list of SparkApplication objects. 
-type SparkApplicationList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []SparkApplication `json:"items,omitempty"` -} - // Dependencies specifies all possible types of dependencies of a Spark application. type Dependencies struct { // Jars is a list of JAR files the Spark application depends on. @@ -381,22 +328,22 @@ type SparkPodSpec struct { Annotations map[string]string `json:"annotations,omitempty"` // VolumeMounts specifies the volumes listed in ".spec.volumes" to mount into the main container's filesystem. // Optional. - VolumeMounts []apiv1.VolumeMount `json:"volumeMounts,omitempty"` + VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"` // Affinity specifies the affinity/anti-affinity settings for the pod. // Optional. - Affinity *apiv1.Affinity `json:"affinity,omitempty"` + Affinity *corev1.Affinity `json:"affinity,omitempty"` // Tolerations specifies the tolerations listed in ".spec.tolerations" to be applied to the pod. // Optional. - Tolerations []apiv1.Toleration `json:"tolerations,omitempty"` + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` // SecurityContext specifies the PodSecurityContext to apply. // Optional. - SecurityContext *apiv1.PodSecurityContext `json:"securityContext,omitempty"` + SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"` // SchedulerName specifies the scheduler that will be used for scheduling // Optional. SchedulerName *string `json:"schedulerName,omitempty"` // Sidecars is a list of sidecar containers that run along side the main Spark container. // Optional. - Sidecars []apiv1.Container `json:"sidecars,omitempty"` + Sidecars []corev1.Container `json:"sidecars,omitempty"` // HostNetwork indicates whether to request host networking for the pod or not. // Optional. HostNetwork *bool `json:"hostNetwork,omitempty"` @@ -406,7 +353,7 @@ type SparkPodSpec struct { NodeSelector map[string]string `json:"nodeSelector,omitempty"` // DnsConfig dns settings for the pod, following the Kubernetes specifications. // Optional. - DNSConfig *apiv1.PodDNSConfig `json:"dnsConfig,omitempty"` + DNSConfig *corev1.PodDNSConfig `json:"dnsConfig,omitempty"` } // DriverSpec is specification of the driver. diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go similarity index 99% rename from pkg/apis/sparkoperator.k8s.io/v1beta1/zz_generated.deepcopy.go rename to api/v1beta1/zz_generated.deepcopy.go index 4bd7d6ed69..719ff9e09d 100644 --- a/pkg/apis/sparkoperator.k8s.io/v1beta1/zz_generated.deepcopy.go +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -22,7 +22,7 @@ package v1beta1 import ( "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" + runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
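[Editor's note] The defaults.go hunk below renames the enum constants (`ClusterMode` → `DeployModeCluster`, `Never` → `RestartPolicyNever`) and adds a default application type. A minimal sketch of the resulting behavior — the `github.com/kubeflow/spark-operator/api/v1beta2` import path is assumed from the renames in this patch; an empty v1beta2 spec comes back as a Scala application in cluster mode with a `Never` restart policy:

```go
package main

import (
	"fmt"

	v1beta2 "github.com/kubeflow/spark-operator/api/v1beta2"
)

func main() {
	app := &v1beta2.SparkApplication{}

	// Fills in Type, Mode and RestartPolicy.Type when they are unset,
	// per the defaults.go hunk in this patch.
	v1beta2.SetSparkApplicationDefaults(app)

	// Expected to print: Scala cluster Never
	fmt.Println(app.Spec.Type, app.Spec.Mode, app.Spec.RestartPolicy.Type)
}
```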
diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta2/defaults.go b/api/v1beta2/defaults.go similarity index 92% rename from pkg/apis/sparkoperator.k8s.io/v1beta2/defaults.go rename to api/v1beta2/defaults.go index e46f4012df..aaf2ff25f3 100644 --- a/pkg/apis/sparkoperator.k8s.io/v1beta2/defaults.go +++ b/api/v1beta2/defaults.go @@ -24,15 +24,19 @@ func SetSparkApplicationDefaults(app *SparkApplication) { return } + if app.Spec.Type == "" { + app.Spec.Type = SparkApplicationTypeScala + } + if app.Spec.Mode == "" { - app.Spec.Mode = ClusterMode + app.Spec.Mode = DeployModeCluster } if app.Spec.RestartPolicy.Type == "" { - app.Spec.RestartPolicy.Type = Never + app.Spec.RestartPolicy.Type = RestartPolicyNever } - if app.Spec.RestartPolicy.Type != Never { + if app.Spec.RestartPolicy.Type != RestartPolicyNever { // Default to 5 sec if the RestartPolicy is OnFailure or Always and these values aren't specified. if app.Spec.RestartPolicy.OnFailureRetryInterval == nil { app.Spec.RestartPolicy.OnFailureRetryInterval = new(int64) diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta2/defaults_test.go b/api/v1beta2/defaults_test.go similarity index 92% rename from pkg/apis/sparkoperator.k8s.io/v1beta2/defaults_test.go rename to api/v1beta2/defaults_test.go index 624374ee16..a516e41e3b 100644 --- a/pkg/apis/sparkoperator.k8s.io/v1beta2/defaults_test.go +++ b/api/v1beta2/defaults_test.go @@ -36,11 +36,11 @@ func TestSetSparkApplicationDefaultsEmptyModeShouldDefaultToClusterMode(t *testi SetSparkApplicationDefaults(app) - assert.Equal(t, ClusterMode, app.Spec.Mode) + assert.Equal(t, DeployModeCluster, app.Spec.Mode) } func TestSetSparkApplicationDefaultsModeShouldNotChangeIfSet(t *testing.T) { - expectedMode := ClientMode + expectedMode := DeployModeClient app := &SparkApplication{ Spec: SparkApplicationSpec{ Mode: expectedMode, @@ -59,21 +59,21 @@ func TestSetSparkApplicationDefaultsEmptyRestartPolicyShouldDefaultToNever(t *te SetSparkApplicationDefaults(app) - assert.Equal(t, Never, app.Spec.RestartPolicy.Type) + assert.Equal(t, RestartPolicyNever, app.Spec.RestartPolicy.Type) } func TestSetSparkApplicationDefaultsOnFailureRestartPolicyShouldSetDefaultValues(t *testing.T) { app := &SparkApplication{ Spec: SparkApplicationSpec{ RestartPolicy: RestartPolicy{ - Type: OnFailure, + Type: RestartPolicyOnFailure, }, }, } SetSparkApplicationDefaults(app) - assert.Equal(t, OnFailure, app.Spec.RestartPolicy.Type) + assert.Equal(t, RestartPolicyOnFailure, app.Spec.RestartPolicy.Type) assert.NotNil(t, app.Spec.RestartPolicy.OnFailureRetryInterval) assert.Equal(t, int64(5), *app.Spec.RestartPolicy.OnFailureRetryInterval) assert.NotNil(t, app.Spec.RestartPolicy.OnSubmissionFailureRetryInterval) @@ -85,7 +85,7 @@ func TestSetSparkApplicationDefaultsOnFailureRestartPolicyShouldSetDefaultValueF app := &SparkApplication{ Spec: SparkApplicationSpec{ RestartPolicy: RestartPolicy{ - Type: OnFailure, + Type: RestartPolicyOnFailure, OnSubmissionFailureRetryInterval: &expectedOnSubmissionFailureRetryInterval, }, }, @@ -93,7 +93,7 @@ func TestSetSparkApplicationDefaultsOnFailureRestartPolicyShouldSetDefaultValueF SetSparkApplicationDefaults(app) - assert.Equal(t, OnFailure, app.Spec.RestartPolicy.Type) + assert.Equal(t, RestartPolicyOnFailure, app.Spec.RestartPolicy.Type) assert.NotNil(t, app.Spec.RestartPolicy.OnFailureRetryInterval) assert.Equal(t, int64(5), *app.Spec.RestartPolicy.OnFailureRetryInterval) assert.NotNil(t, app.Spec.RestartPolicy.OnSubmissionFailureRetryInterval) @@ -105,7 +105,7 @@ func 
TestSetSparkApplicationDefaultsOnFailureRestartPolicyShouldSetDefaultValueF app := &SparkApplication{ Spec: SparkApplicationSpec{ RestartPolicy: RestartPolicy{ - Type: OnFailure, + Type: RestartPolicyOnFailure, OnFailureRetryInterval: &expectedOnFailureRetryInterval, }, }, @@ -113,7 +113,7 @@ func TestSetSparkApplicationDefaultsOnFailureRestartPolicyShouldSetDefaultValueF SetSparkApplicationDefaults(app) - assert.Equal(t, OnFailure, app.Spec.RestartPolicy.Type) + assert.Equal(t, RestartPolicyOnFailure, app.Spec.RestartPolicy.Type) assert.NotNil(t, app.Spec.RestartPolicy.OnFailureRetryInterval) assert.Equal(t, expectedOnFailureRetryInterval, *app.Spec.RestartPolicy.OnFailureRetryInterval) assert.NotNil(t, app.Spec.RestartPolicy.OnSubmissionFailureRetryInterval) diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta2/doc.go b/api/v1beta2/doc.go similarity index 100% rename from pkg/apis/sparkoperator.k8s.io/v1beta2/doc.go rename to api/v1beta2/doc.go diff --git a/api/v1beta2/groupversion_info.go b/api/v1beta2/groupversion_info.go new file mode 100644 index 0000000000..0f8277c704 --- /dev/null +++ b/api/v1beta2/groupversion_info.go @@ -0,0 +1,36 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1beta2 contains API Schema definitions for the v1beta2 API group +// +kubebuilder:object:generate=true +// +groupName=sparkoperator.k8s.io +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects. + GroupVersion = schema.GroupVersion{Group: "sparkoperator.k8s.io", Version: "v1beta2"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/pkg/apis/sparkoperator.k8s.io/register.go b/api/v1beta2/pod_webhook.go similarity index 84% rename from pkg/apis/sparkoperator.k8s.io/register.go rename to api/v1beta2/pod_webhook.go index 65762067be..5cdbbd0e30 100644 --- a/pkg/apis/sparkoperator.k8s.io/register.go +++ b/api/v1beta2/pod_webhook.go @@ -1,5 +1,5 @@ /* -Copyright 2017 Google LLC +Copyright 2024 The Kubeflow authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,8 +14,4 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package sparkoperator - -const ( - GroupName = "sparkoperator.k8s.io" -) +package v1beta2 diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta2/register.go b/api/v1beta2/register.go similarity index 53% rename from pkg/apis/sparkoperator.k8s.io/v1beta2/register.go rename to api/v1beta2/register.go index 20d087b7fc..f6eea9ab85 100644 --- a/pkg/apis/sparkoperator.k8s.io/v1beta2/register.go +++ b/api/v1beta2/register.go @@ -1,5 +1,5 @@ /* -Copyright 2017 Google LLC +Copyright 2024 The Kubeflow authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,36 +17,18 @@ limitations under the License. package v1beta2 import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io" ) -const Version = "v1beta2" - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme +const ( + Group = "sparkoperator.k8s.io" + Version = "v1beta2" ) // SchemeGroupVersion is the group version used to register these objects. -var SchemeGroupVersion = schema.GroupVersion{Group: sparkoperator.GroupName, Version: Version} +var SchemeGroupVersion = schema.GroupVersion{Group: Group, Version: Version} // Resource takes an unqualified resource and returns a Group-qualified GroupResource. func Resource(resource string) schema.GroupResource { return SchemeGroupVersion.WithResource(resource).GroupResource() } - -// addKnownTypes adds the set of types defined in this package to the supplied scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &SparkApplication{}, - &SparkApplicationList{}, - &ScheduledSparkApplication{}, - &ScheduledSparkApplicationList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/api/v1beta2/scheduledsparkapplication_types.go b/api/v1beta2/scheduledsparkapplication_types.go new file mode 100644 index 0000000000..486a890a1a --- /dev/null +++ b/api/v1beta2/scheduledsparkapplication_types.go @@ -0,0 +1,125 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +func init() { + SchemeBuilder.Register(&ScheduledSparkApplication{}, &ScheduledSparkApplicationList{}) +} + +// ScheduledSparkApplicationSpec defines the desired state of ScheduledSparkApplication. +type ScheduledSparkApplicationSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make generate" to regenerate code after modifying this file + + // Schedule is a cron schedule on which the application should run. 
+ Schedule string `json:"schedule"` + // Template is a template from which SparkApplication instances can be created. + Template SparkApplicationSpec `json:"template"` + // Suspend is a flag telling the controller to suspend subsequent runs of the application if set to true. + // +optional + // Defaults to false. + Suspend *bool `json:"suspend,omitempty"` + // ConcurrencyPolicy is the policy governing concurrent SparkApplication runs. + ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty"` + // SuccessfulRunHistoryLimit is the number of past successful runs of the application to keep. + // +optional + // Defaults to 1. + SuccessfulRunHistoryLimit *int32 `json:"successfulRunHistoryLimit,omitempty"` + // FailedRunHistoryLimit is the number of past failed runs of the application to keep. + // +optional + // Defaults to 1. + FailedRunHistoryLimit *int32 `json:"failedRunHistoryLimit,omitempty"` +} + +// ScheduledSparkApplicationStatus defines the observed state of ScheduledSparkApplication. +type ScheduledSparkApplicationStatus struct { + // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster + // Important: Run "make generate" to regenerate code after modifying this file + + // LastRun is the time when the last run of the application started. + // +nullable + LastRun metav1.Time `json:"lastRun,omitempty"` + // NextRun is the time when the next run of the application will start. + // +nullable + NextRun metav1.Time `json:"nextRun,omitempty"` + // LastRunName is the name of the SparkApplication for the most recent run of the application. + LastRunName string `json:"lastRunName,omitempty"` + // PastSuccessfulRunNames keeps the names of SparkApplications for past successful runs. + PastSuccessfulRunNames []string `json:"pastSuccessfulRunNames,omitempty"` + // PastFailedRunNames keeps the names of SparkApplications for past failed runs. + PastFailedRunNames []string `json:"pastFailedRunNames,omitempty"` + // ScheduleState is the current scheduling state of the application. + ScheduleState ScheduleState `json:"scheduleState,omitempty"` + // Reason tells why the ScheduledSparkApplication is in the particular ScheduleState. + Reason string `json:"reason,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:metadata:annotations="api-approved.kubernetes.io=https://github.com/kubeflow/spark-operator/pull/1298" +// +kubebuilder:resource:scope=Namespaced,shortName=scheduledsparkapp,singular=scheduledsparkapplication +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:JSONPath=.spec.schedule,name=Schedule,type=string +// +kubebuilder:printcolumn:JSONPath=.spec.suspend,name=Suspend,type=string +// +kubebuilder:printcolumn:JSONPath=.status.lastRun,name=Last Run,type=date +// +kubebuilder:printcolumn:JSONPath=.status.lastRunName,name=Last Run Name,type=string +// +kubebuilder:printcolumn:JSONPath=.metadata.creationTimestamp,name=Age,type=date + +// ScheduledSparkApplication is the Schema for the scheduledsparkapplications API. +type ScheduledSparkApplication struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ScheduledSparkApplicationSpec `json:"spec,omitempty"` + Status ScheduledSparkApplicationStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ScheduledSparkApplicationList contains a list of ScheduledSparkApplication. 
+type ScheduledSparkApplicationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ScheduledSparkApplication `json:"items"` +} + +type ConcurrencyPolicy string + +const ( + // ConcurrencyAllow allows SparkApplications to run concurrently. + ConcurrencyAllow ConcurrencyPolicy = "Allow" + // ConcurrencyForbid forbids concurrent runs of SparkApplications, skipping the next run if the previous + // one hasn't finished yet. + ConcurrencyForbid ConcurrencyPolicy = "Forbid" + // ConcurrencyReplace kills the currently running SparkApplication instance and replaces it with a new one. + ConcurrencyReplace ConcurrencyPolicy = "Replace" +) + +type ScheduleState string + +const ( + ScheduleStateNew ScheduleState = "" + ScheduleStateValidating ScheduleState = "Validating" + ScheduleStateScheduled ScheduleState = "Scheduled" + ScheduleStateFailedValidation ScheduleState = "FailedValidation" +) diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go b/api/v1beta2/sparkapplication_types.go similarity index 75% rename from pkg/apis/sparkoperator.k8s.io/v1beta2/types.go rename to api/v1beta2/sparkapplication_types.go index 3fe9e30622..4a9e13efb1 100644 --- a/pkg/apis/sparkoperator.k8s.io/v1beta2/types.go +++ b/api/v1beta2/sparkapplication_types.go @@ -1,5 +1,5 @@ /* -Copyright 2017 Google LLC +Copyright 2024 The Kubeflow authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,180 +17,24 @@ limitations under the License. package v1beta2 import ( - apiv1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// SparkApplicationType describes the type of a Spark application. -type SparkApplicationType string - -// Different types of Spark applications. -const ( - JavaApplicationType SparkApplicationType = "Java" - ScalaApplicationType SparkApplicationType = "Scala" - PythonApplicationType SparkApplicationType = "Python" - RApplicationType SparkApplicationType = "R" -) +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. -// DeployMode describes the type of deployment of a Spark application. -type DeployMode string - -// Different types of deployments. -const ( - ClusterMode DeployMode = "cluster" - ClientMode DeployMode = "client" - InClusterClientMode DeployMode = "in-cluster-client" -) - -// RestartPolicy is the policy of if and in which conditions the controller should restart a terminated application. -// This completely defines actions to be taken on any kind of Failures during an application run. -type RestartPolicy struct { - // Type specifies the RestartPolicyType. - // +kubebuilder:validation:Enum={Never,Always,OnFailure} - Type RestartPolicyType `json:"type,omitempty"` - - // OnSubmissionFailureRetries is the number of times to retry submitting an application before giving up. - // This is best effort and actual retry attempts can be >= the value specified due to caching. - // These are required if RestartPolicy is OnFailure. - // +kubebuilder:validation:Minimum=0 - // +optional - OnSubmissionFailureRetries *int32 `json:"onSubmissionFailureRetries,omitempty"` - - // OnFailureRetries the number of times to retry running an application before giving up. 
- // +kubebuilder:validation:Minimum=0 - // +optional - OnFailureRetries *int32 `json:"onFailureRetries,omitempty"` - - // OnSubmissionFailureRetryInterval is the interval in seconds between retries on failed submissions. - // +kubebuilder:validation:Minimum=1 - // +optional - OnSubmissionFailureRetryInterval *int64 `json:"onSubmissionFailureRetryInterval,omitempty"` - - // OnFailureRetryInterval is the interval in seconds between retries on failed runs. - // +kubebuilder:validation:Minimum=1 - // +optional - OnFailureRetryInterval *int64 `json:"onFailureRetryInterval,omitempty"` -} - -type RestartPolicyType string - -const ( - Never RestartPolicyType = "Never" - OnFailure RestartPolicyType = "OnFailure" - Always RestartPolicyType = "Always" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:defaulter-gen=true -// +kubebuilder:metadata:annotations="api-approved.kubernetes.io=https://github.com/kubeflow/spark-operator/pull/1298" -// +kubebuilder:resource:scope=Namespaced,shortName=scheduledsparkapp,singular=scheduledsparkapplication -// +kubebuilder:subresource:status -// +kubebuilder:printcolumn:JSONPath=.spec.schedule,name=Schedule,type=string -// +kubebuilder:printcolumn:JSONPath=.spec.suspend,name=Suspend,type=string -// +kubebuilder:printcolumn:JSONPath=.status.lastRun,name=Last Run,type=date -// +kubebuilder:printcolumn:JSONPath=.status.lastRunName,name=Last Run Name,type=string -// +kubebuilder:printcolumn:JSONPath=.metadata.creationTimestamp,name=Age,type=date - -type ScheduledSparkApplication struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata"` - Spec ScheduledSparkApplicationSpec `json:"spec"` - Status ScheduledSparkApplicationStatus `json:"status,omitempty"` +func init() { + SchemeBuilder.Register(&SparkApplication{}, &SparkApplicationList{}) } -type ConcurrencyPolicy string - -const ( - // ConcurrencyAllow allows SparkApplications to run concurrently. - ConcurrencyAllow ConcurrencyPolicy = "Allow" - // ConcurrencyForbid forbids concurrent runs of SparkApplications, skipping the next run if the previous - // one hasn't finished yet. - ConcurrencyForbid ConcurrencyPolicy = "Forbid" - // ConcurrencyReplace kills the currently running SparkApplication instance and replaces it with a new one. - ConcurrencyReplace ConcurrencyPolicy = "Replace" -) - -type ScheduledSparkApplicationSpec struct { - // Schedule is a cron schedule on which the application should run. - Schedule string `json:"schedule"` - // Template is a template from which SparkApplication instances can be created. - Template SparkApplicationSpec `json:"template"` - // Suspend is a flag telling the controller to suspend subsequent runs of the application if set to true. - // +optional - // Defaults to false. - Suspend *bool `json:"suspend,omitempty"` - // ConcurrencyPolicy is the policy governing concurrent SparkApplication runs. - ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty"` - // SuccessfulRunHistoryLimit is the number of past successful runs of the application to keep. - // +optional - // Defaults to 1. - SuccessfulRunHistoryLimit *int32 `json:"successfulRunHistoryLimit,omitempty"` - // FailedRunHistoryLimit is the number of past failed runs of the application to keep. - // +optional - // Defaults to 1. 
- FailedRunHistoryLimit *int32 `json:"failedRunHistoryLimit,omitempty"` -} - -type ScheduleState string - -const ( - FailedValidationState ScheduleState = "FailedValidation" - ScheduledState ScheduleState = "Scheduled" -) - -type ScheduledSparkApplicationStatus struct { - // LastRun is the time when the last run of the application started. - // +nullable - LastRun metav1.Time `json:"lastRun,omitempty"` - // NextRun is the time when the next run of the application will start. - // +nullable - NextRun metav1.Time `json:"nextRun,omitempty"` - // LastRunName is the name of the SparkApplication for the most recent run of the application. - LastRunName string `json:"lastRunName,omitempty"` - // PastSuccessfulRunNames keeps the names of SparkApplications for past successful runs. - PastSuccessfulRunNames []string `json:"pastSuccessfulRunNames,omitempty"` - // PastFailedRunNames keeps the names of SparkApplications for past failed runs. - PastFailedRunNames []string `json:"pastFailedRunNames,omitempty"` - // ScheduleState is the current scheduling state of the application. - ScheduleState ScheduleState `json:"scheduleState,omitempty"` - // Reason tells why the ScheduledSparkApplication is in the particular ScheduleState. - Reason string `json:"reason,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ScheduledSparkApplicationList carries a list of ScheduledSparkApplication objects. -type ScheduledSparkApplicationList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []ScheduledSparkApplication `json:"items,omitempty"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:defaulter-gen=true -// +kubebuilder:metadata:annotations="api-approved.kubernetes.io=https://github.com/kubeflow/spark-operator/pull/1298" -// +kubebuilder:resource:scope=Namespaced,shortName=sparkapp,singular=sparkapplication -// +kubebuilder:subresource:status -// +kubebuilder:printcolumn:JSONPath=.status.applicationState.state,name=Status,type=string -// +kubebuilder:printcolumn:JSONPath=.status.executionAttempts,name=Attempts,type=string -// +kubebuilder:printcolumn:JSONPath=.status.lastSubmissionAttemptTime,name=Start,type=string -// +kubebuilder:printcolumn:JSONPath=.status.terminationTime,name=Finish,type=string -// +kubebuilder:printcolumn:JSONPath=.metadata.creationTimestamp,name=Age,type=date - -// SparkApplication represents a Spark application running on and using Kubernetes as a cluster manager. -type SparkApplication struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata"` - Spec SparkApplicationSpec `json:"spec"` - Status SparkApplicationStatus `json:"status,omitempty"` -} - -// SparkApplicationSpec describes the specification of a Spark application using Kubernetes as a cluster manager. +// SparkApplicationSpec defines the desired state of SparkApplication // It carries every pieces of information a spark-submit command takes and recognizes. type SparkApplicationSpec struct { + // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster + // Important: Run "make generate" to regenerate code after modifying this file + // Type tells the type of the Spark application. 
	// +kubebuilder:validation:Enum={Java,Python,Scala,R}
 	Type SparkApplicationType `json:"type"`
@@ -242,7 +86,7 @@ type SparkApplicationSpec struct {
 	HadoopConfigMap *string `json:"hadoopConfigMap,omitempty"`
 	// Volumes is the list of Kubernetes volumes that can be mounted by the driver and/or executors.
 	// +optional
-	Volumes []apiv1.Volume `json:"volumes,omitempty"`
+	Volumes []corev1.Volume `json:"volumes,omitempty"`
 	// Driver is the driver specification.
 	Driver DriverSpec `json:"driver"`
 	// Executor is the executor specification.
@@ -301,6 +145,122 @@ type SparkApplicationSpec struct {
 	DynamicAllocation *DynamicAllocation `json:"dynamicAllocation,omitempty"`
 }
 
+// SparkApplicationStatus defines the observed state of SparkApplication
+type SparkApplicationStatus struct {
+	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
+	// Important: Run "make generate" to regenerate code after modifying this file
+
+	// SparkApplicationID is set by the spark-distribution (via the spark.app.id config) on the driver and executor pods
+	SparkApplicationID string `json:"sparkApplicationId,omitempty"`
+	// SubmissionID is a unique ID of the current submission of the application.
+	SubmissionID string `json:"submissionID,omitempty"`
+	// LastSubmissionAttemptTime is the time for the last application submission attempt.
+	// +nullable
+	LastSubmissionAttemptTime metav1.Time `json:"lastSubmissionAttemptTime,omitempty"`
+	// TerminationTime is the time when the application runs to completion, if it does.
+	// +nullable
+	TerminationTime metav1.Time `json:"terminationTime,omitempty"`
+	// DriverInfo has information about the driver.
+	DriverInfo DriverInfo `json:"driverInfo"`
+	// AppState tells the overall application state.
+	AppState ApplicationState `json:"applicationState,omitempty"`
+	// ExecutorState records the state of executors by executor Pod names.
+	ExecutorState map[string]ExecutorState `json:"executorState,omitempty"`
+	// ExecutionAttempts is the total number of attempts to run a submitted application to completion.
+	// Incremented upon each attempted run of the application and reset upon invalidation.
+	ExecutionAttempts int32 `json:"executionAttempts,omitempty"`
+	// SubmissionAttempts is the total number of attempts to submit an application to run.
+	// Incremented upon each attempted submission of the application and reset upon invalidation and rerun.
+	SubmissionAttempts int32 `json:"submissionAttempts,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:metadata:annotations="api-approved.kubernetes.io=https://github.com/kubeflow/spark-operator/pull/1298"
+// +kubebuilder:resource:scope=Namespaced,shortName=sparkapp,singular=sparkapplication
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:JSONPath=.status.applicationState.state,name=Status,type=string
+// +kubebuilder:printcolumn:JSONPath=.status.executionAttempts,name=Attempts,type=string
+// +kubebuilder:printcolumn:JSONPath=.status.lastSubmissionAttemptTime,name=Start,type=string
+// +kubebuilder:printcolumn:JSONPath=.status.terminationTime,name=Finish,type=string
+// +kubebuilder:printcolumn:JSONPath=.metadata.creationTimestamp,name=Age,type=date
+
+// SparkApplication is the Schema for the sparkapplications API
+type SparkApplication struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   SparkApplicationSpec   `json:"spec,omitempty"`
+	Status SparkApplicationStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// SparkApplicationList contains a list of SparkApplication
+type SparkApplicationList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []SparkApplication `json:"items"`
+}
+
+// SparkApplicationType describes the type of a Spark application.
+type SparkApplicationType string
+
+// Different types of Spark applications.
+const (
+	SparkApplicationTypeJava   SparkApplicationType = "Java"
+	SparkApplicationTypeScala  SparkApplicationType = "Scala"
+	SparkApplicationTypePython SparkApplicationType = "Python"
+	SparkApplicationTypeR      SparkApplicationType = "R"
+)
+
+// DeployMode describes the type of deployment of a Spark application.
+type DeployMode string
+
+// Different types of deployments.
+const (
+	DeployModeCluster         DeployMode = "cluster"
+	DeployModeClient          DeployMode = "client"
+	DeployModeInClusterClient DeployMode = "in-cluster-client"
+)
+
+// RestartPolicy is the policy for whether, and under which conditions, the controller should restart a terminated application.
+// This completely defines the actions to be taken on any kind of failure during an application run.
+type RestartPolicy struct {
+	// Type specifies the RestartPolicyType.
+	// +kubebuilder:validation:Enum={Never,Always,OnFailure}
+	Type RestartPolicyType `json:"type,omitempty"`
+
+	// OnSubmissionFailureRetries is the number of times to retry submitting an application before giving up.
+	// This is best effort and actual retry attempts can be >= the value specified due to caching.
+	// These are required if RestartPolicy is OnFailure.
+	// +kubebuilder:validation:Minimum=0
+	// +optional
+	OnSubmissionFailureRetries *int32 `json:"onSubmissionFailureRetries,omitempty"`
+
+	// OnFailureRetries is the number of times to retry running an application before giving up.
+	// +kubebuilder:validation:Minimum=0
+	// +optional
+	OnFailureRetries *int32 `json:"onFailureRetries,omitempty"`
+
+	// OnSubmissionFailureRetryInterval is the interval in seconds between retries on failed submissions.
+	// +kubebuilder:validation:Minimum=1
+	// +optional
+	OnSubmissionFailureRetryInterval *int64 `json:"onSubmissionFailureRetryInterval,omitempty"`
+
+	// OnFailureRetryInterval is the interval in seconds between retries on failed runs.
+ // +kubebuilder:validation:Minimum=1 + // +optional + OnFailureRetryInterval *int64 `json:"onFailureRetryInterval,omitempty"` +} + +type RestartPolicyType string + +const ( + RestartPolicyNever RestartPolicyType = "Never" + RestartPolicyOnFailure RestartPolicyType = "OnFailure" + RestartPolicyAlways RestartPolicyType = "Always" +) + // BatchSchedulerConfiguration used to configure how to batch scheduling Spark Application type BatchSchedulerConfiguration struct { // Queue stands for the resource queue which the application belongs to, it's being used in Volcano batch scheduler. @@ -312,7 +272,7 @@ type BatchSchedulerConfiguration struct { // Resources stands for the resource list custom request for. Usually it is used to define the lower-bound limit. // If specified, volcano scheduler will consider it as the resources requested. // +optional - Resources apiv1.ResourceList `json:"resources,omitempty"` + Resources corev1.ResourceList `json:"resources,omitempty"` } // SparkUIConfiguration is for driver UI specific configuration parameters. @@ -328,11 +288,11 @@ type SparkUIConfiguration struct { ServicePortName *string `json:"servicePortName"` // ServiceType allows configuring the type of the service. Defaults to ClusterIP. // +optional - ServiceType *apiv1.ServiceType `json:"serviceType"` + ServiceType *corev1.ServiceType `json:"serviceType"` // ServiceAnnotations is a map of key,value pairs of annotations that might be added to the service object. // +optional ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"` - // ServiceLables is a map of key,value pairs of labels that might be added to the service object. + // ServiceLabels is a map of key,value pairs of labels that might be added to the service object. // +optional ServiceLabels map[string]string `json:"serviceLabels,omitempty"` // IngressAnnotations is a map of key,value pairs of annotations that might be added to the ingress object. i.e. specify nginx as ingress.class @@ -352,11 +312,11 @@ type DriverIngressConfiguration struct { ServicePortName *string `json:"servicePortName"` // ServiceType allows configuring the type of the service. Defaults to ClusterIP. // +optional - ServiceType *apiv1.ServiceType `json:"serviceType"` + ServiceType *corev1.ServiceType `json:"serviceType"` // ServiceAnnotations is a map of key,value pairs of annotations that might be added to the service object. // +optional ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"` - // ServiceLables is a map of key,value pairs of labels that might be added to the service object. + // ServiceLabels is a map of key,value pairs of labels that might be added to the service object. // +optional ServiceLabels map[string]string `json:"serviceLabels,omitempty"` // IngressURLFormat is the URL for the ingress. @@ -374,17 +334,17 @@ type ApplicationStateType string // Different states an application may have. 
const ( - NewState ApplicationStateType = "" - SubmittedState ApplicationStateType = "SUBMITTED" - RunningState ApplicationStateType = "RUNNING" - CompletedState ApplicationStateType = "COMPLETED" - FailedState ApplicationStateType = "FAILED" - FailedSubmissionState ApplicationStateType = "SUBMISSION_FAILED" - PendingRerunState ApplicationStateType = "PENDING_RERUN" - InvalidatingState ApplicationStateType = "INVALIDATING" - SucceedingState ApplicationStateType = "SUCCEEDING" - FailingState ApplicationStateType = "FAILING" - UnknownState ApplicationStateType = "UNKNOWN" + ApplicationStateNew ApplicationStateType = "" + ApplicationStateSubmitted ApplicationStateType = "SUBMITTED" + ApplicationStateRunning ApplicationStateType = "RUNNING" + ApplicationStateCompleted ApplicationStateType = "COMPLETED" + ApplicationStateFailed ApplicationStateType = "FAILED" + ApplicationStateFailedSubmission ApplicationStateType = "SUBMISSION_FAILED" + ApplicationStatePendingRerun ApplicationStateType = "PENDING_RERUN" + ApplicationStateInvalidating ApplicationStateType = "INVALIDATING" + ApplicationStateSucceeding ApplicationStateType = "SUCCEEDING" + ApplicationStateFailing ApplicationStateType = "FAILING" + ApplicationStateUnknown ApplicationStateType = "UNKNOWN" ) // ApplicationState tells the current state of the application and an error message in case of failures. @@ -398,11 +358,11 @@ type DriverState string // Different states a spark driver may have. const ( - DriverPendingState DriverState = "PENDING" - DriverRunningState DriverState = "RUNNING" - DriverCompletedState DriverState = "COMPLETED" - DriverFailedState DriverState = "FAILED" - DriverUnknownState DriverState = "UNKNOWN" + DriverStatePending DriverState = "PENDING" + DriverStateRunning DriverState = "RUNNING" + DriverStateCompleted DriverState = "COMPLETED" + DriverStateFailed DriverState = "FAILED" + DriverStateUnknown DriverState = "UNKNOWN" ) // ExecutorState tells the current state of an executor. @@ -410,48 +370,13 @@ type ExecutorState string // Different states an executor may have. const ( - ExecutorPendingState ExecutorState = "PENDING" - ExecutorRunningState ExecutorState = "RUNNING" - ExecutorCompletedState ExecutorState = "COMPLETED" - ExecutorFailedState ExecutorState = "FAILED" - ExecutorUnknownState ExecutorState = "UNKNOWN" + ExecutorStatePending ExecutorState = "PENDING" + ExecutorStateRunning ExecutorState = "RUNNING" + ExecutorStateCompleted ExecutorState = "COMPLETED" + ExecutorStateFailed ExecutorState = "FAILED" + ExecutorStateUnknown ExecutorState = "UNKNOWN" ) -// SparkApplicationStatus describes the current status of a Spark application. -type SparkApplicationStatus struct { - // SparkApplicationID is set by the spark-distribution(via spark.app.id config) on the driver and executor pods - SparkApplicationID string `json:"sparkApplicationId,omitempty"` - // SubmissionID is a unique ID of the current submission of the application. - SubmissionID string `json:"submissionID,omitempty"` - // LastSubmissionAttemptTime is the time for the last application submission attempt. - // +nullable - LastSubmissionAttemptTime metav1.Time `json:"lastSubmissionAttemptTime,omitempty"` - // CompletionTime is the time when the application runs to completion if it does. - // +nullable - TerminationTime metav1.Time `json:"terminationTime,omitempty"` - // DriverInfo has information about the driver. - DriverInfo DriverInfo `json:"driverInfo"` - // AppState tells the overall application state. 
- AppState ApplicationState `json:"applicationState,omitempty"` - // ExecutorState records the state of executors by executor Pod names. - ExecutorState map[string]ExecutorState `json:"executorState,omitempty"` - // ExecutionAttempts is the total number of attempts to run a submitted application to completion. - // Incremented upon each attempted run of the application and reset upon invalidation. - ExecutionAttempts int32 `json:"executionAttempts,omitempty"` - // SubmissionAttempts is the total number of attempts to submit an application to run. - // Incremented upon each attempted submission of the application and reset upon invalidation and rerun. - SubmissionAttempts int32 `json:"submissionAttempts,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// SparkApplicationList carries a list of SparkApplication objects. -type SparkApplicationList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []SparkApplication `json:"items,omitempty"` -} - // Dependencies specifies all possible types of dependencies of a Spark application. type Dependencies struct { // Jars is a list of JAR files the Spark application depends on. @@ -509,14 +434,14 @@ type SparkPodSpec struct { Secrets []SecretInfo `json:"secrets,omitempty"` // Env carries the environment variables to add to the pod. // +optional - Env []apiv1.EnvVar `json:"env,omitempty"` + Env []corev1.EnvVar `json:"env,omitempty"` // EnvVars carries the environment variables to add to the pod. // Deprecated. Consider using `env` instead. // +optional EnvVars map[string]string `json:"envVars,omitempty"` // EnvFrom is a list of sources to populate environment variables in the container. // +optional - EnvFrom []apiv1.EnvFromSource `json:"envFrom,omitempty"` + EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty"` // EnvSecretKeyRefs holds a mapping from environment variable names to SecretKeyRefs. // Deprecated. Consider using `env` instead. // +optional @@ -529,28 +454,28 @@ type SparkPodSpec struct { Annotations map[string]string `json:"annotations,omitempty"` // VolumeMounts specifies the volumes listed in ".spec.volumes" to mount into the main container's filesystem. // +optional - VolumeMounts []apiv1.VolumeMount `json:"volumeMounts,omitempty"` + VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"` // Affinity specifies the affinity/anti-affinity settings for the pod. // +optional - Affinity *apiv1.Affinity `json:"affinity,omitempty"` + Affinity *corev1.Affinity `json:"affinity,omitempty"` // Tolerations specifies the tolerations listed in ".spec.tolerations" to be applied to the pod. // +optional - Tolerations []apiv1.Toleration `json:"tolerations,omitempty"` + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` // PodSecurityContext specifies the PodSecurityContext to apply. // +optional - PodSecurityContext *apiv1.PodSecurityContext `json:"podSecurityContext,omitempty"` + PodSecurityContext *corev1.PodSecurityContext `json:"podSecurityContext,omitempty"` // SecurityContext specifies the container's SecurityContext to apply. // +optional - SecurityContext *apiv1.SecurityContext `json:"securityContext,omitempty"` + SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty"` // SchedulerName specifies the scheduler that will be used for scheduling // +optional SchedulerName *string `json:"schedulerName,omitempty"` // Sidecars is a list of sidecar containers that run along side the main Spark container. 
	// +optional
-	Sidecars []apiv1.Container `json:"sidecars,omitempty"`
+	Sidecars []corev1.Container `json:"sidecars,omitempty"`
 	// InitContainers is a list of init-containers that run to completion before the main Spark container.
 	// +optional
-	InitContainers []apiv1.Container `json:"initContainers,omitempty"`
+	InitContainers []corev1.Container `json:"initContainers,omitempty"`
 	// HostNetwork indicates whether to request host networking for the pod or not.
 	// +optional
 	HostNetwork *bool `json:"hostNetwork,omitempty"`
@@ -560,7 +485,7 @@ type SparkPodSpec struct {
 	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
 	// DnsConfig dns settings for the pod, following the Kubernetes specifications.
 	// +optional
-	DNSConfig *apiv1.PodDNSConfig `json:"dnsConfig,omitempty"`
+	DNSConfig *corev1.PodDNSConfig `json:"dnsConfig,omitempty"`
 	// Termination grace period seconds for the pod
 	// +optional
 	TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"`
@@ -569,7 +494,7 @@
 	ServiceAccount *string `json:"serviceAccount,omitempty"`
 	// HostAliases settings for the pod, following the Kubernetes specifications.
 	// +optional
-	HostAliases []apiv1.HostAlias `json:"hostAliases,omitempty"`
+	HostAliases []corev1.HostAlias `json:"hostAliases,omitempty"`
 	// ShareProcessNamespace settings for the pod, following the Kubernetes specifications.
 	// +optional
 	ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty"`
@@ -595,7 +520,7 @@ type DriverSpec struct {
 	JavaOptions *string `json:"javaOptions,omitempty"`
 	// Lifecycle for running preStop or postStart commands
 	// +optional
-	Lifecycle *apiv1.Lifecycle `json:"lifecycle,omitempty"`
+	Lifecycle *corev1.Lifecycle `json:"lifecycle,omitempty"`
 	// KubernetesMaster is the URL of the Kubernetes master used by the driver to manage executor pods and
 	// other Kubernetes resources. Default to https://kubernetes.default.svc.
 	// +optional
@@ -630,7 +555,7 @@ type ExecutorSpec struct {
 	JavaOptions *string `json:"javaOptions,omitempty"`
 	// Lifecycle for running preStop or postStart commands
 	// +optional
-	Lifecycle *apiv1.Lifecycle `json:"lifecycle,omitempty"`
+	Lifecycle *corev1.Lifecycle `json:"lifecycle,omitempty"`
 	// DeleteOnTermination specify whether executor pods should be deleted in case of failure or normal termination.
 	// Maps to `spark.kubernetes.executor.deleteOnTermination` that is available since Spark 3.0.
 	// +optional
@@ -651,22 +576,22 @@ type SecretType string
 // An enumeration of secret types supported.
 const (
-	// GCPServiceAccountSecret is for secrets from a GCP service account Json key file that needs
+	// SecretTypeGCPServiceAccount is for secrets from a GCP service account JSON key file that need
 	// the environment variable GOOGLE_APPLICATION_CREDENTIALS.
-	GCPServiceAccountSecret SecretType = "GCPServiceAccount"
-	// HadoopDelegationTokenSecret is for secrets from an Hadoop delegation token that needs the
+	SecretTypeGCPServiceAccount SecretType = "GCPServiceAccount"
+	// SecretTypeHadoopDelegationToken is for secrets from a Hadoop delegation token that need the
 	// environment variable HADOOP_TOKEN_FILE_LOCATION.
-	HadoopDelegationTokenSecret SecretType = "HadoopDelegationToken"
-	// GenericType is for secrets that needs no special handling.
-	GenericType SecretType = "Generic"
+	SecretTypeHadoopDelegationToken SecretType = "HadoopDelegationToken"
+	// SecretTypeGeneric is for secrets that need no special handling.
+ SecretTypeGeneric SecretType = "Generic" ) // DriverInfo captures information about the driver. type DriverInfo struct { WebUIServiceName string `json:"webUIServiceName,omitempty"` // UI Details for the UI created via ClusterIP service accessible from within the cluster. - WebUIPort int32 `json:"webUIPort,omitempty"` WebUIAddress string `json:"webUIAddress,omitempty"` + WebUIPort int32 `json:"webUIPort,omitempty"` // Ingress Details if an ingress for the UI was created. WebUIIngressName string `json:"webUIIngressName,omitempty"` WebUIIngressAddress string `json:"webUIIngressAddress,omitempty"` @@ -764,39 +689,3 @@ type DynamicAllocation struct { // +optional ShuffleTrackingTimeout *int64 `json:"shuffleTrackingTimeout,omitempty"` } - -// PrometheusMonitoringEnabled returns if Prometheus monitoring is enabled or not. -func (s *SparkApplication) PrometheusMonitoringEnabled() bool { - return s.Spec.Monitoring != nil && s.Spec.Monitoring.Prometheus != nil -} - -// HasPrometheusConfigFile returns if Prometheus monitoring uses a configuration file in the container. -func (s *SparkApplication) HasPrometheusConfigFile() bool { - return s.PrometheusMonitoringEnabled() && - s.Spec.Monitoring.Prometheus.ConfigFile != nil && - *s.Spec.Monitoring.Prometheus.ConfigFile != "" -} - -// HasPrometheusConfig returns if Prometheus monitoring defines metricsProperties in the spec. -func (s *SparkApplication) HasMetricsProperties() bool { - return s.PrometheusMonitoringEnabled() && - s.Spec.Monitoring.MetricsProperties != nil && - *s.Spec.Monitoring.MetricsProperties != "" -} - -// HasPrometheusConfigFile returns if Monitoring defines metricsPropertiesFile in the spec. -func (s *SparkApplication) HasMetricsPropertiesFile() bool { - return s.PrometheusMonitoringEnabled() && - s.Spec.Monitoring.MetricsPropertiesFile != nil && - *s.Spec.Monitoring.MetricsPropertiesFile != "" -} - -// ExposeDriverMetrics returns if driver metrics should be exposed. -func (s *SparkApplication) ExposeDriverMetrics() bool { - return s.Spec.Monitoring != nil && s.Spec.Monitoring.ExposeDriverMetrics -} - -// ExposeExecutorMetrics returns if executor metrics should be exposed. -func (s *SparkApplication) ExposeExecutorMetrics() bool { - return s.Spec.Monitoring != nil && s.Spec.Monitoring.ExposeExecutorMetrics -} diff --git a/pkg/apis/sparkoperator.k8s.io/v1beta2/zz_generated.deepcopy.go b/api/v1beta2/zz_generated.deepcopy.go similarity index 99% rename from pkg/apis/sparkoperator.k8s.io/v1beta2/zz_generated.deepcopy.go rename to api/v1beta2/zz_generated.deepcopy.go index ffe6107d50..c369db9e00 100644 --- a/pkg/apis/sparkoperator.k8s.io/v1beta2/zz_generated.deepcopy.go +++ b/api/v1beta2/zz_generated.deepcopy.go @@ -23,7 +23,7 @@ package v1beta2 import ( "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" - "k8s.io/apimachinery/pkg/runtime" + runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. diff --git a/charts/spark-operator-chart/Chart.yaml b/charts/spark-operator-chart/Chart.yaml index 6068bba17f..b36932321b 100644 --- a/charts/spark-operator-chart/Chart.yaml +++ b/charts/spark-operator-chart/Chart.yaml @@ -1,11 +1,39 @@ +# +# Copyright 2024 The Kubeflow authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
diff --git a/charts/spark-operator-chart/README.md b/charts/spark-operator-chart/README.md
index 5f9a6a2ad8..d07b315b38 100644
--- a/charts/spark-operator-chart/README.md
+++ b/charts/spark-operator-chart/README.md
@@ -1,8 +1,8 @@
 # spark-operator
 
-![Version: 1.4.6](https://img.shields.io/badge/Version-1.4.6-informational?style=flat-square) ![AppVersion: v1beta2-1.6.2-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.6.2--3.5.0-informational?style=flat-square)
+![Version: 2.0.0-rc.0](https://img.shields.io/badge/Version-2.0.0--rc.0-informational?style=flat-square) ![AppVersion: 2.0.0-rc.0](https://img.shields.io/badge/AppVersion-2.0.0--rc.0-informational?style=flat-square)
 
-A Helm chart for Spark on Kubernetes operator
+A Helm chart for Spark on Kubernetes operator.
 
 **Homepage:** <https://github.com/kubeflow/spark-operator>
@@ -41,13 +41,7 @@ See [helm repo](https://helm.sh/docs/helm/helm_repo) for command documentation.
 helm install [RELEASE_NAME] spark-operator/spark-operator
 ```
 
-For example, if you want to create a release with name `spark-operator` in the `default` namespace:
-
-```shell
-helm install spark-operator spark-operator/spark-operator
-```
-
-Note that `helm` will fail to install if the namespace doesn't exist. Either create the namespace beforehand or pass the `--create-namespace` flag to the `helm install` command.
+For example, if you want to create a release with name `spark-operator` in the `spark-operator` namespace:
 
 ```shell
 helm install spark-operator spark-operator/spark-operator \
@@ -55,6 +49,8 @@ helm install spark-operator spark-operator/spark-operator \
 --create-namespace
 ```
 
+Note that by passing the `--create-namespace` flag to the `helm install` command, `helm` will create the release namespace if it does not exist.
+
 See [helm install](https://helm.sh/docs/helm/helm_install) for command documentation.
 
 ### Upgrade the chart
@@ -79,72 +75,91 @@ See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall) for command docum
 | Key | Type | Default | Description |
 |-----|------|---------|-------------|
-| affinity | object | `{}` | Affinity for pod assignment |
-| batchScheduler.enable | bool | `false` | Enable batch scheduler for spark jobs scheduling.
If enabled, users can specify batch scheduler name in spark application | -| commonLabels | object | `{}` | Common labels to add to the resources | -| controllerThreads | int | `10` | Operator concurrency, higher values might increase memory usage | -| envFrom | list | `[]` | Pod environment variable sources | -| fullnameOverride | string | `""` | String to override release name | -| image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | -| image.repository | string | `"docker.io/kubeflow/spark-operator"` | Image repository | -| image.tag | string | `""` | if set, override the image tag whose default is the chart appVersion. | -| imagePullSecrets | list | `[]` | Image pull secrets | -| ingressUrlFormat | string | `""` | Ingress URL format. Requires the UI service to be enabled by setting `uiService.enable` to true. | -| istio.enabled | bool | `false` | When using `istio`, spark jobs need to run without a sidecar to properly terminate | -| labelSelectorFilter | string | `""` | A comma-separated list of key=value, or key labels to filter resources during watch and list based on the specified labels. | -| leaderElection.lockName | string | `"spark-operator-lock"` | Leader election lock name. Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-leader-election-for-high-availability. | -| leaderElection.lockNamespace | string | `""` | Optionally store the lock in another namespace. Defaults to operator's namespace | -| logLevel | int | `2` | Set higher levels for more verbose logging | -| metrics.enable | bool | `true` | Enable prometheus metric scraping | -| metrics.endpoint | string | `"/metrics"` | Metrics serving endpoint | -| metrics.port | int | `10254` | Metrics port | -| metrics.portName | string | `"metrics"` | Metrics port name | -| metrics.prefix | string | `""` | Metric prefix, will be added to all exported metrics | -| nameOverride | string | `""` | String to partially override `spark-operator.fullname` template (will maintain the release name) | -| nodeSelector | object | `{}` | Node labels for pod assignment | -| podAnnotations | object | `{}` | Additional annotations to add to the pod | -| podDisruptionBudget | object | `{"enable":false,"minAvailable":1}` | podDisruptionBudget to avoid service degradation | -| podDisruptionBudget.enable | bool | `false` | Specifies whether to enable pod disruption budget. Ref: [Specifying a Disruption Budget for your Application](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) | -| podDisruptionBudget.minAvailable | int | `1` | The number of pods that must be available. Require `replicaCount` to be greater than 1 | -| podLabels | object | `{}` | Additional labels to add to the pod | -| podMonitor | object | `{"enable":false,"jobLabel":"spark-operator-podmonitor","labels":{},"podMetricsEndpoint":{"interval":"5s","scheme":"http"}}` | Prometheus pod monitor for operator's pod. | -| podMonitor.enable | bool | `false` | If enabled, a pod monitor for operator's pod will be submitted. Note that prometheus metrics should be enabled as well. | -| podMonitor.jobLabel | string | `"spark-operator-podmonitor"` | The label to use to retrieve the job name from | -| podMonitor.labels | object | `{}` | Pod monitor labels | -| podMonitor.podMetricsEndpoint | object | `{"interval":"5s","scheme":"http"}` | Prometheus metrics endpoint properties. 
`metrics.portName` will be used as a port | -| podSecurityContext | object | `{}` | Pod security context | -| priorityClassName | string | `""` | A priority class to be used for running spark-operator pod. | -| rbac.annotations | object | `{}` | Optional annotations for rbac | -| rbac.create | bool | `false` | **DEPRECATED** use `createRole` and `createClusterRole` | -| rbac.createClusterRole | bool | `true` | Create and use RBAC `ClusterRole` resources | -| rbac.createRole | bool | `true` | Create and use RBAC `Role` resources | -| replicaCount | int | `1` | Desired number of pods, leaderElection will be enabled if this is greater than 1 | -| resourceQuotaEnforcement.enable | bool | `false` | Whether to enable the ResourceQuota enforcement for SparkApplication resources. Requires the webhook to be enabled by setting `webhook.enable` to true. Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-resource-quota-enforcement. | -| resources | object | `{}` | Pod resource requests and limits Note, that each job submission will spawn a JVM within the Spark Operator Pod using "/usr/local/openjdk-11/bin/java -Xmx128m". Kubernetes may kill these Java processes at will to enforce resource limits. When that happens, you will see the following error: 'failed to run spark-submit for SparkApplication [...]: signal: killed' - when this happens, you may want to increase memory limits. | -| resyncInterval | int | `30` | Operator resync interval. Note that the operator will respond to events (e.g. create, update) unrelated to this setting | -| securityContext | object | `{}` | Operator container security context | -| serviceAccounts.spark.annotations | object | `{}` | Optional annotations for the spark service account | -| serviceAccounts.spark.create | bool | `true` | Create a service account for spark apps | -| serviceAccounts.spark.name | string | `""` | Optional name for the spark service account | -| serviceAccounts.sparkoperator.annotations | object | `{}` | Optional annotations for the operator service account | -| serviceAccounts.sparkoperator.create | bool | `true` | Create a service account for the operator | -| serviceAccounts.sparkoperator.name | string | `""` | Optional name for the operator service account | -| sidecars | list | `[]` | Sidecar containers | -| sparkJobNamespaces | list | `[""]` | List of namespaces where to run spark jobs | -| tolerations | list | `[]` | List of node taints to tolerate | -| topologySpreadConstraints | list | `[]` | Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. Ref: [Pod Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/) Specify topologySpreadConstraints without the labelSelector field, the labelSelector field will be set to "spark-operator.selectorLabels" subtemplate in the deployment.yaml file. | -| uiService.enable | bool | `true` | Enable UI service creation for Spark application | -| volumeMounts | list | `[]` | | -| volumes | list | `[]` | | -| webhook.enable | bool | `false` | Enable webhook server | -| webhook.namespaceSelector | string | `""` | The webhook server will only operate on namespaces with this label, specified in the form key1=value1,key2=value2. Empty string (default) will operate on all namespaces | -| webhook.objectSelector | string | `""` | The webhook will only operate on resources with this label/s, specified in the form key1=value1,key2=value2, OR key in (value1,value2). 
Empty string (default) will operate on all objects | -| webhook.port | int | `8080` | Webhook service port | -| webhook.portName | string | `"webhook"` | Webhook container port name and service target port name | -| webhook.timeout | int | `30` | The annotations applied to init job, required to restore certs deleted by the cleanup job during upgrade | +| nameOverride | string | `""` | String to partially override release name. | +| fullnameOverride | string | `""` | String to fully override release name. | +| commonLabels | object | `{}` | Common labels to add to the resources. | +| image.registry | string | `"docker.io"` | Image registry. | +| image.repository | string | `"kubeflow/spark-operator"` | Image repository. | +| image.tag | string | If not set, the chart appVersion will be used. | Image tag. | +| image.pullPolicy | string | `"IfNotPresent"` | Image pull policy. | +| image.pullSecrets | list | `[]` | Image pull secrets for private image registry. | +| controller.replicas | int | `1` | Number of replicas of controller. | +| controller.workers | int | `10` | Reconcile concurrency, higher values might increase memory usage. | +| controller.logLevel | string | `"info"` | Configure the verbosity of logging, can be one of `debug`, `info`, `error`. | +| controller.uiService.enable | bool | `true` | Specifies whether to create service for Spark web UI. | +| controller.uiIngress.enable | bool | `false` | Specifies whether to create ingress for Spark web UI. `controller.uiService.enable` must be `true` to enable ingress. | +| controller.uiIngress.urlFormat | string | `""` | Ingress URL format. Required if `controller.uiIngress.enable` is true. | +| controller.batchScheduler.enable | bool | `false` | Specifies whether to enable batch scheduler for spark jobs scheduling. If enabled, users can specify batch scheduler name in spark application. | +| controller.serviceAccount.create | bool | `true` | Specifies whether to create a service account for the controller. | +| controller.serviceAccount.name | string | `""` | Optional name for the controller service account. | +| controller.serviceAccount.annotations | object | `{}` | Extra annotations for the controller service account. | +| controller.rbac.create | bool | `true` | Specifies whether to create RBAC resources for the controller. | +| controller.rbac.annotations | object | `{}` | Extra annotations for the controller RBAC resources. | +| controller.labels | object | `{}` | Extra labels for controller pods. | +| controller.annotations | object | `{}` | Extra annotations for controller pods. | +| controller.volumes | list | `[]` | Volumes for controller pods. | +| controller.nodeSelector | object | `{}` | Node selector for controller pods. | +| controller.affinity | object | `{}` | Affinity for controller pods. | +| controller.tolerations | list | `[]` | List of node taints to tolerate for controller pods. | +| controller.priorityClassName | string | `""` | Priority class for controller pods. | +| controller.podSecurityContext | object | `{}` | Security context for controller pods. | +| controller.topologySpreadConstraints | list | `[]` | Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. Ref: [Pod Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/). The labelSelector field in topology spread constraint will be set to the selector labels for controller pods if not specified. 
|
+| controller.env | list | `[]` | Environment variables for controller containers. |
+| controller.envFrom | list | `[]` | Environment variable sources for controller containers. |
+| controller.volumeMounts | list | `[]` | Volume mounts for controller containers. |
+| controller.resources | object | `{}` | Pod resource requests and limits for controller containers. Note that each job submission will spawn a JVM within the controller pods using "/usr/local/openjdk-11/bin/java -Xmx128m". Kubernetes may kill these Java processes at will to enforce resource limits. When that happens, you will see the following error: 'failed to run spark-submit for SparkApplication [...]: signal: killed' - when this happens, you may want to increase memory limits. |
+| controller.securityContext | object | `{}` | Security context for controller containers. |
+| controller.sidecars | list | `[]` | Sidecar containers for controller pods. |
+| controller.podDisruptionBudget.enable | bool | `false` | Specifies whether to create pod disruption budget for controller. Ref: [Specifying a Disruption Budget for your Application](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) |
+| controller.podDisruptionBudget.minAvailable | int | `1` | The number of pods that must be available. Requires `controller.replicas` to be greater than 1. |
+| webhook.replicas | int | `1` | Number of replicas of webhook server. |
+| webhook.logLevel | string | `"info"` | Configure the verbosity of logging, can be one of `debug`, `info`, `error`. |
+| webhook.port | int | `9443` | Specifies webhook port. |
+| webhook.portName | string | `"webhook"` | Specifies webhook service port name. |
+| webhook.failurePolicy | string | `"Fail"` | Specifies how unrecognized errors are handled. Available options are `Ignore` or `Fail`. |
+| webhook.timeoutSeconds | int | `10` | Specifies the webhook timeout in seconds; the value must be between 1 and 30. |
+| webhook.resourceQuotaEnforcement.enable | bool | `false` | Specifies whether to enable the ResourceQuota enforcement for SparkApplication resources. |
+| webhook.serviceAccount.create | bool | `true` | Specifies whether to create a service account for the webhook. |
+| webhook.serviceAccount.name | string | `""` | Optional name for the webhook service account. |
+| webhook.serviceAccount.annotations | object | `{}` | Extra annotations for the webhook service account. |
+| webhook.rbac.create | bool | `true` | Specifies whether to create RBAC resources for the webhook. |
+| webhook.rbac.annotations | object | `{}` | Extra annotations for the webhook RBAC resources. |
+| webhook.labels | object | `{}` | Extra labels for webhook pods. |
+| webhook.annotations | object | `{}` | Extra annotations for webhook pods. |
+| webhook.sidecars | list | `[]` | Sidecar containers for webhook pods. |
+| webhook.volumes | list | `[]` | Volumes for webhook pods. |
+| webhook.nodeSelector | object | `{}` | Node selector for webhook pods. |
+| webhook.affinity | object | `{}` | Affinity for webhook pods. |
+| webhook.tolerations | list | `[]` | List of node taints to tolerate for webhook pods. |
+| webhook.priorityClassName | string | `""` | Priority class for webhook pods. |
+| webhook.podSecurityContext | object | `{}` | Security context for webhook pods. |
+| webhook.topologySpreadConstraints | list | `[]` | Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. Ref: [Pod Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/). The labelSelector field in topology spread constraint will be set to the selector labels for webhook pods if not specified. |
+| webhook.env | list | `[]` | Environment variables for webhook containers. |
+| webhook.envFrom | list | `[]` | Environment variable sources for webhook containers. |
+| webhook.volumeMounts | list | `[]` | Volume mounts for webhook containers. |
+| webhook.resources | object | `{}` | Pod resource requests and limits for webhook pods. |
+| webhook.securityContext | object | `{}` | Security context for webhook containers. |
+| webhook.podDisruptionBudget.enable | bool | `false` | Specifies whether to create pod disruption budget for webhook. Ref: [Specifying a Disruption Budget for your Application](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) |
+| webhook.podDisruptionBudget.minAvailable | int | `1` | The number of pods that must be available. Requires `webhook.replicas` to be greater than 1. |
+| spark.jobNamespaces | list | `["default"]` | List of namespaces in which to run Spark jobs. If an empty string is included, all namespaces will be allowed. Make sure the namespaces already exist. |
+| spark.serviceAccount.create | bool | `true` | Specifies whether to create a service account for spark applications. |
+| spark.serviceAccount.name | string | `""` | Optional name for the spark service account. |
+| spark.serviceAccount.annotations | object | `{}` | Optional annotations for the spark service account. |
+| spark.rbac.create | bool | `true` | Specifies whether to create RBAC resources for spark applications. |
+| spark.rbac.annotations | object | `{}` | Optional annotations for the spark application RBAC resources. |
+| prometheus.metrics.enable | bool | `true` | Specifies whether to enable prometheus metrics scraping. |
+| prometheus.metrics.port | int | `8080` | Metrics port. |
+| prometheus.metrics.portName | string | `"metrics"` | Metrics port name. |
+| prometheus.metrics.endpoint | string | `"/metrics"` | Metrics serving endpoint. |
+| prometheus.metrics.prefix | string | `""` | Metrics prefix, which will be added to all exported metrics. |
+| prometheus.podMonitor.create | bool | `false` | Specifies whether to create a pod monitor. Note that prometheus metrics should be enabled as well. |
+| prometheus.podMonitor.labels | object | `{}` | Pod monitor labels. |
+| prometheus.podMonitor.jobLabel | string | `"spark-operator-podmonitor"` | The label to use to retrieve the job name from. |
+| prometheus.podMonitor.podMetricsEndpoint | object | `{"interval":"5s","scheme":"http"}` | Prometheus metrics endpoint properties. `prometheus.metrics.portName` will be used as the port. |
 
 ## Maintainers
 
 | Name | Email | Url |
 | ---- | ------ | --- |
-| yuchaoran2011 | <yuchaoran2011@gmail.com> | |
+| yuchaoran2011 | <yuchaoran2011@gmail.com> | <https://github.com/yuchaoran2011> |
+| ChenYi015 | <github@chenyicn.net> | <https://github.com/ChenYi015> |
diff --git a/charts/spark-operator-chart/README.md.gotmpl b/charts/spark-operator-chart/README.md.gotmpl
index a20ed517ee..0c94c12d06 100644
--- a/charts/spark-operator-chart/README.md.gotmpl
+++ b/charts/spark-operator-chart/README.md.gotmpl
@@ -43,13 +43,7 @@ See [helm repo](https://helm.sh/docs/helm/helm_repo) for command documentation.
helm install [RELEASE_NAME] spark-operator/spark-operator ``` -For example, if you want to create a release with name `spark-operator` in the `default` namespace: - -```shell -helm install spark-operator spark-operator/spark-operator -``` - -Note that `helm` will fail to install if the namespace doesn't exist. Either create the namespace beforehand or pass the `--create-namespace` flag to the `helm install` command. +For example, if you want to create a release with name `spark-operator` in the `spark-operator` namespace: ```shell helm install spark-operator spark-operator/spark-operator \ @@ -57,6 +51,8 @@ helm install spark-operator spark-operator/spark-operator \ --create-namespace ``` +Note that by passing the `--create-namespace` flag to the `helm install` command, `helm` will create the release namespace if it does not exist. + See [helm install](https://helm.sh/docs/helm/helm_install) for command documentation. ### Upgrade the chart diff --git a/charts/spark-operator-chart/ci/ci-values.yaml b/charts/spark-operator-chart/ci/ci-values.yaml index 13d37731c8..23b5e1e364 100644 --- a/charts/spark-operator-chart/ci/ci-values.yaml +++ b/charts/spark-operator-chart/ci/ci-values.yaml @@ -1,2 +1,2 @@ image: - tag: "local" + tag: local diff --git a/charts/spark-operator-chart/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml b/charts/spark-operator-chart/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml index b37b7a0008..7f77e1bb92 100644 --- a/charts/spark-operator-chart/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml +++ b/charts/spark-operator-chart/crds/sparkoperator.k8s.io_scheduledsparkapplications.yaml @@ -36,6 +36,8 @@ spec: name: v1beta2 schema: openAPIV3Schema: + description: ScheduledSparkApplication is the Schema for the scheduledsparkapplications + API. properties: apiVersion: description: |- @@ -55,6 +57,8 @@ spec: metadata: type: object spec: + description: ScheduledSparkApplicationSpec defines the desired state of + ScheduledSparkApplication. properties: concurrencyPolicy: description: ConcurrencyPolicy is the policy governing concurrent @@ -4883,7 +4887,7 @@ spec: serviceLabels: additionalProperties: type: string - description: ServiceLables is a map of key,value pairs of + description: ServiceLabels is a map of key,value pairs of labels that might be added to the service object. type: object servicePort: @@ -9820,7 +9824,7 @@ spec: serviceLabels: additionalProperties: type: string - description: ServiceLables is a map of key,value pairs of + description: ServiceLabels is a map of key,value pairs of labels that might be added to the service object. type: object servicePort: @@ -11563,6 +11567,8 @@ spec: - template type: object status: + description: ScheduledSparkApplicationStatus defines the observed state + of ScheduledSparkApplication. properties: lastRun: description: LastRun is the time when the last run of the application @@ -11601,9 +11607,6 @@ spec: application. 
type: string type: object - required: - - metadata - - spec type: object served: true storage: true diff --git a/charts/spark-operator-chart/crds/sparkoperator.k8s.io_sparkapplications.yaml b/charts/spark-operator-chart/crds/sparkoperator.k8s.io_sparkapplications.yaml index c23d69264a..afc07c2530 100644 --- a/charts/spark-operator-chart/crds/sparkoperator.k8s.io_sparkapplications.yaml +++ b/charts/spark-operator-chart/crds/sparkoperator.k8s.io_sparkapplications.yaml @@ -36,8 +36,7 @@ spec: name: v1beta2 schema: openAPIV3Schema: - description: SparkApplication represents a Spark application running on and - using Kubernetes as a cluster manager. + description: SparkApplication is the Schema for the sparkapplications API properties: apiVersion: description: |- @@ -58,7 +57,7 @@ spec: type: object spec: description: |- - SparkApplicationSpec describes the specification of a Spark application using Kubernetes as a cluster manager. + SparkApplicationSpec defines the desired state of SparkApplication It carries every pieces of information a spark-submit command takes and recognizes. properties: arguments: @@ -4827,7 +4826,7 @@ spec: serviceLabels: additionalProperties: type: string - description: ServiceLables is a map of key,value pairs of labels + description: ServiceLabels is a map of key,value pairs of labels that might be added to the service object. type: object servicePort: @@ -9734,7 +9733,7 @@ spec: serviceLabels: additionalProperties: type: string - description: ServiceLables is a map of key,value pairs of labels + description: ServiceLabels is a map of key,value pairs of labels that might be added to the service object. type: object servicePort: @@ -11466,8 +11465,7 @@ spec: - type type: object status: - description: SparkApplicationStatus describes the current status of a - Spark application. + description: SparkApplicationStatus defines the observed state of SparkApplication properties: applicationState: description: AppState tells the overall application state. @@ -11487,6 +11485,8 @@ spec: podName: type: string webUIAddress: + description: UI Details for the UI created via ClusterIP service + accessible from within the cluster. type: string webUIIngressAddress: type: string @@ -11494,8 +11494,6 @@ spec: description: Ingress Details if an ingress for the UI was created. type: string webUIPort: - description: UI Details for the UI created via ClusterIP service - accessible from within the cluster. format: int32 type: integer webUIServiceName: @@ -11543,9 +11541,6 @@ spec: required: - driverInfo type: object - required: - - metadata - - spec type: object served: true storage: true diff --git a/charts/spark-operator-chart/templates/_helpers.tpl b/charts/spark-operator-chart/templates/_helpers.tpl index 8e884ee9db..82845b4e49 100644 --- a/charts/spark-operator-chart/templates/_helpers.tpl +++ b/charts/spark-operator-chart/templates/_helpers.tpl @@ -1,3 +1,19 @@ +{{/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/}} + {{/* vim: set filetype=mustache: */}} {{/* Expand the name of the chart. @@ -37,13 +53,13 @@ Common labels {{- define "spark-operator.labels" -}} helm.sh/chart: {{ include "spark-operator.chart" . }} {{ include "spark-operator.selectorLabels" . }} -{{- if .Values.commonLabels }} -{{ toYaml .Values.commonLabels }} -{{- end }} {{- if .Chart.AppVersion }} app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} {{- end }} app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- with .Values.commonLabels }} +{{ toYaml . }} +{{- end }} {{- end }} {{/* @@ -55,25 +71,8 @@ app.kubernetes.io/instance: {{ .Release.Name }} {{- end }} {{/* -Create the name of the service account to be used by the operator +Spark Operator image */}} -{{- define "spark-operator.serviceAccountName" -}} -{{- if .Values.serviceAccounts.sparkoperator.create -}} -{{ default (include "spark-operator.fullname" .) .Values.serviceAccounts.sparkoperator.name }} -{{- else -}} -{{ default "default" .Values.serviceAccounts.sparkoperator.name }} +{{- define "spark-operator.image" -}} +{{ printf "%s/%s:%s" .Values.image.registry .Values.image.repository (.Values.image.tag | default .Chart.AppVersion) }} {{- end -}} -{{- end -}} - -{{/* -Create the name of the service account to be used by spark apps -*/}} -{{- define "spark.serviceAccountName" -}} -{{- if .Values.serviceAccounts.spark.create -}} -{{- $sparkServiceaccount := printf "%s-%s" .Release.Name "spark" -}} - {{ default $sparkServiceaccount .Values.serviceAccounts.spark.name }} -{{- else -}} - {{ default "default" .Values.serviceAccounts.spark.name }} -{{- end -}} -{{- end -}} - diff --git a/charts/spark-operator-chart/templates/controller/_helpers.tpl b/charts/spark-operator-chart/templates/controller/_helpers.tpl new file mode 100644 index 0000000000..e5b9457b2b --- /dev/null +++ b/charts/spark-operator-chart/templates/controller/_helpers.tpl @@ -0,0 +1,70 @@ +{{/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{/* +Create the name of controller component +*/}} +{{- define "spark-operator.controller.name" -}} +{{- include "spark-operator.fullname" . }}-controller +{{- end -}} + +{{/* +Common labels for the controller +*/}} +{{- define "spark-operator.controller.labels" -}} +{{ include "spark-operator.labels" . }} +app.kubernetes.io/component: controller +{{- end -}} + +{{/* +Selector labels for the controller +*/}} +{{- define "spark-operator.controller.selectorLabels" -}} +{{ include "spark-operator.selectorLabels" . }} +app.kubernetes.io/component: controller +{{- end -}} + +{{/* +Create the name of the service account to be used by the controller +*/}} +{{- define "spark-operator.controller.serviceAccountName" -}} +{{- if .Values.controller.serviceAccount.create -}} +{{ .Values.controller.serviceAccount.name | default (include "spark-operator.controller.name" .) 
}} +{{- else -}} +{{ .Values.controller.serviceAccount.name | default "default" }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the deployment to be used by controller +*/}} +{{- define "spark-operator.controller.deploymentName" -}} +{{ include "spark-operator.controller.name" . }} +{{- end -}} + +{{/* +Create the name of the lease resource to be used by leader election +*/}} +{{- define "spark-operator.controller.leaderElectionName" -}} +{{ include "spark-operator.controller.name" . }}-lock +{{- end -}} + +{{/* +Create the name of the pod disruption budget to be used by controller +*/}} +{{- define "spark-operator.controller.podDisruptionBudgetName" -}} +{{ include "spark-operator.controller.name" . }}-pdb +{{- end -}} diff --git a/charts/spark-operator-chart/templates/controller/deployment.yaml b/charts/spark-operator-chart/templates/controller/deployment.yaml new file mode 100644 index 0000000000..02f9c2c90b --- /dev/null +++ b/charts/spark-operator-chart/templates/controller/deployment.yaml @@ -0,0 +1,162 @@ +{{/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "spark-operator.controller.deploymentName" . }} + labels: + {{- include "spark-operator.controller.labels" . | nindent 4 }} +spec: + {{- with .Values.controller.replicas }} + replicas: {{ . }} + {{- end }} + selector: + matchLabels: + {{- include "spark-operator.controller.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + {{- include "spark-operator.controller.selectorLabels" . | nindent 8 }} + {{- with .Values.controller.labels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if or .Values.controller.annotations .Values.prometheus.metrics.enable }} + annotations: + {{- if .Values.prometheus.metrics.enable }} + prometheus.io/scrape: "true" + prometheus.io/port: {{ .Values.prometheus.metrics.port | quote }} + prometheus.io/path: {{ .Values.prometheus.metrics.endpoint }} + {{- end }} + {{- with .Values.controller.annotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- end }} + spec: + containers: + - name: spark-operator-controller + image: {{ include "spark-operator.image" . }} + {{- with .Values.image.pullPolicy }} + imagePullPolicy: {{ . }} + {{- end }} + args: + - controller + - start + {{- with .Values.controller.logLevel }} + - --zap-log-level={{ . }} + {{- end }} + {{- with .Values.spark.jobNamespaces }} + - --namespaces={{ . | join "," }} + {{- end }} + - --controller-threads={{ .Values.controller.workers }} + {{- with .Values.controller.uiService.enable }} + - --enable-ui-service=true + {{- end }} + {{- if .Values.controller.uiIngress.enable }} + {{- with .Values.controller.uiIngress.urlFormat }} + - --ingress-url-format={{ . 
}} + {{- end }} + {{- end }} + {{- with .Values.controller.batchScheduler.enable }} + - --enable-batch-scheduler=true + {{- end }} + {{- if .Values.prometheus.metrics.enable }} + - --enable-metrics=true + - --metrics-bind-address=:{{ .Values.prometheus.metrics.port }} + - --metrics-endpoint={{ .Values.prometheus.metrics.endpoint }} + - --metrics-prefix={{ .Values.prometheus.metrics.prefix }} + - --metrics-labels=app_type + {{- end }} + - --leader-election=true + - --leader-election-lock-name={{ include "spark-operator.controller.leaderElectionName" . }} + - --leader-election-lock-namespace={{ .Release.Namespace }} + {{- if .Values.prometheus.metrics.enable }} + ports: + - name: {{ .Values.prometheus.metrics.portName | quote }} + containerPort: {{ .Values.prometheus.metrics.port }} + {{- end }} + {{- with .Values.controller.env }} + env: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.controller.envFrom }} + envFrom: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.controller.volumeMounts }} + volumeMounts: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.controller.resources }} + resources: + {{- toYaml . | nindent 10 }} + {{- end }} + livenessProbe: + httpGet: + port: 8081 + scheme: HTTP + path: /healthz + readinessProbe: + httpGet: + port: 8081 + scheme: HTTP + path: /readyz + {{- with .Values.controller.securityContext }} + securityContext: + {{- toYaml . | nindent 10 }} + {{- end }} + {{- with .Values.controller.sidecars }} + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.image.pullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.controller.volumes }} + volumes: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.controller.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.controller.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.controller.tolerations }} + tolerations: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.controller.priorityClassName }} + priorityClassName: {{ . }} + {{- end }} + serviceAccountName: {{ include "spark-operator.controller.serviceAccountName" . }} + {{- with .Values.controller.podSecurityContext }} + securityContext: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.controller.topologySpreadConstraints }} + {{- if le (int .Values.controller.replicas) 1 }} + {{- fail "controller.replicas must be greater than 1 to enable topology spread constraints for controller pods"}} + {{- end }} + {{- $selectorLabels := include "spark-operator.controller.selectorLabels" . | fromYaml }} + {{- $labelSelectorDict := dict "labelSelector" ( dict "matchLabels" $selectorLabels ) }} + topologySpreadConstraints: + {{- range .Values.controller.topologySpreadConstraints }} + - {{ mergeOverwrite . $labelSelectorDict | toYaml | nindent 8 | trim }} + {{- end }} + {{- end }} diff --git a/charts/spark-operator-chart/templates/controller/poddisruptionbudget.yaml b/charts/spark-operator-chart/templates/controller/poddisruptionbudget.yaml new file mode 100644 index 0000000000..38c748d6f4 --- /dev/null +++ b/charts/spark-operator-chart/templates/controller/poddisruptionbudget.yaml @@ -0,0 +1,34 @@ +{{/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if .Values.controller.podDisruptionBudget.enable }}
+{{- if le (int .Values.controller.replicas) 1 }}
+{{- fail "controller.replicas must be greater than 1 to enable pod disruption budget for controller" }}
+{{- end -}}
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+  name: {{ include "spark-operator.controller.podDisruptionBudgetName" . }}
+  labels:
+    {{- include "spark-operator.controller.labels" . | nindent 4 }}
+spec:
+  selector:
+    matchLabels:
+      {{- include "spark-operator.controller.selectorLabels" . | nindent 6 }}
+  {{- with .Values.controller.podDisruptionBudget.minAvailable }}
+  minAvailable: {{ . }}
+  {{- end }}
+{{- end }}
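The `fail` guard above makes the PDB opt-in and self-checking: with a single replica a PDB would only block node drains. A hypothetical values override that renders this template (keys as defined in this chart's values):

```yaml
# Run two controller replicas so the guard (replicas > 1) passes,
# and keep at least one controller pod up during voluntary disruptions.
controller:
  replicas: 2
  podDisruptionBudget:
    enable: true
    minAvailable: 1
```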
diff --git a/charts/spark-operator-chart/templates/controller/rbac.yaml b/charts/spark-operator-chart/templates/controller/rbac.yaml
new file mode 100644
index 0000000000..472d0fcc7c
--- /dev/null
+++ b/charts/spark-operator-chart/templates/controller/rbac.yaml
@@ -0,0 +1,201 @@
+{{/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if .Values.controller.rbac.create -}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: {{ include "spark-operator.controller.name" . }}
+  labels:
+    {{- include "spark-operator.controller.labels" . | nindent 4 }}
+  {{- with .Values.controller.rbac.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
+  - deletecollection
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  verbs:
+  - get
+  - create
+  - update
+  - patch
+  - delete
+- apiGroups:
+  - ""
+  resources:
+  - services
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
+- apiGroups:
+  - extensions
+  - networking.k8s.io
+  resources:
+  - ingresses
+  verbs:
+  - get
+  - create
+  - delete
+- apiGroups:
+  - ""
+  resources:
+  - nodes
+  verbs:
+  - get
+- apiGroups:
+  - ""
+  resources:
+  - events
+  verbs:
+  - create
+  - update
+  - patch
+- apiGroups:
+  - apiextensions.k8s.io
+  resources:
+  - customresourcedefinitions
+  verbs:
+  - get
+- apiGroups:
+  - sparkoperator.k8s.io
+  resources:
+  - sparkapplications
+  - scheduledsparkapplications
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
+- apiGroups:
+  - sparkoperator.k8s.io
+  resources:
+  - sparkapplications/status
+  - scheduledsparkapplications/status
+  verbs:
+  - get
+  - update
+  - patch
+- apiGroups:
+  - sparkoperator.k8s.io
+  resources:
+  - sparkapplications/finalizers
+  - scheduledsparkapplications/finalizers
+  verbs:
+  - update
+{{- if .Values.controller.batchScheduler.enable }}
+{{/* required for the `volcano` batch scheduler */}}
+- apiGroups:
+  - scheduling.incubator.k8s.io
+  - scheduling.sigs.dev
+  - scheduling.volcano.sh
+  resources:
+  - podgroups
+  verbs:
+  - "*"
+{{- end }}
+---
+
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: {{ include "spark-operator.controller.name" . }}
+  labels:
+    {{- include "spark-operator.controller.labels" . | nindent 4 }}
+  {{- with .Values.controller.rbac.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+subjects:
+- kind: ServiceAccount
+  name: {{ include "spark-operator.controller.serviceAccountName" . }}
+  namespace: {{ .Release.Namespace }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: {{ include "spark-operator.controller.name" . }}
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: {{ include "spark-operator.controller.name" . }}
+  labels:
+    {{- include "spark-operator.controller.labels" . | nindent 4 }}
+  {{- with .Values.controller.rbac.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+rules:
+- apiGroups:
+  - coordination.k8s.io
+  resources:
+  - leases
+  verbs:
+  - create
+- apiGroups:
+  - coordination.k8s.io
+  resources:
+  - leases
+  resourceNames:
+  - {{ include "spark-operator.controller.leaderElectionName" . }}
+  verbs:
+  - get
+  - update
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: {{ include "spark-operator.controller.name" . }}
+  labels:
+    {{- include "spark-operator.controller.labels" . | nindent 4 }}
+  {{- with .Values.controller.rbac.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+subjects:
+- kind: ServiceAccount
+  name: {{ include "spark-operator.controller.serviceAccountName" . }}
+  namespace: {{ .Release.Namespace }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: {{ include "spark-operator.controller.name" .
}} +{{- end }} diff --git a/charts/spark-operator-chart/templates/controller/serviceaccount.yaml b/charts/spark-operator-chart/templates/controller/serviceaccount.yaml new file mode 100644 index 0000000000..126e4245c8 --- /dev/null +++ b/charts/spark-operator-chart/templates/controller/serviceaccount.yaml @@ -0,0 +1,28 @@ +{{/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if .Values.controller.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "spark-operator.controller.serviceAccountName" . }} + labels: + {{- include "spark-operator.controller.labels" . | nindent 4 }} + {{- with .Values.controller.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/charts/spark-operator-chart/templates/deployment.yaml b/charts/spark-operator-chart/templates/deployment.yaml deleted file mode 100644 index 396f8ae019..0000000000 --- a/charts/spark-operator-chart/templates/deployment.yaml +++ /dev/null @@ -1,150 +0,0 @@ -# If the admission webhook is enabled, then a post-install step is required -# to generate and install the secret in the operator namespace. - -# In the post-install hook, the token corresponding to the operator service account -# is used to authenticate with the Kubernetes API server to install the secret bundle. -{{- $jobNamespaces := .Values.sparkJobNamespaces | default list }} ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "spark-operator.fullname" . }} - labels: - {{- include "spark-operator.labels" . | nindent 4 }} -spec: - replicas: {{ .Values.replicaCount }} - selector: - matchLabels: - {{- include "spark-operator.selectorLabels" . | nindent 6 }} - strategy: - type: Recreate - template: - metadata: - {{- if or .Values.podAnnotations .Values.metrics.enable }} - annotations: - {{- if .Values.metrics.enable }} - prometheus.io/scrape: "true" - prometheus.io/port: "{{ .Values.metrics.port }}" - prometheus.io/path: {{ .Values.metrics.endpoint }} - {{- end }} - {{- if .Values.podAnnotations }} - {{- toYaml .Values.podAnnotations | trim | nindent 8 }} - {{- end }} - {{- end }} - labels: - {{- include "spark-operator.selectorLabels" . | nindent 8 }} - {{- with .Values.podLabels }} - {{- toYaml . | trim | nindent 8 }} - {{- end }} - spec: - serviceAccountName: {{ include "spark-operator.serviceAccountName" . }} - {{- with .Values.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . 
| nindent 8 }} - {{- end }} - securityContext: - {{- toYaml .Values.podSecurityContext | nindent 8 }} - containers: - - name: {{ .Chart.Name }} - image: {{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }} - imagePullPolicy: {{ .Values.image.pullPolicy }} - {{- if gt (int .Values.replicaCount) 1 }} - env: - - name: POD_NAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.name - {{- end }} - envFrom: - {{- toYaml .Values.envFrom | nindent 10 }} - securityContext: - {{- toYaml .Values.securityContext | nindent 10 }} - {{- if or .Values.metrics.enable .Values.webhook.enable }} - ports: - {{ if .Values.metrics.enable -}} - - name: {{ .Values.metrics.portName | quote }} - containerPort: {{ .Values.metrics.port }} - {{- end }} - {{ if .Values.webhook.enable -}} - - name: {{ .Values.webhook.portName | quote }} - containerPort: {{ .Values.webhook.port }} - {{- end }} - {{ end -}} - args: - - -v={{ .Values.logLevel }} - - -logtostderr - {{- if eq (len $jobNamespaces) 1 }} - - -namespace={{ index $jobNamespaces 0 }} - {{- end }} - - -enable-ui-service={{ .Values.uiService.enable}} - - -ingress-url-format={{ .Values.ingressUrlFormat }} - - -controller-threads={{ .Values.controllerThreads }} - - -resync-interval={{ .Values.resyncInterval }} - - -enable-batch-scheduler={{ .Values.batchScheduler.enable }} - - -label-selector-filter={{ .Values.labelSelectorFilter }} - {{- if .Values.metrics.enable }} - - -enable-metrics=true - - -metrics-labels=app_type - - -metrics-port={{ .Values.metrics.port }} - - -metrics-endpoint={{ .Values.metrics.endpoint }} - - -metrics-prefix={{ .Values.metrics.prefix }} - {{- end }} - {{- if .Values.webhook.enable }} - - -enable-webhook=true - - -webhook-secret-name={{ include "spark-operator.webhookSecretName" . }} - - -webhook-secret-namespace={{ .Release.Namespace }} - - -webhook-svc-name={{ include "spark-operator.webhookServiceName" . }} - - -webhook-svc-namespace={{ .Release.Namespace }} - - -webhook-config-name={{ include "spark-operator.fullname" . }}-webhook-config - - -webhook-port={{ .Values.webhook.port }} - - -webhook-timeout={{ .Values.webhook.timeout }} - - -webhook-namespace-selector={{ .Values.webhook.namespaceSelector }} - - -webhook-object-selector={{ .Values.webhook.objectSelector }} - {{- end }} - - -enable-resource-quota-enforcement={{ .Values.resourceQuotaEnforcement.enable }} - {{- if gt (int .Values.replicaCount) 1 }} - - -leader-election=true - - -leader-election-lock-namespace={{ default .Release.Namespace .Values.leaderElection.lockNamespace }} - - -leader-election-lock-name={{ .Values.leaderElection.lockName }} - {{- end }} - {{- with .Values.resources }} - resources: - {{- toYaml . | nindent 10 }} - {{- end }} - {{- with .Values.volumeMounts }} - volumeMounts: - {{- toYaml . | nindent 10 }} - {{- end }} - {{- with .Values.sidecars }} - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.volumes }} - volumes: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- if .Values.priorityClassName }} - priorityClassName: {{ .Values.priorityClassName }} - {{- end }} - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- if and .Values.topologySpreadConstraints (gt (int .Values.replicaCount) 1) }} - {{- $selectorLabels := include "spark-operator.selectorLabels" . 
| fromYaml -}} - {{- $labelSelectorDict := dict "labelSelector" ( dict "matchLabels" $selectorLabels ) }} - topologySpreadConstraints: - {{- range .Values.topologySpreadConstraints }} - - {{ mergeOverwrite . $labelSelectorDict | toYaml | nindent 8 | trim }} - {{- end }} - {{ else if and .Values.topologySpreadConstraints (eq (int .Values.replicaCount) 1) }} - {{ fail "replicaCount must be greater than 1 to enable topologySpreadConstraints."}} - {{- end }} diff --git a/charts/spark-operator-chart/templates/poddisruptionbudget.yaml b/charts/spark-operator-chart/templates/poddisruptionbudget.yaml deleted file mode 100644 index 317f8bdb9d..0000000000 --- a/charts/spark-operator-chart/templates/poddisruptionbudget.yaml +++ /dev/null @@ -1,17 +0,0 @@ -{{- if $.Values.podDisruptionBudget.enable }} -{{- if (gt (int $.Values.replicaCount) 1) }} -apiVersion: policy/v1 -kind: PodDisruptionBudget -metadata: - name: {{ include "spark-operator.fullname" . }}-pdb - labels: - {{- include "spark-operator.labels" . | nindent 4 }} -spec: - selector: - matchLabels: - {{- include "spark-operator.selectorLabels" . | nindent 6 }} - minAvailable: {{ $.Values.podDisruptionBudget.minAvailable }} -{{- else }} -{{- fail "replicaCount must be greater than 1 to enable PodDisruptionBudget" }} -{{- end }} -{{- end }} diff --git a/charts/spark-operator-chart/templates/prometheus-podmonitor.yaml b/charts/spark-operator-chart/templates/prometheus-podmonitor.yaml deleted file mode 100644 index eec380d74f..0000000000 --- a/charts/spark-operator-chart/templates/prometheus-podmonitor.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{{ if and .Values.metrics.enable .Values.podMonitor.enable }} -apiVersion: monitoring.coreos.com/v1 -kind: PodMonitor -metadata: - name: {{ include "spark-operator.name" . -}}-podmonitor - labels: {{ toYaml .Values.podMonitor.labels | nindent 4 }} -spec: - podMetricsEndpoints: - - interval: {{ .Values.podMonitor.podMetricsEndpoint.interval }} - port: {{ .Values.metrics.portName | quote }} - scheme: {{ .Values.podMonitor.podMetricsEndpoint.scheme }} - jobLabel: {{ .Values.podMonitor.jobLabel }} - namespaceSelector: - matchNames: - - {{ .Release.Namespace }} - selector: - matchLabels: - {{- include "spark-operator.selectorLabels" . | nindent 6 }} -{{ end }} \ No newline at end of file diff --git a/charts/spark-operator-chart/templates/prometheus/_helpers.tpl b/charts/spark-operator-chart/templates/prometheus/_helpers.tpl new file mode 100644 index 0000000000..d767419b56 --- /dev/null +++ b/charts/spark-operator-chart/templates/prometheus/_helpers.tpl @@ -0,0 +1,22 @@ +{{/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{/* +Create the name of pod monitor +*/}} +{{- define "spark-operator.prometheus.podMonitorName" -}} +{{- include "spark-operator.fullname" . 
}}-podmonitor +{{- end -}} diff --git a/charts/spark-operator-chart/templates/prometheus/podmonitor.yaml b/charts/spark-operator-chart/templates/prometheus/podmonitor.yaml new file mode 100644 index 0000000000..a9c5289c06 --- /dev/null +++ b/charts/spark-operator-chart/templates/prometheus/podmonitor.yaml @@ -0,0 +1,44 @@ +{{/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if .Values.prometheus.podMonitor.create -}} +{{- if not .Values.prometheus.metrics.enable }} +{{- fail "`metrics.enable` must be set to true when `podMonitor.create` is true." }} +{{- end }} +{{- if not (.Capabilities.APIVersions.Has "monitoring.coreos.com/v1/PodMonitor") }} +{{- fail "The cluster does not support the required API version `monitoring.coreos.com/v1` for `PodMonitor`." }} +{{- end }} +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: {{ include "spark-operator.prometheus.podMonitorName" . }} + {{- with .Values.prometheus.podMonitor.labels }} + labels: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + podMetricsEndpoints: + - interval: {{ .Values.prometheus.podMonitor.podMetricsEndpoint.interval }} + port: {{ .Values.prometheus.metrics.portName | quote }} + scheme: {{ .Values.prometheus.podMonitor.podMetricsEndpoint.scheme }} + jobLabel: {{ .Values.prometheus.podMonitor.jobLabel }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: + {{- include "spark-operator.selectorLabels" . | nindent 6 }} +{{- end }} diff --git a/charts/spark-operator-chart/templates/rbac.yaml b/charts/spark-operator-chart/templates/rbac.yaml deleted file mode 100644 index aa110ff497..0000000000 --- a/charts/spark-operator-chart/templates/rbac.yaml +++ /dev/null @@ -1,148 +0,0 @@ -{{- if or .Values.rbac.create .Values.rbac.createClusterRole -}} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ include "spark-operator.fullname" . }} - labels: - {{- include "spark-operator.labels" . | nindent 4 }} - {{- with .Values.rbac.annotations }} - annotations: - {{- toYaml . 
| nindent 4 }} - {{- end }} -rules: -- apiGroups: - - "" - resources: - - pods - - persistentvolumeclaims - verbs: - - "*" -- apiGroups: - - "" - resources: - - services - - configmaps - - secrets - verbs: - - create - - get - - delete - - update - - patch -- apiGroups: - - extensions - - networking.k8s.io - resources: - - ingresses - verbs: - - create - - get - - delete -- apiGroups: - - "" - resources: - - nodes - verbs: - - get -- apiGroups: - - "" - resources: - - events - verbs: - - create - - update - - patch -- apiGroups: - - "" - resources: - - resourcequotas - verbs: - - get - - list - - watch -- apiGroups: - - apiextensions.k8s.io - resources: - - customresourcedefinitions - verbs: - - get -- apiGroups: - - admissionregistration.k8s.io - resources: - - mutatingwebhookconfigurations - - validatingwebhookconfigurations - verbs: - - create - - get - - update - - delete -- apiGroups: - - sparkoperator.k8s.io - resources: - - sparkapplications - - sparkapplications/status - - sparkapplications/finalizers - - scheduledsparkapplications - - scheduledsparkapplications/status - - scheduledsparkapplications/finalizers - verbs: - - "*" - {{- if .Values.batchScheduler.enable }} - # required for the `volcano` batch scheduler -- apiGroups: - - scheduling.incubator.k8s.io - - scheduling.sigs.dev - - scheduling.volcano.sh - resources: - - podgroups - verbs: - - "*" - {{- end }} - {{ if .Values.webhook.enable }} -- apiGroups: - - batch - resources: - - jobs - verbs: - - delete - {{- end }} - {{- if gt (int .Values.replicaCount) 1 }} -- apiGroups: - - coordination.k8s.io - resources: - - leases - resourceNames: - - {{ .Values.leaderElection.lockName }} - verbs: - - get - - update - - patch - - delete -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create - {{- end }} ---- - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ include "spark-operator.fullname" . }} - labels: - {{- include "spark-operator.labels" . | nindent 4 }} - {{- with .Values.rbac.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -subjects: - - kind: ServiceAccount - name: {{ include "spark-operator.serviceAccountName" . }} - namespace: {{ .Release.Namespace }} -roleRef: - kind: ClusterRole - name: {{ include "spark-operator.fullname" . }} - apiGroup: rbac.authorization.k8s.io -{{- end }} diff --git a/charts/spark-operator-chart/templates/serviceaccount.yaml b/charts/spark-operator-chart/templates/serviceaccount.yaml deleted file mode 100644 index a75f231901..0000000000 --- a/charts/spark-operator-chart/templates/serviceaccount.yaml +++ /dev/null @@ -1,12 +0,0 @@ -{{- if .Values.serviceAccounts.sparkoperator.create }} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "spark-operator.serviceAccountName" . }} - labels: - {{- include "spark-operator.labels" . | nindent 4 }} - {{- with .Values.serviceAccounts.sparkoperator.annotations }} - annotations: - {{- toYaml . 
| nindent 4 }} - {{- end }} -{{- end }} diff --git a/charts/spark-operator-chart/templates/spark-rbac.yaml b/charts/spark-operator-chart/templates/spark-rbac.yaml deleted file mode 100644 index bbf9da6201..0000000000 --- a/charts/spark-operator-chart/templates/spark-rbac.yaml +++ /dev/null @@ -1,39 +0,0 @@ -{{- if or .Values.rbac.create .Values.rbac.createRole }} -{{- $jobNamespaces := .Values.sparkJobNamespaces | default list }} -{{- range $jobNamespace := $jobNamespaces }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: spark-role - namespace: {{ $jobNamespace }} - labels: - {{- include "spark-operator.labels" $ | nindent 4 }} -rules: -- apiGroups: - - "" - resources: - - pods - - services - - configmaps - - persistentvolumeclaims - verbs: - - "*" ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: spark - namespace: {{ $jobNamespace }} - labels: - {{- include "spark-operator.labels" $ | nindent 4 }} -subjects: -- kind: ServiceAccount - name: {{ include "spark.serviceAccountName" $ }} - namespace: {{ $jobNamespace }} -roleRef: - kind: Role - name: spark-role - apiGroup: rbac.authorization.k8s.io -{{- end }} -{{- end }} diff --git a/charts/spark-operator-chart/templates/spark-serviceaccount.yaml b/charts/spark-operator-chart/templates/spark-serviceaccount.yaml deleted file mode 100644 index af8e8d7f9d..0000000000 --- a/charts/spark-operator-chart/templates/spark-serviceaccount.yaml +++ /dev/null @@ -1,14 +0,0 @@ -{{- if .Values.serviceAccounts.spark.create }} -{{- range $sparkJobNamespace := .Values.sparkJobNamespaces | default (list .Release.Namespace) }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "spark.serviceAccountName" $ }} - namespace: {{ $sparkJobNamespace }} - {{- with $.Values.serviceAccounts.spark.annotations }} - annotations: {{ toYaml . | nindent 4 }} - {{- end }} - labels: {{ include "spark-operator.labels" $ | nindent 4 }} -{{- end }} -{{- end }} diff --git a/charts/spark-operator-chart/templates/spark/_helpers.tpl b/charts/spark-operator-chart/templates/spark/_helpers.tpl new file mode 100644 index 0000000000..150ae966f4 --- /dev/null +++ b/charts/spark-operator-chart/templates/spark/_helpers.tpl @@ -0,0 +1,47 @@ +{{/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{/* +Create the name of spark component +*/}} +{{- define "spark-operator.spark.name" -}} +{{- include "spark-operator.fullname" . }}-spark +{{- end -}} + +{{/* +Create the name of the service account to be used by spark applications +*/}} +{{- define "spark-operator.spark.serviceAccountName" -}} +{{- if .Values.spark.serviceAccount.create -}} +{{- .Values.spark.serviceAccount.name | default (include "spark-operator.spark.name" .) 
-}}
+{{- else -}}
+{{- .Values.spark.serviceAccount.name | default "default" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create the name of the role to be used by spark service account
+*/}}
+{{- define "spark-operator.spark.roleName" -}}
+{{- include "spark-operator.spark.name" . }}
+{{- end -}}
+
+{{/*
+Create the name of the role binding to be used by spark service account
+*/}}
+{{- define "spark-operator.spark.roleBindingName" -}}
+{{- include "spark-operator.spark.name" . }}
+{{- end -}}
diff --git a/charts/spark-operator-chart/templates/spark/rbac.yaml b/charts/spark-operator-chart/templates/spark/rbac.yaml
new file mode 100644
index 0000000000..e850b1e507
--- /dev/null
+++ b/charts/spark-operator-chart/templates/spark/rbac.yaml
@@ -0,0 +1,73 @@
+{{/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if .Values.spark.rbac.create -}}
+{{- range $jobNamespace := .Values.spark.jobNamespaces | default list }}
+{{- if $jobNamespace }}
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: {{ include "spark-operator.spark.roleName" $ }}
+  namespace: {{ $jobNamespace }}
+  labels:
+    {{- include "spark-operator.labels" $ | nindent 4 }}
+  {{- with $.Values.spark.rbac.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - configmaps
+  - persistentvolumeclaims
+  - services
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
+  - deletecollection
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: {{ include "spark-operator.spark.roleBindingName" $ }}
+  namespace: {{ $jobNamespace }}
+  labels:
+    {{- include "spark-operator.labels" $ | nindent 4 }}
+  {{- with $.Values.spark.rbac.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+subjects:
+- kind: ServiceAccount
+  name: {{ include "spark-operator.spark.serviceAccountName" $ }}
+  namespace: {{ $jobNamespace }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: {{ include "spark-operator.spark.roleName" $ }}
+{{- end }}
+{{- end }}
+{{- end }}
diff --git a/charts/spark-operator-chart/templates/spark/serviceaccount.yaml b/charts/spark-operator-chart/templates/spark/serviceaccount.yaml
new file mode 100644
index 0000000000..f05d8fae3b
--- /dev/null
+++ b/charts/spark-operator-chart/templates/spark/serviceaccount.yaml
@@ -0,0 +1,30 @@
+{{/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
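+*/}}
+
+{{/*
+A minimal values sketch that exercises this template. The keys mirror the
+values referenced below; the annotation key/value is illustrative only. With
+these values, one service account is rendered per listed job namespace:
+
+  spark:
+    jobNamespaces:
+    - default
+    - spark-jobs
+    serviceAccount:
+      create: true
+      annotations:
+        example.com/team: data-platform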
+*/}} + +{{- if .Values.spark.serviceAccount.create }} +{{- range $sparkJobNamespace := .Values.spark.jobNamespaces | default list }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "spark-operator.spark.serviceAccountName" $ }} + namespace: {{ $sparkJobNamespace }} + labels: {{ include "spark-operator.labels" $ | nindent 4 }} + {{- with $.Values.spark.serviceAccount.annotations }} + annotations: {{ toYaml . | nindent 4 }} + {{- end }} +{{- end }} +{{- end }} diff --git a/charts/spark-operator-chart/templates/webhook/_helpers.tpl b/charts/spark-operator-chart/templates/webhook/_helpers.tpl index 9600011294..71588123b7 100644 --- a/charts/spark-operator-chart/templates/webhook/_helpers.tpl +++ b/charts/spark-operator-chart/templates/webhook/_helpers.tpl @@ -1,14 +1,113 @@ +{{/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{/* +Create the name of webhook component +*/}} +{{- define "spark-operator.webhook.name" -}} +{{- include "spark-operator.fullname" . }}-webhook +{{- end -}} + +{{/* +Common labels for the webhook +*/}} +{{- define "spark-operator.webhook.labels" -}} +{{ include "spark-operator.labels" . }} +app.kubernetes.io/component: webhook +{{- end -}} + +{{/* +Selector labels for the webhook +*/}} +{{- define "spark-operator.webhook.selectorLabels" -}} +{{ include "spark-operator.selectorLabels" . }} +app.kubernetes.io/component: webhook +{{- end -}} + +{{/* +Create the name of service account to be used by webhook +*/}} +{{- define "spark-operator.webhook.serviceAccountName" -}} +{{- if .Values.webhook.serviceAccount.create -}} +{{ .Values.webhook.serviceAccount.name | default (include "spark-operator.webhook.name" .) }} +{{- else -}} +{{ .Values.webhook.serviceAccount.name | default "default" }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the role to be used by webhook +*/}} +{{- define "spark-operator.webhook.roleName" -}} +{{- include "spark-operator.webhook.name" . }} +{{- end -}} + +{{/* +Create the name of the role binding to be used by webhook +*/}} +{{- define "spark-operator.webhook.roleBindingName" -}} +{{- include "spark-operator.webhook.name" . }} +{{- end -}} + {{/* Create the name of the secret to be used by webhook */}} -{{- define "spark-operator.webhookSecretName" -}} -{{ include "spark-operator.fullname" . }}-webhook-certs +{{- define "spark-operator.webhook.secretName" -}} +{{ include "spark-operator.webhook.name" . }}-certs {{- end -}} {{/* Create the name of the service to be used by webhook */}} -{{- define "spark-operator.webhookServiceName" -}} -{{ include "spark-operator.fullname" . }}-webhook-svc +{{- define "spark-operator.webhook.serviceName" -}} +{{ include "spark-operator.webhook.name" . 
}}-svc
+{{- end -}}
+
+{{/*
+Create the name of mutating webhook configuration
+*/}}
+{{- define "spark-operator.mutatingWebhookConfigurationName" -}}
+webhook.sparkoperator.k8s.io
+{{- end -}}
+
+{{/*
+Create the name of validating webhook configuration
+*/}}
+{{- define "spark-operator.validatingWebhookConfigurationName" -}}
+quotaenforcer.sparkoperator.k8s.io
+{{- end -}}
+
+{{/*
+Create the name of the deployment to be used by webhook
+*/}}
+{{- define "spark-operator.webhook.deploymentName" -}}
+{{ include "spark-operator.webhook.name" . }}
+{{- end -}}
+
+{{/*
+Create the name of the lease resource to be used by leader election
+*/}}
+{{- define "spark-operator.webhook.leaderElectionName" -}}
+{{ include "spark-operator.webhook.name" . }}-lock
+{{- end -}}
+
+{{/*
+Create the name of the pod disruption budget to be used by webhook
+*/}}
+{{- define "spark-operator.webhook.podDisruptionBudgetName" -}}
+{{ include "spark-operator.webhook.name" . }}-pdb
 {{- end -}}
diff --git a/charts/spark-operator-chart/templates/webhook/deployment.yaml b/charts/spark-operator-chart/templates/webhook/deployment.yaml
new file mode 100644
index 0000000000..89b07e3df0
--- /dev/null
+++ b/charts/spark-operator-chart/templates/webhook/deployment.yaml
@@ -0,0 +1,155 @@
+{{/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{ include "spark-operator.webhook.deploymentName" . }}
+  labels:
+    {{- include "spark-operator.webhook.labels" . | nindent 4 }}
+spec:
+  {{- with .Values.webhook.replicas }}
+  replicas: {{ . }}
+  {{- end }}
+  selector:
+    matchLabels:
+      {{- include "spark-operator.webhook.selectorLabels" . | nindent 6 }}
+  template:
+    metadata:
+      labels:
+        {{- include "spark-operator.webhook.selectorLabels" . | nindent 8 }}
+        {{- with .Values.webhook.labels }}
+        {{- toYaml . | nindent 8 }}
+        {{- end }}
+      {{- with .Values.webhook.annotations }}
+      annotations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+    spec:
+      containers:
+      - name: spark-operator-webhook
+        image: {{ include "spark-operator.image" . }}
+        {{- with .Values.image.pullPolicy }}
+        imagePullPolicy: {{ . }}
+        {{- end }}
+        args:
+        - webhook
+        - start
+        {{- with .Values.webhook.logLevel }}
+        - --zap-log-level={{ . }}
+        {{- end }}
+        {{- with .Values.spark.jobNamespaces }}
+        - --namespaces={{ . | join "," }}
+        {{- end }}
+        - --webhook-secret-name={{ include "spark-operator.webhook.secretName" . }}
+        - --webhook-secret-namespace={{ .Release.Namespace }}
+        - --webhook-svc-name={{ include "spark-operator.webhook.serviceName" . }}
+        - --webhook-svc-namespace={{ .Release.Namespace }}
+        - --webhook-port={{ .Values.webhook.port }}
+        - --mutating-webhook-name={{ include "spark-operator.webhook.name" . }}
+        - --validating-webhook-name={{ include "spark-operator.webhook.name" . 
}} + {{- with .Values.webhook.resourceQuotaEnforcement.enable }} + - --enable-resource-quota-enforcement=true + {{- end }} + {{- if .Values.prometheus.metrics.enable }} + - --enable-metrics=true + - --metrics-bind-address=:{{ .Values.prometheus.metrics.port }} + - --metrics-endpoint={{ .Values.prometheus.metrics.endpoint }} + - --metrics-prefix={{ .Values.prometheus.metrics.prefix }} + - --metrics-labels=app_type + {{- end }} + - --leader-election=true + - --leader-election-lock-name={{ include "spark-operator.webhook.leaderElectionName" . }} + - --leader-election-lock-namespace={{ .Release.Namespace }} + ports: + - name: {{ .Values.webhook.portName | quote }} + containerPort: {{ .Values.webhook.port }} + {{- if .Values.prometheus.metrics.enable }} + - name: {{ .Values.prometheus.metrics.portName | quote }} + containerPort: {{ .Values.prometheus.metrics.port }} + {{- end }} + {{- with .Values.webhook.env }} + env: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.webhook.envFrom }} + envFrom: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.webhook.volumeMounts }} + volumeMounts: + {{- toYaml . | nindent 10 }} + {{- end }} + {{- with .Values.webhook.resources }} + resources: + {{- toYaml . | nindent 10 }} + {{- end }} + livenessProbe: + httpGet: + port: 8081 + scheme: HTTP + path: /healthz + readinessProbe: + httpGet: + port: 8081 + scheme: HTTP + path: /readyz + {{- with .Values.webhook.securityContext }} + securityContext: + {{- toYaml . | nindent 10 }} + {{- end }} + {{- with .Values.webhook.sidecars }} + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.image.pullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.webhook.volumes }} + volumes: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.webhook.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.webhook.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.webhook.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.webhook.priorityClassName }} + priorityClassName: {{ . }} + {{- end }} + serviceAccountName: {{ include "spark-operator.webhook.serviceAccountName" . }} + {{- with .Values.webhook.podSecurityContext }} + securityContext: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.webhook.topologySpreadConstraints }} + {{- if le (int .Values.webhook.replicas) 1 }} + {{- fail "webhook.replicas must be greater than 1 to enable topology spread constraints for webhook pods"}} + {{- end }} + {{- $selectorLabels := include "spark-operator.webhook.selectorLabels" . | fromYaml }} + {{- $labelSelectorDict := dict "labelSelector" ( dict "matchLabels" $selectorLabels ) }} + topologySpreadConstraints: + {{- range .Values.webhook.topologySpreadConstraints }} + - {{ mergeOverwrite . $labelSelectorDict | toYaml | nindent 8 | trim }} + {{- end }} + {{- end }} diff --git a/charts/spark-operator-chart/templates/webhook/mutatingwebhookconfiguration.yaml b/charts/spark-operator-chart/templates/webhook/mutatingwebhookconfiguration.yaml new file mode 100644 index 0000000000..f48a04320f --- /dev/null +++ b/charts/spark-operator-chart/templates/webhook/mutatingwebhookconfiguration.yaml @@ -0,0 +1,116 @@ +{{/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: {{ include "spark-operator.webhook.name" . }} + labels: + {{- include "spark-operator.webhook.labels" . | nindent 4 }} +webhooks: +- name: mutate--v1-pod.sparkoperator.k8s.io + admissionReviewVersions: ["v1"] + clientConfig: + service: + name: {{ include "spark-operator.webhook.serviceName" . }} + namespace: {{ .Release.Namespace }} + port: {{ .Values.webhook.port }} + path: /mutate--v1-pod + sideEffects: NoneOnDryRun + {{- with .Values.webhook.failurePolicy }} + failurePolicy: {{ . }} + {{- end }} + {{- if .Values.spark.jobNamespaces }} + namespaceSelector: + matchExpressions: + - key: kubernetes.io/metadata.name + operator: In + values: + {{- range .Values.spark.jobNamespaces }} + - {{ . }} + {{- end }} + {{- end }} + objectSelector: + matchLabels: + sparkoperator.k8s.io/launched-by-spark-operator: "true" + rules: + - apiGroups: [""] + apiVersions: ["v1"] + resources: ["pods"] + operations: ["CREATE"] + {{- with .Values.webhook.timeoutSeconds }} + timeoutSeconds: {{ . }} + {{- end }} +- name: mutate-sparkoperator-k8s-io-v1beta2-sparkapplication.sparkoperator.k8s.io + admissionReviewVersions: ["v1"] + clientConfig: + service: + name: {{ include "spark-operator.webhook.serviceName" . }} + namespace: {{ .Release.Namespace }} + port: {{ .Values.webhook.port }} + path: /mutate-sparkoperator-k8s-io-v1beta2-sparkapplication + sideEffects: NoneOnDryRun + {{- with .Values.webhook.failurePolicy }} + failurePolicy: {{ . }} + {{- end }} + {{- if .Values.spark.jobNamespaces }} + namespaceSelector: + matchExpressions: + - key: kubernetes.io/metadata.name + operator: In + values: + {{- range .Values.spark.jobNamespaces }} + - {{ . }} + {{- end }} + {{- end }} + rules: + - apiGroups: ["sparkoperator.k8s.io"] + apiVersions: ["v1beta2"] + resources: ["sparkapplications"] + operations: ["CREATE", "UPDATE"] + {{- with .Values.webhook.timeoutSeconds }} + timeoutSeconds: {{ . }} + {{- end }} +- name: mutate-sparkoperator-k8s-io-v1beta2-scheduledsparkapplication.sparkoperator.k8s.io + admissionReviewVersions: ["v1"] + clientConfig: + service: + name: {{ include "spark-operator.webhook.serviceName" . }} + namespace: {{ .Release.Namespace }} + port: {{ .Values.webhook.port }} + path: /mutate-sparkoperator-k8s-io-v1beta2-scheduledsparkapplication + sideEffects: NoneOnDryRun + {{- with .Values.webhook.failurePolicy }} + failurePolicy: {{ . }} + {{- end }} + {{- if .Values.spark.jobNamespaces }} + namespaceSelector: + matchExpressions: + - key: kubernetes.io/metadata.name + operator: In + values: + {{- range .Values.spark.jobNamespaces }} + - {{ . }} + {{- end }} + {{- end }} + rules: + - apiGroups: ["sparkoperator.k8s.io"] + apiVersions: ["v1beta2"] + resources: ["scheduledsparkapplications"] + operations: ["CREATE", "UPDATE"] + {{- with .Values.webhook.timeoutSeconds }} + timeoutSeconds: {{ . 
}} + {{- end }} diff --git a/charts/spark-operator-chart/templates/webhook/poddisruptionbudget.yaml b/charts/spark-operator-chart/templates/webhook/poddisruptionbudget.yaml new file mode 100644 index 0000000000..6de7e6ef5f --- /dev/null +++ b/charts/spark-operator-chart/templates/webhook/poddisruptionbudget.yaml @@ -0,0 +1,34 @@ +{{/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if .Values.webhook.podDisruptionBudget.enable }} +{{- if le (int .Values.webhook.replicas) 1 }} +{{- fail "webhook.replicas must be greater than 1 to enable pod disruption budget for webhook" }} +{{- end -}} +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: {{ include "spark-operator.webhook.podDisruptionBudgetName" . }} + labels: + {{- include "spark-operator.webhook.labels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "spark-operator.webhook.selectorLabels" . | nindent 6 }} + {{- with .Values.webhook.podDisruptionBudget.minAvailable }} + minAvailable: {{ . }} + {{- end }} +{{- end }} diff --git a/charts/spark-operator-chart/templates/webhook/rbac.yaml b/charts/spark-operator-chart/templates/webhook/rbac.yaml new file mode 100644 index 0000000000..b1c5d426f4 --- /dev/null +++ b/charts/spark-operator-chart/templates/webhook/rbac.yaml @@ -0,0 +1,171 @@ +{{/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if .Values.webhook.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "spark-operator.webhook.name" . }} + labels: + {{- include "spark-operator.webhook.labels" . | nindent 4 }} + {{- with .Values.webhook.rbac.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - resourcequotas + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - update + - patch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - list + - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + resourceNames: + - {{ include "spark-operator.webhook.name" . 
}} + verbs: + - get + - update +- apiGroups: + - sparkoperator.k8s.io + resources: + - sparkapplications + - sparkapplications/status + - sparkapplications/finalizers + - scheduledsparkapplications + - scheduledsparkapplications/status + - scheduledsparkapplications/finalizers + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "spark-operator.webhook.name" . }} + labels: + {{- include "spark-operator.webhook.labels" . | nindent 4 }} + {{- with .Values.webhook.rbac.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +subjects: +- kind: ServiceAccount + name: {{ include "spark-operator.webhook.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "spark-operator.webhook.name" . }} + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "spark-operator.webhook.name" . }} + labels: + {{- include "spark-operator.webhook.labels" . | nindent 4 }} + {{- with .Values.webhook.rbac.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - create +- apiGroups: + - "" + resources: + - secrets + resourceNames: + - {{ include "spark-operator.webhook.secretName" . }} + verbs: + - get + - update +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create +- apiGroups: + - coordination.k8s.io + resources: + - leases + resourceNames: + - {{ include "spark-operator.webhook.leaderElectionName" . }} + verbs: + - get + - update + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "spark-operator.webhook.name" . }} + labels: + {{- include "spark-operator.webhook.labels" . | nindent 4 }} + {{- with .Values.webhook.rbac.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +subjects: +- kind: ServiceAccount + name: {{ include "spark-operator.webhook.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "spark-operator.webhook.name" . }} +{{- end }} diff --git a/charts/spark-operator-chart/templates/webhook/secret.yaml b/charts/spark-operator-chart/templates/webhook/secret.yaml deleted file mode 100644 index 672738f2c0..0000000000 --- a/charts/spark-operator-chart/templates/webhook/secret.yaml +++ /dev/null @@ -1,13 +0,0 @@ -{{- if .Values.webhook.enable -}} -apiVersion: v1 -kind: Secret -metadata: - name: {{ include "spark-operator.webhookSecretName" . }} - labels: - {{- include "spark-operator.labels" . | nindent 4 }} -data: - ca-key.pem: "" - ca-cert.pem: "" - server-key.pem: "" - server-cert.pem: "" -{{- end }} diff --git a/charts/spark-operator-chart/templates/webhook/service.yaml b/charts/spark-operator-chart/templates/webhook/service.yaml index e31f8236b5..45064a8076 100644 --- a/charts/spark-operator-chart/templates/webhook/service.yaml +++ b/charts/spark-operator-chart/templates/webhook/service.yaml @@ -1,15 +1,29 @@ -{{- if .Values.webhook.enable -}} +{{/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + apiVersion: v1 kind: Service metadata: - name: {{ include "spark-operator.webhookServiceName" . }} + name: {{ include "spark-operator.webhook.serviceName" . }} labels: - {{- include "spark-operator.labels" . | nindent 4 }} + {{- include "spark-operator.webhook.labels" . | nindent 4 }} spec: selector: - {{- include "spark-operator.selectorLabels" . | nindent 4 }} + {{- include "spark-operator.webhook.selectorLabels" . | nindent 4 }} ports: - - port: 443 + - port: {{ .Values.webhook.port }} targetPort: {{ .Values.webhook.portName | quote }} name: {{ .Values.webhook.portName }} -{{- end }} diff --git a/charts/spark-operator-chart/templates/webhook/serviceaccount.yaml b/charts/spark-operator-chart/templates/webhook/serviceaccount.yaml new file mode 100644 index 0000000000..77944b83ca --- /dev/null +++ b/charts/spark-operator-chart/templates/webhook/serviceaccount.yaml @@ -0,0 +1,28 @@ +{{/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +{{- if .Values.webhook.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "spark-operator.webhook.serviceAccountName" . }} + labels: + {{- include "spark-operator.webhook.labels" . | nindent 4 }} + {{- with .Values.webhook.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/charts/spark-operator-chart/templates/webhook/validatingwebhookconfiguration.yaml b/charts/spark-operator-chart/templates/webhook/validatingwebhookconfiguration.yaml new file mode 100644 index 0000000000..3fbf55184d --- /dev/null +++ b/charts/spark-operator-chart/templates/webhook/validatingwebhookconfiguration.yaml @@ -0,0 +1,83 @@ +{{/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/}} + +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: {{ include "spark-operator.webhook.name" . }} + labels: + {{- include "spark-operator.webhook.labels" . 
| nindent 4 }} +webhooks: +- name: validate-sparkoperator-k8s-io-v1beta2-sparkapplication.sparkoperator.k8s.io + admissionReviewVersions: ["v1"] + clientConfig: + service: + name: {{ include "spark-operator.webhook.serviceName" . }} + namespace: {{ .Release.Namespace }} + port: {{ .Values.webhook.port }} + path: /validate-sparkoperator-k8s-io-v1beta2-sparkapplication + sideEffects: NoneOnDryRun + {{- with .Values.webhook.failurePolicy }} + failurePolicy: {{ . }} + {{- end }} + {{- if .Values.spark.jobNamespaces }} + namespaceSelector: + matchExpressions: + - key: kubernetes.io/metadata.name + operator: In + values: + {{- range .Values.spark.jobNamespaces }} + - {{ . }} + {{- end }} + {{- end }} + rules: + - apiGroups: ["sparkoperator.k8s.io"] + apiVersions: ["v1beta2"] + resources: ["sparkapplications"] + operations: ["CREATE", "UPDATE"] + {{- with .Values.webhook.timeoutSeconds }} + timeoutSeconds: {{ . }} + {{- end }} +- name: validate-sparkoperator-k8s-io-v1beta2-scheduledsparkapplication.sparkoperator.k8s.io + admissionReviewVersions: ["v1"] + clientConfig: + service: + name: {{ include "spark-operator.webhook.serviceName" . }} + namespace: {{ .Release.Namespace }} + port: {{ .Values.webhook.port }} + path: /validate-sparkoperator-k8s-io-v1beta2-scheduledsparkapplication + sideEffects: NoneOnDryRun + {{- with .Values.webhook.failurePolicy }} + failurePolicy: {{ . }} + {{- end }} + {{- if .Values.spark.jobNamespaces }} + namespaceSelector: + matchExpressions: + - key: kubernetes.io/metadata.name + operator: In + values: + {{- range .Values.spark.jobNamespaces }} + - {{ . }} + {{- end }} + {{- end }} + rules: + - apiGroups: ["sparkoperator.k8s.io"] + apiVersions: ["v1beta2"] + resources: ["scheduledsparkapplications"] + operations: ["CREATE", "UPDATE"] + {{- with .Values.webhook.timeoutSeconds }} + timeoutSeconds: {{ . }} + {{- end }} diff --git a/charts/spark-operator-chart/tests/controller/deployment_test.yaml b/charts/spark-operator-chart/tests/controller/deployment_test.yaml new file mode 100644 index 0000000000..e4b6983a76 --- /dev/null +++ b/charts/spark-operator-chart/tests/controller/deployment_test.yaml @@ -0,0 +1,537 @@ +# +# Copyright 2024 The Kubeflow authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
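+#
+# These suites are written for the helm-unittest plugin. A sketch of running
+# them locally, assuming the plugin is installed from its upstream repository:
+#
+#   helm plugin install https://github.com/helm-unittest/helm-unittest
+#   helm unittest charts/spark-operator-chart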
+# + +suite: Test controller deployment + +templates: + - controller/deployment.yaml + +release: + name: spark-operator + namespace: spark-operator + +tests: + - it: Should use the specified image repository if `image.registry`, `image.repository` and `image.tag` are set + set: + image: + registry: test-registry + repository: test-repository + tag: test-tag + asserts: + - equal: + path: spec.template.spec.containers[0].image + value: test-registry/test-repository:test-tag + + - it: Should use the specified image pull policy if `image.pullPolicy` is set + set: + image: + pullPolicy: Always + asserts: + - equal: + path: spec.template.spec.containers[*].imagePullPolicy + value: Always + + - it: Should set replicas if `controller.replicas` is set + set: + controller: + replicas: 10 + asserts: + - equal: + path: spec.replicas + value: 10 + + - it: Should add pod labels if `controller.labels` is set + set: + controller: + labels: + key1: value1 + key2: value2 + asserts: + - equal: + path: spec.template.metadata.labels.key1 + value: value1 + - equal: + path: spec.template.metadata.labels.key2 + value: value2 + + - it: Should add prometheus annotations if `metrics.enable` is true + set: + prometheus: + metrics: + enable: true + port: 10254 + endpoint: /metrics + asserts: + - equal: + path: spec.template.metadata.annotations["prometheus.io/scrape"] + value: "true" + - equal: + path: spec.template.metadata.annotations["prometheus.io/port"] + value: "10254" + - equal: + path: spec.template.metadata.annotations["prometheus.io/path"] + value: /metrics + + - it: Should add pod annotations if `controller.annotations` is set + set: + controller: + annotations: + key1: value1 + key2: value2 + asserts: + - equal: + path: spec.template.metadata.annotations.key1 + value: value1 + - equal: + path: spec.template.metadata.annotations.key2 + value: value2 + + - it: Should contain `--zap-log-level` arg if `controller.logLevel` is set + set: + controller: + logLevel: debug + asserts: + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args + content: --zap-log-level=debug + + - it: Should contain `--namespaces` arg if `spark.jobNamespaces` is set + set: + spark.jobNamespaces: + - ns1 + - ns2 + asserts: + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args + content: --namespaces=ns1,ns2 + + - it: Should contain `--controller-threads` arg if `controller.workers` is set + set: + controller: + workers: 30 + asserts: + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args + content: --controller-threads=30 + + - it: Should contain `--enable-ui-service` arg if `controller.uiService.enable` is set to `true` + set: + controller: + uiService: + enable: true + asserts: + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args + content: --enable-ui-service=true + + - it: Should contain `--ingress-url-format` arg if `controller.uiIngress.enable` is set to `true` and `controller.uiIngress.urlFormat` is set + set: + controller: + uiService: + enable: true + uiIngress: + enable: true + urlFormat: "{{$appName}}.example.com/{{$appNamespace}}/{{$appName}}" + asserts: + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args + content: --ingress-url-format={{$appName}}.example.com/{{$appNamespace}}/{{$appName}} + + - it: Should contain `--enable-batch-scheduler` arg if `controller.batchScheduler.enable` is `true` + set: + controller: 
+ batchScheduler: + enable: true + asserts: + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args + content: --enable-batch-scheduler=true + + - it: Should contain `--enable-metrics` arg if `prometheus.metrics.enable` is set to `true` + set: + prometheus: + metrics: + enable: true + port: 12345 + portName: test-port + endpoint: /test-endpoint + prefix: test-prefix + asserts: + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args + content: --enable-metrics=true + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args + content: --metrics-bind-address=:12345 + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args + content: --metrics-endpoint=/test-endpoint + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args + content: --metrics-prefix=test-prefix + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args + content: --metrics-labels=app_type + + - it: Should enable leader election by default + asserts: + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args + content: --leader-election=true + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args + content: --leader-election-lock-name=spark-operator-controller-lock + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-controller")].args + content: --leader-election-lock-namespace=spark-operator + + - it: Should add metric ports if `prometheus.metrics.enable` is true + set: + prometheus: + metrics: + enable: true + port: 10254 + portName: metrics + asserts: + - contains: + path: spec.template.spec.containers[0].ports + content: + name: metrics + containerPort: 10254 + count: 1 + + - it: Should add environment variables if `controller.env` is set + set: + controller: + env: + - name: ENV_NAME_1 + value: ENV_VALUE_1 + - name: ENV_NAME_2 + valueFrom: + configMapKeyRef: + name: test-configmap + key: test-key + optional: false + asserts: + - contains: + path: spec.template.spec.containers[0].env + content: + name: ENV_NAME_1 + value: ENV_VALUE_1 + - contains: + path: spec.template.spec.containers[0].env + content: + name: ENV_NAME_2 + valueFrom: + configMapKeyRef: + name: test-configmap + key: test-key + optional: false + + - it: Should add environment variable sources if `controller.envFrom` is set + set: + controller: + envFrom: + - configMapRef: + name: test-configmap + optional: false + - secretRef: + name: test-secret + optional: false + asserts: + - contains: + path: spec.template.spec.containers[0].envFrom + content: + configMapRef: + name: test-configmap + optional: false + - contains: + path: spec.template.spec.containers[0].envFrom + content: + secretRef: + name: test-secret + optional: false + + - it: Should add volume mounts if `controller.volumeMounts` is set + set: + controller: + volumeMounts: + - name: volume1 + mountPath: /volume1 + - name: volume2 + mountPath: /volume2 + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: volume1 + mountPath: /volume1 + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: volume2 + mountPath: /volume2 + + - it: Should add resources if `controller.resources` is set + set: + controller: + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: 
"500m" + asserts: + - equal: + path: spec.template.spec.containers[0].resources + value: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" + + - it: Should add container securityContext if `controller.securityContext` is set + set: + controller: + securityContext: + runAsUser: 1000 + runAsGroup: 2000 + fsGroup: 3000 + asserts: + - equal: + path: spec.template.spec.containers[0].securityContext + value: + runAsUser: 1000 + runAsGroup: 2000 + fsGroup: 3000 + + - it: Should add sidecars if `controller.sidecars` is set + set: + controller: + sidecars: + - name: sidecar1 + image: sidecar-image1 + - name: sidecar2 + image: sidecar-image2 + asserts: + - contains: + path: spec.template.spec.containers + content: + name: sidecar1 + image: sidecar-image1 + - contains: + path: spec.template.spec.containers + content: + name: sidecar2 + image: sidecar-image2 + + - it: Should add secrets if `image.pullSecrets` is set + set: + image: + pullSecrets: + - name: test-secret1 + - name: test-secret2 + asserts: + - equal: + path: spec.template.spec.imagePullSecrets[0].name + value: test-secret1 + - equal: + path: spec.template.spec.imagePullSecrets[1].name + value: test-secret2 + + - it: Should add volumes if `controller.volumes` is set + set: + controller: + volumes: + - name: volume1 + emptyDir: {} + - name: volume2 + emptyDir: {} + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: volume1 + emptyDir: {} + count: 1 + - contains: + path: spec.template.spec.volumes + content: + name: volume2 + emptyDir: {} + count: 1 + + - it: Should add nodeSelector if `controller.nodeSelector` is set + set: + controller: + nodeSelector: + key1: value1 + key2: value2 + asserts: + - equal: + path: spec.template.spec.nodeSelector.key1 + value: value1 + - equal: + path: spec.template.spec.nodeSelector.key2 + value: value2 + + - it: Should add affinity if `controller.affinity` is set + set: + controller: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: topology.kubernetes.io/zone + operator: In + values: + - antarctica-east1 + - antarctica-west1 + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + preference: + matchExpressions: + - key: another-node-label-key + operator: In + values: + - another-node-label-value + asserts: + - equal: + path: spec.template.spec.affinity + value: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: topology.kubernetes.io/zone + operator: In + values: + - antarctica-east1 + - antarctica-west1 + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + preference: + matchExpressions: + - key: another-node-label-key + operator: In + values: + - another-node-label-value + + - it: Should add tolerations if `controller.tolerations` is set + set: + controller: + tolerations: + - key: key1 + operator: Equal + value: value1 + effect: NoSchedule + - key: key2 + operator: Exists + effect: NoSchedule + asserts: + - equal: + path: spec.template.spec.tolerations + value: + - key: key1 + operator: Equal + value: value1 + effect: NoSchedule + - key: key2 + operator: Exists + effect: NoSchedule + + - it: Should add priorityClassName if `controller.priorityClassName` is set + set: + controller: + priorityClassName: test-priority-class + asserts: + - equal: + path: spec.template.spec.priorityClassName + value: test-priority-class + + - it: Should add pod securityContext if 
`controller.podSecurityContext` is set + set: + controller: + podSecurityContext: + runAsUser: 1000 + runAsGroup: 2000 + fsGroup: 3000 + asserts: + - equal: + path: spec.template.spec.securityContext + value: + runAsUser: 1000 + runAsGroup: 2000 + fsGroup: 3000 + + - it: Should not contain topologySpreadConstraints if `controller.topologySpreadConstraints` is not set + set: + controller: + topologySpreadConstraints: [] + asserts: + - notExists: + path: spec.template.spec.topologySpreadConstraints + + - it: Should add topologySpreadConstraints if `controller.topologySpreadConstraints` is set and `controller.replicas` is greater than 1 + set: + controller: + replicas: 2 + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + asserts: + - equal: + path: spec.template.spec.topologySpreadConstraints + value: + - labelSelector: + matchLabels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: spark-operator + app.kubernetes.io/name: spark-operator + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + - labelSelector: + matchLabels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: spark-operator + app.kubernetes.io/name: spark-operator + maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + + - it: Should fail if `controller.topologySpreadConstraints` is set and `controller.replicas` is not greater than 1 + set: + controller: + replicas: 1 + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + asserts: + - failedTemplate: + errorMessage: "controller.replicas must be greater than 1 to enable topology spread constraints for controller pods" diff --git a/charts/spark-operator-chart/tests/controller/poddisruptionbudget_test.yaml b/charts/spark-operator-chart/tests/controller/poddisruptionbudget_test.yaml new file mode 100644 index 0000000000..dd3a47bc5a --- /dev/null +++ b/charts/spark-operator-chart/tests/controller/poddisruptionbudget_test.yaml @@ -0,0 +1,68 @@ +# +# Copyright 2024 The Kubeflow authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
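+#
+# A values sketch for the cases below. The template guards on the replica
+# count, so a budget is only rendered with more than one replica; the
+# minAvailable value here is illustrative:
+#
+#   controller:
+#     replicas: 2
+#     podDisruptionBudget:
+#       enable: true
+#       minAvailable: 1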
+# + +suite: Test controller pod disruption budget + +templates: + - controller/poddisruptionbudget.yaml + +release: + name: spark-operator + namespace: spark-operator + +tests: + - it: Should not render podDisruptionBudget if `controller.podDisruptionBudget.enable` is false + set: + controller: + podDisruptionBudget: + enable: false + asserts: + - hasDocuments: + count: 0 + + - it: Should fail if `controller.replicas` is less than 2 when `controller.podDisruptionBudget.enable` is true + set: + controller: + replicas: 1 + podDisruptionBudget: + enable: true + asserts: + - failedTemplate: + errorMessage: "controller.replicas must be greater than 1 to enable pod disruption budget for controller" + + - it: Should render spark operator podDisruptionBudget if `controller.podDisruptionBudget.enable` is true + set: + controller: + replicas: 2 + podDisruptionBudget: + enable: true + asserts: + - containsDocument: + apiVersion: policy/v1 + kind: PodDisruptionBudget + name: spark-operator-controller-pdb + + - it: Should set minAvailable if `controller.podDisruptionBudget.minAvailable` is specified + set: + controller: + replicas: 2 + podDisruptionBudget: + enable: true + minAvailable: 3 + asserts: + - equal: + path: spec.minAvailable + value: 3 diff --git a/charts/spark-operator-chart/tests/controller/rbac_test.yaml b/charts/spark-operator-chart/tests/controller/rbac_test.yaml new file mode 100644 index 0000000000..4a910adcbd --- /dev/null +++ b/charts/spark-operator-chart/tests/controller/rbac_test.yaml @@ -0,0 +1,79 @@ +# +# Copyright 2024 The Kubeflow authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
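+#
+# One way to inspect the documents these assertions run against is to render
+# the template directly (a sketch; `--show-only` is the Helm 3 flag for
+# selecting a single template):
+#
+#   helm template spark-operator charts/spark-operator-chart \
+#     --namespace spark-operator \
+#     --show-only templates/controller/rbac.yaml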
+# + +suite: Test controller rbac + +templates: + - controller/rbac.yaml + +release: + name: spark-operator + namespace: spark-operator + +tests: + - it: Should not create controller RBAC resources if `controller.rbac.create` is false + set: + controller: + rbac: + create: false + asserts: + - hasDocuments: + count: 0 + + - it: Should create controller ClusterRole by default + documentIndex: 0 + asserts: + - containsDocument: + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + name: spark-operator-controller + + - it: Should create controller ClusterRoleBinding by default + documentIndex: 1 + asserts: + - containsDocument: + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + name: spark-operator-controller + - contains: + path: subjects + content: + kind: ServiceAccount + name: spark-operator-controller + namespace: spark-operator + count: 1 + - equal: + path: roleRef + value: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: spark-operator-controller + + - it: Should add extra annotations to controller ClusterRole if `controller.rbac.annotations` is set + set: + controller: + rbac: + annotations: + key1: value1 + key2: value2 + documentIndex: 0 + asserts: + - equal: + path: metadata.annotations.key1 + value: value1 + - equal: + path: metadata.annotations.key2 + value: value2 diff --git a/charts/spark-operator-chart/tests/controller/serviceaccount_test.yaml b/charts/spark-operator-chart/tests/controller/serviceaccount_test.yaml new file mode 100644 index 0000000000..4891a9a1b3 --- /dev/null +++ b/charts/spark-operator-chart/tests/controller/serviceaccount_test.yaml @@ -0,0 +1,67 @@ +# +# Copyright 2024 The Kubeflow authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
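+#
+# A values sketch for the override case covered below (the account name is
+# arbitrary, and the annotation keys are illustrative):
+#
+#   controller:
+#     serviceAccount:
+#       create: true
+#       name: custom-service-account
+#       annotations:
+#         key1: value1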
+# + +suite: Test controller service account + +templates: + - controller/serviceaccount.yaml + +release: + name: spark-operator + namespace: spark-operator + +tests: + - it: Should not create controller service account if `controller.serviceAccount.create` is false + set: + controller: + serviceAccount: + create: false + asserts: + - hasDocuments: + count: 0 + + - it: Should create controller service account by default + asserts: + - containsDocument: + apiVersion: v1 + kind: ServiceAccount + name: spark-operator-controller + + - it: Should use the specified service account name if `controller.serviceAccount.name` is set + set: + controller: + serviceAccount: + name: custom-service-account + asserts: + - containsDocument: + apiVersion: v1 + kind: ServiceAccount + name: custom-service-account + + - it: Should add extra annotations if `controller.serviceAccount.annotations` is set + set: + controller: + serviceAccount: + annotations: + key1: value1 + key2: value2 + asserts: + - equal: + path: metadata.annotations.key1 + value: value1 + - equal: + path: metadata.annotations.key2 + value: value2 diff --git a/charts/spark-operator-chart/tests/deployment_test.yaml b/charts/spark-operator-chart/tests/deployment_test.yaml deleted file mode 100644 index 055d3b25fa..0000000000 --- a/charts/spark-operator-chart/tests/deployment_test.yaml +++ /dev/null @@ -1,352 +0,0 @@ -suite: Test spark operator deployment - -templates: - - deployment.yaml - -release: - name: spark-operator - -tests: - - it: Should contain namespace arg when sparkJobNamespaces is equal to 1 - set: - sparkJobNamespaces: - - ns1 - asserts: - - contains: - path: spec.template.spec.containers[0].args - content: -namespace=ns1 - - - it: Should add pod annotations if podAnnotations is set - set: - podAnnotations: - key1: value1 - key2: value2 - asserts: - - equal: - path: spec.template.metadata.annotations.key1 - value: value1 - - equal: - path: spec.template.metadata.annotations.key2 - value: value2 - - - it: Should add prometheus annotations if metrics.enable is true - set: - metrics: - enable: true - port: 10254 - endpoint: /metrics - asserts: - - equal: - path: spec.template.metadata.annotations["prometheus.io/scrape"] - value: "true" - - equal: - path: spec.template.metadata.annotations["prometheus.io/port"] - value: "10254" - - equal: - path: spec.template.metadata.annotations["prometheus.io/path"] - value: /metrics - - - it: Should add secrets if imagePullSecrets is set - set: - imagePullSecrets: - - name: test-secret1 - - name: test-secret2 - asserts: - - equal: - path: spec.template.spec.imagePullSecrets[0].name - value: test-secret1 - - equal: - path: spec.template.spec.imagePullSecrets[1].name - value: test-secret2 - - - it: Should add pod securityContext if podSecurityContext is set - set: - podSecurityContext: - runAsUser: 1000 - runAsGroup: 2000 - fsGroup: 3000 - asserts: - - equal: - path: spec.template.spec.securityContext.runAsUser - value: 1000 - - equal: - path: spec.template.spec.securityContext.runAsGroup - value: 2000 - - equal: - path: spec.template.spec.securityContext.fsGroup - value: 3000 - - - it: Should use the specified image repository if image.repository and image.tag is set - set: - image: - repository: test-repository - tag: test-tag - asserts: - - equal: - path: spec.template.spec.containers[0].image - value: test-repository:test-tag - - - it: Should use the specified image pull policy if image.pullPolicy is set - set: - image: - pullPolicy: Always - asserts: - - equal: - path: 
spec.template.spec.containers[0].imagePullPolicy - value: Always - - - it: Should add container securityContext if securityContext is set - set: - securityContext: - runAsUser: 1000 - runAsGroup: 2000 - fsGroup: 3000 - asserts: - - equal: - path: spec.template.spec.containers[0].securityContext.runAsUser - value: 1000 - - equal: - path: spec.template.spec.containers[0].securityContext.runAsGroup - value: 2000 - - equal: - path: spec.template.spec.containers[0].securityContext.fsGroup - value: 3000 - - - it: Should add metric ports if metrics.enable is true - set: - metrics: - enable: true - port: 10254 - portName: metrics - asserts: - - contains: - path: spec.template.spec.containers[0].ports - content: - name: metrics - containerPort: 10254 - count: 1 - - - it: Should add webhook ports if webhook.enable is true - set: - webhook: - enable: true - port: 8080 - portName: webhook - asserts: - - contains: - path: spec.template.spec.containers[0].ports - content: - name: webhook - containerPort: 8080 - count: 1 - - - it: Should add resources if resources is set - set: - resources: - requests: - memory: "64Mi" - cpu: "250m" - limits: - memory: "128Mi" - cpu: "500m" - asserts: - - equal: - path: spec.template.spec.containers[0].resources - value: - requests: - memory: "64Mi" - cpu: "250m" - limits: - memory: "128Mi" - cpu: "500m" - - - it: Should add sidecars if sidecars is set - set: - sidecars: - - name: sidecar1 - image: sidecar-image1 - - name: sidecar2 - image: sidecar-image2 - asserts: - - contains: - path: spec.template.spec.containers - content: - name: sidecar1 - image: sidecar-image1 - count: 1 - - contains: - path: spec.template.spec.containers - content: - name: sidecar2 - image: sidecar-image2 - count: 1 - - - it: Should add volumes if volumes is set - set: - volumes: - - name: volume1 - emptyDir: {} - - name: volume2 - emptyDir: {} - asserts: - - contains: - path: spec.template.spec.volumes - content: - name: volume1 - emptyDir: {} - count: 1 - - contains: - path: spec.template.spec.volumes - content: - name: volume2 - emptyDir: {} - count: 1 - - - it: Should add volume mounts if volumeMounts is set - set: - volumeMounts: - - name: volume1 - mountPath: /volume1 - - name: volume2 - mountPath: /volume2 - asserts: - - contains: - path: spec.template.spec.containers[0].volumeMounts - content: - name: volume1 - mountPath: /volume1 - count: 1 - - contains: - path: spec.template.spec.containers[0].volumeMounts - content: - name: volume2 - mountPath: /volume2 - count: 1 - - - it: Should add nodeSelector if nodeSelector is set - set: - nodeSelector: - key1: value1 - key2: value2 - asserts: - - equal: - path: spec.template.spec.nodeSelector.key1 - value: value1 - - equal: - path: spec.template.spec.nodeSelector.key2 - value: value2 - - - it: Should add affinity if affinity is set - set: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: topology.kubernetes.io/zone - operator: In - values: - - antarctica-east1 - - antarctica-west1 - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 1 - preference: - matchExpressions: - - key: another-node-label-key - operator: In - values: - - another-node-label-value - asserts: - - equal: - path: spec.template.spec.affinity - value: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: topology.kubernetes.io/zone - operator: In - values: - - antarctica-east1 - - antarctica-west1 - 
preferredDuringSchedulingIgnoredDuringExecution: - - weight: 1 - preference: - matchExpressions: - - key: another-node-label-key - operator: In - values: - - another-node-label-value - - - it: Should add tolerations if tolerations is set - set: - tolerations: - - key: key1 - operator: Equal - value: value1 - effect: NoSchedule - - key: key2 - operator: Exists - effect: NoSchedule - asserts: - - equal: - path: spec.template.spec.tolerations - value: - - key: key1 - operator: Equal - value: value1 - effect: NoSchedule - - key: key2 - operator: Exists - effect: NoSchedule - - - it: Should not contain topologySpreadConstraints if topologySpreadConstraints is not set - set: - topologySpreadConstraints: [] - asserts: - - notExists: - path: spec.template.spec.topologySpreadConstraints - - - it: Should add topologySpreadConstraints if topologySpreadConstraints is set and replicaCount is greater than 1 - set: - replicaCount: 2 - topologySpreadConstraints: - - maxSkew: 1 - topologyKey: topology.kubernetes.io/zone - whenUnsatisfiable: ScheduleAnyway - - maxSkew: 1 - topologyKey: kubernetes.io/hostname - whenUnsatisfiable: DoNotSchedule - asserts: - - equal: - path: spec.template.spec.topologySpreadConstraints - value: - - labelSelector: - matchLabels: - app.kubernetes.io/instance: spark-operator - app.kubernetes.io/name: spark-operator - maxSkew: 1 - topologyKey: topology.kubernetes.io/zone - whenUnsatisfiable: ScheduleAnyway - - labelSelector: - matchLabels: - app.kubernetes.io/instance: spark-operator - app.kubernetes.io/name: spark-operator - maxSkew: 1 - topologyKey: kubernetes.io/hostname - whenUnsatisfiable: DoNotSchedule - - - it: Should fail if topologySpreadConstraints is set and replicaCount is not greater than 1 - set: - replicaCount: 1 - topologySpreadConstraints: - - maxSkew: 1 - topologyKey: topology.kubernetes.io/zone - whenUnsatisfiable: ScheduleAnyway - - maxSkew: 1 - topologyKey: kubernetes.io/hostname - whenUnsatisfiable: DoNotSchedule - asserts: - - failedTemplate: - errorMessage: "replicaCount must be greater than 1 to enable topologySpreadConstraints." - \ No newline at end of file diff --git a/charts/spark-operator-chart/tests/poddisruptionbudget_test.yaml b/charts/spark-operator-chart/tests/poddisruptionbudget_test.yaml deleted file mode 100644 index 56b9e4fe3d..0000000000 --- a/charts/spark-operator-chart/tests/poddisruptionbudget_test.yaml +++ /dev/null @@ -1,38 +0,0 @@ -suite: Test spark operator podDisruptionBudget - -templates: - - poddisruptionbudget.yaml - -release: - name: spark-operator - -tests: - - it: Should not render spark operator podDisruptionBudget if podDisruptionBudget.enable is false - set: - podDisruptionBudget: - enable: false - asserts: - - hasDocuments: - count: 0 - - - it: Should render spark operator podDisruptionBudget if podDisruptionBudget.enable is true - set: - replicaCount: 2 - podDisruptionBudget: - enable: true - asserts: - - containsDocument: - apiVersion: policy/v1 - kind: PodDisruptionBudget - name: spark-operator-pdb - - - it: Should set minAvailable from values - set: - replicaCount: 2 - podDisruptionBudget: - enable: true - minAvailable: 3 - asserts: - - equal: - path: spec.minAvailable - value: 3 diff --git a/charts/spark-operator-chart/tests/prometheus/podmonitor_test.yaml b/charts/spark-operator-chart/tests/prometheus/podmonitor_test.yaml new file mode 100644 index 0000000000..7e8bc54aa8 --- /dev/null +++ b/charts/spark-operator-chart/tests/prometheus/podmonitor_test.yaml @@ -0,0 +1,102 @@ +# +# Copyright 2024 The Kubeflow authors. 
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+suite: Test prometheus pod monitor
+
+templates:
+  - prometheus/podmonitor.yaml
+
+release:
+  name: spark-operator
+  namespace: spark-operator
+
+tests:
+  - it: Should not create pod monitor by default
+    asserts:
+      - hasDocuments:
+          count: 0
+
+  - it: Should fail if `prometheus.podMonitor.create` is true and `prometheus.metrics.enable` is false
+    set:
+      prometheus:
+        metrics:
+          enable: false
+        podMonitor:
+          create: true
+    asserts:
+      - failedTemplate:
+          errorMessage: "`metrics.enable` must be set to true when `podMonitor.create` is true."
+
+  - it: Should fail if the cluster does not support `monitoring.coreos.com/v1/PodMonitor` even if `prometheus.podMonitor.create` and `prometheus.metrics.enable` are both true
+    set:
+      prometheus:
+        metrics:
+          enable: true
+        podMonitor:
+          create: true
+    asserts:
+      - failedTemplate:
+          errorMessage: "The cluster does not support the required API version `monitoring.coreos.com/v1` for `PodMonitor`."
+
+  - it: Should create pod monitor if the cluster supports `monitoring.coreos.com/v1/PodMonitor` and `prometheus.podMonitor.create` and `prometheus.metrics.enable` are both true
+    capabilities:
+      apiVersions:
+        - monitoring.coreos.com/v1/PodMonitor
+    set:
+      prometheus:
+        metrics:
+          enable: true
+        podMonitor:
+          create: true
+    asserts:
+      - containsDocument:
+          apiVersion: monitoring.coreos.com/v1
+          kind: PodMonitor
+          name: spark-operator-podmonitor
+
+  - it: Should use the specified labels, jobLabel and podMetricsEndpoint
+    capabilities:
+      apiVersions:
+        - monitoring.coreos.com/v1/PodMonitor
+    set:
+      prometheus:
+        metrics:
+          enable: true
+          portName: custom-port
+        podMonitor:
+          create: true
+          labels:
+            key1: value1
+            key2: value2
+          jobLabel: custom-job-label
+          podMetricsEndpoint:
+            scheme: https
+            interval: 10s
+    asserts:
+      - equal:
+          path: metadata.labels
+          value:
+            key1: value1
+            key2: value2
+      - equal:
+          path: spec.podMetricsEndpoints[0]
+          value:
+            port: custom-port
+            scheme: https
+            interval: 10s
+      - equal:
+          path: spec.jobLabel
+          value: custom-job-label
diff --git a/charts/spark-operator-chart/tests/rbac_test.yaml b/charts/spark-operator-chart/tests/rbac_test.yaml
deleted file mode 100644
index f411c4def1..0000000000
--- a/charts/spark-operator-chart/tests/rbac_test.yaml
+++ /dev/null
@@ -1,90 +0,0 @@
-suite: Test spark operator rbac
-
-templates:
-  - rbac.yaml
-
-release:
-  name: spark-operator
-
-tests:
-  - it: Should not render spark operator rbac resources if rbac.create is false and rbac.createClusterRole is false
-    set:
-      rbac:
-        create: false
-        createClusterRole: false
-    asserts:
-      - hasDocuments:
-          count: 0
-
-  - it: Should render spark operator cluster role if rbac.create is true
-    set:
-      rbac:
-        create: true
-    documentIndex: 0
-    asserts:
-      - containsDocument:
-          apiVersion: rbac.authorization.k8s.io/v1
-          kind: ClusterRole
-          name: spark-operator
-
-  - it: Should render spark operator cluster role if rbac.createClusterRole is true
-    set:
-      rbac:
-        createClusterRole: true
-    documentIndex: 0
-    asserts:
-      - 
containsDocument: - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - name: spark-operator - - - it: Should render spark operator cluster role binding if rbac.create is true - set: - rbac: - create: true - documentIndex: 1 - asserts: - - containsDocument: - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - name: spark-operator - - - it: Should render spark operator cluster role binding correctly if rbac.createClusterRole is true - set: - rbac: - createClusterRole: true - release: - documentIndex: 1 - asserts: - - containsDocument: - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - name: spark-operator - - contains: - path: subjects - content: - kind: ServiceAccount - name: spark-operator - namespace: NAMESPACE - count: 1 - - equal: - path: roleRef - value: - kind: ClusterRole - name: spark-operator - apiGroup: rbac.authorization.k8s.io - - - it: Should add extra annotations to spark operator cluster role if rbac.annotations is set - set: - rbac: - annotations: - key1: value1 - key2: value2 - documentIndex: 0 - asserts: - - equal: - path: metadata.annotations.key1 - value: value1 - - equal: - path: metadata.annotations.key2 - value: value2 diff --git a/charts/spark-operator-chart/tests/serviceaccount_test.yaml b/charts/spark-operator-chart/tests/serviceaccount_test.yaml deleted file mode 100644 index a9a1e39c60..0000000000 --- a/charts/spark-operator-chart/tests/serviceaccount_test.yaml +++ /dev/null @@ -1,54 +0,0 @@ -suite: Test spark operator service account - -templates: - - serviceaccount.yaml - -release: - name: spark-operator - -tests: - - it: Should not render service account if serviceAccounts.sparkoperator.create is false - set: - serviceAccounts: - sparkoperator: - create: false - asserts: - - hasDocuments: - count: 0 - - - it: Should render service account if serviceAccounts.sparkoperator.create is true - set: - serviceAccounts: - sparkoperator: - create: true - asserts: - - containsDocument: - apiVersion: v1 - kind: ServiceAccount - name: spark-operator - - - it: Should use the specified service account name if serviceAccounts.sparkoperator.name is set - set: - serviceAccounts: - sparkoperator: - name: custom-service-account - asserts: - - containsDocument: - apiVersion: v1 - kind: ServiceAccount - name: custom-service-account - - - it: Should add extra annotations if serviceAccounts.sparkoperator.annotations is set - set: - serviceAccounts: - sparkoperator: - annotations: - key1: value1 - key2: value2 - asserts: - - equal: - path: metadata.annotations.key1 - value: value1 - - equal: - path: metadata.annotations.key2 - value: value2 diff --git a/charts/spark-operator-chart/tests/spark-rbac_test.yaml b/charts/spark-operator-chart/tests/spark-rbac_test.yaml deleted file mode 100644 index 6d194fa3a6..0000000000 --- a/charts/spark-operator-chart/tests/spark-rbac_test.yaml +++ /dev/null @@ -1,133 +0,0 @@ -suite: Test spark rbac - -templates: - - spark-rbac.yaml - -release: - name: spark-operator - -tests: - - it: Should not render spark rbac resources if rbac.create is false and rbac.createRole is false - set: - rbac: - create: false - createRole: false - asserts: - - hasDocuments: - count: 0 - - - it: Should render spark role if rbac.create is true - set: - rbac: - create: true - documentIndex: 0 - asserts: - - containsDocument: - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - name: spark-role - - - it: Should render spark role if rbac.createRole is true - set: - rbac: - createRole: true - documentIndex: 0 - asserts: - - 
containsDocument: - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - name: spark-role - - - it: Should render spark role binding if rbac.create is true - set: - rbac: - create: true - documentIndex: 1 - asserts: - - containsDocument: - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - name: spark - - - it: Should render spark role binding if rbac.createRole is true - set: - rbac: - createRole: true - documentIndex: 1 - asserts: - - containsDocument: - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - name: spark - - - it: Should create a single spark role with namespace "" by default - documentIndex: 0 - asserts: - - containsDocument: - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - name: spark-role - namespace: "" - - - it: Should create a single spark role binding with namespace "" by default - values: - - ../values.yaml - documentIndex: 1 - asserts: - - containsDocument: - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - name: spark - namespace: "" - - - it: Should render multiple spark roles if sparkJobNamespaces is set with multiple values - set: - sparkJobNamespaces: - - ns1 - - ns2 - documentIndex: 0 - asserts: - - containsDocument: - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - name: spark-role - namespace: ns1 - - - it: Should render multiple spark role bindings if sparkJobNamespaces is set with multiple values - set: - sparkJobNamespaces: - - ns1 - - ns2 - documentIndex: 1 - asserts: - - containsDocument: - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - name: spark - namespace: ns1 - - - it: Should render multiple spark roles if sparkJobNamespaces is set with multiple values - set: - sparkJobNamespaces: - - ns1 - - ns2 - documentIndex: 2 - asserts: - - containsDocument: - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - name: spark-role - namespace: ns2 - - - it: Should render multiple spark role bindings if sparkJobNamespaces is set with multiple values - set: - sparkJobNamespaces: - - ns1 - - ns2 - documentIndex: 3 - asserts: - - containsDocument: - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - name: spark - namespace: ns2 diff --git a/charts/spark-operator-chart/tests/spark-serviceaccount_test.yaml b/charts/spark-operator-chart/tests/spark-serviceaccount_test.yaml deleted file mode 100644 index f7140f84fb..0000000000 --- a/charts/spark-operator-chart/tests/spark-serviceaccount_test.yaml +++ /dev/null @@ -1,112 +0,0 @@ -suite: Test spark service account - -templates: - - spark-serviceaccount.yaml - -release: - name: spark-operator - -tests: - - it: Should not render service account if serviceAccounts.spark.create is false - set: - serviceAccounts: - spark: - create: false - asserts: - - hasDocuments: - count: 0 - - - it: Should render service account if serviceAccounts.spark.create is true - set: - serviceAccounts: - spark: - create: true - asserts: - - containsDocument: - apiVersion: v1 - kind: ServiceAccount - name: spark-operator-spark - - - it: Should use the specified service account name if serviceAccounts.spark.name is set - set: - serviceAccounts: - spark: - name: spark - asserts: - - containsDocument: - apiVersion: v1 - kind: ServiceAccount - name: spark - - - it: Should add extra annotations if serviceAccounts.spark.annotations is set - set: - serviceAccounts: - spark: - annotations: - key1: value1 - key2: value2 - asserts: - - equal: - path: metadata.annotations.key1 - value: value1 - - equal: - path: metadata.annotations.key2 - value: value2 - - - it: Should 
create multiple service accounts if sparkJobNamespaces is set - set: - serviceAccounts: - spark: - name: spark - sparkJobNamespaces: - - ns1 - - ns2 - - ns3 - documentIndex: 0 - asserts: - - hasDocuments: - count: 3 - - containsDocument: - apiVersion: v1 - kind: ServiceAccount - name: spark - namespace: ns1 - - - - it: Should create multiple service accounts if sparkJobNamespaces is set - set: - serviceAccounts: - spark: - name: spark - sparkJobNamespaces: - - ns1 - - ns2 - - ns3 - documentIndex: 1 - asserts: - - hasDocuments: - count: 3 - - containsDocument: - apiVersion: v1 - kind: ServiceAccount - name: spark - namespace: ns2 - - - it: Should create multiple service accounts if sparkJobNamespaces is set - set: - serviceAccounts: - spark: - name: spark - sparkJobNamespaces: - - ns1 - - ns2 - - ns3 - documentIndex: 2 - asserts: - - hasDocuments: - count: 3 - - containsDocument: - apiVersion: v1 - kind: ServiceAccount - name: spark - namespace: ns3 diff --git a/charts/spark-operator-chart/tests/spark/rbac_test.yaml b/charts/spark-operator-chart/tests/spark/rbac_test.yaml new file mode 100644 index 0000000000..2de678b542 --- /dev/null +++ b/charts/spark-operator-chart/tests/spark/rbac_test.yaml @@ -0,0 +1,123 @@ +# +# Copyright 2024 The Kubeflow authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+suite: Test spark rbac
+
+templates:
+  - spark/rbac.yaml
+
+release:
+  name: spark-operator
+  namespace: spark-operator
+
+tests:
+  - it: Should not create spark RBAC resources if `spark.rbac.create` is false
+    set:
+      spark:
+        rbac:
+          create: false
+    asserts:
+      - hasDocuments:
+          count: 0
+
+  - it: Should create spark role by default
+    documentIndex: 0
+    asserts:
+      - containsDocument:
+          apiVersion: rbac.authorization.k8s.io/v1
+          kind: Role
+          name: spark-operator-spark
+
+  - it: Should create spark role binding by default
+    set:
+      spark:
+        rbac:
+          create: true
+    documentIndex: 1
+    asserts:
+      - containsDocument:
+          apiVersion: rbac.authorization.k8s.io/v1
+          kind: RoleBinding
+          name: spark-operator-spark
+
+  - it: Should create a single spark role with namespace "" by default
+    documentIndex: 0
+    asserts:
+      - containsDocument:
+          apiVersion: rbac.authorization.k8s.io/v1
+          kind: Role
+          name: spark-operator-spark
+
+  - it: Should create a single spark role binding with namespace "" by default
+    documentIndex: 1
+    asserts:
+      - containsDocument:
+          apiVersion: rbac.authorization.k8s.io/v1
+          kind: RoleBinding
+          name: spark-operator-spark
+          namespace: ""
+
+  - it: Should create multiple spark roles if `spark.jobNamespaces` is set with multiple values
+    set:
+      spark.jobNamespaces:
+        - ns1
+        - ns2
+    documentIndex: 0
+    asserts:
+      - containsDocument:
+          apiVersion: rbac.authorization.k8s.io/v1
+          kind: Role
+          name: spark-operator-spark
+          namespace: ns1
+
+  - it: Should create multiple spark role bindings if `spark.jobNamespaces` is set with multiple values
+    set:
+      spark.jobNamespaces:
+        - ns1
+        - ns2
+    documentIndex: 1
+    asserts:
+      - containsDocument:
+          apiVersion: rbac.authorization.k8s.io/v1
+          kind: RoleBinding
+          name: spark-operator-spark
+          namespace: ns1
+
+  - it: Should create multiple spark roles if `spark.jobNamespaces` is set with multiple values
+    set:
+      spark.jobNamespaces:
+        - ns1
+        - ns2
+    documentIndex: 2
+    asserts:
+      - containsDocument:
+          apiVersion: rbac.authorization.k8s.io/v1
+          kind: Role
+          name: spark-operator-spark
+          namespace: ns2
+
+  - it: Should create multiple spark role bindings if `spark.jobNamespaces` is set with multiple values
+    set:
+      spark.jobNamespaces:
+        - ns1
+        - ns2
+    documentIndex: 3
+    asserts:
+      - containsDocument:
+          apiVersion: rbac.authorization.k8s.io/v1
+          kind: RoleBinding
+          name: spark-operator-spark
+          namespace: ns2
diff --git a/charts/spark-operator-chart/tests/spark/serviceaccount_test.yaml b/charts/spark-operator-chart/tests/spark/serviceaccount_test.yaml
new file mode 100644
index 0000000000..a1f1898b43
--- /dev/null
+++ b/charts/spark-operator-chart/tests/spark/serviceaccount_test.yaml
@@ -0,0 +1,124 @@
+#
+# Copyright 2024 The Kubeflow authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# + +suite: Test spark service account + +templates: + - spark/serviceaccount.yaml + +release: + name: spark-operator + namespace: spark-operator + +tests: + - it: Should not create service account if `spark.serviceAccount.create` is false + set: + spark: + serviceAccount: + create: false + asserts: + - hasDocuments: + count: 0 + + - it: Should create service account by default + asserts: + - containsDocument: + apiVersion: v1 + kind: ServiceAccount + name: spark-operator-spark + + - it: Should use the specified service account name if `spark.serviceAccount.name` is set + set: + spark: + serviceAccount: + name: spark + asserts: + - containsDocument: + apiVersion: v1 + kind: ServiceAccount + name: spark + + - it: Should add extra annotations if `spark.serviceAccount.annotations` is set + set: + spark: + serviceAccount: + annotations: + key1: value1 + key2: value2 + asserts: + - equal: + path: metadata.annotations.key1 + value: value1 + - equal: + path: metadata.annotations.key2 + value: value2 + + - it: Should create multiple service accounts if `spark.jobNamespaces` is set + set: + spark: + serviceAccount: + name: spark + jobNamespaces: + - ns1 + - ns2 + - ns3 + documentIndex: 0 + asserts: + - hasDocuments: + count: 3 + - containsDocument: + apiVersion: v1 + kind: ServiceAccount + name: spark + namespace: ns1 + + - it: Should create multiple service accounts if `spark.jobNamespaces` is set + set: + spark: + serviceAccount: + name: spark + jobNamespaces: + - ns1 + - ns2 + - ns3 + documentIndex: 1 + asserts: + - hasDocuments: + count: 3 + - containsDocument: + apiVersion: v1 + kind: ServiceAccount + name: spark + namespace: ns2 + + - it: Should create multiple service accounts if `spark.jobNamespaces` is set + set: + spark: + serviceAccount: + name: spark + jobNamespaces: + - ns1 + - ns2 + - ns3 + documentIndex: 2 + asserts: + - hasDocuments: + count: 3 + - containsDocument: + apiVersion: v1 + kind: ServiceAccount + name: spark + namespace: ns3 diff --git a/charts/spark-operator-chart/tests/webhook/deployment_test.yaml b/charts/spark-operator-chart/tests/webhook/deployment_test.yaml new file mode 100644 index 0000000000..14c34f7a88 --- /dev/null +++ b/charts/spark-operator-chart/tests/webhook/deployment_test.yaml @@ -0,0 +1,504 @@ +# +# Copyright 2024 The Kubeflow authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +suite: Test webhook deployment + +templates: + - webhook/deployment.yaml + +release: + name: spark-operator + namespace: spark-operator + +tests: + - it: Should create webhook deployment by default + asserts: + - containsDocument: + apiVersion: apps/v1 + kind: Deployment + name: spark-operator-webhook + + - it: Should set replicas if `webhook.replicas` is set + set: + webhook: + replicas: 10 + asserts: + - equal: + path: spec.replicas + value: 10 + + - it: Should add pod labels if `webhook.labels` is set + set: + webhook: + labels: + key1: value1 + key2: value2 + asserts: + - equal: + path: spec.template.metadata.labels.key1 + value: value1 + - equal: + path: spec.template.metadata.labels.key2 + value: value2 + + - it: Should add pod annotations if `webhook.annotations` is set + set: + webhook: + annotations: + key1: value1 + key2: value2 + asserts: + - equal: + path: spec.template.metadata.annotations.key1 + value: value1 + - equal: + path: spec.template.metadata.annotations.key2 + value: value2 + + - it: Should use the specified image repository if `image.registry`, `image.repository` and `image.tag` are set + set: + image: + registry: test-registry + repository: test-repository + tag: test-tag + asserts: + - equal: + path: spec.template.spec.containers[0].image + value: test-registry/test-repository:test-tag + + - it: Should use the specified image pull policy if `image.pullPolicy` is set + set: + image: + pullPolicy: Always + asserts: + - equal: + path: spec.template.spec.containers[0].imagePullPolicy + value: Always + + - it: Should contain `--zap-log-level` arg if `webhook.logLevel` is set + set: + webhook: + logLevel: debug + asserts: + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-webhook")].args + content: --zap-log-level=debug + + - it: Should contain `--namespaces` arg if `spark.jobNamespaces` is set + set: + spark.jobNamespaces: + - ns1 + - ns2 + asserts: + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-webhook")].args + content: --namespaces=ns1,ns2 + + - it: Should contain `--enable-metrics` arg if `prometheus.metrics.enable` is set to `true` + set: + prometheus: + metrics: + enable: true + port: 12345 + portName: test-port + endpoint: /test-endpoint + prefix: test-prefix + asserts: + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-webhook")].args + content: --enable-metrics=true + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-webhook")].args + content: --metrics-bind-address=:12345 + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-webhook")].args + content: --metrics-endpoint=/test-endpoint + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-webhook")].args + content: --metrics-prefix=test-prefix + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-webhook")].args + content: --metrics-labels=app_type + + - it: Should enable leader election by default + asserts: + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-webhook")].args + content: --leader-election=true + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-webhook")].args + content: --leader-election-lock-name=spark-operator-webhook-lock + - contains: + path: spec.template.spec.containers[?(@.name=="spark-operator-webhook")].args + content: --leader-election-lock-namespace=spark-operator + + - it: Should add webhook port + set: + webhook: + port: 12345 + portName: 
test-port + asserts: + - contains: + path: spec.template.spec.containers[0].ports + content: + name: test-port + containerPort: 12345 + + - it: Should add metric port if `prometheus.metrics.enable` is true + set: + prometheus: + metrics: + enable: true + port: 10254 + portName: metrics + asserts: + - contains: + path: spec.template.spec.containers[0].ports + content: + name: metrics + containerPort: 10254 + count: 1 + + - it: Should add environment variables if `webhook.env` is set + set: + webhook: + env: + - name: ENV_NAME_1 + value: ENV_VALUE_1 + - name: ENV_NAME_2 + valueFrom: + configMapKeyRef: + name: test-configmap + key: test-key + optional: false + asserts: + - contains: + path: spec.template.spec.containers[0].env + content: + name: ENV_NAME_1 + value: ENV_VALUE_1 + - contains: + path: spec.template.spec.containers[0].env + content: + name: ENV_NAME_2 + valueFrom: + configMapKeyRef: + name: test-configmap + key: test-key + optional: false + + - it: Should add environment variable sources if `webhook.envFrom` is set + set: + webhook: + envFrom: + - configMapRef: + name: test-configmap + optional: false + - secretRef: + name: test-secret + optional: false + asserts: + - contains: + path: spec.template.spec.containers[0].envFrom + content: + configMapRef: + name: test-configmap + optional: false + - contains: + path: spec.template.spec.containers[0].envFrom + content: + secretRef: + name: test-secret + optional: false + + - it: Should add volume mounts if `webhook.volumeMounts` is set + set: + webhook: + volumeMounts: + - name: volume1 + mountPath: /volume1 + - name: volume2 + mountPath: /volume2 + asserts: + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: volume1 + mountPath: /volume1 + count: 1 + - contains: + path: spec.template.spec.containers[0].volumeMounts + content: + name: volume2 + mountPath: /volume2 + count: 1 + + - it: Should add resources if `webhook.resources` is set + set: + webhook: + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" + asserts: + - equal: + path: spec.template.spec.containers[0].resources + value: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" + + - it: Should add container securityContext if `webhook.securityContext` is set + set: + webhook: + securityContext: + runAsUser: 1000 + runAsGroup: 2000 + fsGroup: 3000 + asserts: + - equal: + path: spec.template.spec.containers[0].securityContext.runAsUser + value: 1000 + - equal: + path: spec.template.spec.containers[0].securityContext.runAsGroup + value: 2000 + - equal: + path: spec.template.spec.containers[0].securityContext.fsGroup + value: 3000 + + - it: Should add sidecars if `webhook.sidecars` is set + set: + webhook: + sidecars: + - name: sidecar1 + image: sidecar-image1 + - name: sidecar2 + image: sidecar-image2 + asserts: + - contains: + path: spec.template.spec.containers + content: + name: sidecar1 + image: sidecar-image1 + - contains: + path: spec.template.spec.containers + content: + name: sidecar2 + image: sidecar-image2 + + - it: Should add secrets if `image.pullSecrets` is set + set: + image: + pullSecrets: + - name: test-secret1 + - name: test-secret2 + asserts: + - equal: + path: spec.template.spec.imagePullSecrets[0].name + value: test-secret1 + - equal: + path: spec.template.spec.imagePullSecrets[1].name + value: test-secret2 + + - it: Should add volumes if `webhook.volumes` is set + set: + webhook: + volumes: + - name: volume1 + emptyDir: {} + - name: volume2 + 
emptyDir: {} + asserts: + - contains: + path: spec.template.spec.volumes + content: + name: volume1 + emptyDir: {} + count: 1 + - contains: + path: spec.template.spec.volumes + content: + name: volume2 + emptyDir: {} + count: 1 + + - it: Should add nodeSelector if `webhook.nodeSelector` is set + set: + webhook: + nodeSelector: + key1: value1 + key2: value2 + asserts: + - equal: + path: spec.template.spec.nodeSelector.key1 + value: value1 + - equal: + path: spec.template.spec.nodeSelector.key2 + value: value2 + + - it: Should add affinity if `webhook.affinity` is set + set: + webhook: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: topology.kubernetes.io/zone + operator: In + values: + - antarctica-east1 + - antarctica-west1 + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + preference: + matchExpressions: + - key: another-node-label-key + operator: In + values: + - another-node-label-value + asserts: + - equal: + path: spec.template.spec.affinity + value: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: topology.kubernetes.io/zone + operator: In + values: + - antarctica-east1 + - antarctica-west1 + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + preference: + matchExpressions: + - key: another-node-label-key + operator: In + values: + - another-node-label-value + + - it: Should add tolerations if `webhook.tolerations` is set + set: + webhook: + tolerations: + - key: key1 + operator: Equal + value: value1 + effect: NoSchedule + - key: key2 + operator: Exists + effect: NoSchedule + asserts: + - equal: + path: spec.template.spec.tolerations + value: + - key: key1 + operator: Equal + value: value1 + effect: NoSchedule + - key: key2 + operator: Exists + effect: NoSchedule + + - it: Should add priorityClassName if `webhook.priorityClassName` is set + set: + webhook: + priorityClassName: test-priority-class + asserts: + - equal: + path: spec.template.spec.priorityClassName + value: test-priority-class + + - it: Should add pod securityContext if `webhook.podSecurityContext` is set + set: + webhook: + podSecurityContext: + runAsUser: 1000 + runAsGroup: 2000 + fsGroup: 3000 + asserts: + - equal: + path: spec.template.spec.securityContext.runAsUser + value: 1000 + - equal: + path: spec.template.spec.securityContext.runAsGroup + value: 2000 + - equal: + path: spec.template.spec.securityContext.fsGroup + value: 3000 + + - it: Should not contain topologySpreadConstraints if `webhook.topologySpreadConstraints` is not set + set: + webhook: + topologySpreadConstraints: [] + asserts: + - notExists: + path: spec.template.spec.topologySpreadConstraints + + - it: Should add topologySpreadConstraints if `webhook.topologySpreadConstraints` is set and `webhook.replicas` is greater than 1 + set: + webhook: + replicas: 2 + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + - maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + asserts: + - equal: + path: spec.template.spec.topologySpreadConstraints + value: + - labelSelector: + matchLabels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: spark-operator + app.kubernetes.io/name: spark-operator + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + - labelSelector: + matchLabels: + app.kubernetes.io/component: webhook + 
                  app.kubernetes.io/instance: spark-operator
+                  app.kubernetes.io/name: spark-operator
+              maxSkew: 1
+              topologyKey: kubernetes.io/hostname
+              whenUnsatisfiable: DoNotSchedule
+
+  - it: Should fail if `webhook.topologySpreadConstraints` is set and `webhook.replicas` is not greater than 1
+    set:
+      webhook:
+        replicas: 1
+        topologySpreadConstraints:
+          - maxSkew: 1
+            topologyKey: topology.kubernetes.io/zone
+            whenUnsatisfiable: ScheduleAnyway
+          - maxSkew: 1
+            topologyKey: kubernetes.io/hostname
+            whenUnsatisfiable: DoNotSchedule
+    asserts:
+      - failedTemplate:
+          errorMessage: "webhook.replicas must be greater than 1 to enable topology spread constraints for webhook pods"
diff --git a/charts/spark-operator-chart/tests/webhook/mutatingwebhookconfiguration_test.yaml b/charts/spark-operator-chart/tests/webhook/mutatingwebhookconfiguration_test.yaml
new file mode 100644
index 0000000000..54273df187
--- /dev/null
+++ b/charts/spark-operator-chart/tests/webhook/mutatingwebhookconfiguration_test.yaml
@@ -0,0 +1,78 @@
+#
+# Copyright 2024 The Kubeflow authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+suite: Test mutating webhook configuration
+
+templates:
+  - webhook/mutatingwebhookconfiguration.yaml
+
+release:
+  name: spark-operator
+  namespace: spark-operator
+
+tests:
+  - it: Should create the mutating webhook configuration by default
+    asserts:
+      - containsDocument:
+          apiVersion: admissionregistration.k8s.io/v1
+          kind: MutatingWebhookConfiguration
+          name: spark-operator-webhook
+
+  - it: Should use the specified webhook port
+    set:
+      webhook:
+        port: 12345
+    asserts:
+      - equal:
+          path: webhooks[*].clientConfig.service.port
+          value: 12345
+
+  - it: Should use the specified failure policy
+    set:
+      webhook:
+        failurePolicy: Fail
+    asserts:
+      - equal:
+          path: webhooks[*].failurePolicy
+          value: Fail
+
+  - it: Should set namespaceSelector if `spark.jobNamespaces` is not empty
+    set:
+      spark:
+        jobNamespaces:
+          - ns1
+          - ns2
+          - ns3
+    asserts:
+      - equal:
+          path: webhooks[*].namespaceSelector
+          value:
+            matchExpressions:
+              - key: kubernetes.io/metadata.name
+                operator: In
+                values:
+                  - ns1
+                  - ns2
+                  - ns3
+
+  - it: Should use the specified timeoutSeconds
+    set:
+      webhook:
+        timeoutSeconds: 5
+    asserts:
+      - equal:
+          path: webhooks[*].timeoutSeconds
+          value: 5
diff --git a/charts/spark-operator-chart/tests/webhook/poddisruptionbudget_test.yaml b/charts/spark-operator-chart/tests/webhook/poddisruptionbudget_test.yaml
new file mode 100644
index 0000000000..f45350dbb7
--- /dev/null
+++ b/charts/spark-operator-chart/tests/webhook/poddisruptionbudget_test.yaml
@@ -0,0 +1,68 @@
+#
+# Copyright 2024 The Kubeflow authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +suite: Test webhook pod disruption budget + +templates: + - webhook/poddisruptionbudget.yaml + +release: + name: spark-operator + namespace: spark-operator + +tests: + - it: Should not render podDisruptionBudget if `webhook.podDisruptionBudget.enable` is false + set: + webhook: + podDisruptionBudget: + enable: false + asserts: + - hasDocuments: + count: 0 + + - it: Should fail if `webhook.replicas` is less than 2 when `webhook.podDisruptionBudget.enable` is true + set: + webhook: + replicas: 1 + podDisruptionBudget: + enable: true + asserts: + - failedTemplate: + errorMessage: "webhook.replicas must be greater than 1 to enable pod disruption budget for webhook" + + - it: Should render spark operator podDisruptionBudget if `webhook.podDisruptionBudget.enable` is true + set: + webhook: + replicas: 2 + podDisruptionBudget: + enable: true + asserts: + - containsDocument: + apiVersion: policy/v1 + kind: PodDisruptionBudget + name: spark-operator-webhook-pdb + + - it: Should set minAvailable if `webhook.podDisruptionBudget.minAvailable` is specified + set: + webhook: + replicas: 2 + podDisruptionBudget: + enable: true + minAvailable: 3 + asserts: + - equal: + path: spec.minAvailable + value: 3 diff --git a/charts/spark-operator-chart/tests/webhook/secret_test.yaml b/charts/spark-operator-chart/tests/webhook/secret_test.yaml deleted file mode 100644 index 0e9c3b4cfd..0000000000 --- a/charts/spark-operator-chart/tests/webhook/secret_test.yaml +++ /dev/null @@ -1,31 +0,0 @@ -suite: Test spark operator webhook secret - -templates: - - webhook/secret.yaml - -release: - name: spark-operator - namespace: spark-operator - -tests: - - it: Should not render the webhook secret if webhook.enable is false - asserts: - - hasDocuments: - count: 0 - - - it: Should render the webhook secret with empty data fields - set: - webhook: - enable: true - asserts: - - containsDocument: - apiVersion: v1 - kind: Secret - name: spark-operator-webhook-certs - - equal: - path: data - value: - ca-key.pem: "" - ca-cert.pem: "" - server-key.pem: "" - server-cert.pem: "" diff --git a/charts/spark-operator-chart/tests/webhook/service_test.yaml b/charts/spark-operator-chart/tests/webhook/service_test.yaml index d3b6b1cc26..c06631f975 100644 --- a/charts/spark-operator-chart/tests/webhook/service_test.yaml +++ b/charts/spark-operator-chart/tests/webhook/service_test.yaml @@ -1,24 +1,32 @@ -suite: Test spark operator webhook service +# +# Copyright 2024 The Kubeflow authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+suite: Test webhook service
 
 templates:
   - webhook/service.yaml
 
 release:
   name: spark-operator
+  namespace: spark-operator
 
 tests:
-  - it: Should not render the webhook service if webhook.enable is false
-    set:
-      webhook:
-        enable: false
-    asserts:
-      - hasDocuments:
-          count: 0
-
-  - it: Should render the webhook service correctly if webhook.enable is true
+  - it: Should create the webhook service correctly
     set:
       webhook:
-        enable: true
         portName: webhook
     asserts:
       - containsDocument:
@@ -28,6 +36,6 @@ tests:
     - equal:
         path: spec.ports[0]
         value:
-          port: 443
+          port: 9443
           targetPort: webhook
           name: webhook
diff --git a/charts/spark-operator-chart/tests/webhook/validatingwebhookconfiguration_test.yaml b/charts/spark-operator-chart/tests/webhook/validatingwebhookconfiguration_test.yaml
new file mode 100644
index 0000000000..9c7fa4daad
--- /dev/null
+++ b/charts/spark-operator-chart/tests/webhook/validatingwebhookconfiguration_test.yaml
@@ -0,0 +1,77 @@
+#
+# Copyright 2024 The Kubeflow authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+suite: Test validating webhook configuration
+
+templates:
+  - webhook/validatingwebhookconfiguration.yaml
+
+release:
+  name: spark-operator
+  namespace: spark-operator
+
+tests:
+  - it: Should create the validating webhook configuration by default
+    asserts:
+      - containsDocument:
+          apiVersion: admissionregistration.k8s.io/v1
+          kind: ValidatingWebhookConfiguration
+          name: spark-operator-webhook
+
+  - it: Should use the specified webhook port
+    set:
+      webhook:
+        port: 12345
+    asserts:
+      - equal:
+          path: webhooks[*].clientConfig.service.port
+          value: 12345
+
+  - it: Should use the specified failure policy
+    set:
+      webhook:
+        failurePolicy: Fail
+    asserts:
+      - equal:
+          path: webhooks[*].failurePolicy
+          value: Fail
+
+  - it: Should set namespaceSelector if `spark.jobNamespaces` is not empty
+    set:
+      spark.jobNamespaces:
+        - ns1
+        - ns2
+        - ns3
+    asserts:
+      - equal:
+          path: webhooks[*].namespaceSelector
+          value:
+            matchExpressions:
+              - key: kubernetes.io/metadata.name
+                operator: In
+                values:
+                  - ns1
+                  - ns2
+                  - ns3
+
+  - it: Should use the specified timeoutSeconds
+    set:
+      webhook:
+        timeoutSeconds: 5
+    asserts:
+      - equal:
+          path: webhooks[*].timeoutSeconds
+          value: 5
diff --git a/charts/spark-operator-chart/values.yaml b/charts/spark-operator-chart/values.yaml
index bcb3a100a1..a5adbe4778 100644
--- a/charts/spark-operator-chart/values.yaml
+++ b/charts/spark-operator-chart/values.yaml
@@ -1,210 +1,328 @@
+#
+# Copyright 2024 The Kubeflow authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. +# + # Default values for spark-operator. # This is a YAML-formatted file. # Declare variables to be passed into your templates. -# -- Common labels to add to the resources -commonLabels: {} - -# replicaCount -- Desired number of pods, leaderElection will be enabled -# if this is greater than 1 -replicaCount: 1 - -image: - # -- Image repository - repository: docker.io/kubeflow/spark-operator - # -- Image pull policy - pullPolicy: IfNotPresent - # -- if set, override the image tag whose default is the chart appVersion. - tag: "" - -# -- Image pull secrets -imagePullSecrets: [] - -# -- String to partially override `spark-operator.fullname` template (will maintain the release name) +# -- String to partially override release name. nameOverride: "" -# -- String to override release name +# -- String to fully override release name. fullnameOverride: "" -rbac: - # -- **DEPRECATED** use `createRole` and `createClusterRole` - create: false - # -- Create and use RBAC `Role` resources - createRole: true - # -- Create and use RBAC `ClusterRole` resources - createClusterRole: true - # -- Optional annotations for rbac - annotations: {} +# -- Common labels to add to the resources. +commonLabels: {} -serviceAccounts: - spark: - # -- Create a service account for spark apps +image: + # -- Image registry. + registry: docker.io + # -- Image repository. + repository: kubeflow/spark-operator + # -- Image tag. + # @default -- If not set, the chart appVersion will be used. + tag: "" + # -- Image pull policy. + pullPolicy: IfNotPresent + # -- Image pull secrets for private image registry. + pullSecrets: [] + # - name: + +controller: + # -- Number of replicas of controller. + replicas: 1 + + # -- Reconcile concurrency, higher values might increase memory usage. + workers: 10 + + # -- Configure the verbosity of logging, can be one of `debug`, `info`, `error`. + logLevel: info + + uiService: + # -- Specifies whether to create service for Spark web UI. + enable: true + + uiIngress: + # -- Specifies whether to create ingress for Spark web UI. + # `controller.uiService.enable` must be `true` to enable ingress. + enable: false + # -- Ingress URL format. + # Required if `controller.uiIngress.enable` is true. + urlFormat: "" + + batchScheduler: + # -- Specifies whether to enable batch scheduler for spark jobs scheduling. + # If enabled, users can specify batch scheduler name in spark application. + enable: false + + serviceAccount: + # -- Specifies whether to create a service account for the controller. create: true - # -- Optional name for the spark service account + # -- Optional name for the controller service account. name: "" - # -- Optional annotations for the spark service account + # -- Extra annotations for the controller service account. annotations: {} - sparkoperator: - # -- Create a service account for the operator + + rbac: + # -- Specifies whether to create RBAC resources for the controller. create: true - # -- Optional name for the operator service account - name: "" - # -- Optional annotations for the operator service account + # -- Extra annotations for the controller RBAC resources. annotations: {} -# -- List of namespaces where to run spark jobs -sparkJobNamespaces: - - "" -# - ns1 - -# -- Operator concurrency, higher values might increase memory usage -controllerThreads: 10 + # -- Extra labels for controller pods. + labels: {} + # key1: value1 + # key2: value2 -# -- Operator resync interval. 
Note that the operator will respond to events (e.g. create, update)
-# unrelated to this setting
-resyncInterval: 30
+  # -- Extra annotations for controller pods.
+  annotations: {}
+  # key1: value1
+  # key2: value2
+
+  # -- Volumes for controller pods.
+  volumes: []
+
+  # -- Node selector for controller pods.
+  nodeSelector: {}
+
+  # -- Affinity for controller pods.
+  affinity: {}
+
+  # -- List of node taints to tolerate for controller pods.
+  tolerations: []
+
+  # -- Priority class for controller pods.
+  priorityClassName: ""
+
+  # -- Security context for controller pods.
+  podSecurityContext: {}
+  # runAsUser: 1000
+  # runAsGroup: 2000
+  # fsGroup: 3000
+
+  # -- Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in.
+  # Ref: [Pod Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/).
+  # The labelSelector field in topology spread constraint will be set to the selector labels for controller pods if not specified.
+  topologySpreadConstraints: []
+  # - maxSkew: 1
+  #   topologyKey: topology.kubernetes.io/zone
+  #   whenUnsatisfiable: ScheduleAnyway
+  # - maxSkew: 1
+  #   topologyKey: kubernetes.io/hostname
+  #   whenUnsatisfiable: DoNotSchedule
+
+  # -- Environment variables for controller containers.
+  env: []
+
+  # -- Environment variable sources for controller containers.
+  envFrom: []
+
+  # -- Volume mounts for controller containers.
+  volumeMounts: []
+
+  # -- Pod resource requests and limits for controller containers.
+  # Note that each job submission will spawn a JVM within the controller pods using "/usr/local/openjdk-11/bin/java -Xmx128m".
+  # Kubernetes may kill these Java processes at will to enforce resource limits. When that happens, you will see the following error:
+  # 'failed to run spark-submit for SparkApplication [...]: signal: killed' - when this happens, you may want to increase memory limits.
+  resources: {}
+  # limits:
+  #   cpu: 100m
+  #   memory: 300Mi
+  # requests:
+  #   cpu: 100m
+  #   memory: 300Mi
+
+  # -- Security context for controller containers.
+  securityContext: {}
+  # runAsUser: 1000
+  # runAsGroup: 2000
+  # fsGroup: 3000
+
+  # -- Sidecar containers for controller pods.
+  sidecars: []
+
+  # Pod disruption budget for controller to avoid service degradation.
+  podDisruptionBudget:
+    # -- Specifies whether to create pod disruption budget for controller.
+    # Ref: [Specifying a Disruption Budget for your Application](https://kubernetes.io/docs/tasks/run-application/configure-pdb/)
+    enable: false
+    # -- The number of pods that must be available.
+    # Requires `controller.replicas` to be greater than 1.
+    minAvailable: 1
 
-uiService:
-  # -- Enable UI service creation for Spark application
-  enable: true
+webhook:
+  # -- Number of replicas of webhook server.
+  replicas: 1
 
-# -- Ingress URL format.
-# Requires the UI service to be enabled by setting `uiService.enable` to true.
-ingressUrlFormat: ""
+  # -- Configure the verbosity of logging, can be one of `debug`, `info`, `error`.
+  logLevel: info
 
-# -- Set higher levels for more verbose logging
-logLevel: 2
+  # -- Specifies webhook port.
+  port: 9443
 
-# -- Pod environment variable sources
-envFrom: []
+  # -- Specifies webhook service port name.
+  portName: webhook
 
-# podSecurityContext -- Pod security context
-podSecurityContext: {}
+  # -- Specifies how unrecognized errors are handled.
+  # Available options are `Ignore` or `Fail`.
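+  # A hedged sketch of overriding this at install time (the release name and
+  # chart reference below are assumptions, not defined by this file):
+  #   helm upgrade --install spark-operator spark-operator/spark-operator \
+  #     --set webhook.failurePolicy=Ignore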
+ failurePolicy: Fail -# securityContext -- Operator container security context -securityContext: {} + # -- Specifies the timeout seconds of the webhook, the value must be between 1 and 30. + timeoutSeconds: 10 -# sidecars -- Sidecar containers -sidecars: [] + resourceQuotaEnforcement: + # -- Specifies whether to enable the ResourceQuota enforcement for SparkApplication resources. + enable: false -# volumes - Operator volumes -volumes: [] + serviceAccount: + # -- Specifies whether to create a service account for the webhook. + create: true + # -- Optional name for the webhook service account. + name: "" + # -- Extra annotations for the webhook service account. + annotations: {} -# volumeMounts - Operator volumeMounts -volumeMounts: [] + rbac: + # -- Specifies whether to create RBAC resources for the webhook. + create: true + # -- Extra annotations for the webhook RBAC resources. + annotations: {} -webhook: - # -- Enable webhook server - enable: false - # -- Webhook service port - port: 8080 - # -- Webhook container port name and service target port name - portName: webhook - # -- The webhook server will only operate on namespaces with this label, specified in the form key1=value1,key2=value2. - # Empty string (default) will operate on all namespaces - namespaceSelector: "" - # -- The webhook will only operate on resources with this label/s, specified in the form key1=value1,key2=value2, OR key in (value1,value2). - # Empty string (default) will operate on all objects - objectSelector: "" - # -- The annotations applied to init job, required to restore certs deleted by the cleanup job during upgrade - timeout: 30 - -metrics: - # -- Enable prometheus metric scraping - enable: true - # -- Metrics port - port: 10254 - # -- Metrics port name - portName: metrics - # -- Metrics serving endpoint - endpoint: /metrics - # -- Metric prefix, will be added to all exported metrics - prefix: "" - -# -- Prometheus pod monitor for operator's pod. -podMonitor: - # -- If enabled, a pod monitor for operator's pod will be submitted. Note that prometheus metrics should be enabled as well. - enable: false - # -- Pod monitor labels + # -- Extra labels for webhook pods. labels: {} - # -- The label to use to retrieve the job name from - jobLabel: spark-operator-podmonitor - # -- Prometheus metrics endpoint properties. `metrics.portName` will be used as a port - podMetricsEndpoint: - scheme: http - interval: 5s - -# -- podDisruptionBudget to avoid service degradation -podDisruptionBudget: - # -- Specifies whether to enable pod disruption budget. - # Ref: [Specifying a Disruption Budget for your Application](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) - enable: false - # -- The number of pods that must be available. - # Require `replicaCount` to be greater than 1 - minAvailable: 1 - -# -- Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. -# Ref: [Pod Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/) -# Specify topologySpreadConstraints without the labelSelector field, the labelSelector field will be set -# to "spark-operator.selectorLabels" subtemplate in the deployment.yaml file. 
-topologySpreadConstraints: [] -# - maxSkew: 1 -# topologyKey: topology.kubernetes.io/zone -# whenUnsatisfiable: ScheduleAnyway -# - maxSkew: 1 -# topologyKey: kubernetes.io/hostname -# whenUnsatisfiable: DoNotSchedule - -# nodeSelector -- Node labels for pod assignment -nodeSelector: {} - -# tolerations -- List of node taints to tolerate -tolerations: [] - -# affinity -- Affinity for pod assignment -affinity: {} - -# podAnnotations -- Additional annotations to add to the pod -podAnnotations: {} - -# podLabels -- Additional labels to add to the pod -podLabels: {} - -# resources -- Pod resource requests and limits -# Note, that each job submission will spawn a JVM within the Spark Operator Pod using "/usr/local/openjdk-11/bin/java -Xmx128m". -# Kubernetes may kill these Java processes at will to enforce resource limits. When that happens, you will see the following error: -# 'failed to run spark-submit for SparkApplication [...]: signal: killed' - when this happens, you may want to increase memory limits. -resources: {} - # limits: - # cpu: 100m - # memory: 300Mi - # requests: - # cpu: 100m - # memory: 300Mi - -batchScheduler: - # -- Enable batch scheduler for spark jobs scheduling. If enabled, users can specify batch scheduler name in spark application - enable: false - -resourceQuotaEnforcement: - # -- Whether to enable the ResourceQuota enforcement for SparkApplication resources. - # Requires the webhook to be enabled by setting `webhook.enable` to true. - # Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-resource-quota-enforcement. - enable: false - -leaderElection: - # -- Leader election lock name. - # Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-leader-election-for-high-availability. - lockName: "spark-operator-lock" - # -- Optionally store the lock in another namespace. Defaults to operator's namespace - lockNamespace: "" - -istio: - # -- When using `istio`, spark jobs need to run without a sidecar to properly terminate - enabled: false - -# labelSelectorFilter -- A comma-separated list of key=value, or key labels to filter resources during watch and list based on the specified labels. -labelSelectorFilter: "" - -# priorityClassName -- A priority class to be used for running spark-operator pod. -priorityClassName: "" + # key1: value1 + # key2: value2 + + # -- Extra annotations for webhook pods. + annotations: {} + # key1: value1 + # key2: value2 + + # -- Sidecar containers for webhook pods. + sidecars: [] + + # -- Volumes for webhook pods. + volumes: [] + + # -- Node selector for webhook pods. + nodeSelector: {} + + # -- Affinity for webhook pods. + affinity: {} + + # -- List of node taints to tolerate for webhook pods. + tolerations: [] + + # -- Priority class for webhook pods. + priorityClassName: "" + + # -- Security context for webhook pods. + podSecurityContext: {} + # runAsUser: 1000 + # runAsGroup: 2000 + # fsGroup: 3000 + + # -- Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. + # Ref: [Pod Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/). + # The labelSelector field in topology spread constraint will be set to the selector labels for webhook pods if not specified. 
+  topologySpreadConstraints: []
+  # - maxSkew: 1
+  #   topologyKey: topology.kubernetes.io/zone
+  #   whenUnsatisfiable: ScheduleAnyway
+  # - maxSkew: 1
+  #   topologyKey: kubernetes.io/hostname
+  #   whenUnsatisfiable: DoNotSchedule
+
+  # -- Environment variables for webhook containers.
+  env: []
+
+  # -- Environment variable sources for webhook containers.
+  envFrom: []
+
+  # -- Volume mounts for webhook containers.
+  volumeMounts: []
+
+  # -- Pod resource requests and limits for webhook pods.
+  resources: {}
+  # limits:
+  #   cpu: 100m
+  #   memory: 300Mi
+  # requests:
+  #   cpu: 100m
+  #   memory: 300Mi
+
+  # -- Security context for webhook containers.
+  securityContext: {}
+  # runAsUser: 1000
+  # runAsGroup: 2000
+  # fsGroup: 3000
+
+  # Pod disruption budget for webhook to avoid service degradation.
+  podDisruptionBudget:
+    # -- Specifies whether to create pod disruption budget for webhook.
+    # Ref: [Specifying a Disruption Budget for your Application](https://kubernetes.io/docs/tasks/run-application/configure-pdb/)
+    enable: false
+    # -- The number of pods that must be available.
+    # Requires `webhook.replicas` to be greater than 1.
+    minAvailable: 1
+
+spark:
+  # -- List of namespaces in which to run spark jobs.
+  # If empty string is included, all namespaces will be allowed.
+  # Make sure the namespaces already exist.
+  jobNamespaces:
+    - default
+
+  serviceAccount:
+    # -- Specifies whether to create a service account for spark applications.
+    create: true
+    # -- Optional name for the spark service account.
+    name: ""
+    # -- Optional annotations for the spark service account.
+    annotations: {}
+
+  rbac:
+    # -- Specifies whether to create RBAC resources for spark applications.
+    create: true
+    # -- Optional annotations for the spark application RBAC resources.
+    annotations: {}
+
+prometheus:
+  metrics:
+    # -- Specifies whether to enable prometheus metrics scraping.
+    enable: true
+    # -- Metrics port.
+    port: 8080
+    # -- Metrics port name.
+    portName: metrics
+    # -- Metrics serving endpoint.
+    endpoint: /metrics
+    # -- Metrics prefix, will be added to all exported metrics.
+    prefix: ""
+
+  # Prometheus pod monitor for controller pods
+  podMonitor:
+    # -- Specifies whether to create pod monitor.
+    # Note that prometheus metrics should be enabled as well.
+    create: false
+    # -- Pod monitor labels
+    labels: {}
+    # -- The label to use to retrieve the job name from
+    jobLabel: spark-operator-podmonitor
+    # -- Prometheus metrics endpoint properties. `metrics.portName` will be used as a port
+    podMetricsEndpoint:
+      scheme: http
+      interval: 5s
diff --git a/cmd/main.go b/cmd/main.go
new file mode 100644
index 0000000000..38085497b7
--- /dev/null
+++ b/cmd/main.go
@@ -0,0 +1,31 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package main + +import ( + "fmt" + "os" + + "github.com/kubeflow/spark-operator/cmd/operator" +) + +func main() { + if err := operator.NewCommand().Execute(); err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + os.Exit(1) + } +} diff --git a/pkg/batchscheduler/interface/interface.go b/cmd/operator/controller/root.go similarity index 60% rename from pkg/batchscheduler/interface/interface.go rename to cmd/operator/controller/root.go index 6ed18c8cd7..eeaa8edcd2 100644 --- a/pkg/batchscheduler/interface/interface.go +++ b/cmd/operator/controller/root.go @@ -1,5 +1,5 @@ /* -Copyright 2019 Google LLC +Copyright 2024 The Kubeflow authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,16 +14,20 @@ See the License for the specific language governing permissions and limitations under the License. */ -package schedulerinterface +package controller import ( - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + "github.com/spf13/cobra" ) -type BatchScheduler interface { - Name() string - - ShouldSchedule(app *v1beta2.SparkApplication) bool - DoBatchSchedulingOnSubmission(app *v1beta2.SparkApplication) error - CleanupOnCompletion(app *v1beta2.SparkApplication) error +func NewCommand() *cobra.Command { + command := &cobra.Command{ + Use: "controller", + Short: "Spark operator controller", + RunE: func(cmd *cobra.Command, _ []string) error { + return cmd.Help() + }, + } + command.AddCommand(NewStartCommand()) + return command } diff --git a/cmd/operator/controller/start.go b/cmd/operator/controller/start.go new file mode 100644 index 0000000000..8fb54d7eab --- /dev/null +++ b/cmd/operator/controller/start.go @@ -0,0 +1,364 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "crypto/tls" + "flag" + "os" + "time" + + // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) + // to ensure that exec-entrypoint and run can make use of them. 
+ _ "k8s.io/client-go/plugin/pkg/client/auth" + + "github.com/spf13/cobra" + "github.com/spf13/viper" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/utils/clock" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/healthz" + logzap "sigs.k8s.io/controller-runtime/pkg/log/zap" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + ctrlwebhook "sigs.k8s.io/controller-runtime/pkg/webhook" + + sparkoperator "github.com/kubeflow/spark-operator" + "github.com/kubeflow/spark-operator/api/v1beta1" + "github.com/kubeflow/spark-operator/api/v1beta2" + "github.com/kubeflow/spark-operator/internal/controller/scheduledsparkapplication" + "github.com/kubeflow/spark-operator/internal/controller/sparkapplication" + "github.com/kubeflow/spark-operator/internal/metrics" + "github.com/kubeflow/spark-operator/internal/scheduler" + "github.com/kubeflow/spark-operator/internal/scheduler/volcano" + "github.com/kubeflow/spark-operator/pkg/common" + "github.com/kubeflow/spark-operator/pkg/util" + // +kubebuilder:scaffold:imports +) + +var ( + scheme = runtime.NewScheme() + logger = ctrl.Log.WithName("") +) + +var ( + namespaces []string + + // Controller + controllerThreads int + cacheSyncTimeout time.Duration + + // Batch scheduler + enableBatchScheduler bool + + // Spark web UI service and ingress + enableUIService bool + ingressClassName string + ingressURLFormat string + + // Leader election + enableLeaderElection bool + leaderElectionLockName string + leaderElectionLockNamespace string + leaderElectionLeaseDuration time.Duration + leaderElectionRenewDeadline time.Duration + leaderElectionRetryPeriod time.Duration + + // Metrics + enableMetrics bool + metricsBindAddress string + metricsEndpoint string + metricsPrefix string + metricsLabels []string + metricsJobStartLatencyBuckets []float64 + + healthProbeBindAddress string + secureMetrics bool + enableHTTP2 bool + development bool + zapOptions = logzap.Options{} +) + +func init() { + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + + utilruntime.Must(v1beta1.AddToScheme(scheme)) + utilruntime.Must(v1beta2.AddToScheme(scheme)) + // +kubebuilder:scaffold:scheme +} + +func NewStartCommand() *cobra.Command { + var command = &cobra.Command{ + Use: "start", + Short: "Start controller and webhook", + PreRun: func(_ *cobra.Command, args []string) { + development = viper.GetBool("development") + }, + Run: func(_ *cobra.Command, args []string) { + sparkoperator.PrintVersion(false) + start() + }, + } + + command.Flags().IntVar(&controllerThreads, "controller-threads", 10, "Number of worker threads used by the SparkApplication controller.") + command.Flags().StringSliceVar(&namespaces, "namespaces", []string{}, "The Kubernetes namespace to manage. 
Will manage custom resource objects of the managed CRD types for the whole cluster if unset.") + command.Flags().DurationVar(&cacheSyncTimeout, "cache-sync-timeout", 30*time.Second, "Informer cache sync timeout.") + + command.Flags().BoolVar(&enableBatchScheduler, "enable-batch-scheduler", false, "Enable batch schedulers.") + command.Flags().BoolVar(&enableUIService, "enable-ui-service", true, "Enable Spark Web UI service.") + command.Flags().StringVar(&ingressClassName, "ingress-class-name", "", "Set ingressClassName for ingress resources created.") + command.Flags().StringVar(&ingressURLFormat, "ingress-url-format", "", "Ingress URL format.") + + command.Flags().BoolVar(&enableLeaderElection, "leader-election", false, "Enable leader election for controller manager. "+ + "Enabling this will ensure there is only one active controller manager.") + command.Flags().StringVar(&leaderElectionLockName, "leader-election-lock-name", "spark-operator-lock", "Name of the ConfigMap for leader election.") + command.Flags().StringVar(&leaderElectionLockNamespace, "leader-election-lock-namespace", "spark-operator", "Namespace in which to create the ConfigMap for leader election.") + command.Flags().DurationVar(&leaderElectionLeaseDuration, "leader-election-lease-duration", 15*time.Second, "Leader election lease duration.") + command.Flags().DurationVar(&leaderElectionRenewDeadline, "leader-election-renew-deadline", 14*time.Second, "Leader election renew deadline.") + command.Flags().DurationVar(&leaderElectionRetryPeriod, "leader-election-retry-period", 4*time.Second, "Leader election retry period.") + + command.Flags().BoolVar(&enableMetrics, "enable-metrics", false, "Enable metrics.") + command.Flags().StringVar(&metricsBindAddress, "metrics-bind-address", "0", "The address the metrics endpoint binds to. "+ + "Use the port :8080. If not set, it defaults to 0, which disables the metrics server.") + command.Flags().StringVar(&metricsEndpoint, "metrics-endpoint", "/metrics", "Metrics endpoint.") + command.Flags().StringVar(&metricsPrefix, "metrics-prefix", "", "Prefix for the metrics.") + command.Flags().StringSliceVar(&metricsLabels, "metrics-labels", []string{}, "Labels to be added to the metrics.") + command.Flags().Float64SliceVar(&metricsJobStartLatencyBuckets, "metrics-job-start-latency-buckets", []float64{30, 60, 90, 120, 150, 180, 210, 240, 270, 300}, "Buckets for the job start latency histogram.") + + command.Flags().StringVar(&healthProbeBindAddress, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") + command.Flags().BoolVar(&secureMetrics, "secure-metrics", false, "If set, the metrics endpoint is served securely.") + command.Flags().BoolVar(&enableHTTP2, "enable-http2", false, "If set, HTTP/2 will be enabled for the metrics and webhook servers.") + + flagSet := flag.NewFlagSet("controller", flag.ExitOnError) + ctrl.RegisterFlags(flagSet) + zapOptions.BindFlags(flagSet) + command.Flags().AddGoFlagSet(flagSet) + + return command +} + +func start() { + setupLog() + + // Create the client rest config. Use kubeConfig if given, otherwise assume in-cluster. + cfg, err := ctrl.GetConfig() + if err != nil { + logger.Error(err, "Failed to get kube config") + os.Exit(1) + } + + // Create the manager.
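// Note (editorial sketch, not part of this patch): the manager created below
// wires together the shared informer cache (newCacheOptions), the metrics server,
// the webhook server, and leader election. With the default
// --metrics-bind-address of "0" the metrics server stays disabled; for example,
// --metrics-bind-address=:8080 would serve the controller-runtime metrics
// endpoint on port 8080.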
+ tlsOptions := newTLSOptions() + mgr, err := ctrl.NewManager(cfg, ctrl.Options{ + Scheme: scheme, + Cache: newCacheOptions(), + Metrics: metricsserver.Options{ + BindAddress: metricsBindAddress, + SecureServing: secureMetrics, + TLSOpts: tlsOptions, + }, + WebhookServer: ctrlwebhook.NewServer(ctrlwebhook.Options{ + TLSOpts: tlsOptions, + }), + HealthProbeBindAddress: healthProbeBindAddress, + LeaderElection: enableLeaderElection, + LeaderElectionID: leaderElectionLockName, + LeaderElectionNamespace: leaderElectionLockNamespace, + // LeaderElectionReleaseOnCancel defines whether the leader should step down voluntarily + // when the Manager ends. This requires the binary to immediately end when the + // Manager is stopped; otherwise, this setting is unsafe. Setting this significantly + // speeds up voluntary leader transitions as the new leader doesn't have to wait + // LeaseDuration time first. + // + // In the default scaffold provided, the program ends immediately after + // the manager stops, so it would be fine to enable this option. However, + // if you are doing or intend to do any operation, such as performing cleanups, + // after the manager stops, then its usage might be unsafe. + // LeaderElectionReleaseOnCancel: true, + }) + if err != nil { + logger.Error(err, "Failed to create manager") + os.Exit(1) + } + + var registry *scheduler.Registry + if enableBatchScheduler { + registry = scheduler.GetRegistry() + + // Register volcano scheduler. + registry.Register(common.VolcanoSchedulerName, volcano.Factory) + } + + // Setup controller for SparkApplication. + if err = sparkapplication.NewReconciler( + mgr, + mgr.GetScheme(), + mgr.GetClient(), + mgr.GetEventRecorderFor("spark-application-controller"), + registry, + newSparkApplicationReconcilerOptions(), + ).SetupWithManager(mgr, newControllerOptions()); err != nil { + logger.Error(err, "Failed to create controller", "controller", "SparkApplication") + os.Exit(1) + } + + // Setup controller for ScheduledSparkApplication.
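// A note on the wiring (editorial, not part of this patch): like the
// SparkApplication reconciler above, this one is registered through
// SetupWithManager with the shared controller.Options built by
// newControllerOptions(), i.e. MaxConcurrentReconciles (--controller-threads)
// and CacheSyncTimeout (--cache-sync-timeout).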
+ if err = scheduledsparkapplication.NewReconciler( + mgr.GetScheme(), + mgr.GetClient(), + mgr.GetEventRecorderFor("scheduled-spark-application-controller"), + clock.RealClock{}, + newScheduledSparkApplicationReconcilerOptions(), + ).SetupWithManager(mgr, newControllerOptions()); err != nil { + logger.Error(err, "Failed to create controller", "controller", "ScheduledSparkApplication") + os.Exit(1) + } + + // +kubebuilder:scaffold:builder + + if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { + logger.Error(err, "Failed to set up health check") + os.Exit(1) + } + + if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { + logger.Error(err, "Failed to set up ready check") + os.Exit(1) + } + + logger.Info("Starting manager") + if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { + logger.Error(err, "Failed to start manager") + os.Exit(1) + } +} + +// setupLog configures the logging system. +func setupLog() { + ctrl.SetLogger(logzap.New( + logzap.UseFlagOptions(&zapOptions), + func(o *logzap.Options) { + o.Development = development + }, func(o *logzap.Options) { + o.ZapOpts = append(o.ZapOpts, zap.AddCaller()) + }, func(o *logzap.Options) { + var config zapcore.EncoderConfig + if !development { + config = zap.NewProductionEncoderConfig() + } else { + config = zap.NewDevelopmentEncoderConfig() + } + config.EncodeLevel = zapcore.CapitalColorLevelEncoder + config.EncodeTime = zapcore.ISO8601TimeEncoder + config.EncodeCaller = zapcore.ShortCallerEncoder + o.Encoder = zapcore.NewConsoleEncoder(config) + }), + ) +} + +func newTLSOptions() []func(c *tls.Config) { + // If the enable-http2 flag is false (the default), HTTP/2 should be disabled + // due to its vulnerabilities. More specifically, disabling HTTP/2 will + // prevent the servers from being vulnerable to the HTTP/2 Stream Cancellation and + // Rapid Reset CVEs. For more information see: + // - https://github.com/advisories/GHSA-qppj-fm5r-hxr3 + // - https://github.com/advisories/GHSA-4374-p667-p6c8 + disableHTTP2 := func(c *tls.Config) { + logger.Info("disabling http/2") + c.NextProtos = []string{"http/1.1"} + } + + tlsOpts := []func(*tls.Config){} + if !enableHTTP2 { + tlsOpts = append(tlsOpts, disableHTTP2) + } + return tlsOpts +} + +// newCacheOptions creates and returns a cache.Options instance configured with default namespaces and object caching settings.
+func newControllerOptions() controller.Options { + options := controller.Options{ + MaxConcurrentReconciles: controllerThreads, + CacheSyncTimeout: cacheSyncTimeout, + } + return options +} + +func newSparkApplicationReconcilerOptions() sparkapplication.Options { + var sparkApplicationMetrics *metrics.SparkApplicationMetrics + var sparkExecutorMetrics *metrics.SparkExecutorMetrics + if enableMetrics { + sparkApplicationMetrics = metrics.NewSparkApplicationMetrics(metricsPrefix, metricsLabels, metricsJobStartLatencyBuckets) + sparkApplicationMetrics.Register() + sparkExecutorMetrics = metrics.NewSparkExecutorMetrics(metricsPrefix, metricsLabels) + sparkExecutorMetrics.Register() + } + options := sparkapplication.Options{ + Namespaces: namespaces, + EnableUIService: enableUIService, + IngressClassName: ingressClassName, + IngressURLFormat: ingressURLFormat, + SparkApplicationMetrics: sparkApplicationMetrics, + SparkExecutorMetrics: sparkExecutorMetrics, + } + return options +} + +func newScheduledSparkApplicationReconcilerOptions() scheduledsparkapplication.Options { + options := scheduledsparkapplication.Options{ + Namespaces: namespaces, + } + return options +} diff --git a/cmd/operator/root.go b/cmd/operator/root.go new file mode 100644 index 0000000000..2ddaa900d8 --- /dev/null +++ b/cmd/operator/root.go @@ -0,0 +1,39 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package operator + +import ( + "github.com/spf13/cobra" + + "github.com/kubeflow/spark-operator/cmd/operator/controller" + "github.com/kubeflow/spark-operator/cmd/operator/version" + "github.com/kubeflow/spark-operator/cmd/operator/webhook" +) + +func NewCommand() *cobra.Command { + command := &cobra.Command{ + Use: "spark-operator", + Short: "Spark operator", + RunE: func(cmd *cobra.Command, _ []string) error { + return cmd.Help() + }, + } + command.AddCommand(controller.NewCommand()) + command.AddCommand(webhook.NewCommand()) + command.AddCommand(version.NewCommand()) + return command +} diff --git a/pkg/controller/scheduledsparkapplication/controller_util.go b/cmd/operator/version/root.go similarity index 53% rename from pkg/controller/scheduledsparkapplication/controller_util.go rename to cmd/operator/version/root.go index 8cb33ab749..331bd612c6 100644 --- a/pkg/controller/scheduledsparkapplication/controller_util.go +++ b/cmd/operator/version/root.go @@ -1,5 +1,5 @@ /* -Copyright 2018 Google LLC +Copyright 2024 The Kubeflow authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,23 +14,27 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package scheduledsparkapplication +package version import ( - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" -) - -type sparkApps []*v1beta2.SparkApplication + "github.com/spf13/cobra" -func (s sparkApps) Len() int { - return len(s) -} + sparkoperator "github.com/kubeflow/spark-operator" +) -func (s sparkApps) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} +var ( + short bool +) -func (s sparkApps) Less(i, j int) bool { - // Sort by decreasing order of application names and correspondingly creation time. - return s[i].Name > s[j].Name +func NewCommand() *cobra.Command { + command := &cobra.Command{ + Use: "version", + Short: "Print version information", + RunE: func(cmd *cobra.Command, args []string) error { + sparkoperator.PrintVersion(short) + return nil + }, + } + command.Flags().BoolVar(&short, "short", false, "Print just the version string.") + return command } diff --git a/cmd/operator/webhook/root.go b/cmd/operator/webhook/root.go new file mode 100644 index 0000000000..47609ea495 --- /dev/null +++ b/cmd/operator/webhook/root.go @@ -0,0 +1,33 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhook + +import ( + "github.com/spf13/cobra" +) + +func NewCommand() *cobra.Command { + command := &cobra.Command{ + Use: "webhook", + Short: "Spark operator webhook", + RunE: func(cmd *cobra.Command, _ []string) error { + return cmd.Help() + }, + } + command.AddCommand(NewStartCommand()) + return command +} diff --git a/cmd/operator/webhook/start.go b/cmd/operator/webhook/start.go new file mode 100644 index 0000000000..23ef7ae481 --- /dev/null +++ b/cmd/operator/webhook/start.go @@ -0,0 +1,410 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhook + +import ( + "context" + "crypto/tls" + "flag" + "os" + "time" + + // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) + // to ensure that exec-entrypoint and run can make use of them. 
+ _ "k8s.io/client-go/plugin/pkg/client/auth" + + "github.com/spf13/cobra" + "github.com/spf13/viper" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + logzap "sigs.k8s.io/controller-runtime/pkg/log/zap" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + ctrlwebhook "sigs.k8s.io/controller-runtime/pkg/webhook" + + sparkoperator "github.com/kubeflow/spark-operator" + "github.com/kubeflow/spark-operator/api/v1beta1" + "github.com/kubeflow/spark-operator/api/v1beta2" + "github.com/kubeflow/spark-operator/internal/controller/mutatingwebhookconfiguration" + "github.com/kubeflow/spark-operator/internal/controller/validatingwebhookconfiguration" + "github.com/kubeflow/spark-operator/internal/webhook" + "github.com/kubeflow/spark-operator/pkg/certificate" + "github.com/kubeflow/spark-operator/pkg/common" + "github.com/kubeflow/spark-operator/pkg/util" + // +kubebuilder:scaffold:imports +) + +var ( + scheme = runtime.NewScheme() + logger = ctrl.Log.WithName("") +) + +var ( + namespaces []string + labelSelectorFilter string + + // Controller + controllerThreads int + cacheSyncTimeout time.Duration + + // Webhook + enableResourceQuotaEnforcement bool + webhookCertDir string + webhookCertName string + webhookKeyName string + mutatingWebhookName string + validatingWebhookName string + webhookPort int + webhookSecretName string + webhookSecretNamespace string + webhookServiceName string + webhookServiceNamespace string + + // Leader election + enableLeaderElection bool + leaderElectionLockName string + leaderElectionLockNamespace string + leaderElectionLeaseDuration time.Duration + leaderElectionRenewDeadline time.Duration + leaderElectionRetryPeriod time.Duration + + // Metrics + enableMetrics bool + metricsBindAddress string + metricsEndpoint string + metricsPrefix string + metricsLabels []string + + healthProbeBindAddress string + secureMetrics bool + enableHTTP2 bool + development bool + zapOptions = logzap.Options{} +) + +func init() { + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + + utilruntime.Must(v1beta1.AddToScheme(scheme)) + utilruntime.Must(v1beta2.AddToScheme(scheme)) + // +kubebuilder:scaffold:scheme +} + +func NewStartCommand() *cobra.Command { + var command = &cobra.Command{ + Use: "start", + Short: "Start controller and webhook", + PreRun: func(_ *cobra.Command, args []string) { + development = viper.GetBool("development") + }, + Run: func(cmd *cobra.Command, args []string) { + sparkoperator.PrintVersion(false) + start() + }, + } + + command.Flags().IntVar(&controllerThreads, "controller-threads", 10, "Number of worker threads used by the SparkApplication controller.") + command.Flags().StringSliceVar(&namespaces, "namespaces", []string{"default"}, "The Kubernetes namespace to manage. 
Will manage custom resource objects of the managed CRD types for the whole cluster if unset.") + command.Flags().StringVar(&labelSelectorFilter, "label-selector-filter", "", "A comma-separated list of key=value, or key labels to filter resources during watch and list based on the specified labels.") + command.Flags().DurationVar(&cacheSyncTimeout, "cache-sync-timeout", 30*time.Second, "Informer cache sync timeout.") + + command.Flags().StringVar(&webhookCertDir, "webhook-cert-dir", "/etc/k8s-webhook-server/serving-certs", "The directory that contains the webhook server key and certificate.") + command.Flags().StringVar(&webhookCertName, "webhook-cert-name", "tls.crt", "The file name of the webhook server certificate.") + command.Flags().StringVar(&webhookKeyName, "webhook-key-name", "tls.key", "The file name of the webhook server key.") + command.Flags().StringVar(&mutatingWebhookName, "mutating-webhook-name", "spark-operator-webhook", "The name of the mutating webhook.") + command.Flags().StringVar(&validatingWebhookName, "validating-webhook-name", "spark-operator-webhook", "The name of the validating webhook.") + command.Flags().IntVar(&webhookPort, "webhook-port", 9443, "Service port of the webhook server.") + command.Flags().StringVar(&webhookSecretName, "webhook-secret-name", "spark-operator-webhook-certs", "The name of the secret that contains the webhook server's TLS certificate and key.") + command.Flags().StringVar(&webhookSecretNamespace, "webhook-secret-namespace", "spark-operator", "The namespace of the secret that contains the webhook server's TLS certificate and key.") + command.Flags().StringVar(&webhookServiceName, "webhook-svc-name", "spark-webhook", "The name of the Service for the webhook server.") + command.Flags().StringVar(&webhookServiceNamespace, "webhook-svc-namespace", "spark-webhook", "The namespace of the Service for the webhook server.") + command.Flags().BoolVar(&enableResourceQuotaEnforcement, "enable-resource-quota-enforcement", false, "Whether to enable ResourceQuota enforcement for SparkApplication resources. Requires the webhook to be enabled.") + + command.Flags().BoolVar(&enableLeaderElection, "leader-election", false, "Enable leader election for controller manager. "+ + "Enabling this will ensure there is only one active controller manager.") + command.Flags().StringVar(&leaderElectionLockName, "leader-election-lock-name", "spark-operator-lock", "Name of the ConfigMap for leader election.") + command.Flags().StringVar(&leaderElectionLockNamespace, "leader-election-lock-namespace", "spark-operator", "Namespace in which to create the ConfigMap for leader election.") + command.Flags().DurationVar(&leaderElectionLeaseDuration, "leader-election-lease-duration", 15*time.Second, "Leader election lease duration.") + command.Flags().DurationVar(&leaderElectionRenewDeadline, "leader-election-renew-deadline", 14*time.Second, "Leader election renew deadline.") + command.Flags().DurationVar(&leaderElectionRetryPeriod, "leader-election-retry-period", 4*time.Second, "Leader election retry period.") + + command.Flags().BoolVar(&enableMetrics, "enable-metrics", false, "Enable metrics.") + command.Flags().StringVar(&metricsBindAddress, "metrics-bind-address", "0", "The address the metrics endpoint binds to. "+ + "Use the port :8080. 
If not set, it defaults to 0, which disables the metrics server.") + command.Flags().StringVar(&metricsEndpoint, "metrics-endpoint", "/metrics", "Metrics endpoint.") + command.Flags().StringVar(&metricsPrefix, "metrics-prefix", "", "Prefix for the metrics.") + command.Flags().StringSliceVar(&metricsLabels, "metrics-labels", []string{}, "Labels to be added to the metrics.") + + command.Flags().StringVar(&healthProbeBindAddress, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") + command.Flags().BoolVar(&secureMetrics, "secure-metrics", false, "If set, the metrics endpoint is served securely.") + command.Flags().BoolVar(&enableHTTP2, "enable-http2", false, "If set, HTTP/2 will be enabled for the metrics and webhook servers.") + + flagSet := flag.NewFlagSet("controller", flag.ExitOnError) + ctrl.RegisterFlags(flagSet) + zapOptions.BindFlags(flagSet) + command.Flags().AddGoFlagSet(flagSet) + + return command +} + +func start() { + setupLog() + + // Create the client rest config. Use kubeConfig if given, otherwise assume in-cluster. + cfg, err := ctrl.GetConfig() + if err != nil { + logger.Error(err, "Failed to get kube config") + os.Exit(1) + } + + // Create the manager. + tlsOptions := newTLSOptions() + mgr, err := ctrl.NewManager(cfg, ctrl.Options{ + Scheme: scheme, + Cache: newCacheOptions(), + Metrics: metricsserver.Options{ + BindAddress: metricsBindAddress, + SecureServing: secureMetrics, + TLSOpts: tlsOptions, + }, + WebhookServer: ctrlwebhook.NewServer(ctrlwebhook.Options{ + Port: webhookPort, + CertDir: webhookCertDir, + CertName: webhookCertName, + TLSOpts: tlsOptions, + }), + HealthProbeBindAddress: healthProbeBindAddress, + LeaderElection: enableLeaderElection, + LeaderElectionID: leaderElectionLockName, + LeaderElectionNamespace: leaderElectionLockNamespace, + // LeaderElectionReleaseOnCancel defines whether the leader should step down voluntarily + // when the Manager ends. This requires the binary to immediately end when the + // Manager is stopped; otherwise, this setting is unsafe. Setting this significantly + // speeds up voluntary leader transitions as the new leader doesn't have to wait + // LeaseDuration time first. + // + // In the default scaffold provided, the program ends immediately after + // the manager stops, so it would be fine to enable this option. However, + // if you are doing or intend to do any operation, such as performing cleanups, + // after the manager stops, then its usage might be unsafe. 
+ // LeaderElectionReleaseOnCancel: true, + }) + if err != nil { + logger.Error(err, "Failed to create manager") + os.Exit(1) + } + + client, err := client.New(cfg, client.Options{Scheme: mgr.GetScheme()}) + if err != nil { + logger.Error(err, "Failed to create client") + os.Exit(1) + } + + certProvider := certificate.NewProvider( + client, + webhookServiceName, + webhookServiceNamespace, + ) + + if err := wait.ExponentialBackoff( + wait.Backoff{ + Steps: 5, + Duration: 1 * time.Second, + Factor: 2.0, + Jitter: 0.1, + }, + func() (bool, error) { + logger.Info("Syncing webhook secret", "name", webhookSecretName, "namespace", webhookSecretNamespace) + if err := certProvider.SyncSecret(context.TODO(), webhookSecretName, webhookSecretNamespace); err != nil { + if errors.IsAlreadyExists(err) || errors.IsConflict(err) { + return false, nil + } + return false, err + } + return true, nil + }, + ); err != nil { + logger.Error(err, "Failed to sync webhook secret") + os.Exit(1) + } + + logger.Info("Writing certificates", "path", webhookCertDir, "certificate name", webhookCertName, "key name", webhookKeyName) + if err := certProvider.WriteFile(webhookCertDir, webhookCertName, webhookKeyName); err != nil { + logger.Error(err, "Failed to save certificate") + os.Exit(1) + } + + if err := mutatingwebhookconfiguration.NewReconciler( + mgr.GetClient(), + certProvider, + mutatingWebhookName, + ).SetupWithManager(mgr, controller.Options{}); err != nil { + logger.Error(err, "Failed to create controller", "controller", "MutatingWebhookConfiguration") + os.Exit(1) + } + + if err := validatingwebhookconfiguration.NewReconciler( + mgr.GetClient(), + certProvider, + validatingWebhookName, + ).SetupWithManager(mgr, controller.Options{}); err != nil { + logger.Error(err, "Failed to create controller", "controller", "ValidatingWebhookConfiguration") + os.Exit(1) + } + + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1beta2.SparkApplication{}). + WithDefaulter(webhook.NewSparkApplicationDefaulter()). + WithValidator(webhook.NewSparkApplicationValidator(mgr.GetClient(), enableResourceQuotaEnforcement)). + Complete(); err != nil { + logger.Error(err, "Failed to create mutating webhook for Spark application") + os.Exit(1) + } + + if err := ctrl.NewWebhookManagedBy(mgr). + For(&v1beta2.ScheduledSparkApplication{}). + WithDefaulter(webhook.NewScheduledSparkApplicationDefaulter()). + WithValidator(webhook.NewScheduledSparkApplicationValidator()). + Complete(); err != nil { + logger.Error(err, "Failed to create mutating webhook for Scheduled Spark application") + os.Exit(1) + } + + if err := ctrl.NewWebhookManagedBy(mgr). + For(&corev1.Pod{}). + WithDefaulter(webhook.NewSparkPodDefaulter(mgr.GetClient(), namespaces)). 
+ Complete(); err != nil { + logger.Error(err, "Failed to create mutating webhook for Spark pod") + os.Exit(1) + } + + // +kubebuilder:scaffold:builder + + if err := mgr.AddHealthzCheck("healthz", mgr.GetWebhookServer().StartedChecker()); err != nil { + logger.Error(err, "Failed to set up health check") + os.Exit(1) + } + + if err := mgr.AddReadyzCheck("readyz", mgr.GetWebhookServer().StartedChecker()); err != nil { + logger.Error(err, "Failed to set up ready check") + os.Exit(1) + } + + logger.Info("Starting manager") + if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { + logger.Error(err, "Failed to start manager") + os.Exit(1) + } +} + +// setupLog configures the logging system. +func setupLog() { + ctrl.SetLogger(logzap.New( + logzap.UseFlagOptions(&zapOptions), + func(o *logzap.Options) { + o.Development = development + }, func(o *logzap.Options) { + o.ZapOpts = append(o.ZapOpts, zap.AddCaller()) + }, func(o *logzap.Options) { + var config zapcore.EncoderConfig + if !development { + config = zap.NewProductionEncoderConfig() + } else { + config = zap.NewDevelopmentEncoderConfig() + } + config.EncodeLevel = zapcore.CapitalColorLevelEncoder + config.EncodeTime = zapcore.ISO8601TimeEncoder + config.EncodeCaller = zapcore.ShortCallerEncoder + o.Encoder = zapcore.NewConsoleEncoder(config) + }), + ) +} + +func newTLSOptions() []func(c *tls.Config) { + // If the enable-http2 flag is false (the default), HTTP/2 should be disabled + // due to its vulnerabilities. More specifically, disabling HTTP/2 will + // prevent the servers from being vulnerable to the HTTP/2 Stream Cancellation and + // Rapid Reset CVEs. For more information see: + // - https://github.com/advisories/GHSA-qppj-fm5r-hxr3 + // - https://github.com/advisories/GHSA-4374-p667-p6c8 + disableHTTP2 := func(c *tls.Config) { + logger.Info("disabling http/2") + c.NextProtos = []string{"http/1.1"} + } + + tlsOpts := []func(*tls.Config){} + if !enableHTTP2 { + tlsOpts = append(tlsOpts, disableHTTP2) + } + return tlsOpts +} + +// newCacheOptions creates and returns a cache.Options instance configured with default namespaces and object caching settings.
+func newCacheOptions() cache.Options { + defaultNamespaces := make(map[string]cache.Config) + if util.ContainsString(namespaces, cache.AllNamespaces) { + defaultNamespaces[cache.AllNamespaces] = cache.Config{} + } else { + for _, ns := range namespaces { + defaultNamespaces[ns] = cache.Config{} + } + } + + byObject := map[client.Object]cache.ByObject{ + &corev1.Pod{}: { + Label: labels.SelectorFromSet(labels.Set{ + common.LabelLaunchedBySparkOperator: "true", + }), + }, + &v1beta2.SparkApplication{}: {}, + &v1beta2.ScheduledSparkApplication{}: {}, + &admissionregistrationv1.MutatingWebhookConfiguration{}: { + Field: fields.SelectorFromSet(fields.Set{ + "metadata.name": mutatingWebhookName, + }), + }, + &admissionregistrationv1.ValidatingWebhookConfiguration{}: { + Field: fields.SelectorFromSet(fields.Set{ + "metadata.name": validatingWebhookName, + }), + }, + } + + if enableResourceQuotaEnforcement { + byObject[&corev1.ResourceQuota{}] = cache.ByObject{} + } + + options := cache.Options{ + Scheme: scheme, + DefaultNamespaces: defaultNamespaces, + ByObject: byObject, + } + + return options +} diff --git a/codecov.yaml b/codecov.yaml new file mode 100644 index 0000000000..4e7d7af670 --- /dev/null +++ b/codecov.yaml @@ -0,0 +1,10 @@ +coverage: + status: + project: + default: + threshold: 0.1% + patch: + default: + target: 60% +ignore: + - "**/*_generated.*" diff --git a/config/certmanager/certificate.yaml b/config/certmanager/certificate.yaml new file mode 100644 index 0000000000..fbfd47519a --- /dev/null +++ b/config/certmanager/certificate.yaml @@ -0,0 +1,35 @@ +# The following manifests contain a self-signed issuer CR and a certificate CR. +# More document can be found at https://docs.cert-manager.io +# WARNING: Targets CertManager v1.0. Check https://cert-manager.io/docs/installation/upgrading/ for breaking changes. 
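# Note (editorial, not part of this patch, assuming standard cert-manager behavior):
# the self-signed Issuer below signs the serving Certificate for the webhook
# Service DNS names; cert-manager writes the resulting key pair into the
# webhook-server-cert Secret, which manager_webhook_patch.yaml mounts into the
# manager pod at /tmp/k8s-webhook-server/serving-certs.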
+apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + labels: + app.kubernetes.io/name: spark-operator + app.kubernetes.io/managed-by: kustomize + name: selfsigned-issuer + namespace: system +spec: + selfSigned: {} +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + labels: + app.kubernetes.io/name: certificate + app.kubernetes.io/instance: serving-cert + app.kubernetes.io/component: certificate + app.kubernetes.io/created-by: spark-operator + app.kubernetes.io/part-of: spark-operator + app.kubernetes.io/managed-by: kustomize + name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml + namespace: system +spec: + # SERVICE_NAME and SERVICE_NAMESPACE will be substituted by kustomize + dnsNames: + - SERVICE_NAME.SERVICE_NAMESPACE.svc + - SERVICE_NAME.SERVICE_NAMESPACE.svc.cluster.local + issuerRef: + kind: Issuer + name: selfsigned-issuer + secretName: webhook-server-cert # this secret will not be prefixed, since it's not managed by kustomize diff --git a/config/certmanager/kustomization.yaml b/config/certmanager/kustomization.yaml new file mode 100644 index 0000000000..bebea5a595 --- /dev/null +++ b/config/certmanager/kustomization.yaml @@ -0,0 +1,5 @@ +resources: +- certificate.yaml + +configurations: +- kustomizeconfig.yaml diff --git a/config/certmanager/kustomizeconfig.yaml b/config/certmanager/kustomizeconfig.yaml new file mode 100644 index 0000000000..cf6f89e889 --- /dev/null +++ b/config/certmanager/kustomizeconfig.yaml @@ -0,0 +1,8 @@ +# This configuration is for teaching kustomize how to update name ref substitution +nameReference: +- kind: Issuer + group: cert-manager.io + fieldSpecs: + - kind: Certificate + group: cert-manager.io + path: spec/issuerRef/name diff --git a/config/crd/bases/sparkoperator.k8s.io_scheduledsparkapplications.yaml b/config/crd/bases/sparkoperator.k8s.io_scheduledsparkapplications.yaml index b37b7a0008..7f77e1bb92 100644 --- a/config/crd/bases/sparkoperator.k8s.io_scheduledsparkapplications.yaml +++ b/config/crd/bases/sparkoperator.k8s.io_scheduledsparkapplications.yaml @@ -36,6 +36,8 @@ spec: name: v1beta2 schema: openAPIV3Schema: + description: ScheduledSparkApplication is the Schema for the scheduledsparkapplications + API. properties: apiVersion: description: |- @@ -55,6 +57,8 @@ spec: metadata: type: object spec: + description: ScheduledSparkApplicationSpec defines the desired state of + ScheduledSparkApplication. properties: concurrencyPolicy: description: ConcurrencyPolicy is the policy governing concurrent @@ -4883,7 +4887,7 @@ spec: serviceLabels: additionalProperties: type: string - description: ServiceLables is a map of key,value pairs of + description: ServiceLabels is a map of key,value pairs of labels that might be added to the service object. type: object servicePort: @@ -9820,7 +9824,7 @@ spec: serviceLabels: additionalProperties: type: string - description: ServiceLables is a map of key,value pairs of + description: ServiceLabels is a map of key,value pairs of labels that might be added to the service object. type: object servicePort: @@ -11563,6 +11567,8 @@ spec: - template type: object status: + description: ScheduledSparkApplicationStatus defines the observed state + of ScheduledSparkApplication. properties: lastRun: description: LastRun is the time when the last run of the application @@ -11601,9 +11607,6 @@ spec: application. 
type: string type: object - required: - - metadata - - spec type: object served: true storage: true diff --git a/config/crd/bases/sparkoperator.k8s.io_sparkapplications.yaml b/config/crd/bases/sparkoperator.k8s.io_sparkapplications.yaml index c23d69264a..afc07c2530 100644 --- a/config/crd/bases/sparkoperator.k8s.io_sparkapplications.yaml +++ b/config/crd/bases/sparkoperator.k8s.io_sparkapplications.yaml @@ -36,8 +36,7 @@ spec: name: v1beta2 schema: openAPIV3Schema: - description: SparkApplication represents a Spark application running on and - using Kubernetes as a cluster manager. + description: SparkApplication is the Schema for the sparkapplications API properties: apiVersion: description: |- @@ -58,7 +57,7 @@ spec: type: object spec: description: |- - SparkApplicationSpec describes the specification of a Spark application using Kubernetes as a cluster manager. + SparkApplicationSpec defines the desired state of SparkApplication It carries every pieces of information a spark-submit command takes and recognizes. properties: arguments: @@ -4827,7 +4826,7 @@ spec: serviceLabels: additionalProperties: type: string - description: ServiceLables is a map of key,value pairs of labels + description: ServiceLabels is a map of key,value pairs of labels that might be added to the service object. type: object servicePort: @@ -9734,7 +9733,7 @@ spec: serviceLabels: additionalProperties: type: string - description: ServiceLables is a map of key,value pairs of labels + description: ServiceLabels is a map of key,value pairs of labels that might be added to the service object. type: object servicePort: @@ -11466,8 +11465,7 @@ spec: - type type: object status: - description: SparkApplicationStatus describes the current status of a - Spark application. + description: SparkApplicationStatus defines the observed state of SparkApplication properties: applicationState: description: AppState tells the overall application state. @@ -11487,6 +11485,8 @@ spec: podName: type: string webUIAddress: + description: UI Details for the UI created via ClusterIP service + accessible from within the cluster. type: string webUIIngressAddress: type: string @@ -11494,8 +11494,6 @@ spec: description: Ingress Details if an ingress for the UI was created. type: string webUIPort: - description: UI Details for the UI created via ClusterIP service - accessible from within the cluster. format: int32 type: integer webUIServiceName: @@ -11543,9 +11541,6 @@ spec: required: - driverInfo type: object - required: - - metadata - - spec type: object served: true storage: true diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 44fe0ace56..3d5605b3ea 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -2,23 +2,25 @@ # since it depends on service name and namespace that are out of this kustomize package. # It should be run by config/default resources: -- bases/sparkoperator.k8s.io_sparkapplications.yaml - bases/sparkoperator.k8s.io_scheduledsparkapplications.yaml +- bases/sparkoperator.k8s.io_sparkapplications.yaml # +kubebuilder:scaffold:crdkustomizeresource patches: # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. # patches here are for enabling the conversion webhook for each CRD +- path: patches/webhook_in_sparkapplications.yaml +- path: patches/webhook_in_sparkapplications.yaml # +kubebuilder:scaffold:crdkustomizewebhookpatch # [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix. 
# patches here are for enabling the CA injection for each CRD -#- path: patches/cainjection_in_sparkapplications.yaml #- path: patches/cainjection_in_scheduledsparkapplications.yaml +#- path: patches/cainjection_in_sparkapplications.yaml # +kubebuilder:scaffold:crdkustomizecainjectionpatch # [WEBHOOK] To enable webhook, uncomment the following section # the following config is for teaching kustomize how to do kustomization for CRDs. -#configurations: -#- kustomizeconfig.yaml +configurations: +- kustomizeconfig.yaml diff --git a/config/crd/patches/cainjection_in_sparkapplications.yaml b/config/crd/patches/cainjection_in_sparkapplications.yaml new file mode 100644 index 0000000000..80a2b6df86 --- /dev/null +++ b/config/crd/patches/cainjection_in_sparkapplications.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME + name: sparkapplications.sparkoperator.k8s.io diff --git a/config/crd/patches/webhook_in_sparkapplications.yaml b/config/crd/patches/webhook_in_sparkapplications.yaml new file mode 100644 index 0000000000..35f652608e --- /dev/null +++ b/config/crd/patches/webhook_in_sparkapplications.yaml @@ -0,0 +1,16 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: sparkapplications.sparkoperator.k8s.io +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/config/default/manager_webhook_patch.yaml b/config/default/manager_webhook_patch.yaml new file mode 100644 index 0000000000..738de350b7 --- /dev/null +++ b/config/default/manager_webhook_patch.yaml @@ -0,0 +1,23 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager + ports: + - containerPort: 9443 + name: webhook-server + protocol: TCP + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: cert + readOnly: true + volumes: + - name: cert + secret: + defaultMode: 420 + secretName: webhook-server-cert diff --git a/config/default/webhookcainjection_patch.yaml b/config/default/webhookcainjection_patch.yaml new file mode 100644 index 0000000000..58549af995 --- /dev/null +++ b/config/default/webhookcainjection_patch.yaml @@ -0,0 +1,25 @@ +# This patch add annotation to admission webhook config and +# CERTIFICATE_NAMESPACE and CERTIFICATE_NAME will be substituted by kustomize +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + labels: + app.kubernetes.io/name: spark-operator + app.kubernetes.io/managed-by: kustomize + name: mutating-webhook-configuration + annotations: + cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + app.kubernetes.io/name: validatingwebhookconfiguration + app.kubernetes.io/instance: validating-webhook-configuration + app.kubernetes.io/component: webhook + app.kubernetes.io/created-by: spark-operator + app.kubernetes.io/part-of: spark-operator + app.kubernetes.io/managed-by: kustomize + name: validating-webhook-configuration + annotations: + cert-manager.io/inject-ca-from: 
CERTIFICATE_NAMESPACE/CERTIFICATE_NAME diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml new file mode 100644 index 0000000000..4a9d1d526a --- /dev/null +++ b/config/rbac/role.yaml @@ -0,0 +1,142 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: spark-operator-controller +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - delete + - get + - list + - patch + - update +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update +- apiGroups: + - "" + resources: + - nodes + verbs: + - get +- apiGroups: + - "" + resources: + - pods + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - resourcequotas + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - services + verbs: + - create + - delete + - get +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get +- apiGroups: + - extensions + resources: + - ingresses + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - sparkoperator.k8s.io + resources: + - scheduledsparkapplications + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - sparkoperator.k8s.io + resources: + - scheduledsparkapplications/finalizers + verbs: + - update +- apiGroups: + - sparkoperator.k8s.io + resources: + - scheduledsparkapplications/status + verbs: + - get + - patch + - update +- apiGroups: + - sparkoperator.k8s.io + resources: + - sparkapplications + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - sparkoperator.k8s.io + resources: + - sparkapplications/finalizers + verbs: + - update +- apiGroups: + - sparkoperator.k8s.io + resources: + - sparkapplications/status + verbs: + - get + - patch + - update diff --git a/config/rbac/scheduledsparkapplication_editor_role.yaml b/config/rbac/scheduledsparkapplication_editor_role.yaml new file mode 100644 index 0000000000..5bae907302 --- /dev/null +++ b/config/rbac/scheduledsparkapplication_editor_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to edit scheduledsparkapplications. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: spark-operator + app.kubernetes.io/managed-by: kustomize + name: scheduledsparkapplication-editor-role +rules: +- apiGroups: + - sparkoperator.k8s.io + resources: + - scheduledsparkapplications + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - sparkoperator.k8s.io + resources: + - scheduledsparkapplications/status + verbs: + - get diff --git a/config/rbac/scheduledsparkapplication_viewer_role.yaml b/config/rbac/scheduledsparkapplication_viewer_role.yaml new file mode 100644 index 0000000000..29ee54184c --- /dev/null +++ b/config/rbac/scheduledsparkapplication_viewer_role.yaml @@ -0,0 +1,23 @@ +# permissions for end users to view scheduledsparkapplications. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: spark-operator + app.kubernetes.io/managed-by: kustomize + name: scheduledsparkapplication-viewer-role +rules: +- apiGroups: + - sparkoperator.k8s.io + resources: + - scheduledsparkapplications + verbs: + - get + - list + - watch +- apiGroups: + - sparkoperator.k8s.io + resources: + - scheduledsparkapplications/status + verbs: + - get diff --git a/config/rbac/sparkapplication_editor_role.yaml b/config/rbac/sparkapplication_editor_role.yaml new file mode 100644 index 0000000000..575c2be6e5 --- /dev/null +++ b/config/rbac/sparkapplication_editor_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to edit sparkapplications. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: spark-operator + app.kubernetes.io/managed-by: kustomize + name: sparkapplication-editor-role +rules: +- apiGroups: + - sparkoperator.k8s.io + resources: + - sparkapplications + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - sparkoperator.k8s.io + resources: + - sparkapplications/status + verbs: + - get diff --git a/config/rbac/sparkapplication_viewer_role.yaml b/config/rbac/sparkapplication_viewer_role.yaml new file mode 100644 index 0000000000..4738d708d3 --- /dev/null +++ b/config/rbac/sparkapplication_viewer_role.yaml @@ -0,0 +1,23 @@ +# permissions for end users to view sparkapplications. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: spark-operator + app.kubernetes.io/managed-by: kustomize + name: sparkapplication-viewer-role +rules: +- apiGroups: + - sparkoperator.k8s.io + resources: + - sparkapplications + verbs: + - get + - list + - watch +- apiGroups: + - sparkoperator.k8s.io + resources: + - sparkapplications/status + verbs: + - get diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml new file mode 100644 index 0000000000..e139c240b2 --- /dev/null +++ b/config/samples/kustomization.yaml @@ -0,0 +1,7 @@ +## Append samples of your project ## +resources: +- v1beta1_sparkapplication.yaml +- v1beta1_scheduledsparkapplication.yaml +- v1beta2_sparkapplication.yaml +- v1beta2_scheduledsparkapplication.yaml +# +kubebuilder:scaffold:manifestskustomizesamples diff --git a/config/samples/v1beta1_scheduledsparkapplication.yaml b/config/samples/v1beta1_scheduledsparkapplication.yaml new file mode 100644 index 0000000000..bc628f4892 --- /dev/null +++ b/config/samples/v1beta1_scheduledsparkapplication.yaml @@ -0,0 +1,9 @@ +apiVersion: sparkoperator.k8s.io/v1beta1 +kind: ScheduledSparkApplication +metadata: + labels: + app.kubernetes.io/name: spark-operator + app.kubernetes.io/managed-by: kustomize + name: scheduledsparkapplication-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/v1beta1_sparkapplication.yaml b/config/samples/v1beta1_sparkapplication.yaml new file mode 100644 index 0000000000..d6c3e25b47 --- /dev/null +++ b/config/samples/v1beta1_sparkapplication.yaml @@ -0,0 +1,23 @@ +apiVersion: sparkoperator.k8s.io/v1beta1 +kind: SparkApplication +metadata: + labels: + app.kubernetes.io/name: spark-operator + app.kubernetes.io/managed-by: kustomize + name: sparkapplication-sample +spec: + type: Scala + mode: cluster + image: spark:3.5.0 + imagePullPolicy: IfNotPresent + mainClass: org.apache.spark.examples.SparkPi + mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.5.0.jar 
+ sparkVersion: 3.5.0 + driver: + labels: + version: 3.5.0 + serviceAccount: spark-operator-spark + executor: + labels: + version: 3.5.0 + instances: 1 diff --git a/config/samples/v1beta2_scheduledsparkapplication.yaml b/config/samples/v1beta2_scheduledsparkapplication.yaml new file mode 100644 index 0000000000..294430f576 --- /dev/null +++ b/config/samples/v1beta2_scheduledsparkapplication.yaml @@ -0,0 +1,34 @@ +apiVersion: sparkoperator.k8s.io/v1beta2 +kind: ScheduledSparkApplication +metadata: + labels: + app.kubernetes.io/name: spark-operator + app.kubernetes.io/managed-by: kustomize + name: scheduledsparkapplication-sample +spec: + schedule: "@every 3m" + concurrencyPolicy: Allow + template: + type: Scala + mode: cluster + image: spark:3.5.0 + imagePullPolicy: IfNotPresent + mainClass: org.apache.spark.examples.SparkPi + mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.5.0.jar + sparkVersion: 3.5.0 + restartPolicy: + type: Never + driver: + labels: + version: 3.5.0 + cores: 1 + coreLimit: 1200m + memory: 512m + serviceAccount: spark-operator-spark + executor: + labels: + version: 3.5.0 + instances: 1 + cores: 1 + coreLimit: 1200m + memory: 512m diff --git a/config/samples/v1beta2_sparkapplication.yaml b/config/samples/v1beta2_sparkapplication.yaml new file mode 100644 index 0000000000..70f4152b99 --- /dev/null +++ b/config/samples/v1beta2_sparkapplication.yaml @@ -0,0 +1,23 @@ +apiVersion: sparkoperator.k8s.io/v1beta2 +kind: SparkApplication +metadata: + labels: + app.kubernetes.io/name: spark-operator + app.kubernetes.io/managed-by: kustomize + name: sparkapplication-sample +spec: + type: Scala + mode: cluster + image: spark:3.5.0 + imagePullPolicy: IfNotPresent + mainClass: org.apache.spark.examples.SparkPi + mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.5.0.jar + sparkVersion: 3.5.0 + driver: + labels: + version: 3.5.0 + serviceAccount: spark-operator-spark + executor: + labels: + version: 3.5.0 + instances: 1 diff --git a/config/webhook/kustomization.yaml b/config/webhook/kustomization.yaml new file mode 100644 index 0000000000..9cf26134e4 --- /dev/null +++ b/config/webhook/kustomization.yaml @@ -0,0 +1,6 @@ +resources: +- manifests.yaml +- service.yaml + +configurations: +- kustomizeconfig.yaml diff --git a/config/webhook/kustomizeconfig.yaml b/config/webhook/kustomizeconfig.yaml new file mode 100644 index 0000000000..206316e54f --- /dev/null +++ b/config/webhook/kustomizeconfig.yaml @@ -0,0 +1,22 @@ +# the following config is for teaching kustomize where to look at when substituting nameReference. +# It requires kustomize v2.1.0 or newer to work properly. 
+nameReference: +- kind: Service + version: v1 + fieldSpecs: + - kind: MutatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/name + - kind: ValidatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/name + +namespace: +- kind: MutatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/namespace + create: true +- kind: ValidatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/namespace + create: true diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml new file mode 100644 index 0000000000..d98b6ec080 --- /dev/null +++ b/config/webhook/manifests.yaml @@ -0,0 +1,119 @@ +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: mutating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate--v1-pod + failurePolicy: Fail + matchPolicy: Exact + name: mutate-pod.sparkoperator.k8s.io + reinvocationPolicy: Never + rules: + - apiGroups: + - "" + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - pods + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-sparkoperator-k8s-io-v1beta2-sparkapplication + failurePolicy: Fail + matchPolicy: Exact + name: mutate-sparkapplication.sparkoperator.k8s.io + reinvocationPolicy: Never + rules: + - apiGroups: + - sparkoperator.k8s.io + apiVersions: + - v1beta2 + operations: + - CREATE + - UPDATE + resources: + - sparkapplications + sideEffects: None +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: validating-webhook-configuration +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-sparkoperator-k8s-io-v1beta2-sparkapplication + failurePolicy: Fail + matchPolicy: Exact + name: mutate-scheduledsparkapplication.sparkoperator.k8s.io + rules: + - apiGroups: + - sparkoperator.k8s.io + apiVersions: + - v1beta2 + operations: + - CREATE + - UPDATE + resources: + - scheduledsparkapplications + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-sparkoperator-k8s-io-v1beta2-scheduledsparkapplication + failurePolicy: Fail + matchPolicy: Exact + name: validate-scheduledsparkapplication.sparkoperator.k8s.io + rules: + - apiGroups: + - sparkoperator.k8s.io + apiVersions: + - v1beta2 + operations: + - CREATE + - UPDATE + resources: + - scheduledsparkapplications + sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-sparkoperator-k8s-io-v1beta2-sparkapplication + failurePolicy: Fail + matchPolicy: Exact + name: validate-sparkapplication.sparkoperator.k8s.io + rules: + - apiGroups: + - sparkoperator.k8s.io + apiVersions: + - v1beta2 + operations: + - CREATE + - UPDATE + resources: + - sparkapplications + sideEffects: None diff --git a/config/webhook/service.yaml b/config/webhook/service.yaml new file mode 100644 index 0000000000..f171f47f7f --- /dev/null +++ b/config/webhook/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/name: spark-operator + 
app.kubernetes.io/managed-by: kustomize
+  name: webhook-service
+  namespace: system
+spec:
+  ports:
+    - port: 443
+      protocol: TCP
+      targetPort: 9443
+  selector:
+    control-plane: controller-manager
diff --git a/docs/api-docs.md b/docs/api-docs.md
index 69695fc062..6117b6a23b 100644
--- a/docs/api-docs.md
+++ b/docs/api-docs.md
[The docs/api-docs.md hunks are a regenerated HTML API reference whose table markup did not survive plain-text extraction, so only a summary is kept here. The regenerated page sorts the v1beta2 type entries alphabetically (ApplicationState through SparkApplicationStatus); documents the new lifecycle field, of type Kubernetes core/v1.Lifecycle, on both DriverSpec and ExecutorSpec ("Lifecycle for running preStop or postStart commands"); rewords the CRD descriptions in kubebuilder style ("SparkApplication is the Schema for the sparkapplications API", "SparkApplicationSpec defines the desired state of SparkApplication", "SparkApplicationStatus defines the observed state of SparkApplication", and likewise for ScheduledSparkApplication); adds the "" and "Validating" values to ScheduleState; fixes the "ServiceLables" typo in DriverIngressConfiguration; renames the SecretType value descriptions to the SecretTypeGCPServiceAccount, SecretTypeGeneric, and SecretTypeHadoopDelegationToken constants; and updates Go package references from github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2 to github.com/kubeflow/spark-operator/api/v1beta2.]
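The regenerated reference above documents the lifecycle field, a stock Kubernetes core/v1 Lifecycle object, on both DriverSpec and ExecutorSpec. As a usage illustration, here is a minimal SparkApplication sketch; the application name, hook commands, and log paths are illustrative placeholders, not manifests from the repository:

apiVersion: sparkoperator.k8s.io/v1beta2
kind: SparkApplication
metadata:
  name: spark-pi-lifecycle  # hypothetical name for this sketch
  namespace: default
spec:
  type: Scala
  mode: cluster
  image: spark:3.5.0
  imagePullPolicy: IfNotPresent
  mainClass: org.apache.spark.examples.SparkPi
  mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.5.0.jar
  sparkVersion: 3.5.0
  restartPolicy:
    type: Never
  driver:
    labels:
      version: 3.5.0
    serviceAccount: spark-operator-spark
    lifecycle:
      preStop:
        exec:
          # illustrative hook: record a marker before the driver container stops
          command: ["/bin/sh", "-c", "echo 'driver stopping' > /tmp/prestop.log"]
  executor:
    labels:
      version: 3.5.0
    instances: 1
    lifecycle:
      postStart:
        exec:
          # illustrative hook: record a marker once each executor container starts
          command: ["/bin/sh", "-c", "echo 'executor started' > /tmp/poststart.log"]

Because the field is the standard core/v1 Lifecycle type, httpGet handlers are equally valid where an exec handler is shown here.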
diff --git a/entrypoint.sh b/entrypoint.sh
index f3c83ebade..0ca8730123 100755
--- a/entrypoint.sh
+++ b/entrypoint.sh
@@ -4,21 +4,18 @@ set -ex
 
 # Check whether there is a passwd entry for the container UID
-myuid=$(id -u)
-mygid=$(id -g)
+uid=$(id -u)
+gid=$(id -g)
+
 # turn off -e for getent because it will return error code in anonymous uid case
 set +e
-uidentry=$(getent passwd $myuid)
+uidentry=$(getent passwd $uid)
 set -e
 
-echo $myuid
-echo $mygid
-echo $uidentry
-
 # If there is no passwd entry for the container UID, attempt to create one
 if [[ -z "$uidentry" ]] ; then
     if [[ -w /etc/passwd ]] ; then
-        echo "$myuid:x:$myuid:$mygid:anonymous uid:$SPARK_HOME:/bin/false" >> /etc/passwd
+        echo "$uid:x:$uid:$gid:anonymous uid:$SPARK_HOME:/bin/false" >> /etc/passwd
     else
         echo "Container ENTRYPOINT failed to add passwd entry for anonymous UID"
     fi
diff --git a/examples/spark-operator-with-metrics.yaml b/examples/spark-operator-with-metrics.yaml
deleted file mode 100644
index 3513b506a9..0000000000
--- a/examples/spark-operator-with-metrics.yaml
+++ /dev/null
@@ -1,53 +0,0 @@
-#
-# Copyright 2018 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: sparkoperator
-  namespace: spark-operator
-  labels:
-    app.kubernetes.io/name: sparkoperator
-    app.kubernetes.io/version: v1beta2-1.3.0-3.1.1
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app.kubernetes.io/name: sparkoperator
-      app.kubernetes.io/version: v1beta2-1.3.0-3.1.1
-  strategy:
-    type: Recreate
-  template:
-    metadata:
-      annotations:
-        prometheus.io/scrape: "true"
-        prometheus.io/port: "10254"
-        prometheus.io/path: "/metrics"
-      labels:
-        app.kubernetes.io/name: sparkoperator
-        app.kubernetes.io/version: v1beta2-1.3.0-3.1.1
-    spec:
-      serviceAccountName: sparkoperator
-      containers:
-      - name: sparkoperator
-        image: gcr.io/spark-operator/spark-operator:v1beta2-1.3.0-3.1.1
-        imagePullPolicy: Always
-        ports:
-        - containerPort: 10254
-        args:
-        - -logtostderr
-        - -enable-metrics=true
-        - -metrics-labels=app_type
\ No newline at end of file
diff --git a/examples/spark-operator-with-webhook.yaml b/examples/spark-operator-with-webhook.yaml
deleted file mode 100644
index 25fa81d5e4..0000000000
--- a/examples/spark-operator-with-webhook.yaml
+++ /dev/null
@@ -1,94 +0,0 @@
-#
-# Copyright 2017 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and -# limitations under the License. -# - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: sparkoperator - namespace: spark-operator - labels: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 - strategy: - type: Recreate - template: - metadata: - labels: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 - spec: - serviceAccountName: sparkoperator - volumes: - - name: webhook-certs - secret: - secretName: spark-webhook-certs - containers: - - name: sparkoperator - image: gcr.io/spark-operator/spark-operator:v1beta2-1.3.0-3.1.1 - imagePullPolicy: Always - volumeMounts: - - name: webhook-certs - mountPath: /etc/webhook-certs - ports: - - containerPort: 8080 - args: - - -logtostderr - - -enable-webhook=true - - -v=2 ---- -apiVersion: batch/v1 -kind: Job -metadata: - name: sparkoperator-init - namespace: spark-operator - labels: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 -spec: - backoffLimit: 3 - template: - metadata: - labels: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 - spec: - serviceAccountName: sparkoperator - restartPolicy: Never - containers: - - name: main - image: gcr.io/spark-operator/spark-operator:v1beta2-1.3.0-3.1.1 - imagePullPolicy: IfNotPresent - command: ["/usr/bin/gencerts.sh", "-p"] ---- -kind: Service -apiVersion: v1 -metadata: - name: spark-webhook - namespace: spark-operator -spec: - ports: - - port: 443 - targetPort: 8080 - name: webhook - selector: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 diff --git a/examples/spark-pi-configmap.yaml b/examples/spark-pi-configmap.yaml index a6a5dc023b..e2b4bc556f 100644 --- a/examples/spark-pi-configmap.yaml +++ b/examples/spark-pi-configmap.yaml @@ -13,41 +13,41 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-apiVersion: "sparkoperator.k8s.io/v1beta2" +apiVersion: sparkoperator.k8s.io/v1beta2 kind: SparkApplication metadata: - name: spark-pi + name: spark-pi-configmap namespace: default spec: type: Scala mode: cluster - image: "spark:3.5.0" - imagePullPolicy: Always + image: spark:3.5.0 + imagePullPolicy: IfNotPresent mainClass: org.apache.spark.examples.SparkPi - mainApplicationFile: "local:///opt/spark/examples/jars/spark-examples_2.12-3.5.0.jar" - sparkVersion: "3.5.0" + mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.5.0.jar + sparkVersion: 3.5.0 restartPolicy: type: Never volumes: - - name: config-vol - configMap: - name: dummy-cm + - name: config-vol + configMap: + name: test-configmap driver: - cores: 1 - coreLimit: "1200m" - memory: "512m" labels: version: 3.5.0 + cores: 1 + coreLimit: 1200m + memory: 512m serviceAccount: spark-operator-spark volumeMounts: - - name: config-vol - mountPath: /opt/spark/mycm + - name: config-vol + mountPath: /opt/spark/config executor: - cores: 1 - instances: 1 - memory: "512m" labels: version: 3.5.0 + instances: 1 + cores: 1 + memory: 512m volumeMounts: - - name: config-vol - mountPath: /opt/spark/mycm + - name: config-vol + mountPath: /opt/spark/config diff --git a/examples/spark-pi-custom-resource.yaml b/examples/spark-pi-custom-resource.yaml index 1e70098d2e..83df405e10 100644 --- a/examples/spark-pi-custom-resource.yaml +++ b/examples/spark-pi-custom-resource.yaml @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -apiVersion: "sparkoperator.k8s.io/v1beta2" +apiVersion: sparkoperator.k8s.io/v1beta2 kind: SparkApplication metadata: name: spark-pi-custom-resource @@ -21,38 +21,34 @@ metadata: spec: type: Scala mode: cluster - image: "spark:3.5.0" - imagePullPolicy: Always + image: spark:3.5.0 + imagePullPolicy: IfNotPresent mainClass: org.apache.spark.examples.SparkPi - mainApplicationFile: "local:///opt/spark/examples/jars/spark-examples_2.12-3.5.0.jar" - sparkVersion: "3.5.0" + mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.5.0.jar + sparkVersion: 3.5.0 restartPolicy: type: Never volumes: - - name: "test-volume" - hostPath: - path: "/tmp" - type: Directory + - name: test-volume + hostPath: + path: /tmp + type: Directory driver: - cores: 1 - coreLimit: "1200m" - memory: "512m" labels: version: 3.5.0 + cores: 1 + coreLimit: 1200m + memory: 512m serviceAccount: spark-operator-spark volumeMounts: - - name: "test-volume" - mountPath: "/tmp" + - name: test-volume + mountPath: /tmp executor: - cores: 1 - instances: 1 - memory: "512m" labels: version: 3.5.0 + instances: 1 + cores: 1 + memory: 512m volumeMounts: - - name: "test-volume" - mountPath: "/tmp" - batchSchedulerOptions: - resources: - cpu: "2" - memory: "4096m" + - name: test-volume + mountPath: /tmp diff --git a/examples/spark-pi-dynamic-allocation.yaml b/examples/spark-pi-dynamic-allocation.yaml new file mode 100644 index 0000000000..800313914f --- /dev/null +++ b/examples/spark-pi-dynamic-allocation.yaml @@ -0,0 +1,49 @@ +# +# Copyright 2024 The Kubeflow authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: sparkoperator.k8s.io/v1beta2 +kind: SparkApplication +metadata: + name: spark-pi-dynamic-allocation + namespace: default +spec: + type: Scala + mode: cluster + image: spark:3.5.0 + imagePullPolicy: IfNotPresent + mainClass: org.apache.spark.examples.SparkPi + mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.5.0.jar + sparkVersion: 3.5.0 + arguments: + - "50000" + driver: + labels: + version: 3.5.0 + cores: 1 + coreLimit: 1200m + memory: 512m + serviceAccount: spark-operator-spark + executor: + labels: + version: 3.5.0 + instances: 1 + cores: 1 + coreLimit: 1200m + memory: 512m + dynamicAllocation: + enabled: true + initialExecutors: 2 + maxExecutors: 5 + minExecutors: 1 diff --git a/examples/spark-pi-prometheus.yaml b/examples/spark-pi-prometheus.yaml index b47de1db60..29a447061c 100644 --- a/examples/spark-pi-prometheus.yaml +++ b/examples/spark-pi-prometheus.yaml @@ -14,7 +14,7 @@ # limitations under the License. # -apiVersion: "sparkoperator.k8s.io/v1beta2" +apiVersion: sparkoperator.k8s.io/v1beta2 kind: SparkApplication metadata: name: spark-pi @@ -22,31 +22,31 @@ metadata: spec: type: Scala mode: cluster - image: "gcr.io/spark-operator/spark:v3.1.1-gcs-prometheus" + image: gcr.io/spark-operator/spark:v3.1.1-gcs-prometheus imagePullPolicy: Always mainClass: org.apache.spark.examples.SparkPi - mainApplicationFile: "local:///opt/spark/examples/jars/spark-examples_2.12-3.1.1.jar" + mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.1.1.jar arguments: - - "100000" - sparkVersion: "3.1.1" + - "100000" + sparkVersion: 3.1.1 restartPolicy: type: Never driver: cores: 1 - coreLimit: "1200m" - memory: "512m" + coreLimit: 1200m + memory: 512m labels: version: 3.1.1 serviceAccount: spark-operator-spark executor: cores: 1 instances: 1 - memory: "512m" + memory: 512m labels: version: 3.1.1 monitoring: exposeDriverMetrics: true exposeExecutorMetrics: true prometheus: - jmxExporterJar: "/prometheus/jmx_prometheus_javaagent-0.11.0.jar" + jmxExporterJar: /prometheus/jmx_prometheus_javaagent-0.11.0.jar port: 8090 diff --git a/examples/spark-py-pi.yaml b/examples/spark-pi-python.yaml similarity index 72% rename from examples/spark-py-pi.yaml rename to examples/spark-pi-python.yaml index 11a193cfd0..5d0a7f2736 100644 --- a/examples/spark-py-pi.yaml +++ b/examples/spark-pi-python.yaml @@ -13,35 +13,30 @@ # See the License for the specific language governing permissions and # limitations under the License. 
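The new `spark-pi-dynamic-allocation.yaml` above is the first example to exercise the `dynamicAllocation` block: the operator translates it into the corresponding `spark.dynamicAllocation.*` submission properties (on Kubernetes this relies on shuffle tracking, since no external shuffle service is available). With a 50000-partition Pi job, the executor count should move between `minExecutors: 1` and `maxExecutors: 5`:

```sh
kubectl apply -f examples/spark-pi-dynamic-allocation.yaml
# spark-role=executor is the label Spark itself sets on executor pods.
kubectl get pods -l spark-role=executor --watch
```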
-apiVersion: "sparkoperator.k8s.io/v1beta2" +apiVersion: sparkoperator.k8s.io/v1beta2 kind: SparkApplication metadata: - name: pyspark-pi + name: spark-pi-python namespace: default spec: type: Python pythonVersion: "3" mode: cluster - image: "spark:3.5.0" - imagePullPolicy: Always + image: spark:3.5.0 + imagePullPolicy: IfNotPresent mainApplicationFile: local:///opt/spark/examples/src/main/python/pi.py - sparkVersion: "3.5.0" - restartPolicy: - type: OnFailure - onFailureRetries: 3 - onFailureRetryInterval: 10 - onSubmissionFailureRetries: 5 - onSubmissionFailureRetryInterval: 20 + sparkVersion: 3.5.0 driver: - cores: 1 - coreLimit: "1200m" - memory: "512m" labels: version: 3.5.0 + cores: 1 + coreLimit: 1200m + memory: 512m serviceAccount: spark-operator-spark executor: - cores: 1 - instances: 1 - memory: "512m" labels: version: 3.5.0 + instances: 1 + cores: 1 + coreLimit: 1200m + memory: 512m diff --git a/examples/spark-pi-schedule.yaml b/examples/spark-pi-scheduled.yaml similarity index 76% rename from examples/spark-pi-schedule.yaml rename to examples/spark-pi-scheduled.yaml index 576a77361d..f74143e7c1 100644 --- a/examples/spark-pi-schedule.yaml +++ b/examples/spark-pi-scheduled.yaml @@ -14,34 +14,35 @@ # limitations under the License. # -apiVersion: "sparkoperator.k8s.io/v1beta2" +apiVersion: sparkoperator.k8s.io/v1beta2 kind: ScheduledSparkApplication metadata: name: spark-pi-scheduled namespace: default spec: - schedule: "@every 5m" + schedule: "@every 3m" concurrencyPolicy: Allow template: type: Scala mode: cluster - image: "spark:3.5.0" - imagePullPolicy: Always + image: spark:3.5.0 + imagePullPolicy: IfNotPresent mainClass: org.apache.spark.examples.SparkPi - mainApplicationFile: "local:///opt/spark/examples/jars/spark-examples_2.12-3.5.0.jar" - sparkVersion: "3.5.0" + mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.5.0.jar + sparkVersion: 3.5.0 restartPolicy: type: Never driver: - cores: 1 - coreLimit: "1200m" - memory: "512m" labels: version: 3.5.0 + cores: 1 + coreLimit: 1200m + memory: 512m serviceAccount: spark-operator-spark executor: - cores: 1 - instances: 1 - memory: "512m" labels: version: 3.5.0 + instances: 1 + cores: 1 + coreLimit: 1200m + memory: 512m diff --git a/examples/spark-pi-volcano.yaml b/examples/spark-pi-volcano.yaml new file mode 100644 index 0000000000..277ed173d2 --- /dev/null +++ b/examples/spark-pi-volcano.yaml @@ -0,0 +1,43 @@ +# +# Copyright 2024 The Kubeflow authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: sparkoperator.k8s.io/v1beta2 +kind: SparkApplication +metadata: + name: spark-pi-volcano + namespace: default +spec: + type: Scala + mode: cluster + image: spark:3.5.0 + imagePullPolicy: IfNotPresent + mainClass: org.apache.spark.examples.SparkPi + mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.5.0.jar + sparkVersion: 3.5.0 + driver: + labels: + version: 3.5.0 + cores: 1 + coreLimit: 1200m + memory: 512m + serviceAccount: spark-operator-spark + executor: + labels: + version: 3.5.0 + instances: 2 + cores: 1 + coreLimit: 1200m + memory: 512m + batchScheduler: volcano diff --git a/examples/spark-pi.yaml b/examples/spark-pi.yaml index 41d48645e6..6d7ae68690 100644 --- a/examples/spark-pi.yaml +++ b/examples/spark-pi.yaml @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -apiVersion: "sparkoperator.k8s.io/v1beta2" +apiVersion: sparkoperator.k8s.io/v1beta2 kind: SparkApplication metadata: name: spark-pi @@ -21,37 +21,22 @@ metadata: spec: type: Scala mode: cluster - image: "spark:3.5.0" - imagePullPolicy: Always + image: spark:3.5.0 + imagePullPolicy: IfNotPresent mainClass: org.apache.spark.examples.SparkPi - mainApplicationFile: "local:///opt/spark/examples/jars/spark-examples_2.12-3.5.0.jar" - sparkVersion: "3.5.0" - sparkUIOptions: - serviceLabels: - test-label/v1: 'true' - restartPolicy: - type: Never - volumes: - - name: "test-volume" - hostPath: - path: "/tmp" - type: Directory + mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.5.0.jar + sparkVersion: 3.5.0 driver: - cores: 1 - coreLimit: "1200m" - memory: "512m" labels: version: 3.5.0 + cores: 1 + coreLimit: 1200m + memory: 512m serviceAccount: spark-operator-spark - volumeMounts: - - name: "test-volume" - mountPath: "/tmp" executor: - cores: 1 - instances: 1 - memory: "512m" labels: version: 3.5.0 - volumeMounts: - - name: "test-volume" - mountPath: "/tmp" + instances: 1 + cores: 1 + coreLimit: 1200m + memory: 512m diff --git a/go.mod b/go.mod index f44232d712..72c1d25480 100644 --- a/go.mod +++ b/go.mod @@ -1,134 +1,217 @@ module github.com/kubeflow/spark-operator -go 1.22 +go 1.22.5 require ( - cloud.google.com/go/storage v1.40.0 - github.com/aws/aws-sdk-go-v2 v1.26.1 - github.com/aws/aws-sdk-go-v2/config v1.27.11 - github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1 - github.com/evanphx/json-patch v5.9.0+incompatible - github.com/golang/glog v1.2.1 + cloud.google.com/go/storage v1.43.0 + github.com/aws/aws-sdk-go-v2 v1.30.3 + github.com/aws/aws-sdk-go-v2/config v1.27.26 + github.com/aws/aws-sdk-go-v2/service/s3 v1.58.2 + github.com/golang/glog v1.2.2 github.com/google/uuid v1.6.0 github.com/olekukonko/tablewriter v0.0.5 - github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.19.0 - github.com/prometheus/client_model v0.6.1 + github.com/onsi/ginkgo/v2 v2.19.0 + github.com/onsi/gomega v1.33.1 + github.com/prometheus/client_golang v1.19.1 github.com/robfig/cron/v3 v3.0.1 - github.com/spf13/cobra v1.8.0 + github.com/spf13/cobra v1.8.1 + github.com/spf13/viper v1.19.0 github.com/stretchr/testify v1.9.0 + go.uber.org/zap v1.27.0 gocloud.dev v0.37.0 - golang.org/x/net v0.24.0 - golang.org/x/sync v0.7.0 - golang.org/x/time v0.5.0 - k8s.io/api v0.29.3 - k8s.io/apiextensions-apiserver v0.29.3 - k8s.io/apimachinery v0.29.3 + golang.org/x/net v0.27.0 + helm.sh/helm/v3 v3.15.3 + k8s.io/api v0.30.2 + k8s.io/apiextensions-apiserver v0.30.2 + k8s.io/apimachinery v0.30.2 k8s.io/client-go v1.5.2 - 
k8s.io/kubectl v0.29.3 - k8s.io/kubernetes v1.29.3 - k8s.io/utils v0.0.0-20240310230437-4693a0247e57 - volcano.sh/apis v1.8.2 + k8s.io/kubernetes v1.30.2 + k8s.io/utils v0.0.0-20240710235135-d4aae2beeffc + sigs.k8s.io/controller-runtime v0.17.5 + volcano.sh/apis v1.9.0 ) require ( - cloud.google.com/go v0.112.2 // indirect - cloud.google.com/go/compute v1.25.1 // indirect - cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v1.1.7 // indirect - github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect - github.com/aws/aws-sdk-go v1.51.16 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.11 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 // indirect - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.15 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 // indirect + cloud.google.com/go v0.115.0 // indirect + cloud.google.com/go/auth v0.7.0 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.3 // indirect + cloud.google.com/go/compute/metadata v0.5.0 // indirect + cloud.google.com/go/iam v1.1.11 // indirect + github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect + github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect + github.com/BurntSushi/toml v1.4.0 // indirect + github.com/MakeNowJust/heredoc v1.0.0 // indirect + github.com/Masterminds/goutils v1.1.1 // indirect + github.com/Masterminds/semver/v3 v3.2.1 // indirect + github.com/Masterminds/sprig/v3 v3.2.3 // indirect + github.com/Masterminds/squirrel v1.5.4 // indirect + github.com/Microsoft/hcsshim v0.12.4 // indirect + github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect + github.com/aws/aws-sdk-go v1.54.18 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.26 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 // indirect + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.7 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.20.5 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 // indirect - github.com/aws/smithy-go v1.20.2 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.22.3 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 // 
indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 // indirect + github.com/aws/smithy-go v1.20.3 // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/emicklei/go-restful/v3 v3.12.0 // indirect - github.com/fatih/camelcase v1.0.0 // indirect + github.com/chai2010/gettext-go v1.0.3 // indirect + github.com/containerd/containerd v1.7.19 // indirect + github.com/containerd/errdefs v0.1.0 // indirect + github.com/containerd/log v0.1.0 // indirect + github.com/containerd/platforms v0.2.1 // indirect + github.com/cyphar/filepath-securejoin v0.2.5 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/distribution/reference v0.6.0 // indirect + github.com/docker/cli v27.0.3+incompatible // indirect + github.com/docker/distribution v2.8.3+incompatible // indirect + github.com/docker/docker v27.0.3+incompatible // indirect + github.com/docker/docker-credential-helpers v0.8.2 // indirect + github.com/docker/go-connections v0.5.0 // indirect + github.com/docker/go-metrics v0.0.1 // indirect + github.com/emicklei/go-restful/v3 v3.12.1 // indirect + github.com/evanphx/json-patch v5.9.0+incompatible // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect + github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect + github.com/fatih/color v1.17.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/go-errors/errors v1.4.2 // indirect - github.com/go-logr/logr v1.4.1 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-errors/errors v1.5.1 // indirect + github.com/go-gorp/gorp/v3 v3.1.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.21.0 // indirect github.com/go-openapi/swag v0.23.0 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect + github.com/gobwas/glob v0.2.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/google/btree v1.0.1 // indirect + github.com/google/btree v1.1.2 // indirect github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect + github.com/google/pprof v0.0.0-20240625030939-27f56978b8b0 // indirect github.com/google/s2a-go v0.1.7 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/wire v0.6.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.12.3 // indirect - github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc // indirect + github.com/googleapis/gax-go/v2 v2.12.5 // indirect + github.com/gorilla/mux v1.8.1 // indirect + github.com/gorilla/websocket v1.5.3 // indirect + github.com/gosuri/uitable v0.0.4 // indirect + github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/huandu/xstrings v1.5.0 // indirect github.com/imdario/mergo 
v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/jmoiron/sqlx v1.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.17.9 // indirect + github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect + github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect + github.com/lib/pq v1.10.9 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect + github.com/magiconair/properties v1.8.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.15 // indirect - github.com/moby/spdystream v0.2.0 // indirect - github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/locker v1.0.1 // indirect + github.com/moby/spdystream v0.4.0 // indirect + github.com/moby/term v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0 // indirect + github.com/pelletier/go-toml/v2 v2.2.2 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/common v0.52.2 // indirect - github.com/prometheus/procfs v0.13.0 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/rivo/uniseg v0.4.7 // indirect + github.com/rubenv/sql-migrate v1.7.0 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/sagikazarmark/locafero v0.6.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/shopspring/decimal v1.4.0 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.6.0 // indirect github.com/spf13/pflag v1.0.5 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect + github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect + github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/xlab/treeprint v1.2.0 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.50.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0 // indirect - go.opentelemetry.io/otel v1.25.0 // indirect - go.opentelemetry.io/otel/metric v1.25.0 // indirect - go.opentelemetry.io/otel/trace v1.25.0 // indirect - 
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect - golang.org/x/crypto v0.22.0 // indirect - golang.org/x/oauth2 v0.19.0 // indirect - golang.org/x/sys v0.19.0 // indirect - golang.org/x/term v0.19.0 // indirect - golang.org/x/text v0.14.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect + go.opentelemetry.io/otel v1.28.0 // indirect + go.opentelemetry.io/otel/metric v1.28.0 // indirect + go.opentelemetry.io/otel/trace v1.28.0 // indirect + go.starlark.net v0.0.0-20240705175910-70002002b310 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/crypto v0.25.0 // indirect + golang.org/x/exp v0.0.0-20240707233637-46b078467d37 // indirect + golang.org/x/oauth2 v0.21.0 // indirect + golang.org/x/sync v0.7.0 // indirect + golang.org/x/sys v0.22.0 // indirect + golang.org/x/term v0.22.0 // indirect + golang.org/x/text v0.16.0 // indirect + golang.org/x/time v0.5.0 // indirect + golang.org/x/tools v0.23.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect - google.golang.org/api v0.172.0 // indirect - google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240401170217-c3f982113cda // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect - google.golang.org/grpc v1.63.0 // indirect - google.golang.org/protobuf v1.33.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect + google.golang.org/api v0.188.0 // indirect + google.golang.org/genproto v0.0.0-20240709173604-40e1e62336c5 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240709173604-40e1e62336c5 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240709173604-40e1e62336c5 // indirect + google.golang.org/grpc v1.65.0 // indirect + google.golang.org/protobuf v1.34.2 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/cli-runtime v0.29.3 // indirect - k8s.io/klog/v2 v2.120.1 // indirect - k8s.io/kube-openapi v0.0.0-20240403164606-bc84c2ddaf99 // indirect + k8s.io/apiserver v0.30.2 // indirect + k8s.io/cli-runtime v0.30.2 // indirect + k8s.io/component-base v0.30.2 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20240709000822-3c01b740850f // indirect + k8s.io/kubectl v0.30.2 // indirect + oras.land/oras-go v1.2.5 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect - sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect + sigs.k8s.io/kustomize/api v0.17.2 // indirect + sigs.k8s.io/kustomize/kyaml v0.17.1 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index 774d575a5c..a5bb11dd5d 100644 --- a/go.sum +++ b/go.sum @@ -1,112 +1,216 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.112.2 h1:ZaGT6LiG7dBzi6zNOvVZwacaXlmf3lRqnC4DQzqyRQw= -cloud.google.com/go v0.112.2/go.mod h1:iEqjp//KquGIJV/m+Pk3xecgKNhV+ry+vVTsy4TbDms= -cloud.google.com/go/compute v1.25.1 h1:ZRpHJedLtTpKgr3RV1Fx23NuaAEN1Zfx9hw1u4aJdjU= -cloud.google.com/go/compute 
v1.25.1/go.mod h1:oopOIR53ly6viBYxaDhBfJwzUAxf1zE//uf3IB011ls= -cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= -cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/iam v1.1.7 h1:z4VHOhwKLF/+UYXAJDFwGtNF0b6gjsW1Pk9Ml0U/IoM= -cloud.google.com/go/iam v1.1.7/go.mod h1:J4PMPg8TtyurAUvSmPj8FF3EDgY1SPRZxcUGrn7WXGA= -cloud.google.com/go/storage v1.40.0 h1:VEpDQV5CJxFmJ6ueWNsKxcr1QAYOXEgxDa+sBbJahPw= -cloud.google.com/go/storage v1.40.0/go.mod h1:Rrj7/hKlG87BLqDJYtwR0fbPld8uJPbQ2ucUMY7Ir0g= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14= +cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU= +cloud.google.com/go/auth v0.7.0 h1:kf/x9B3WTbBUHkC+1VS8wwwli9TzhSt0vSTVBmMR8Ts= +cloud.google.com/go/auth v0.7.0/go.mod h1:D+WqdrpcjmiCgWrXmLLxOVq1GACoE36chW6KXoEvuIw= +cloud.google.com/go/auth/oauth2adapt v0.2.3 h1:MlxF+Pd3OmSudg/b1yZ5lJwoXCEaeedAguodky1PcKI= +cloud.google.com/go/auth/oauth2adapt v0.2.3/go.mod h1:tMQXOfZzFuNuUxOypHlQEXgdfX5cuhwU+ffUuXRJE8I= +cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= +cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= +cloud.google.com/go/iam v1.1.11 h1:0mQ8UKSfdHLut6pH9FM3bI55KWR46ketn0PuXleDyxw= +cloud.google.com/go/iam v1.1.11/go.mod h1:biXoiLWYIKntto2joP+62sd9uW5EpkZmKIvfNcTWlnQ= +cloud.google.com/go/longrunning v0.5.9 h1:haH9pAuXdPAMqHvzX0zlWQigXT7B0+CL4/2nXXdBo5k= +cloud.google.com/go/longrunning v0.5.9/go.mod h1:HD+0l9/OOW0za6UWdKJtXoFAX/BGg/3Wj8p10NeWF7c= +cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs= +cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0= +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= +github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= +github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= +github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= +github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= 
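The go.mod rewrite above is the dependency side of the controller-runtime migration: `sigs.k8s.io/controller-runtime`, `go.uber.org/zap`, `github.com/spf13/viper`, and `helm.sh/helm/v3` enter the direct requires, the k8s.io modules move from 0.29.x to 0.30.2, and the go.sum entries around this point are regenerated to match. After bumps of this size it is safer to let the toolchain rewrite the checksum file than to edit it by hand:

```sh
go mod tidy            # prune and regenerate go.sum from go.mod
go build ./... && go vet ./...
```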
+github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= +github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= +github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= +github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM= +github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/Microsoft/hcsshim v0.12.4 h1:Ev7YUMHAHoWNm+aDSPzc5W9s6E2jyL1szpVDJeZ/Rr4= +github.com/Microsoft/hcsshim v0.12.4/go.mod h1:Iyl1WVpZzr+UkzjekHZbV8o5Z9ZkxNGx6CtY2Qg/JVQ= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/aws/aws-sdk-go v1.51.16 h1:vnWKK8KjbftEkuPX8bRj3WHsLy1uhotn0eXptpvrxJI= -github.com/aws/aws-sdk-go v1.51.16/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= -github.com/aws/aws-sdk-go-v2 v1.26.1 h1:5554eUqIYVWpU0YmeeYZ0wU64H2VLBs8TlhRB2L+EkA= -github.com/aws/aws-sdk-go-v2 v1.26.1/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 h1:x6xsQXGSmW6frevwDA+vi/wqhp1ct18mVXYN08/93to= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2/go.mod h1:lPprDr1e6cJdyYeGXnRaJoP4Md+cDBvi2eOj00BlGmg= -github.com/aws/aws-sdk-go-v2/config v1.27.11 h1:f47rANd2LQEYHda2ddSCKYId18/8BhSRM4BULGmfgNA= -github.com/aws/aws-sdk-go-v2/config v1.27.11/go.mod h1:SMsV78RIOYdve1vf36z8LmnszlRWkwMQtomCAI0/mIE= -github.com/aws/aws-sdk-go-v2/credentials v1.17.11 h1:YuIB1dJNf1Re822rriUOTxopaHHvIq0l/pX3fwO+Tzs= -github.com/aws/aws-sdk-go-v2/credentials v1.17.11/go.mod h1:AQtFPsDH9bI2O+71anW6EKL+NcD7LG3dpKGMV4SShgo= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 h1:FVJ0r5XTHSmIHJV6KuDmdYhEpvlHpiSd38RQWhut5J4= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1/go.mod h1:zusuAeqezXzAB24LGuzuekqMAEgWkVYukBec3kr3jUg= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.15 h1:7Zwtt/lP3KNRkeZre7soMELMGNoBrutx8nobg1jKWmo= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.15/go.mod h1:436h2adoHb57yd+8W+gYPrrA9U/R/SuAuOO42Ushzhw= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 h1:aw39xVGeRWlWx9EzGVnhOR4yOjQDHPQ6o6NmBlscyQg= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5/go.mod h1:FSaRudD0dXiMPK2UjknVwwTYyZMRsHv3TtkabsZih5I= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 h1:PG1F3OD1szkuQPzDw3CIQsRIrtTlUC3lP84taWzHlq0= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5/go.mod 
h1:jU1li6RFryMz+so64PpKtudI+QzbKoIEivqdf6LNpOc= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/aws/aws-sdk-go v1.54.18 h1:t8DGtN8A2wEiazoJxeDbfPsbxCKtjoRLuO7jBSgJzo4= +github.com/aws/aws-sdk-go v1.54.18/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go-v2 v1.30.3 h1:jUeBtG0Ih+ZIFH0F4UkmL9w3cSpaMv9tYYDbzILP8dY= +github.com/aws/aws-sdk-go-v2 v1.30.3/go.mod h1:nIQjQVp5sfpQcTc9mPSr1B0PaWK5ByX9MOoDadSN4lc= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3 h1:tW1/Rkad38LA15X4UQtjXZXNKsCgkshC3EbmcUmghTg= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3/go.mod h1:UbnqO+zjqk3uIt9yCACHJ9IVNhyhOCnYk8yA19SAWrM= +github.com/aws/aws-sdk-go-v2/config v1.27.26 h1:T1kAefbKuNum/AbShMsZEro6eRkeOT8YILfE9wyjAYQ= +github.com/aws/aws-sdk-go-v2/config v1.27.26/go.mod h1:ivWHkAWFrw/nxty5Fku7soTIVdqZaZ7dw+tc5iGW3GA= +github.com/aws/aws-sdk-go-v2/credentials v1.17.26 h1:tsm8g/nJxi8+/7XyJJcP2dLrnK/5rkFp6+i2nhmz5fk= +github.com/aws/aws-sdk-go-v2/credentials v1.17.26/go.mod h1:3vAM49zkIa3q8WT6o9Ve5Z0vdByDMwmdScO0zvThTgI= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 h1:KreluoV8FZDEtI6Co2xuNk/UqI9iwMrOx/87PBNIKqw= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11/go.mod h1:SeSUYBLsMYFoRvHE0Tjvn7kbxaUhl75CJi1sbfhMxkU= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.7 h1:kNemAUX+bJFBSfPkGVZ8HFOKIadjLoI2Ua1ZKivhGSo= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.7/go.mod h1:71S2C1g/Zjn+ANmyoOqJ586OrPF9uC9iiHt9ZAT+MOw= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 h1:SoNJ4RlFEQEbtDcCEt+QG56MY4fm4W8rYirAmq+/DdU= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15/go.mod h1:U9ke74k1n2bf+RIgoX1SXFed1HLs51OgUSs+Ph0KJP8= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 h1:C6WHdGnTDIYETAm5iErQUiVNsclNx9qbJVPIt03B6bI= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15/go.mod h1:ZQLZqhcu+JhSrA9/NXRm8SkDvsycE+JkV3WGY41e+IM= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 h1:81KE7vaZzrl7yHBYHVEzYB8sypz11NMOZ40YlWvPxsU= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5/go.mod h1:LIt2rg7Mcgn09Ygbdh/RdIm0rQ+3BNkbP1gyVMFtRK0= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 h1:Ji0DY1xUsUr3I8cHps0G+XM3WWU16lP6yG8qu1GAZAs= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2/go.mod h1:5CsjAbs3NlGQyZNFACh+zztPDI7fU6eW9QsxjfnuBKg= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 h1:ZMeFZ5yk+Ek+jNr1+uwCd2tG89t6oTS5yVWpa6yy2es= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7/go.mod h1:mxV05U+4JiHqIpGqqYXOHLPKUC6bDXC44bsUhNjOEwY= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 h1:ogRAwT1/gxJBcSWDMZlgyFUM962F51A5CRhDLbxLdmo= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7/go.mod h1:YCsIZhXfRPLFFCl5xxY+1T9RKzOKjCut+28JSX2DnAk= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 h1:f9RyWNtS8oH7cZlbn+/JNPpjUk5+5fLd5lM9M0i49Ys= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5/go.mod h1:h5CoMZV2VF297/VLhRhO1WF+XYWOzXo+4HsObA4HjBQ= -github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1 
h1:6cnno47Me9bRykw9AEv9zkXE+5or7jz8TsskTTccbgc= -github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1/go.mod h1:qmdkIIAC+GCLASF7R2whgNrJADz0QZPX+Seiw/i4S3o= -github.com/aws/aws-sdk-go-v2/service/sso v1.20.5 h1:vN8hEbpRnL7+Hopy9dzmRle1xmDc7o8tmY0klsr175w= -github.com/aws/aws-sdk-go-v2/service/sso v1.20.5/go.mod h1:qGzynb/msuZIE8I75DVRCUXw3o3ZyBmUvMwQ2t/BrGM= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 h1:Jux+gDDyi1Lruk+KHF91tK2KCuY61kzoCpvtvJJBtOE= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4/go.mod h1:mUYPBhaF2lGiukDEjJX2BLRRKTmoUSitGDUgM4tRxak= -github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 h1:cwIxeBttqPN3qkaAjcEcsh8NYr8n2HZPkcKgPAi1phU= -github.com/aws/aws-sdk-go-v2/service/sts v1.28.6/go.mod h1:FZf1/nKNEkHdGGJP/cI2MoIMquumuRK6ol3QQJNDxmw= -github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q= -github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 h1:Z5r7SycxmSllHYmaAZPpmN8GviDrSGhMS6bldqtXZPw= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15/go.mod h1:CetW7bDE00QoGEmPUoZuRog07SGVAUVW6LFpNP0YfIg= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 h1:dT3MqvGhSoaIhRseqw2I0yH81l7wiR2vjs57O51EAm8= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3/go.mod h1:GlAeCkHwugxdHaueRr4nhPuY+WW+gR8UjlcqzPr1SPI= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17 h1:YPYe6ZmvUfDDDELqEKtAd6bo8zxhkm+XEFEzQisqUIE= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17/go.mod h1:oBtcnYua/CgzCWYN7NZ5j7PotFDaFSUjCYVTtfyn7vw= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17 h1:HGErhhrxZlQ044RiM+WdoZxp0p+EGM62y3L6pwA4olE= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17/go.mod h1:RkZEx4l0EHYDJpWppMJ3nD9wZJAa8/0lq9aVC+r2UII= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15 h1:246A4lSTXWJw/rmlQI+TT2OcqeDMKBdyjEQrafMaQdA= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15/go.mod h1:haVfg3761/WF7YPuJOER2MP0k4UAXyHaLclKXB6usDg= +github.com/aws/aws-sdk-go-v2/service/s3 v1.58.2 h1:sZXIzO38GZOU+O0C+INqbH7C2yALwfMWpd64tONS/NE= +github.com/aws/aws-sdk-go-v2/service/s3 v1.58.2/go.mod h1:Lcxzg5rojyVPU/0eFwLtcyTaek/6Mtic5B1gJo7e/zE= +github.com/aws/aws-sdk-go-v2/service/sso v1.22.3 h1:Fv1vD2L65Jnp5QRsdiM64JvUM4Xe+E0JyVsRQKv6IeA= +github.com/aws/aws-sdk-go-v2/service/sso v1.22.3/go.mod h1:ooyCOXjvJEsUw7x+ZDHeISPMhtwI3ZCB7ggFMcFfWLU= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 h1:yiwVzJW2ZxZTurVbYWA7QOrAaCYQR72t0wrSBfoesUE= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4/go.mod h1:0oxfLkpz3rQ/CHlx5hB7H69YUpFiI1tql6Q6Ne+1bCw= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 h1:ZsDKRLXGWHk8WdtyYMoGNO7bTudrvuKpDKgMVRlepGE= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.3/go.mod h1:zwySh8fpFyXp9yOr/KVzxOl8SRqgf/IDw5aUt9UKFcQ= +github.com/aws/smithy-go v1.20.3 h1:ryHwveWzPV5BIof6fyDvor6V3iUL7nTfiTKXHiW05nE= +github.com/aws/smithy-go v1.20.3/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= 
+github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= +github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd h1:rFt+Y/IK1aEZkEHchZRSq9OQbsSzIT/OrI8YFFmRIng= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b h1:otBG+dV+YK+Soembjv71DPz3uX/V/6MMlSyD9JBQ6kQ= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/chai2010/gettext-go v1.0.3 h1:9liNh8t+u26xl5ddmWLmsOsdNLwkdRTg5AG+JnTiM80= +github.com/chai2010/gettext-go v1.0.3/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= +github.com/containerd/cgroups/v3 v3.0.2 h1:f5WFqIVSgo5IZmtTT3qVBo6TzI1ON6sycSBKkymb9L0= +github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE= +github.com/containerd/containerd v1.7.19 h1:/xQ4XRJ0tamDkdzrrBAUy/LE5nCcxFKdBm4EcPrSMEE= +github.com/containerd/containerd v1.7.19/go.mod h1:h4FtNYUUMB4Phr6v+xG89RYKj9XccvbNSCKjdufCrkc= +github.com/containerd/continuity v0.4.2 h1:v3y/4Yz5jwnvqPKJJ+7Wf93fyWoCB3F5EclWG023MDM= +github.com/containerd/continuity v0.4.2/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= +github.com/containerd/errdefs v0.1.0 h1:m0wCRBiu1WJT/Fr+iOoQHMQS/eP5myQ8lCv4Dz5ZURM= +github.com/containerd/errdefs v0.1.0/go.mod h1:YgWiiHtLmSeBrvpw+UfPijzbLaB77mEG1WwJTDETIV0= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= +github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= 
+github.com/cyphar/filepath-securejoin v0.2.5 h1:6iR5tXJ/e6tJZzzdMc1km3Sa7RRIVBKAK32O2s7AYfo= +github.com/cyphar/filepath-securejoin v0.2.5/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/emicklei/go-restful/v3 v3.12.0 h1:y2DdzBAURM29NFF94q6RaY4vjIH1rtwDapwQtU84iWk= -github.com/emicklei/go-restful/v3 v3.12.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2 h1:aBfCb7iqHmDEIp6fBvC/hQUddQfg+3qdYjwzaiP9Hnc= +github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2/go.mod h1:WHNsWjnIn2V1LYOrME7e8KxSeKunYHsxEm4am0BUtcI= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/cli v27.0.3+incompatible h1:usGs0/BoBW8MWxGeEtqPMkzOY56jZ6kYlSN5BLDioCQ= +github.com/docker/cli v27.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= +github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v27.0.3+incompatible h1:aBGI9TeQ4MPlhquTQKq9XbK79rKFVwXNUAYz9aXyEBE= +github.com/docker/docker v27.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= +github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1 h1:ZClxb8laGDf5arXfYcAtECDFgAgHklGI8CxgjHnXKJ4= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= +github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod 
h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8= -github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4= +github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc= +github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= +github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= -github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/foxcpp/go-mockdns v1.0.0 h1:7jBqxd3WDWwi/6WhDvacvH1XsN3rOLXyHM1uhvIx6FI= +github.com/foxcpp/go-mockdns v1.0.0/go.mod h1:lgRN6+KxQBawyIghpnl5CezHFGS9VLzvtVlwxvzXTQ4= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk= +github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= +github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= 
github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= +github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4= -github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= @@ -117,8 +221,10 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k= +github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= +github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= +github.com/google/btree 
v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 h1:0VpGH+cDhbDtdcweoyCVsF3fhN8kejK6rFe/2FFX2nU= github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49/go.mod h1:BkkQ4L1KS1xMt2aWSPStnn55ChGC0DPOn2FQYj+f25M= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -126,7 +232,6 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= @@ -138,15 +243,16 @@ github.com/google/go-replayers/httpreplay v1.2.0/go.mod h1:WahEFFZZ7a1P4VM1qEeHy github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= -github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= +github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= +github.com/google/pprof v0.0.0-20240625030939-27f56978b8b0 h1:e+8XbKB6IMn8A4OAyZccO4pYfB3s7bt6azNIPE7AnPg= +github.com/google/pprof v0.0.0-20240625030939-27f56978b8b0/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -154,11 +260,31 @@ github.com/google/wire v0.6.0 h1:HBkoIh4BdSxoyo9PveV8giw7ZsaBOvzWKfcg/6MrVwI= github.com/google/wire v0.6.0/go.mod h1:F4QhpQ9EDIdJ1Mbop/NZBRB+5yrR6qg3BnctaoUk6NA= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.12.3 
h1:5/zPPDvw8Q1SuXjrqrZslrqT7dL/uJT2CQii/cLCKqA= -github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc h1:f8eY6cV/x1x+HLjOp4r72s/31/V2aTUtg5oKRRPf8/Q= -github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/googleapis/gax-go/v2 v2.12.5 h1:8gw9KZK8TiVKB6q3zHY3SBzLnrGp6HQjyfYBYGmXdxA= +github.com/googleapis/gax-go/v2 v2.12.5/go.mod h1:BUDKcWo+RaKq5SC9vVYL0wLADa3VcfswbOMMRmB9H3E= +github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= +github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= +github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= +github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= +github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= +github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -167,59 +293,125 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= +github.com/jmoiron/sqlx v1.4.0/go.mod 
h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/moby/spdystream v0.2.0 
h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= -github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= -github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA= -github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= +github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/dns v1.1.25 h1:dFwPR6SfLtrSwgDcIq2bcU/gVutB4sNApq2HBdqcakg= +github.com/miekg/dns v1.1.25/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= +github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= +github.com/moby/spdystream v0.4.0 h1:Vy79D6mHeJJjiPdFEL2yku1kl0chZpJfZcPpb16BRl8= +github.com/moby/spdystream v0.4.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= +github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= +github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= 
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= -github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= -github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= -github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= -github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= +github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= +github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= +github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= +github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= +github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= +github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= +github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= -github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= +github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= 
+github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.52.2 h1:LW8Vk7BccEdONfrJBDffQGRtpSzi5CQaRZGtboOO2ck= -github.com/prometheus/common v0.52.2/go.mod h1:lrWtQx+iDfn2mbH5GUzlH9TSHyfZpHkSiG1W7y3sF2Q= -github.com/prometheus/procfs v0.13.0 h1:GqzLlQyfsPbaEHaQkO7tbDlriv/4o5Hudv6OXHGKX7o= -github.com/prometheus/procfs v0.13.0/go.mod h1:cd4PFCR54QLnGKPaKGA6l+cfuNXtht43ZKY6tow0Y1g= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= @@ -227,57 +419,109 @@ github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rubenv/sql-migrate v1.7.0 h1:HtQq1xyTN2ISmQDggnh0c9U3JlP8apWh8YO2jzlXpTI= +github.com/rubenv/sql-migrate v1.7.0/go.mod h1:S4wtDEG1CKn+0ShpTtzWhFpHHI5PvCUtiGI+C+Z2THE= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/sagikazarmark/locafero v0.6.0 h1:ON7AQg37yzcRPU69mt7gwhFEBwxI6P9T4Qu3N51bwOk= +github.com/sagikazarmark/locafero v0.6.0/go.mod 
h1:77OmuIc6VTraTXKXIs/uvUxKGUXjE1GbemJYHqdNjX0= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= +github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= +github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 
+github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 h1:+lm10QQTNSBd8DVTNGHx7o/IKu9HYDvLMffDhbyLccI= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 h1:hlE8//ciYMztlGpl/VA+Zm1AcTPHYkHJPbHqE6WJUXE= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1:ERexzlUfuTvpE74urLSbIQW0Z/6hF9t8U4NsJLaioAY= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.50.0 h1:zvpPXY7RfYAGSdYQLjp6zxdJNSYD/+FFoCTQN9IPxBs= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.50.0/go.mod h1:BMn8NB1vsxTljvuorms2hyOs8IBuuBEq0pl7ltOfy30= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0 h1:cEPbyTSEHlQR89XVlyo78gqluF8Y3oMeBkXGWzQsfXY= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0/go.mod h1:DKdbWcT4GH1D0Y3Sqt/PFXt2naRKDWtU+eE6oLdFNA8= -go.opentelemetry.io/otel v1.25.0 h1:gldB5FfhRl7OJQbUHt/8s0a7cE8fbsPAtdpRaApKy4k= -go.opentelemetry.io/otel v1.25.0/go.mod h1:Wa2ds5NOXEMkCmUou1WA7ZBfLTHWIsp034OVD7AO+Vg= -go.opentelemetry.io/otel/metric v1.25.0 h1:LUKbS7ArpFL/I2jJHdJcqMGxkRdxpPHE0VU/D4NuEwA= -go.opentelemetry.io/otel/metric v1.25.0/go.mod h1:rkDLUSd2lC5lq2dFNrX9LGAbINP5B7WBkC78RXCpH5s= -go.opentelemetry.io/otel/sdk v1.22.0 h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw= -go.opentelemetry.io/otel/sdk v1.22.0/go.mod h1:iu7luyVGYovrRpe2fmj3CVKouQNdTOkxtLzPvPz1DOc= -go.opentelemetry.io/otel/trace v1.25.0 h1:tqukZGLwQYRIFtSQM2u2+yfMVTgGVeqRLPUYx1Dq6RM= -go.opentelemetry.io/otel/trace v1.25.0/go.mod h1:hCCs70XM/ljO+BeQkyFnbK28SBIJ/Emuha+ccrCRT7I= -go.starlark.net 
v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY= -go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= +go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= +go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= +go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= +go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= +go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= +go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= +go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= +go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.starlark.net v0.0.0-20240705175910-70002002b310 h1:tEAOMoNmN2MqVNi0MMEWpTtPI4YNCXgxmAGtuv3mST0= +go.starlark.net v0.0.0-20240705175910-70002002b310/go.mod h1:YKMCv9b1WrfWmeqdV5MAuEHWsu5iC+fe6kYl2sQjdI8= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= gocloud.dev v0.37.0 h1:XF1rN6R0qZI/9DYjN16Uy0durAmSlf58DHOcb28GPro= gocloud.dev v0.37.0/go.mod h1:7/O4kqdInCNsc6LqgmuFnS0GRew4XNNYWpA44yQnwco= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= -golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= +golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= +golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20240707233637-46b078467d37 
h1:uLDX+AfeFCct3a2C7uIWBKMJIR3CJMhcgfrUAqjRK6w= +golang.org/x/exp v0.0.0-20240707233637-46b078467d37/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -289,26 +533,30 @@ golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= -golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= +golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.19.0 h1:9+E/EZBCbTLNrbN35fHv/a/d/mOBatymz1zbtQrXpIg= -golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc8= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -319,8 +567,11 @@ golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -328,29 +579,34 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.16.0/go.mod 
h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= -golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= -golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= +golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= +golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -365,34 +621,36 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= -golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= -golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= +golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= +golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= -google.golang.org/api v0.172.0 h1:/1OcMZGPmW1rX2LCu2CmGUD1KXK1+pfzxotxyRUCCdk= -google.golang.org/api v0.172.0/go.mod h1:+fJZq6QXWfa9pXhnIzsjx4yI22d4aI9ZpLb58gvXjis= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/api v0.188.0 h1:51y8fJ/b1AaaBRJr4yWm96fPcuxSo0JcegXE3DaHQHw= +google.golang.org/api v0.188.0/go.mod h1:VR0d+2SIiWOYG3r/jdm7adPW9hI2aRv9ETOSCQ9Beag= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto 
v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda h1:wu/KJm9KJwpfHWhkkZGohVC6KRrc1oJNr4jwtQMOQXw= -google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda/go.mod h1:g2LLCvCeCSir/JJSWosk19BR4NVxGqHUC6rxIRsd7Aw= -google.golang.org/genproto/googleapis/api v0.0.0-20240401170217-c3f982113cda h1:b6F6WIV4xHHD0FA4oIyzU6mHWg2WI2X1RBehwa5QN38= -google.golang.org/genproto/googleapis/api v0.0.0-20240401170217-c3f982113cda/go.mod h1:AHcE/gZH76Bk/ROZhQphlRoWo5xKDEtz3eVEO1LfA8c= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1:LI5DOvAxUPMv/50agcLLoo+AdWc1irS9Rzz4vPuD1V4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/genproto v0.0.0-20240709173604-40e1e62336c5 h1:ORprMx6Xqr56pGwKXMnVEFBI0k7OIcHI0Rx92/rKypo= +google.golang.org/genproto v0.0.0-20240709173604-40e1e62336c5/go.mod h1:FfBgJBJg9GcpPvKIuHSZ/aE1g2ecGL74upMzGZjiGEY= +google.golang.org/genproto/googleapis/api v0.0.0-20240709173604-40e1e62336c5 h1:a/Z0jgw03aJ2rQnp5PlPpznJqJft0HyvyrcUcxgzPwY= +google.golang.org/genproto/googleapis/api v0.0.0-20240709173604-40e1e62336c5/go.mod h1:mw8MG/Qz5wfgYr6VqVCiZcHe/GJEfI+oGGDCohaVgB0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240709173604-40e1e62336c5 h1:SbSDUWW1PAO24TNpLdeheoYPd7kllICcLU52x6eD4kQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240709173604-40e1e62336c5/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.63.0 h1:WjKe+dnvABXyPJMD7KDNLxtoGk5tgk+YFWN6cBWjZE8= -google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= +google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -402,19 +660,31 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 
h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= +gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= +helm.sh/helm/v3 v3.15.3 h1:HcZDaVFe9uHa6hpsR54mJjYyRy4uz/pc6csg27nxFOc= +helm.sh/helm/v3 v3.15.3/go.mod h1:FzSIP8jDQaa6WAVg9F+OkKz7J0ZmAga4MABtTbsb9WQ= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw= @@ -423,29 +693,37 @@ k8s.io/apiextensions-apiserver v0.29.3 h1:9HF+EtZaVpFjStakF4yVufnXGPRppWFEQ87qnO k8s.io/apiextensions-apiserver v0.29.3/go.mod h1:po0XiY5scnpJfFizNGo6puNU6Fq6D70UJY2Cb2KwAVc= k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU= k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU= +k8s.io/apiserver v0.29.3 h1:xR7ELlJ/BZSr2n4CnD3lfA4gzFivh0wwfNfz9L0WZcE= +k8s.io/apiserver v0.29.3/go.mod h1:hrvXlwfRulbMbBgmWRQlFru2b/JySDpmzvQwwk4GUOs= k8s.io/cli-runtime v0.29.3 h1:r68rephmmytoywkw2MyJ+CxjpasJDQY7AGc3XY2iv1k= k8s.io/cli-runtime v0.29.3/go.mod h1:aqVUsk86/RhaGJwDhHXH0jcdqBrgdF3bZWk4Z9D4mkM= k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg= k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0= -k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= -k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20240403164606-bc84c2ddaf99 h1:w6nThEmGo9zcL+xH1Tu6pjxJ3K1jXFW+V0u4peqN8ks= -k8s.io/kube-openapi v0.0.0-20240403164606-bc84c2ddaf99/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= 
+k8s.io/component-base v0.29.3 h1:Oq9/nddUxlnrCuuR2K/jp6aflVvc0uDvxMzAWxnGzAo= +k8s.io/component-base v0.29.3/go.mod h1:Yuj33XXjuOk2BAaHsIGHhCKZQAgYKhqIxIjIr2UXYio= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240709000822-3c01b740850f h1:2sXuKesAYbRHxL3aE2PN6zX/gcJr22cjrsej+W784Tc= +k8s.io/kube-openapi v0.0.0-20240709000822-3c01b740850f/go.mod h1:UxDHUPsUwTOOxSU+oXURfFBcAS6JwiRXTYqYwfuGowc= k8s.io/kubectl v0.29.3 h1:RuwyyIU42MAISRIePaa8Q7A3U74Q9P4MoJbDFz9o3us= k8s.io/kubectl v0.29.3/go.mod h1:yCxfY1dbwgVdEt2zkJ6d5NNLOhhWgTyrqACIoFhpdd4= -k8s.io/kubernetes v1.29.3 h1:EuOAKN4zpiP+kBx/0e9yS5iBkPSyLml19juOqZxBtDw= -k8s.io/kubernetes v1.29.3/go.mod h1:CP+Z+S9haxyB7J+nV6ywYry4dqlphArPXjcc0CsBVXc= -k8s.io/utils v0.0.0-20240310230437-4693a0247e57 h1:gbqbevonBh57eILzModw6mrkbwM0gQBEuevE/AaBsHY= -k8s.io/utils v0.0.0-20240310230437-4693a0247e57/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/kubernetes v1.30.2 h1:11WhS78OYX/lnSy6TXxPO6Hk+E5K9ZNrEsk9JgMSX8I= +k8s.io/kubernetes v1.30.2/go.mod h1:yPbIk3MhmhGigX62FLJm+CphNtjxqCvAIFQXup6RKS0= +k8s.io/utils v0.0.0-20240710235135-d4aae2beeffc h1:sAWhW/i0Lsz5ZUgeE9svkFa4UyoA+LNAsPcWnwQ2PzM= +k8s.io/utils v0.0.0-20240710235135-d4aae2beeffc/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +oras.land/oras-go v1.2.5 h1:XpYuAwAb0DfQsunIyMfeET92emK8km3W4yEzZvUbsTo= +oras.land/oras-go v1.2.5/go.mod h1:PuAwRShRZCsZb7g8Ar3jKKQR/2A/qN+pkYxIOd/FAoo= +sigs.k8s.io/controller-runtime v0.17.5 h1:1FI9Lm7NiOOmBsgTV36/s2XrEFXnO2C4sbg/Zme72Rw= +sigs.k8s.io/controller-runtime v0.17.5/go.mod h1:N0jpP5Lo7lMTF9aL56Z/B2oWBJjey6StQM0jRbKQXtY= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0= -sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3/go.mod h1:9n16EZKMhXBNSiUC5kSdFQJkdH3zbxS/JoO619G1VAY= -sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 h1:W6cLQc5pnqM7vh3b7HvGNfXrJ/xL6BDMS0v1V/HHg5U= -sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3/go.mod h1:JWP1Fj0VWGHyw3YUPjXSQnRnrwezrZSrApfX5S0nIag= +sigs.k8s.io/kustomize/api v0.17.2 h1:E7/Fjk7V5fboiuijoZHgs4aHuexi5Y2loXlVOAVAG5g= +sigs.k8s.io/kustomize/api v0.17.2/go.mod h1:UWTz9Ct+MvoeQsHcJ5e+vziRRkwimm3HytpZgIYqye0= +sigs.k8s.io/kustomize/kyaml v0.17.1 h1:TnxYQxFXzbmNG6gOINgGWQt09GghzgTP6mIurOgrLCQ= +sigs.k8s.io/kustomize/kyaml v0.17.1/go.mod h1:9V0mCjIEYjlXuCdYsSXvyoy2BTsLESH7TlGV81S282U= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= -volcano.sh/apis v1.8.2 h1:MJ1EXpdQeKG+XEhb/I3liWgMFzkgW3qCcj6qdhTuvfA= -volcano.sh/apis v1.8.2/go.mod h1:h+xbUpkjfRaHjktAi8h+7JNnNahjwhRSgpN9FUUwNXQ= +volcano.sh/apis v1.9.0 h1:e+9yEbQOi6HvgaayAxYULT6n+59mkYvmqjKhp9Z06sY= +volcano.sh/apis v1.9.0/go.mod h1:yXNfsZRzAOq6EUyPJYFrlMorh1XsYQGonGWyr4IiznM= diff --git a/pkg/controller/doc.go b/internal/controller/doc.go similarity index 95% rename from pkg/controller/doc.go rename to 
internal/controller/doc.go
index b1992cd8ec..f83ab071d5 100644
--- a/pkg/controller/doc.go
+++ b/internal/controller/doc.go
@@ -1,11 +1,11 @@
 /*
-Copyright 2017 Google LLC
+Copyright 2024 The Kubeflow authors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
 
-    https://www.apache.org/licenses/LICENSE-2.0
+    http://www.apache.org/licenses/LICENSE-2.0
 
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/internal/controller/mutatingwebhookconfiguration/controller.go b/internal/controller/mutatingwebhookconfiguration/controller.go
new file mode 100644
index 0000000000..946a57841a
--- /dev/null
+++ b/internal/controller/mutatingwebhookconfiguration/controller.go
@@ -0,0 +1,99 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package mutatingwebhookconfiguration
+
+import (
+	"context"
+	"fmt"
+
+	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
+	"k8s.io/apimachinery/pkg/types"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/builder"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+	"github.com/kubeflow/spark-operator/pkg/certificate"
+)
+
+var (
+	logger = ctrl.Log.WithName("")
+)
+
+// Reconciler reconciles a webhook configuration object.
+type Reconciler struct {
+	client       client.Client
+	certProvider *certificate.Provider
+	name         string
+}
+
+// Reconciler implements reconcile.Reconciler.
+var _ reconcile.Reconciler = &Reconciler{}
+
+// NewReconciler creates a new Reconciler instance.
+func NewReconciler(client client.Client, certProvider *certificate.Provider, name string) *Reconciler {
+	return &Reconciler{
+		client:       client,
+		certProvider: certProvider,
+		name:         name,
+	}
+}
+
+// SetupWithManager registers the reconciler with the controller manager,
+// watching only the MutatingWebhookConfiguration whose name matches r.name.
+func (r *Reconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error {
+	return ctrl.NewControllerManagedBy(mgr).
+		Named("mutating-webhook-configuration-controller").
+		Watches(
+			&admissionregistrationv1.MutatingWebhookConfiguration{},
+			NewEventHandler(),
+			builder.WithPredicates(
+				NewEventFilter(r.name),
+			),
+		).
+		WithOptions(options).
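+		// Complete registers the controller with the manager; filtered
+		// events begin flowing once the manager starts.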
+		Complete(r)
+}
+
+// Reconcile refreshes the CA bundle of the MutatingWebhookConfiguration
+// named in the request.
+func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	logger.Info("Updating CA bundle of MutatingWebhookConfiguration", "name", req.Name)
+	if err := r.updateMutatingWebhookConfiguration(ctx, req.NamespacedName); err != nil {
+		return ctrl.Result{Requeue: true}, err
+	}
+	return ctrl.Result{}, nil
+}
+
+func (r *Reconciler) updateMutatingWebhookConfiguration(ctx context.Context, key types.NamespacedName) error {
+	webhook := &admissionregistrationv1.MutatingWebhookConfiguration{}
+	if err := r.client.Get(ctx, key, webhook); err != nil {
+		return fmt.Errorf("failed to get mutating webhook configuration %v: %v", key, err)
+	}
+
+	caBundle, err := r.certProvider.CACert()
+	if err != nil {
+		return fmt.Errorf("failed to get CA certificate: %v", err)
+	}
+
+	newWebhook := webhook.DeepCopy()
+	for i := range newWebhook.Webhooks {
+		newWebhook.Webhooks[i].ClientConfig.CABundle = caBundle
+	}
+	if err := r.client.Update(ctx, newWebhook); err != nil {
+		return fmt.Errorf("failed to update mutating webhook configuration %v: %v", key, err)
+	}
+
+	return nil
+}
diff --git a/internal/controller/mutatingwebhookconfiguration/event_filter.go b/internal/controller/mutatingwebhookconfiguration/event_filter.go
new file mode 100644
index 0000000000..64131300b6
--- /dev/null
+++ b/internal/controller/mutatingwebhookconfiguration/event_filter.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package mutatingwebhookconfiguration
+
+import (
+	"sigs.k8s.io/controller-runtime/pkg/event"
+	"sigs.k8s.io/controller-runtime/pkg/predicate"
+)
+
+// EventFilter filters events for MutatingWebhookConfiguration.
+type EventFilter struct {
+	name string
+}
+
+// NewEventFilter creates a new EventFilter instance.
+func NewEventFilter(name string) *EventFilter {
+	return &EventFilter{
+		name: name,
+	}
+}
+
+// EventFilter implements predicate.Predicate.
+var _ predicate.Predicate = &EventFilter{}
+
+// Create implements predicate.Predicate.
+func (f *EventFilter) Create(e event.CreateEvent) bool {
+	return e.Object.GetName() == f.name
+}
+
+// Update implements predicate.Predicate.
+func (f *EventFilter) Update(e event.UpdateEvent) bool {
+	return e.ObjectOld.GetName() == f.name
+}
+
+// Delete implements predicate.Predicate.
+func (f *EventFilter) Delete(event.DeleteEvent) bool {
+	return false
+}
+
+// Generic implements predicate.Predicate.
+func (f *EventFilter) Generic(event.GenericEvent) bool {
+	return false
+}
diff --git a/internal/controller/mutatingwebhookconfiguration/event_handler.go b/internal/controller/mutatingwebhookconfiguration/event_handler.go
new file mode 100644
index 0000000000..f9c8835061
--- /dev/null
+++ b/internal/controller/mutatingwebhookconfiguration/event_handler.go
@@ -0,0 +1,102 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package mutatingwebhookconfiguration
+
+import (
+	"context"
+
+	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/util/workqueue"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/event"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+)
+
+// EventHandler handles MutatingWebhookConfiguration events.
+type EventHandler struct{}
+
+var _ handler.EventHandler = &EventHandler{}
+
+// NewEventHandler creates a new EventHandler instance.
+func NewEventHandler() *EventHandler {
+	return &EventHandler{}
+}
+
+// Create implements handler.EventHandler.
+func (h *EventHandler) Create(ctx context.Context, event event.CreateEvent, queue workqueue.RateLimitingInterface) {
+	mwc, ok := event.Object.(*admissionregistrationv1.MutatingWebhookConfiguration)
+	if !ok {
+		return
+	}
+	logger.Info("MutatingWebhookConfiguration created", "name", mwc.Name)
+	key := types.NamespacedName{
+		Namespace: mwc.Namespace,
+		Name:      mwc.Name,
+	}
+	queue.AddRateLimited(ctrl.Request{NamespacedName: key})
+}
+
+// Update implements handler.EventHandler.
+func (h *EventHandler) Update(ctx context.Context, event event.UpdateEvent, queue workqueue.RateLimitingInterface) {
+	oldWebhook, ok := event.ObjectOld.(*admissionregistrationv1.MutatingWebhookConfiguration)
+	if !ok {
+		return
+	}
+	newWebhook, ok := event.ObjectNew.(*admissionregistrationv1.MutatingWebhookConfiguration)
+	if !ok {
+		return
+	}
+	if newWebhook.ResourceVersion == oldWebhook.ResourceVersion {
+		return
+	}
+
+	logger.Info("MutatingWebhookConfiguration updated", "name", newWebhook.Name, "namespace", newWebhook.Namespace)
+	key := types.NamespacedName{
+		Namespace: newWebhook.Namespace,
+		Name:      newWebhook.Name,
+	}
+	queue.AddRateLimited(ctrl.Request{NamespacedName: key})
+}
+
+// Delete implements handler.EventHandler.
+func (h *EventHandler) Delete(ctx context.Context, event event.DeleteEvent, queue workqueue.RateLimitingInterface) {
+	mwc, ok := event.Object.(*admissionregistrationv1.MutatingWebhookConfiguration)
+	if !ok {
+		return
+	}
+	logger.Info("MutatingWebhookConfiguration deleted", "name", mwc.Name, "namespace", mwc.Namespace)
+	key := types.NamespacedName{
+		Namespace: mwc.Namespace,
+		Name:      mwc.Name,
+	}
+	queue.AddRateLimited(ctrl.Request{NamespacedName: key})
+}
+
+// Generic implements handler.EventHandler.
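+// Generic events are not expected for webhook configurations; they are still
+// enqueued so that an out-of-band event re-syncs the CA bundle.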
+func (h *EventHandler) Generic(ctx context.Context, event event.GenericEvent, queue workqueue.RateLimitingInterface) { + mwc, ok := event.Object.(*admissionregistrationv1.MutatingWebhookConfiguration) + if !ok { + return + } + logger.Info("MutatingWebhookConfiguration generic event", "name", mwc.Name, "namespace", mwc.Namespace) + key := types.NamespacedName{ + Namespace: mwc.Namespace, + Name: mwc.Name, + } + queue.AddRateLimited(ctrl.Request{NamespacedName: key}) +} diff --git a/internal/controller/scheduledsparkapplication/controller.go b/internal/controller/scheduledsparkapplication/controller.go new file mode 100644 index 0000000000..c8abb3af60 --- /dev/null +++ b/internal/controller/scheduledsparkapplication/controller.go @@ -0,0 +1,377 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheduledsparkapplication + +import ( + "context" + "fmt" + "reflect" + "sort" + "time" + + "github.com/robfig/cron/v3" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + "k8s.io/utils/clock" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/kubeflow/spark-operator/api/v1beta2" + "github.com/kubeflow/spark-operator/pkg/common" + "github.com/kubeflow/spark-operator/pkg/util" +) + +var ( + logger = log.Log.WithName("") +) + +type Options struct { + Namespaces []string +} + +// Reconciler reconciles a ScheduledSparkApplication object +type Reconciler struct { + scheme *runtime.Scheme + client client.Client + recorder record.EventRecorder + clock clock.Clock + options Options +} + +var _ reconcile.Reconciler = &Reconciler{} + +func NewReconciler( + scheme *runtime.Scheme, + client client.Client, + recorder record.EventRecorder, + clock clock.Clock, + options Options, +) *Reconciler { + return &Reconciler{ + scheme: scheme, + client: client, + recorder: recorder, + clock: clock, + options: options, + } +} + +// +kubebuilder:rbac:groups=sparkoperator.k8s.io,resources=scheduledsparkapplications,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=sparkoperator.k8s.io,resources=scheduledsparkapplications/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=sparkoperator.k8s.io,resources=scheduledsparkapplications/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. 
+// TODO(user): Modify the Reconcile function to compare the state specified by
+// the ScheduledSparkApplication object against the actual cluster state, and then
+// perform operations to make the cluster state reflect the state specified by
+// the user.
+//
+// For more details, check Reconcile and its Result here:
+// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.18.2/pkg/reconcile
+func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	key := req.NamespacedName
+	oldScheduledApp, err := r.getScheduledSparkApplication(ctx, key)
+	if err != nil {
+		if errors.IsNotFound(err) {
+			return ctrl.Result{}, nil
+		}
+		return ctrl.Result{Requeue: true}, err
+	}
+	scheduledApp := oldScheduledApp.DeepCopy()
+	logger.Info("Reconciling ScheduledSparkApplication", "name", scheduledApp.Name, "namespace", scheduledApp.Namespace, "state", scheduledApp.Status.ScheduleState)
+
+	if scheduledApp.Spec.Suspend != nil && *scheduledApp.Spec.Suspend {
+		return ctrl.Result{}, nil
+	}
+
+	schedule, parseErr := cron.ParseStandard(scheduledApp.Spec.Schedule)
+	if parseErr != nil {
+		logger.Error(parseErr, "Failed to parse schedule of ScheduledSparkApplication", "name", scheduledApp.Name, "namespace", scheduledApp.Namespace, "schedule", scheduledApp.Spec.Schedule)
+		scheduledApp.Status.ScheduleState = v1beta2.ScheduleStateFailedValidation
+		scheduledApp.Status.Reason = parseErr.Error()
+		if updateErr := r.updateScheduledSparkApplicationStatus(ctx, scheduledApp); updateErr != nil {
+			return ctrl.Result{Requeue: true}, updateErr
+		}
+		return ctrl.Result{}, nil
+	}
+
+	switch scheduledApp.Status.ScheduleState {
+	case v1beta2.ScheduleStateNew:
+		now := r.clock.Now()
+		oldNextRunTime := scheduledApp.Status.NextRun.Time
+		nextRunTime := schedule.Next(now)
+		if oldNextRunTime.IsZero() || nextRunTime.Before(oldNextRunTime) {
+			scheduledApp.Status.NextRun = metav1.NewTime(nextRunTime)
+		}
+		scheduledApp.Status.ScheduleState = v1beta2.ScheduleStateScheduled
+		if err := r.updateScheduledSparkApplicationStatus(ctx, scheduledApp); err != nil {
+			return ctrl.Result{Requeue: true}, err
+		}
+		return ctrl.Result{RequeueAfter: nextRunTime.Sub(now)}, nil
+	case v1beta2.ScheduleStateScheduled:
+		now := r.clock.Now()
+		nextRunTime := scheduledApp.Status.NextRun
+		if nextRunTime.IsZero() {
+			scheduledApp.Status.NextRun = metav1.NewTime(schedule.Next(now))
+			if err := r.updateScheduledSparkApplicationStatus(ctx, scheduledApp); err != nil {
+				return ctrl.Result{Requeue: true}, err
+			}
+			return ctrl.Result{RequeueAfter: schedule.Next(now).Sub(now)}, nil
+		}
+
+		if nextRunTime.Time.After(now) {
+			return ctrl.Result{RequeueAfter: nextRunTime.Time.Sub(now)}, nil
+		}
+
+		ok, err := r.shouldStartNextRun(scheduledApp)
+		if err != nil {
+			return ctrl.Result{Requeue: true}, err
+		}
+		if !ok {
+			return ctrl.Result{RequeueAfter: schedule.Next(now).Sub(now)}, nil
+		}
+
+		logger.Info("Next run of ScheduledSparkApplication is due", "name", scheduledApp.Name, "namespace", scheduledApp.Namespace)
+		app, err := r.startNextRun(scheduledApp, now)
+		if err != nil {
+			logger.Error(err, "Failed to start next run for ScheduledSparkApplication", "name", scheduledApp.Name, "namespace", scheduledApp.Namespace)
+			return ctrl.Result{RequeueAfter: schedule.Next(now).Sub(now)}, err
+		}
+
+		scheduledApp.Status.LastRun = metav1.NewTime(now)
+		scheduledApp.Status.LastRunName = app.Name
+		scheduledApp.Status.NextRun = metav1.NewTime(schedule.Next(now))
+		if err = r.checkAndUpdatePastRuns(ctx, scheduledApp); err != nil {
+			return
ctrl.Result{Requeue: true}, err + } + if err := r.updateScheduledSparkApplicationStatus(ctx, scheduledApp); err != nil { + return ctrl.Result{Requeue: true}, err + } + return ctrl.Result{RequeueAfter: schedule.Next(now).Sub(now)}, nil + case v1beta2.ScheduleStateFailedValidation: + return ctrl.Result{}, nil + } + + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *Reconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error { + return ctrl.NewControllerManagedBy(mgr). + Named("scheduled-spark-application-controller"). + Watches( + &v1beta2.ScheduledSparkApplication{}, + NewEventHandler(), + builder.WithPredicates( + NewEventFilter(r.options.Namespaces), + )). + WithOptions(options). + Complete(r) +} + +func (r *Reconciler) getScheduledSparkApplication(ctx context.Context, key types.NamespacedName) (*v1beta2.ScheduledSparkApplication, error) { + app := &v1beta2.ScheduledSparkApplication{} + if err := r.client.Get(ctx, key, app); err != nil { + return nil, err + } + return app, nil +} + +func (r *Reconciler) createSparkApplication( + scheduledApp *v1beta2.ScheduledSparkApplication, + t time.Time, +) (*v1beta2.SparkApplication, error) { + labels := map[string]string{ + common.LabelScheduledSparkAppName: scheduledApp.Name, + } + for key, value := range scheduledApp.Labels { + labels[key] = value + } + app := &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%d", scheduledApp.Name, t.UnixNano()), + Namespace: scheduledApp.Namespace, + Labels: labels, + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: v1beta2.SchemeGroupVersion.String(), + Kind: reflect.TypeOf(v1beta2.ScheduledSparkApplication{}).Name(), + Name: scheduledApp.Name, + UID: scheduledApp.UID, + BlockOwnerDeletion: util.BoolPtr(true), + }}, + }, + Spec: scheduledApp.Spec.Template, + } + if err := r.client.Create(context.TODO(), app); err != nil { + return nil, err + } + return app, nil +} + +// shouldStartNextRun checks if the next run should be started. +func (r *Reconciler) shouldStartNextRun(scheduledApp *v1beta2.ScheduledSparkApplication) (bool, error) { + apps, err := r.listSparkApplications(scheduledApp) + if err != nil { + return false, err + } + if len(apps) == 0 { + return true, nil + } + + sortSparkApplicationsInPlace(apps) + // The last run (most recently started) is the first one in the sorted slice. + lastRun := apps[0] + switch scheduledApp.Spec.ConcurrencyPolicy { + case v1beta2.ConcurrencyAllow: + return true, nil + case v1beta2.ConcurrencyForbid: + return r.hasLastRunFinished(lastRun), nil + case v1beta2.ConcurrencyReplace: + if err := r.killLastRunIfNotFinished(lastRun); err != nil { + return false, err + } + return true, nil + } + return false, nil +} + +func (r *Reconciler) startNextRun(scheduledApp *v1beta2.ScheduledSparkApplication, now time.Time) (*v1beta2.SparkApplication, error) { + app, err := r.createSparkApplication(scheduledApp, now) + if err != nil { + return nil, err + } + return app, nil +} + +func (r *Reconciler) hasLastRunFinished(app *v1beta2.SparkApplication) bool { + return app.Status.AppState.State == v1beta2.ApplicationStateCompleted || + app.Status.AppState.State == v1beta2.ApplicationStateFailed +} + +func (r *Reconciler) killLastRunIfNotFinished(app *v1beta2.SparkApplication) error { + finished := r.hasLastRunFinished(app) + if finished { + return nil + } + + // Delete the SparkApplication object of the last run. 
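+	// Deleting the SparkApplication with a zero grace period tears the previous
+	// run down immediately, which is what the Replace concurrency policy calls
+	// for before starting a new run.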
+	if err := r.client.Delete(context.TODO(), app, client.GracePeriodSeconds(0)); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *Reconciler) checkAndUpdatePastRuns(ctx context.Context, scheduledApp *v1beta2.ScheduledSparkApplication) error {
+	apps, err := r.listSparkApplications(scheduledApp)
+	if err != nil {
+		return err
+	}
+
+	var completedApps []*v1beta2.SparkApplication
+	var failedApps []*v1beta2.SparkApplication
+	for _, app := range apps {
+		if app.Status.AppState.State == v1beta2.ApplicationStateCompleted {
+			completedApps = append(completedApps, app)
+		} else if app.Status.AppState.State == v1beta2.ApplicationStateFailed {
+			failedApps = append(failedApps, app)
+		}
+	}
+
+	historyLimit := 1
+	if scheduledApp.Spec.SuccessfulRunHistoryLimit != nil {
+		historyLimit = int(*scheduledApp.Spec.SuccessfulRunHistoryLimit)
+	}
+
+	toKeep, toDelete := bookkeepPastRuns(completedApps, historyLimit)
+	scheduledApp.Status.PastSuccessfulRunNames = []string{}
+	for _, app := range toKeep {
+		scheduledApp.Status.PastSuccessfulRunNames = append(scheduledApp.Status.PastSuccessfulRunNames, app.Name)
+	}
+	for _, app := range toDelete {
+		if err := r.client.Delete(ctx, app, client.GracePeriodSeconds(0)); err != nil {
+			return err
+		}
+	}
+
+	historyLimit = 1
+	if scheduledApp.Spec.FailedRunHistoryLimit != nil {
+		historyLimit = int(*scheduledApp.Spec.FailedRunHistoryLimit)
+	}
+	toKeep, toDelete = bookkeepPastRuns(failedApps, historyLimit)
+	scheduledApp.Status.PastFailedRunNames = []string{}
+	for _, app := range toKeep {
+		scheduledApp.Status.PastFailedRunNames = append(scheduledApp.Status.PastFailedRunNames, app.Name)
+	}
+	for _, app := range toDelete {
+		if err := r.client.Delete(ctx, app, client.GracePeriodSeconds(0)); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *Reconciler) updateScheduledSparkApplicationStatus(ctx context.Context, scheduledApp *v1beta2.ScheduledSparkApplication) error {
+	// logger.Info("Updating ScheduledSparkApplication", "name", scheduledApp.Name, "namespace", scheduledApp.Namespace, "status", scheduledApp.Status)
+	if err := r.client.Status().Update(ctx, scheduledApp); err != nil {
+		return fmt.Errorf("failed to update ScheduledSparkApplication status: %v", err)
+	}
+
+	return nil
+}
+
+// listSparkApplications lists the SparkApplications owned by the given ScheduledSparkApplication.
+func (r *Reconciler) listSparkApplications(app *v1beta2.ScheduledSparkApplication) ([]*v1beta2.SparkApplication, error) {
+	set := labels.Set{common.LabelScheduledSparkAppName: app.Name}
+	appList := &v1beta2.SparkApplicationList{}
+	if err := r.client.List(context.TODO(), appList, client.InNamespace(app.Namespace), client.MatchingLabels(set)); err != nil {
+		return nil, fmt.Errorf("failed to list SparkApplications: %v", err)
+	}
+	apps := []*v1beta2.SparkApplication{}
+	// Take the address of each slice element rather than of the loop variable,
+	// which would otherwise alias the same backing variable on every iteration.
+	for i := range appList.Items {
+		apps = append(apps, &appList.Items[i])
+	}
+	return apps, nil
+}
+
+// sortSparkApplicationsInPlace sorts the given slice of SparkApplication in place by the decreasing order of creation timestamp.
+func sortSparkApplicationsInPlace(apps []*v1beta2.SparkApplication) {
+	sort.Slice(apps, func(i, j int) bool {
+		return apps[i].CreationTimestamp.After(apps[j].CreationTimestamp.Time)
+	})
+}
+
+// bookkeepPastRuns splits the given past runs into those to keep and those to delete, keeping at most limit of the most recent runs.
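+// For example (illustrative), with five completed past runs and a successful-run
+// history limit of 2, the two most recently created runs are kept and the other
+// three are returned for deletion:
+//
+//	toKeep, toDelete := bookkeepPastRuns(completedApps, 2)
+//	// len(toKeep) == 2, len(toDelete) == 3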
+func bookkeepPastRuns(apps []*v1beta2.SparkApplication, limit int) ([]*v1beta2.SparkApplication, []*v1beta2.SparkApplication) { + if len(apps) <= limit { + return apps, nil + } + sortSparkApplicationsInPlace(apps) + toKeep := apps[:limit] + toDelete := apps[limit:] + return toKeep, toDelete +} diff --git a/internal/controller/scheduledsparkapplication/controller_test.go b/internal/controller/scheduledsparkapplication/controller_test.go new file mode 100644 index 0000000000..fd95d302d6 --- /dev/null +++ b/internal/controller/scheduledsparkapplication/controller_test.go @@ -0,0 +1,90 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheduledsparkapplication + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/clock" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/kubeflow/spark-operator/api/v1beta2" +) + +var _ = Describe("ScheduledSparkApplication Controller", func() { + Context("When reconciling a resource", func() { + const resourceName = "test-resource" + + ctx := context.Background() + + typeNamespacedName := types.NamespacedName{ + Name: resourceName, + Namespace: "default", // TODO(user):Modify as needed + } + scheduledsparkapplication := &v1beta2.ScheduledSparkApplication{} + + BeforeEach(func() { + By("creating the custom resource for the Kind ScheduledSparkApplication") + err := k8sClient.Get(ctx, typeNamespacedName, scheduledsparkapplication) + if err != nil && errors.IsNotFound(err) { + resource := &v1beta2.ScheduledSparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName, + Namespace: "default", + }, + Spec: v1beta2.ScheduledSparkApplicationSpec{ + Schedule: "@every 1m", + ConcurrencyPolicy: v1beta2.ConcurrencyAllow, + Template: v1beta2.SparkApplicationSpec{ + Type: v1beta2.SparkApplicationTypeScala, + Mode: v1beta2.DeployModeCluster, + RestartPolicy: v1beta2.RestartPolicy{ + Type: v1beta2.RestartPolicyNever, + }, + }, + }, + // TODO(user): Specify other spec details if needed. + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + + AfterEach(func() { + // TODO(user): Cleanup logic after each test, like removing the resource instance. 
+ resource := &v1beta2.ScheduledSparkApplication{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + By("Cleanup the specific resource instance ScheduledSparkApplication") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + }) + + It("should successfully reconcile the resource", func() { + By("Reconciling the created resource") + reconciler := NewReconciler(k8sClient.Scheme(), k8sClient, nil, clock.RealClock{}, Options{Namespaces: []string{"default"}}) + _, err := reconciler.Reconcile(ctx, reconcile.Request{NamespacedName: typeNamespacedName}) + Expect(err).NotTo(HaveOccurred()) + // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. + // Example: If you expect a certain status condition after reconciliation, verify it here. + }) + }) +}) diff --git a/internal/controller/scheduledsparkapplication/event_filter.go b/internal/controller/scheduledsparkapplication/event_filter.go new file mode 100644 index 0000000000..e6ea5487be --- /dev/null +++ b/internal/controller/scheduledsparkapplication/event_filter.go @@ -0,0 +1,81 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheduledsparkapplication + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + "github.com/kubeflow/spark-operator/api/v1beta2" +) + +// EventFilter filters out ScheduledSparkApplication events. +type EventFilter struct { + namespaces map[string]bool +} + +// EventHandler handles ScheduledSparkApplication events. +var _ predicate.Predicate = &EventFilter{} + +// NewEventFilter creates a new EventFilter instance. +func NewEventFilter(namespaces []string) *EventFilter { + nsMap := make(map[string]bool) + for _, ns := range namespaces { + nsMap[ns] = true + } + return &EventFilter{ + namespaces: nsMap, + } +} + +// Create implements predicate.Predicate. +func (f *EventFilter) Create(e event.CreateEvent) bool { + app, ok := e.Object.(*v1beta2.ScheduledSparkApplication) + if !ok { + return false + } + return f.filter(app) +} + +// Update implements predicate.Predicate. +func (f *EventFilter) Update(e event.UpdateEvent) bool { + newApp, ok := e.ObjectNew.(*v1beta2.ScheduledSparkApplication) + if !ok { + return false + } + + return f.filter(newApp) +} + +// Delete implements predicate.Predicate. +func (f *EventFilter) Delete(_ event.DeleteEvent) bool { + return false +} + +// Generic implements predicate.Predicate. 
+func (f *EventFilter) Generic(e event.GenericEvent) bool { + app, ok := e.Object.(*v1beta2.ScheduledSparkApplication) + if !ok { + return false + } + return f.filter(app) +} + +func (f *EventFilter) filter(app *v1beta2.ScheduledSparkApplication) bool { + return f.namespaces[metav1.NamespaceAll] || f.namespaces[app.Namespace] +} diff --git a/internal/controller/scheduledsparkapplication/event_handler.go b/internal/controller/scheduledsparkapplication/event_handler.go new file mode 100644 index 0000000000..92127ac01b --- /dev/null +++ b/internal/controller/scheduledsparkapplication/event_handler.go @@ -0,0 +1,85 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheduledsparkapplication + +import ( + "context" + + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + + "github.com/kubeflow/spark-operator/api/v1beta2" +) + +// EventHandler handles events for ScheduledSparkApplication. +type EventHandler struct { +} + +// EventHandler implements handler.EventHandler. +var _ handler.EventHandler = &EventHandler{} + +// NewEventHandler creates a new EventHandler instance +func NewEventHandler() *EventHandler { + return &EventHandler{} +} + +// Create implements handler.EventHandler. +func (s *EventHandler) Create(ctx context.Context, event event.CreateEvent, queue workqueue.RateLimitingInterface) { + app, ok := event.Object.(*v1beta2.ScheduledSparkApplication) + if !ok { + return + } + + logger.V(1).Info("ScheduledSparkApplication created", "name", app.Name, "namespace", app.Namespace) + queue.AddRateLimited(ctrl.Request{NamespacedName: types.NamespacedName{Name: app.Name, Namespace: app.Namespace}}) +} + +// Update implements handler.EventHandler. +func (s *EventHandler) Update(ctx context.Context, event event.UpdateEvent, queue workqueue.RateLimitingInterface) { + oldApp, ok := event.ObjectOld.(*v1beta2.ScheduledSparkApplication) + if !ok { + return + } + + logger.V(1).Info("ScheduledSparkApplication updated", "name", oldApp.Name, "namespace", oldApp.Namespace) + queue.AddRateLimited(ctrl.Request{NamespacedName: types.NamespacedName{Name: oldApp.Name, Namespace: oldApp.Namespace}}) +} + +// Delete implements handler.EventHandler. +func (s *EventHandler) Delete(ctx context.Context, event event.DeleteEvent, queue workqueue.RateLimitingInterface) { + app, ok := event.Object.(*v1beta2.ScheduledSparkApplication) + if !ok { + return + } + + logger.V(1).Info("ScheduledSparkApplication deleted", "name", app.Name, "namespace", app.Namespace) + queue.AddRateLimited(ctrl.Request{NamespacedName: types.NamespacedName{Name: app.Name, Namespace: app.Namespace}}) +} + +// Generic implements handler.EventHandler. 
+func (s *EventHandler) Generic(ctx context.Context, event event.GenericEvent, queue workqueue.RateLimitingInterface) { + app, ok := event.Object.(*v1beta2.ScheduledSparkApplication) + if !ok { + return + } + + logger.V(1).Info("ScheduledSparkApplication generic event", "name", app.Name, "namespace", app.Namespace) + queue.AddRateLimited(ctrl.Request{NamespacedName: types.NamespacedName{Name: app.Name, Namespace: app.Namespace}}) +} diff --git a/internal/controller/scheduledsparkapplication/suite_test.go b/internal/controller/scheduledsparkapplication/suite_test.go new file mode 100644 index 0000000000..2a98ffa909 --- /dev/null +++ b/internal/controller/scheduledsparkapplication/suite_test.go @@ -0,0 +1,94 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheduledsparkapplication + +import ( + "fmt" + "path/filepath" + "runtime" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + "github.com/kubeflow/spark-operator/api/v1beta1" + "github.com/kubeflow/spark-operator/api/v1beta2" + // +kubebuilder:scaffold:imports +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. + +var cfg *rest.Config +var k8sClient client.Client +var testEnv *envtest.Environment + +func TestControllers(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Controller Suite") +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crd", "bases")}, + ErrorIfCRDPathMissing: true, + + // The BinaryAssetsDirectory is only required if you want to run the tests directly + // without call the makefile target test. If not informed it will look for the + // default path defined in controller-runtime which is /usr/local/kubebuilder/. + // Note that you must have the required binaries setup under the bin directory to perform + // the tests directly. When we run make test it will be setup and used automatically. + BinaryAssetsDirectory: filepath.Join("..", "..", "..", "bin", "k8s", + fmt.Sprintf("1.29.3-%s-%s", runtime.GOOS, runtime.GOARCH)), + } + + var err error + // cfg is defined in this file globally. 
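+	// testEnv.Start brings up a local control plane (kube-apiserver and etcd
+	// binaries resolved via BinaryAssetsDirectory) and returns a rest.Config
+	// pointing at it.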
+ cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + err = v1beta2.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + err = v1beta1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + // +kubebuilder:scaffold:scheme + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + +}) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) diff --git a/internal/controller/sparkapplication/controller.go b/internal/controller/sparkapplication/controller.go new file mode 100644 index 0000000000..753108a90e --- /dev/null +++ b/internal/controller/sparkapplication/controller.go @@ -0,0 +1,1217 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sparkapplication + +import ( + "context" + "fmt" + "time" + + "github.com/golang/glog" + "github.com/google/uuid" + corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + networkingv1 "k8s.io/api/networking/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/retry" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/kubeflow/spark-operator/api/v1beta2" + "github.com/kubeflow/spark-operator/internal/metrics" + "github.com/kubeflow/spark-operator/internal/scheduler" + "github.com/kubeflow/spark-operator/internal/scheduler/volcano" + "github.com/kubeflow/spark-operator/pkg/common" + "github.com/kubeflow/spark-operator/pkg/util" +) + +var ( + logger = log.Log.WithName("") +) + +// Options defines the options of the controller. +type Options struct { + Namespaces []string + EnableUIService bool + IngressClassName string + IngressURLFormat string + + SparkApplicationMetrics *metrics.SparkApplicationMetrics + SparkExecutorMetrics *metrics.SparkExecutorMetrics +} + +// Reconciler reconciles a SparkApplication object. +type Reconciler struct { + manager ctrl.Manager + scheme *runtime.Scheme + client client.Client + recorder record.EventRecorder + options Options + registry *scheduler.Registry +} + +// Reconciler implements reconcile.Reconciler. +var _ reconcile.Reconciler = &Reconciler{} + +// NewReconciler creates a new Reconciler instance. 
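+// A hypothetical construction from an operator entrypoint (the mgr and registry
+// names are assumed here, not defined in this file):
+//
+//	r := NewReconciler(
+//		mgr,
+//		mgr.GetScheme(),
+//		mgr.GetClient(),
+//		mgr.GetEventRecorderFor("spark-application-controller"),
+//		registry,
+//		Options{Namespaces: []string{"default"}, EnableUIService: true},
+//	)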
+func NewReconciler(
+	manager ctrl.Manager,
+	scheme *runtime.Scheme,
+	client client.Client,
+	recorder record.EventRecorder,
+	registry *scheduler.Registry,
+	options Options,
+) *Reconciler {
+	return &Reconciler{
+		manager:  manager,
+		scheme:   scheme,
+		client:   client,
+		recorder: recorder,
+		registry: registry,
+		options:  options,
+	}
+}
+
+// +kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch;create;update;patch;delete;deletecollection
+// +kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;create;update;patch;delete
+// +kubebuilder:rbac:groups="",resources=services,verbs=get;create;delete
+// +kubebuilder:rbac:groups="",resources=nodes,verbs=get
+// +kubebuilder:rbac:groups="",resources=events,verbs=create;update;patch
+// +kubebuilder:rbac:groups="",resources=resourcequotas,verbs=get;list;watch
+// +kubebuilder:rbac:groups=extensions,resources=ingresses,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=apiextensions.k8s.io,resources=customresourcedefinitions,verbs=get
+// +kubebuilder:rbac:groups=sparkoperator.k8s.io,resources=sparkapplications,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=sparkoperator.k8s.io,resources=sparkapplications/status,verbs=get;update;patch
+// +kubebuilder:rbac:groups=sparkoperator.k8s.io,resources=sparkapplications/finalizers,verbs=update
+
+// Reconcile is part of the main kubernetes reconciliation loop which aims to
+// move the current state of the cluster closer to the desired state.
+// TODO(user): Modify the Reconcile function to compare the state specified by
+// the SparkApplication object against the actual cluster state, and then
+// perform operations to make the cluster state reflect the state specified by
+// the user.
+//
+// For more details, check Reconcile and its Result here:
+// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.18.2/pkg/reconcile
+
+// Reconcile handles Create, Update and Delete events of the custom resource.
+// State Machine for SparkApplication: +// +--------------------------------------------------------------------------------------------------------------------+ +// | +---------------------------------------------------------------------------------------------+ | +// | | +----------+ | | +// | | | | | | +// | | | | | | +// | | |Submission| | | +// | | +----> Failed +----+------------------------------------------------------------------+ | | +// | | | | | | | | | +// | | | | | | | | | +// | | | +----^-----+ | +-----------------------------------------+ | | | +// | | | | | | | | | | +// | | | | | | | | | | +// | +-+--+----+ | +-----v--+-+ +----------+ +-----v-----+ +----v--v--+ | +// | | | | | | | | | | | | | +// | | | | | | | | | | | | | +// | | New +---------> Submitted+----------> Running +-----------> Failing +----------> Failed | | +// | | | | | | | | | | | | | +// | | | | | | | | | | | | | +// | | | | | | | | | | | | | +// | +---------+ | +----^-----+ +-----+----+ +-----+-----+ +----------+ | +// | | | | | | +// | | | | | | +// | +------------+ | | +-------------------------------+ | +// | | | | +-----+-----+ | | +-----------+ +----------+ | +// | | | | | Pending | | | | | | | | +// | | | +---+ Rerun <-------+ +---------------->Succeeding +---------->Completed | | +// | |Invalidating| | <-------+ | | | | | +// | | +-------> | | | | | | | +// | | | | | | | | | | | +// | | | +-----------+ | +-----+-----+ +----------+ | +// | +------------+ | | | +// | | | | +// | +-------------------------------+ | +// | | +// +--------------------------------------------------------------------------------------------------------------------+ +func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + key := req.NamespacedName + app, err := r.getSparkApplication(key) + if err != nil { + if errors.IsNotFound(err) { + return ctrl.Result{}, nil + } + return ctrl.Result{Requeue: true}, err + } + logger.Info("Reconciling SparkApplication", "name", app.Name, "namespace", app.Namespace, "state", app.Status.AppState.State) + + // Check if the spark application is being deleted + if !app.DeletionTimestamp.IsZero() { + return r.handleSparkApplicationDeletion(ctx, req) + } + switch app.Status.AppState.State { + case v1beta2.ApplicationStateNew: + return r.reconcileNewSparkApplication(ctx, req) + case v1beta2.ApplicationStateSubmitted: + return r.reconcileSubmittedSparkApplication(ctx, req) + case v1beta2.ApplicationStateFailedSubmission: + return r.reconcileFailedSubmissionSparkApplication(ctx, req) + case v1beta2.ApplicationStateRunning: + return r.reconcileRunningSparkApplication(ctx, req) + case v1beta2.ApplicationStatePendingRerun: + return r.reconcilePendingRerunSparkApplication(ctx, req) + case v1beta2.ApplicationStateInvalidating: + return r.reconcileInvalidatingSparkApplication(ctx, req) + case v1beta2.ApplicationStateSucceeding: + return r.reconcileSucceedingSparkApplication(ctx, req) + case v1beta2.ApplicationStateFailing: + return r.reconcileFailingSparkApplication(ctx, req) + case v1beta2.ApplicationStateCompleted: + return r.reconcileCompletedSparkApplication(ctx, req) + case v1beta2.ApplicationStateFailed: + return r.reconcileFailedSparkApplication(ctx, req) + case v1beta2.ApplicationStateUnknown: + return r.reconcileUnknownSparkApplication(ctx, req) + } + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. 
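+// Hypothetical wiring from an operator entrypoint (mgr and reconciler are
+// assumed names, not defined in this file):
+//
+//	opts := controller.Options{MaxConcurrentReconciles: 10}
+//	if err := reconciler.SetupWithManager(mgr, opts); err != nil {
+//		os.Exit(1)
+//	}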
+func (r *Reconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error { + return ctrl.NewControllerManagedBy(mgr). + Named("spark-application-controller"). + Watches( + &corev1.Pod{}, + NewSparkPodEventHandler(mgr.GetClient(), r.options.SparkExecutorMetrics), + builder.WithPredicates(newSparkPodEventFilter(r.options.Namespaces)), + ). + Watches( + &v1beta2.SparkApplication{}, + NewSparkApplicationEventHandler(r.options.SparkApplicationMetrics), + builder.WithPredicates( + NewSparkApplicationEventFilter( + mgr.GetClient(), + mgr.GetEventRecorderFor("spark-application-event-handler"), + r.options.Namespaces, + ), + ), + ). + WithOptions(options). + Complete(r) +} + +func (r *Reconciler) handleSparkApplicationDeletion(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + key := req.NamespacedName + app, err := r.getSparkApplication(key) + if err != nil { + if errors.IsNotFound(err) { + return ctrl.Result{}, nil + } + return ctrl.Result{Requeue: true}, err + } + + if err := r.deleteSparkResources(ctx, app); err != nil { + logger.Error(err, "Failed to delete resources associated with SparkApplication", "name", app.Name, "namespace", app.Namespace) + return ctrl.Result{Requeue: true}, err + } + return ctrl.Result{}, nil +} + +func (r *Reconciler) reconcileNewSparkApplication(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + key := req.NamespacedName + retryErr := retry.RetryOnConflict( + retry.DefaultRetry, + func() error { + old, err := r.getSparkApplication(key) + if err != nil { + return err + } + if old.Status.AppState.State != v1beta2.ApplicationStateNew { + return nil + } + app := old.DeepCopy() + + if err := r.submitSparkApplication(app); err != nil { + logger.Error(err, "Failed to submit SparkApplication", "name", app.Name, "namespace", app.Namespace) + app.Status = v1beta2.SparkApplicationStatus{ + AppState: v1beta2.ApplicationState{ + State: v1beta2.ApplicationStateFailedSubmission, + ErrorMessage: err.Error(), + }, + SubmissionAttempts: app.Status.SubmissionAttempts + 1, + LastSubmissionAttemptTime: metav1.Now(), + } + } + if err := r.updateSparkApplicationStatus(ctx, app); err != nil { + return err + } + return nil + }, + ) + if retryErr != nil { + logger.Error(retryErr, "Failed to reconcile SparkApplication", "name", key.Name, "namespace", key.Namespace) + return ctrl.Result{Requeue: true}, retryErr + } + return ctrl.Result{}, nil +} + +func (r *Reconciler) reconcileSubmittedSparkApplication(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + key := req.NamespacedName + retryErr := retry.RetryOnConflict( + retry.DefaultRetry, + func() error { + old, err := r.getSparkApplication(key) + if err != nil { + return err + } + if old.Status.AppState.State != v1beta2.ApplicationStateSubmitted { + return nil + } + app := old.DeepCopy() + + if err := r.updateSparkApplicationState(ctx, app); err != nil { + return err + } + if err := r.updateSparkApplicationStatus(ctx, app); err != nil { + return err + } + return nil + }, + ) + if retryErr != nil { + logger.Error(retryErr, "Failed to reconcile SparkApplication", "name", key.Name, "namespace", key.Namespace) + return ctrl.Result{}, retryErr + } + return ctrl.Result{}, nil +} + +func (r *Reconciler) reconcileFailedSubmissionSparkApplication(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + key := req.NamespacedName + retryErr := retry.RetryOnConflict( + retry.DefaultRetry, + func() error { + old, err := r.getSparkApplication(key) + if err != nil { + return err + } + if 
old.Status.AppState.State != v1beta2.ApplicationStateFailedSubmission {
+				return nil
+			}
+			app := old.DeepCopy()
+
+			if util.ShouldRetry(app) {
+				if isNextRetryDue(app) {
+					if r.validateSparkResourceDeletion(ctx, app) {
+						_ = r.submitSparkApplication(app)
+					} else {
+						err := r.deleteSparkResources(ctx, app)
+						if err != nil {
+							logger.Error(err, "Failed to delete resources associated with SparkApplication", "name", app.Name, "namespace", app.Namespace)
+						}
+						return err
+					}
+				}
+			} else {
+				app.Status.AppState.State = v1beta2.ApplicationStateFailed
+				app.Status.TerminationTime = metav1.Now()
+				r.recordSparkApplicationEvent(app)
+			}
+
+			if err := r.updateSparkApplicationStatus(ctx, app); err != nil {
+				return err
+			}
+			return nil
+		},
+	)
+	if retryErr != nil {
+		logger.Error(retryErr, "Failed to reconcile SparkApplication", "name", key.Name, "namespace", key.Namespace)
+		return ctrl.Result{}, retryErr
+	}
+	return ctrl.Result{}, nil
+}
+
+func (r *Reconciler) reconcileRunningSparkApplication(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	key := req.NamespacedName
+	retryErr := retry.RetryOnConflict(
+		retry.DefaultRetry,
+		func() error {
+			old, err := r.getSparkApplication(key)
+			if err != nil {
+				return err
+			}
+			if old.Status.AppState.State != v1beta2.ApplicationStateRunning {
+				return nil
+			}
+			app := old.DeepCopy()
+
+			if err := r.updateSparkApplicationState(ctx, app); err != nil {
+				return err
+			}
+			if err := r.updateSparkApplicationStatus(ctx, app); err != nil {
+				return err
+			}
+			return nil
+		},
+	)
+	if retryErr != nil {
+		logger.Error(retryErr, "Failed to reconcile SparkApplication", "name", key.Name, "namespace", key.Namespace)
+		return ctrl.Result{}, retryErr
+	}
+	return ctrl.Result{}, nil
+}
+
+func (r *Reconciler) reconcilePendingRerunSparkApplication(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	key := req.NamespacedName
+	retryErr := retry.RetryOnConflict(
+		retry.DefaultRetry,
+		func() error {
+			old, err := r.getSparkApplication(key)
+			if err != nil {
+				return err
+			}
+			if old.Status.AppState.State != v1beta2.ApplicationStatePendingRerun {
+				return nil
+			}
+			app := old.DeepCopy()
+
+			logger.Info("Pending rerun SparkApplication", "name", app.Name, "namespace", app.Namespace, "state", app.Status.AppState.State)
+			if r.validateSparkResourceDeletion(ctx, app) {
+				logger.Info("Successfully deleted resources associated with SparkApplication", "name", app.Name, "namespace", app.Namespace, "state", app.Status.AppState.State)
+				r.recordSparkApplicationEvent(app)
+				r.resetSparkApplicationStatus(app)
+				if err = r.submitSparkApplication(app); err != nil {
+					logger.Error(err, "Failed to run spark-submit", "name", app.Name, "namespace", app.Namespace, "state", app.Status.AppState.State)
+				}
+			}
+			if err := r.updateSparkApplicationStatus(ctx, app); err != nil {
+				return err
+			}
+			return nil
+		},
+	)
+	if retryErr != nil {
+		logger.Error(retryErr, "Failed to reconcile SparkApplication", "name", key.Name, "namespace", key.Namespace)
+		return ctrl.Result{}, retryErr
+	}
+	return ctrl.Result{}, nil
+}
+
+func (r *Reconciler) reconcileInvalidatingSparkApplication(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	key := req.NamespacedName
+	retryErr := retry.RetryOnConflict(
+		retry.DefaultRetry,
+		func() error {
+			old, err := r.getSparkApplication(key)
+			if err != nil {
+				return err
+			}
+			if old.Status.AppState.State != v1beta2.ApplicationStateInvalidating {
+				return nil
+			}
+			app := old.DeepCopy()
+
+			// Invalidate the current run and enqueue the
SparkApplication for re-execution. + if err := r.deleteSparkResources(ctx, app); err != nil { + logger.Error(err, "Failed to delete resources associated with SparkApplication", "name", app.Name, "namespace", app.Namespace) + } else { + r.resetSparkApplicationStatus(app) + app.Status.AppState.State = v1beta2.ApplicationStatePendingRerun + } + if err := r.updateSparkApplicationStatus(ctx, app); err != nil { + return err + } + return nil + }, + ) + if retryErr != nil { + logger.Error(retryErr, "Failed to reconcile SparkApplication", "name", key.Name, "namespace", key.Namespace) + return ctrl.Result{}, retryErr + } + return ctrl.Result{}, nil +} + +func (r *Reconciler) reconcileSucceedingSparkApplication(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + key := req.NamespacedName + retryErr := retry.RetryOnConflict( + retry.DefaultRetry, + func() error { + old, err := r.getSparkApplication(key) + if err != nil { + return err + } + if old.Status.AppState.State != v1beta2.ApplicationStateSucceeding { + return nil + } + app := old.DeepCopy() + + if util.ShouldRetry(app) { + if err := r.deleteSparkResources(ctx, app); err != nil { + logger.Error(err, "failed to delete spark resources", "name", app.Name, "namespace", app.Namespace) + return err + } + app.Status.AppState.State = v1beta2.ApplicationStatePendingRerun + } else { + app.Status.AppState.State = v1beta2.ApplicationStateCompleted + } + if err := r.updateSparkApplicationStatus(ctx, app); err != nil { + return err + } + return nil + }, + ) + if retryErr != nil { + logger.Error(retryErr, "Failed to reconcile SparkApplication", "name", key.Name, "namespace", key.Namespace) + return ctrl.Result{}, retryErr + } + return ctrl.Result{}, nil +} + +func (r *Reconciler) reconcileFailingSparkApplication(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + key := req.NamespacedName + retryErr := retry.RetryOnConflict( + retry.DefaultRetry, + func() error { + old, err := r.getSparkApplication(key) + if err != nil { + return err + } + if old.Status.AppState.State != v1beta2.ApplicationStateFailing { + return nil + } + app := old.DeepCopy() + + if util.ShouldRetry(app) { + if isNextRetryDue(app) { + if err := r.deleteSparkResources(ctx, app); err != nil { + logger.Error(err, "failed to delete spark resources", "name", app.Name, "namespace", app.Namespace) + return err + } + app.Status.AppState.State = v1beta2.ApplicationStatePendingRerun + } + } else { + app.Status.AppState.State = v1beta2.ApplicationStateFailed + } + if err := r.updateSparkApplicationStatus(ctx, app); err != nil { + return err + } + return nil + }, + ) + if retryErr != nil { + logger.Error(retryErr, "Failed to reconcile SparkApplication", "name", key.Name, "namespace", key.Namespace) + return ctrl.Result{}, retryErr + } + return ctrl.Result{}, nil +} + +func (r *Reconciler) reconcileCompletedSparkApplication(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + key := req.NamespacedName + retryErr := retry.RetryOnConflict( + retry.DefaultRetry, + func() error { + old, err := r.getSparkApplication(key) + if err != nil { + return err + } + if old.Status.AppState.State != v1beta2.ApplicationStateCompleted { + return nil + } + app := old.DeepCopy() + + if util.IsExpired(app) { + logger.Info("Deleting expired SparkApplication", "name", app.Name, "namespace", app.Namespace, "state", app.Status.AppState.State) + if err := r.client.Delete(ctx, app); err != nil { + return err + } + return nil + } + if err := r.updateExecutorState(ctx, app); err != nil { + return 
err + } + if err := r.updateSparkApplicationStatus(ctx, app); err != nil { + return err + } + if err := r.cleanUpOnTermination(old, app); err != nil { + logger.Error(err, "Failed to clean up resources for SparkApplication", "name", old.Name, "namespace", old.Namespace, "state", old.Status.AppState.State) + return err + } + return nil + }, + ) + if retryErr != nil { + logger.Error(retryErr, "Failed to reconcile SparkApplication", "name", key.Name, "namespace", key.Namespace) + return ctrl.Result{}, retryErr + } + return ctrl.Result{}, nil +} + +func (r *Reconciler) reconcileFailedSparkApplication(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + key := req.NamespacedName + retryErr := retry.RetryOnConflict( + retry.DefaultRetry, + func() error { + old, err := r.getSparkApplication(key) + if err != nil { + return err + } + if old.Status.AppState.State != v1beta2.ApplicationStateFailed { + return nil + } + app := old.DeepCopy() + + if util.IsExpired(app) { + logger.Info("Deleting expired SparkApplication", "name", app.Name, "namespace", app.Namespace, "state", app.Status.AppState.State) + if err := r.client.Delete(ctx, app); err != nil { + return err + } + return nil + } + if err := r.updateExecutorState(ctx, app); err != nil { + return err + } + if err := r.updateSparkApplicationStatus(ctx, app); err != nil { + return err + } + if err := r.cleanUpOnTermination(old, app); err != nil { + logger.Error(err, "Failed to clean up resources for SparkApplication", "name", old.Name, "namespace", old.Namespace, "state", old.Status.AppState.State) + return err + } + return nil + }, + ) + if retryErr != nil { + logger.Error(retryErr, "Failed to reconcile SparkApplication", "name", key.Name, "namespace", key.Namespace) + return ctrl.Result{}, retryErr + } + return ctrl.Result{}, nil +} + +func (r *Reconciler) reconcileUnknownSparkApplication(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + key := req.NamespacedName + retryErr := retry.RetryOnConflict( + retry.DefaultRetry, + func() error { + old, err := r.getSparkApplication(key) + if err != nil { + return err + } + if old.Status.AppState.State != v1beta2.ApplicationStateUnknown { + return nil + } + app := old.DeepCopy() + + if err := r.updateSparkApplicationState(ctx, app); err != nil { + return err + } + if err := r.updateSparkApplicationStatus(ctx, app); err != nil { + return err + } + return nil + }, + ) + if retryErr != nil { + logger.Error(retryErr, "Failed to reconcile SparkApplication", "name", key.Name, "namespace", key.Namespace) + return ctrl.Result{}, retryErr + } + return ctrl.Result{}, nil +} + +// getSparkApplication gets the SparkApplication with the given name and namespace. +func (r *Reconciler) getSparkApplication(key types.NamespacedName) (*v1beta2.SparkApplication, error) { + app := &v1beta2.SparkApplication{} + if err := r.client.Get(context.TODO(), key, app); err != nil { + return nil, err + } + return app, nil +} + +// submitSparkApplication creates a new submission for the given SparkApplication and submits it using spark-submit. 
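+// Conceptually, a successful submission shells out to something like the
+// following (an illustrative sketch only; the real arguments are produced by
+// buildSparkSubmitArgs):
+//
+//	spark-submit \
+//	  --master k8s://https://<api-server-host>:<port> \
+//	  --deploy-mode cluster \
+//	  --conf spark.kubernetes.namespace=<namespace> \
+//	  local:///opt/spark/examples/jars/spark-examples.jar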
+func (r *Reconciler) submitSparkApplication(app *v1beta2.SparkApplication) error {
+	logger.Info("Submitting SparkApplication", "name", app.Name, "namespace", app.Namespace, "state", app.Status.AppState.State)
+
+	if util.PrometheusMonitoringEnabled(app) {
+		logger.Info("Configure Prometheus monitoring for SparkApplication", "name", app.Name, "namespace", app.Namespace)
+		if err := configPrometheusMonitoring(app, r.client); err != nil {
+			return fmt.Errorf("failed to configure Prometheus monitoring: %v", err)
+		}
+	}
+
+	// Use batch scheduler to perform scheduling task before submitting (before build command arguments).
+	if needScheduling, scheduler := r.shouldDoBatchScheduling(app); needScheduling {
+		logger.Info("Do batch scheduling for SparkApplication", "name", app.Name, "namespace", app.Namespace)
+		if err := scheduler.Schedule(app); err != nil {
+			return fmt.Errorf("failed to process batch scheduler: %v", err)
+		}
+	}
+
+	// Create web UI service for spark applications if enabled.
+	if r.options.EnableUIService {
+		service, err := r.createWebUIService(app)
+		if err != nil {
+			return fmt.Errorf("failed to create web UI service: %v", err)
+		}
+		app.Status.DriverInfo.WebUIServiceName = service.serviceName
+		app.Status.DriverInfo.WebUIPort = service.servicePort
+		app.Status.DriverInfo.WebUIAddress = fmt.Sprintf("%s:%d", service.serviceIP, app.Status.DriverInfo.WebUIPort)
+		logger.Info("Created web UI service for SparkApplication", "name", app.Name, "namespace", app.Namespace)
+
+		// Create UI Ingress if ingress-format is set.
+		if r.options.IngressURLFormat != "" {
+			// We are going to want to use an ingress url.
+			ingressURL, err := getDriverIngressURL(r.options.IngressURLFormat, app.Name, app.Namespace)
+			if err != nil {
+				return fmt.Errorf("failed to get ingress url: %v", err)
+			}
+			// Need to ensure the spark.ui variables are configured correctly if a subPath is used.
+			if ingressURL.Path != "" {
+				if app.Spec.SparkConf == nil {
+					app.Spec.SparkConf = make(map[string]string)
+				}
+				app.Spec.SparkConf[common.SparkUIProxyBase] = ingressURL.Path
+				app.Spec.SparkConf[common.SparkUIProxyRedirectURI] = "/"
+			}
+			ingress, err := r.createWebUIIngress(app, *service, ingressURL, r.options.IngressClassName)
+			if err != nil {
+				return fmt.Errorf("failed to create web UI ingress: %v", err)
+			}
+			app.Status.DriverInfo.WebUIIngressAddress = ingress.ingressURL.String()
+			app.Status.DriverInfo.WebUIIngressName = ingress.ingressName
+			logger.Info("Created web UI ingress for SparkApplication", "name", app.Name, "namespace", app.Namespace)
+		}
+	}
+
+	for _, driverIngressConfiguration := range app.Spec.DriverIngressOptions {
+		logger.Info("Creating driver ingress service for SparkApplication", "name", app.Name, "namespace", app.Namespace)
+		service, err := r.createDriverIngressServiceFromConfiguration(app, &driverIngressConfiguration)
+		if err != nil {
+			return fmt.Errorf("failed to create driver ingress service for SparkApplication: %v", err)
+		}
+		// Create ingress if ingress-format is set.
+		if driverIngressConfiguration.IngressURLFormat != "" {
+			// We are going to want to use an ingress url.
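+			// For example, an IngressURLFormat of
+			// "{{$appName}}.{{$appNamespace}}.example.com" (a hypothetical value)
+			// would yield "spark-pi.default.example.com" for an application named
+			// "spark-pi" in the "default" namespace.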
+			ingressURL, err := getDriverIngressURL(driverIngressConfiguration.IngressURLFormat, app.Name, app.Namespace)
+			if err != nil {
+				return fmt.Errorf("failed to get driver ingress url: %v", err)
+			}
+			ingress, err := r.createDriverIngress(app, &driverIngressConfiguration, *service, ingressURL, r.options.IngressClassName)
+			if err != nil {
+				return fmt.Errorf("failed to create driver ingress: %v", err)
+			}
+			logger.V(1).Info("Created driver ingress for SparkApplication", "name", app.Name, "namespace", app.Namespace, "ingressName", ingress.ingressName, "ingressURL", ingress.ingressURL)
+		}
+	}
+
+	driverPodName := util.GetDriverPodName(app)
+	app.Status.DriverInfo.PodName = driverPodName
+	app.Status.SubmissionID = uuid.New().String()
+	sparkSubmitArgs, err := buildSparkSubmitArgs(app)
+	if err != nil {
+		return fmt.Errorf("failed to build spark-submit arguments: %v", err)
+	}
+
+	// Try submitting the application by running spark-submit.
+	logger.Info("Running spark-submit for SparkApplication", "name", app.Name, "namespace", app.Namespace, "arguments", sparkSubmitArgs)
+	submitted, err := runSparkSubmit(newSubmission(sparkSubmitArgs, app))
+	if err != nil {
+		r.recordSparkApplicationEvent(app)
+		return fmt.Errorf("failed to run spark-submit: %v", err)
+	}
+	if !submitted {
+		// The application may not have been submitted even if err == nil, e.g., when some
+		// state update caused an attempt to re-submit the application, in which case no
+		// error gets returned from runSparkSubmit. If this is the case, we simply return.
+		return nil
+	}
+
+	app.Status.AppState = v1beta2.ApplicationState{
+		State: v1beta2.ApplicationStateSubmitted,
+	}
+	app.Status.SubmissionAttempts = app.Status.SubmissionAttempts + 1
+	app.Status.ExecutionAttempts = app.Status.ExecutionAttempts + 1
+	app.Status.LastSubmissionAttemptTime = metav1.Now()
+	r.recordSparkApplicationEvent(app)
+	return nil
+}
+
+// isNextRetryDue determines whether the next retry of the SparkApplication is due now.
+func isNextRetryDue(app *v1beta2.SparkApplication) bool {
+	retryInterval := app.Spec.RestartPolicy.OnFailureRetryInterval
+	attemptsDone := app.Status.SubmissionAttempts
+	lastEventTime := app.Status.LastSubmissionAttemptTime
+	if retryInterval == nil || lastEventTime.IsZero() || attemptsDone <= 0 {
+		return false
+	}
+
+	// Retry only if at least attemptsDone*RetryInterval has elapsed since the last
+	// attempt, i.e. a linear back-off; e.g. with a 60s interval, the retry after
+	// the second attempt becomes due 120s after it.
+	interval := time.Duration(*retryInterval) * time.Second * time.Duration(attemptsDone)
+	currentTime := time.Now()
+	logger.Info("Checking whether the next retry is due", "currentTime", currentTime, "interval", interval)
+	return currentTime.After(lastEventTime.Add(interval))
+}
+
+// updateDriverState finds the driver pod of the application
+// and updates the driver state based on the current phase of the pod.
+func (r *Reconciler) updateDriverState(_ context.Context, app *v1beta2.SparkApplication) error {
+	// Either the driver pod doesn't exist yet or its name has not been updated.
+ if app.Status.DriverInfo.PodName == "" { + return fmt.Errorf("empty driver pod name with application state %s", app.Status.AppState.State) + } + + driverPod, err := r.getDriverPod(app) + if err != nil { + return err + } + + if driverPod == nil { + app.Status.AppState.State = v1beta2.ApplicationStateFailing + app.Status.AppState.ErrorMessage = "driver pod not found" + app.Status.TerminationTime = metav1.Now() + return nil + } + + app.Status.SparkApplicationID = util.GetSparkApplicationID(driverPod) + driverState := util.GetDriverState(driverPod) + if util.IsDriverTerminated(driverState) { + if app.Status.TerminationTime.IsZero() { + app.Status.TerminationTime = metav1.Now() + } + if driverState == v1beta2.DriverStateFailed { + if state := util.GetDriverContainerTerminatedState(driverPod); state != nil { + if state.ExitCode != 0 { + app.Status.AppState.ErrorMessage = fmt.Sprintf("driver container failed with ExitCode: %d, Reason: %s", state.ExitCode, state.Reason) + } + } else { + app.Status.AppState.ErrorMessage = "driver container status missing" + } + } + } + + newState := util.DriverStateToApplicationState(driverState) + // Only record a driver event if the application state (derived from the driver pod phase) has changed. + if newState != app.Status.AppState.State { + r.recordDriverEvent(app, driverState, driverPod.Name) + app.Status.AppState.State = newState + } + + return nil +} + +// updateExecutorState lists the executor pods of the application +// and updates the executor state based on the current phase of the pods. +func (r *Reconciler) updateExecutorState(_ context.Context, app *v1beta2.SparkApplication) error { + podList, err := r.getExecutorPods(app) + if err != nil { + return err + } + pods := podList.Items + + executorStateMap := make(map[string]v1beta2.ExecutorState) + var executorApplicationID string + for _, pod := range pods { + if util.IsExecutorPod(&pod) { + newState := util.GetExecutorState(&pod) + oldState, exists := app.Status.ExecutorState[pod.Name] + // Only record an executor event if the executor state is new or it has changed. + if !exists || newState != oldState { + if newState == v1beta2.ExecutorStateFailed { + execContainerState := util.GetExecutorContainerTerminatedState(&pod) + if execContainerState != nil { + r.recordExecutorEvent(app, newState, pod.Name, execContainerState.ExitCode, execContainerState.Reason) + } else { + // If we can't find the container state, + // we need to set the exitCode and the Reason to unambiguous values. + r.recordExecutorEvent(app, newState, pod.Name, -1, "Unknown (Container not Found)") + } + } else { + r.recordExecutorEvent(app, newState, pod.Name) + } + } + executorStateMap[pod.Name] = newState + + if executorApplicationID == "" { + executorApplicationID = util.GetSparkApplicationID(&pod) + } + } + } + + // ApplicationID label can be different on driver/executors. Prefer executor ApplicationID if set. + // Refer https://issues.apache.org/jira/projects/SPARK/issues/SPARK-25922 for details. + if executorApplicationID != "" { + app.Status.SparkApplicationID = executorApplicationID + } + + if app.Status.ExecutorState == nil { + app.Status.ExecutorState = make(map[string]v1beta2.ExecutorState) + } + for name, state := range executorStateMap { + app.Status.ExecutorState[name] = state + } + + // Handle missing/deleted executors. 
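+	// For example, an executor still recorded as RUNNING whose pod no longer
+	// exists is marked COMPLETED if the application completed, FAILED if it did
+	// not, and UNKNOWN while the driver is still running.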
+	for name, oldStatus := range app.Status.ExecutorState {
+		_, exists := executorStateMap[name]
+		if !util.IsExecutorTerminated(oldStatus) && !exists {
+			if !util.IsDriverRunning(app) {
+				// If the application state is COMPLETED, the driver pod terminated
+				// successfully and the executor pods have been cleaned up, so when an
+				// executor pod cannot be found under these circumstances we assume
+				// that it completed as well.
+				if app.Status.AppState.State == v1beta2.ApplicationStateCompleted {
+					app.Status.ExecutorState[name] = v1beta2.ExecutorStateCompleted
+				} else {
+					logger.Info("Executor pod not found, assuming it was deleted", "name", name)
+					app.Status.ExecutorState[name] = v1beta2.ExecutorStateFailed
+				}
+			} else {
+				app.Status.ExecutorState[name] = v1beta2.ExecutorStateUnknown
+			}
+		}
+	}
+
+	return nil
+}
+
+func (r *Reconciler) getExecutorPods(app *v1beta2.SparkApplication) (*corev1.PodList, error) {
+	matchLabels := util.GetResourceLabels(app)
+	matchLabels[common.LabelSparkRole] = common.SparkRoleExecutor
+	pods := &corev1.PodList{}
+	if err := r.client.List(context.TODO(), pods, client.InNamespace(app.Namespace), client.MatchingLabels(matchLabels)); err != nil {
+		return nil, fmt.Errorf("failed to get pods for SparkApplication %s/%s: %v", app.Namespace, app.Name, err)
+	}
+	return pods, nil
+}
+
+func (r *Reconciler) getDriverPod(app *v1beta2.SparkApplication) (*corev1.Pod, error) {
+	pod := &corev1.Pod{}
+	var err error
+
+	key := types.NamespacedName{Namespace: app.Namespace, Name: app.Status.DriverInfo.PodName}
+	err = r.client.Get(context.TODO(), key, pod)
+	if err == nil {
+		return pod, nil
+	}
+	if !errors.IsNotFound(err) {
+		return nil, fmt.Errorf("failed to get driver pod %s: %v", app.Status.DriverInfo.PodName, err)
+	}
+
+	return nil, nil
+}
+
+func (r *Reconciler) updateSparkApplicationState(ctx context.Context, app *v1beta2.SparkApplication) error {
+	if err := r.updateDriverState(ctx, app); err != nil {
+		return err
+	}
+
+	if err := r.updateExecutorState(ctx, app); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// updateSparkApplicationStatus updates the status of the SparkApplication.
+func (r *Reconciler) updateSparkApplicationStatus(ctx context.Context, app *v1beta2.SparkApplication) error {
+	if err := r.client.Status().Update(ctx, app); err != nil {
+		return err
+	}
+	return nil
+}
+
+// deleteSparkResources deletes the resources associated with the SparkApplication.
+func (r *Reconciler) deleteSparkResources(ctx context.Context, app *v1beta2.SparkApplication) error {
+	if err := r.deleteDriverPod(ctx, app); err != nil {
+		return err
+	}
+
+	if err := r.deleteWebUIService(ctx, app); err != nil {
+		return err
+	}
+
+	if err := r.deleteWebUIIngress(ctx, app); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *Reconciler) deleteDriverPod(ctx context.Context, app *v1beta2.SparkApplication) error {
+	podName := app.Status.DriverInfo.PodName
+	// Derive the driver pod name in case the driver pod name was not recorded in the status,
+	// which could happen if the status update right after submission failed.
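+	// util.GetDriverPodName derives the same name that was used at submission
+	// time (presumably deterministic from the app spec), so the delete below
+	// still targets the right pod.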
+ if podName == "" { + podName = util.GetDriverPodName(app) + } + + logger.Info("Deleting driver pod", "name", podName, "namespace", app.Namespace) + if err := r.client.Delete( + ctx, + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: podName, + Namespace: app.Namespace, + }, + }, + ); err != nil && !errors.IsNotFound(err) { + return err + } + + return nil +} + +func (r *Reconciler) deleteWebUIService(ctx context.Context, app *v1beta2.SparkApplication) error { + svcName := app.Status.DriverInfo.WebUIServiceName + if svcName == "" { + return nil + } + logger.Info("Deleting Spark web UI service", "name", svcName, "namespace", app.Namespace) + if err := r.client.Delete( + ctx, + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: svcName, + Namespace: app.Namespace, + }, + }, + &client.DeleteOptions{ + GracePeriodSeconds: util.Int64Ptr(0), + }, + ); err != nil && !errors.IsNotFound(err) { + return err + } + return nil +} + +func (r *Reconciler) deleteWebUIIngress(ctx context.Context, app *v1beta2.SparkApplication) error { + ingressName := app.Status.DriverInfo.WebUIIngressName + if ingressName == "" { + return nil + } + + if util.IngressCapabilities.Has("networking.k8s.io/v1") { + logger.Info("Deleting Spark web UI ingress", "name", ingressName, "namespace", app.Namespace) + if err := r.client.Delete( + ctx, + &networkingv1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: ingressName, + Namespace: app.Namespace, + }, + }, + &client.DeleteOptions{ + GracePeriodSeconds: util.Int64Ptr(0), + }, + ); err != nil && !errors.IsNotFound(err) { + return err + } + } + + if util.IngressCapabilities.Has("extensions/v1beta1") { + logger.V(1).Info("Deleting extensions/v1beta1 Spark UI Ingress", "name", ingressName, "namespace", app.Namespace) + if err := r.client.Delete( + context.TODO(), + &extensionsv1beta1.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: ingressName, + Namespace: app.Namespace, + }, + }, + &client.DeleteOptions{ + GracePeriodSeconds: util.Int64Ptr(0), + }, + ); err != nil && !errors.IsNotFound(err) { + return err + } + } + + return nil +} + +// Validate that any Spark resources (driver/Service/Ingress) created for the application have been deleted. +func (r *Reconciler) validateSparkResourceDeletion(ctx context.Context, app *v1beta2.SparkApplication) bool { + // Validate whether driver pod has been deleted. + driverPodName := app.Status.DriverInfo.PodName + // Derive the driver pod name in case the driver pod name was not recorded in the status, + // which could happen if the status update right after submission failed. + if driverPodName == "" { + driverPodName = util.GetDriverPodName(app) + } + if err := r.client.Get(ctx, types.NamespacedName{Name: driverPodName, Namespace: app.Namespace}, &corev1.Pod{}); err == nil || !errors.IsNotFound(err) { + return false + } + + // Validate whether Spark web UI service has been deleted. + sparkUIServiceName := app.Status.DriverInfo.WebUIServiceName + if sparkUIServiceName != "" { + if err := r.client.Get(ctx, types.NamespacedName{Name: sparkUIServiceName, Namespace: app.Namespace}, &corev1.Service{}); err == nil || !errors.IsNotFound(err) { + return false + } + } + + // Validate whether Spark web UI ingress has been deleted. 
+	sparkUIIngressName := app.Status.DriverInfo.WebUIIngressName
+	if sparkUIIngressName != "" {
+		if err := r.client.Get(ctx, types.NamespacedName{Name: sparkUIIngressName, Namespace: app.Namespace}, &networkingv1.Ingress{}); err == nil || !errors.IsNotFound(err) {
+			return false
+		}
+	}
+
+	return true
+}
+
+func (r *Reconciler) recordSparkApplicationEvent(app *v1beta2.SparkApplication) {
+	switch app.Status.AppState.State {
+	case v1beta2.ApplicationStateNew:
+		r.recorder.Eventf(
+			app,
+			corev1.EventTypeNormal,
+			common.EventSparkApplicationAdded,
+			"SparkApplication %s was added, enqueuing it for submission",
+			app.Name,
+		)
+	case v1beta2.ApplicationStateSubmitted:
+		r.recorder.Eventf(
+			app,
+			corev1.EventTypeNormal,
+			common.EventSparkApplicationSubmitted,
+			"SparkApplication %s was submitted successfully",
+			app.Name,
+		)
+	case v1beta2.ApplicationStateFailedSubmission:
+		r.recorder.Eventf(
+			app,
+			corev1.EventTypeWarning,
+			common.EventSparkApplicationSubmissionFailed,
+			"failed to submit SparkApplication %s: %s",
+			app.Name,
+			app.Status.AppState.ErrorMessage,
+		)
+	case v1beta2.ApplicationStateCompleted:
+		r.recorder.Eventf(
+			app,
+			corev1.EventTypeNormal,
+			common.EventSparkApplicationCompleted,
+			"SparkApplication %s completed",
+			app.Name,
+		)
+	case v1beta2.ApplicationStateFailed:
+		r.recorder.Eventf(
+			app,
+			corev1.EventTypeWarning,
+			common.EventSparkApplicationFailed,
+			"SparkApplication %s failed: %s",
+			app.Name,
+			app.Status.AppState.ErrorMessage,
+		)
+	case v1beta2.ApplicationStatePendingRerun:
+		r.recorder.Eventf(
+			app,
+			corev1.EventTypeWarning,
+			common.EventSparkApplicationPendingRerun,
+			"SparkApplication %s is pending rerun",
+			app.Name,
+		)
+	}
+}
+
+func (r *Reconciler) recordDriverEvent(app *v1beta2.SparkApplication, state v1beta2.DriverState, name string) {
+	switch state {
+	case v1beta2.DriverStatePending:
+		r.recorder.Eventf(app, corev1.EventTypeNormal, common.EventSparkDriverPending, "Driver %s is pending", name)
+	case v1beta2.DriverStateRunning:
+		r.recorder.Eventf(app, corev1.EventTypeNormal, common.EventSparkDriverRunning, "Driver %s is running", name)
+	case v1beta2.DriverStateCompleted:
+		r.recorder.Eventf(app, corev1.EventTypeNormal, common.EventSparkDriverCompleted, "Driver %s completed", name)
+	case v1beta2.DriverStateFailed:
+		r.recorder.Eventf(app, corev1.EventTypeWarning, common.EventSparkDriverFailed, "Driver %s failed", name)
+	case v1beta2.DriverStateUnknown:
+		r.recorder.Eventf(app, corev1.EventTypeWarning, common.EventSparkDriverUnknown, "Driver %s in unknown state", name)
+	}
+}
+
+// recordExecutorEvent records an event for the given executor state; args are
+// spread into the corresponding message format string.
+func (r *Reconciler) recordExecutorEvent(app *v1beta2.SparkApplication, state v1beta2.ExecutorState, args ...interface{}) {
+	switch state {
+	case v1beta2.ExecutorStatePending:
+		r.recorder.Eventf(app, corev1.EventTypeNormal, common.EventSparkExecutorPending, "Executor %s is pending", args...)
+	case v1beta2.ExecutorStateRunning:
+		r.recorder.Eventf(app, corev1.EventTypeNormal, common.EventSparkExecutorRunning, "Executor %s is running", args...)
+	case v1beta2.ExecutorStateCompleted:
+		r.recorder.Eventf(app, corev1.EventTypeNormal, common.EventSparkExecutorCompleted, "Executor %s completed", args...)
+	case v1beta2.ExecutorStateFailed:
+		r.recorder.Eventf(app, corev1.EventTypeWarning, common.EventSparkExecutorFailed, "Executor %s failed with ExitCode: %d, Reason: %s", args...)
+	case v1beta2.ExecutorStateUnknown:
+		r.recorder.Eventf(app, corev1.EventTypeWarning, common.EventSparkExecutorUnknown, "Executor %s in unknown state", args...)
+	}
+}
+
+// resetSparkApplicationStatus resets the status of the SparkApplication
+// depending on the state it is transitioning from.
+func (r *Reconciler) resetSparkApplicationStatus(app *v1beta2.SparkApplication) {
+	status := &app.Status
+	switch status.AppState.State {
+	case v1beta2.ApplicationStateInvalidating:
+		status.SparkApplicationID = ""
+		status.SubmissionAttempts = 0
+		status.ExecutionAttempts = 0
+		status.LastSubmissionAttemptTime = metav1.Time{}
+		status.TerminationTime = metav1.Time{}
+		status.AppState.ErrorMessage = ""
+		status.ExecutorState = nil
+	case v1beta2.ApplicationStatePendingRerun:
+		status.SparkApplicationID = ""
+		status.SubmissionAttempts = 0
+		status.LastSubmissionAttemptTime = metav1.Time{}
+		status.DriverInfo = v1beta2.DriverInfo{}
+		status.AppState.ErrorMessage = ""
+		status.ExecutorState = nil
+	}
+}
+
+func (r *Reconciler) shouldDoBatchScheduling(app *v1beta2.SparkApplication) (bool, scheduler.Interface) {
+	if r.registry == nil || app.Spec.BatchScheduler == nil || *app.Spec.BatchScheduler == "" {
+		return false, nil
+	}
+
+	var err error
+	var scheduler scheduler.Interface
+
+	schedulerName := *app.Spec.BatchScheduler
+	switch schedulerName {
+	case common.VolcanoSchedulerName:
+		config := &volcano.Config{
+			RestConfig: r.manager.GetConfig(),
+		}
+		scheduler, err = r.registry.GetScheduler(schedulerName, config)
+	}
+
+	if err != nil || scheduler == nil {
+		logger.Error(err, "Failed to get scheduler for SparkApplication", "name", app.Name, "namespace", app.Namespace, "scheduler", schedulerName)
+		return false, nil
+	}
+	return scheduler.ShouldSchedule(app), scheduler
+}
+
+// cleanUpOnTermination cleans up batch scheduler resources when the SparkApplication is terminated.
+func (r *Reconciler) cleanUpOnTermination(_, newApp *v1beta2.SparkApplication) error {
+	if needScheduling, scheduler := r.shouldDoBatchScheduling(newApp); needScheduling {
+		if err := scheduler.Cleanup(newApp); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/internal/controller/sparkapplication/controller_test.go b/internal/controller/sparkapplication/controller_test.go
new file mode 100644
index 0000000000..07e3b0606c
--- /dev/null
+++ b/internal/controller/sparkapplication/controller_test.go
@@ -0,0 +1,290 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sparkapplication_test
+
+import (
+	"context"
+	"time"
+
+	. "github.com/onsi/ginkgo/v2"
+	.
"github.com/onsi/gomega" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/kubeflow/spark-operator/api/v1beta2" + "github.com/kubeflow/spark-operator/internal/controller/sparkapplication" + "github.com/kubeflow/spark-operator/pkg/common" + "github.com/kubeflow/spark-operator/pkg/util" +) + +var _ = Describe("SparkApplication Controller", func() { + Context("When reconciling a new SparkApplication", func() { + ctx := context.Background() + appName := "test" + appNamespace := "default" + key := types.NamespacedName{ + Name: appName, + Namespace: appNamespace, + } + + BeforeEach(func() { + By("Creating a test SparkApplication") + app := &v1beta2.SparkApplication{} + if err := k8sClient.Get(ctx, key, app); err != nil && errors.IsNotFound(err) { + app = &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: appName, + Namespace: appNamespace, + }, + } + v1beta2.SetSparkApplicationDefaults(app) + Expect(k8sClient.Create(ctx, app)).To(Succeed()) + + app.Status.AppState.State = v1beta2.ApplicationStateCompleted + Expect(k8sClient.Status().Update(ctx, app)).To(Succeed()) + } + }) + + AfterEach(func() { + app := &v1beta2.SparkApplication{} + Expect(k8sClient.Get(ctx, key, app)).To(Succeed()) + + By("Deleting the created test SparkApplication") + Expect(k8sClient.Delete(ctx, app)).To(Succeed()) + }) + }) + + Context("When reconciling a completed SparkApplication", func() { + ctx := context.Background() + appName := "test" + appNamespace := "default" + key := types.NamespacedName{ + Name: appName, + Namespace: appNamespace, + } + + BeforeEach(func() { + By("Creating a test SparkApplication") + app := &v1beta2.SparkApplication{} + if err := k8sClient.Get(ctx, key, app); err != nil && errors.IsNotFound(err) { + app = &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: appName, + Namespace: appNamespace, + Labels: map[string]string{ + common.LabelSparkAppName: app.Name, + }, + }, + } + v1beta2.SetSparkApplicationDefaults(app) + Expect(k8sClient.Create(ctx, app)).To(Succeed()) + + app.Status.AppState.State = v1beta2.ApplicationStateCompleted + Expect(k8sClient.Status().Update(ctx, app)).To(Succeed()) + } + }) + + AfterEach(func() { + app := &v1beta2.SparkApplication{} + Expect(k8sClient.Get(ctx, key, app)).To(Succeed()) + + By("Deleting the created test SparkApplication") + Expect(k8sClient.Delete(ctx, app)).To(Succeed()) + }) + + It("Should successfully reconcile a completed SparkApplication", func() { + By("Reconciling the created test SparkApplication") + reconciler := sparkapplication.NewReconciler( + nil, + k8sClient.Scheme(), + k8sClient, + nil, + nil, + sparkapplication.Options{Namespaces: []string{appNamespace}}, + ) + result, err := reconciler.Reconcile(ctx, reconcile.Request{NamespacedName: key}) + Expect(err).NotTo(HaveOccurred()) + Expect(result.Requeue).To(BeFalse()) + }) + }) + + Context("When reconciling a completed expired SparkApplication", func() { + ctx := context.Background() + appName := "test" + appNamespace := "default" + key := types.NamespacedName{ + Name: appName, + Namespace: appNamespace, + } + + BeforeEach(func() { + By("Creating a test SparkApplication") + app := &v1beta2.SparkApplication{} + if err := k8sClient.Get(ctx, key, app); err != nil && errors.IsNotFound(err) { + app = &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: appName, + Namespace: appNamespace, + }, + } + 
v1beta2.SetSparkApplicationDefaults(app) + Expect(k8sClient.Create(ctx, app)).To(Succeed()) + + app.Status.AppState.State = v1beta2.ApplicationStateCompleted + Expect(k8sClient.Status().Update(ctx, app)).To(Succeed()) + } + }) + + AfterEach(func() { + app := &v1beta2.SparkApplication{} + Expect(errors.IsNotFound(k8sClient.Get(ctx, key, app))).To(BeTrue()) + }) + + It("Should delete expired SparkApplication", func() { + By("Set TimeToLiveSeconds and make the SparkApplication expired") + app := &v1beta2.SparkApplication{} + Expect(k8sClient.Get(ctx, key, app)).To(Succeed()) + app.Spec.TimeToLiveSeconds = util.Int64Ptr(60) + Expect(k8sClient.Update(ctx, app)).To(Succeed()) + app.Status.TerminationTime = metav1.NewTime(time.Now().Add(-2 * time.Minute)) + Expect(k8sClient.Status().Update(ctx, app)).To(Succeed()) + + By("Reconciling the expired SparkApplication") + reconciler := sparkapplication.NewReconciler( + nil, + k8sClient.Scheme(), + k8sClient, + nil, + nil, + sparkapplication.Options{Namespaces: []string{appNamespace}}, + ) + result, err := reconciler.Reconcile(ctx, reconcile.Request{NamespacedName: key}) + Expect(err).NotTo(HaveOccurred()) + Expect(result.Requeue).To(BeFalse()) + }) + }) + + Context("When reconciling a failed SparkApplication", func() { + ctx := context.Background() + appName := "test" + appNamespace := "default" + key := types.NamespacedName{ + Name: appName, + Namespace: appNamespace, + } + + BeforeEach(func() { + By("Creating a test SparkApplication") + app := &v1beta2.SparkApplication{} + if err := k8sClient.Get(ctx, key, app); err != nil && errors.IsNotFound(err) { + app = &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: appName, + Namespace: appNamespace, + }, + } + v1beta2.SetSparkApplicationDefaults(app) + Expect(k8sClient.Create(ctx, app)).To(Succeed()) + + app.Status.AppState.State = v1beta2.ApplicationStateFailed + Expect(k8sClient.Status().Update(ctx, app)).To(Succeed()) + } + }) + + AfterEach(func() { + app := &v1beta2.SparkApplication{} + Expect(k8sClient.Get(ctx, key, app)).To(Succeed()) + + By("Deleting the created test SparkApplication") + Expect(k8sClient.Delete(ctx, app)).To(Succeed()) + }) + + It("Should successfully reconcile a failed SparkApplication", func() { + By("Reconciling the created test SparkApplication") + reconciler := sparkapplication.NewReconciler( + nil, + k8sClient.Scheme(), + k8sClient, + nil, + nil, + sparkapplication.Options{Namespaces: []string{appNamespace}}, + ) + result, err := reconciler.Reconcile(ctx, reconcile.Request{NamespacedName: key}) + Expect(err).NotTo(HaveOccurred()) + Expect(result.Requeue).To(BeFalse()) + }) + }) + + Context("When reconciling a failed expired SparkApplication", func() { + ctx := context.Background() + appName := "test" + appNamespace := "default" + key := types.NamespacedName{ + Name: appName, + Namespace: appNamespace, + } + + BeforeEach(func() { + By("Creating a test SparkApplication") + app := &v1beta2.SparkApplication{} + if err := k8sClient.Get(ctx, key, app); err != nil && errors.IsNotFound(err) { + app = &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: appName, + Namespace: appNamespace, + }, + } + v1beta2.SetSparkApplicationDefaults(app) + Expect(k8sClient.Create(ctx, app)).To(Succeed()) + + app.Status.AppState.State = v1beta2.ApplicationStateFailed + Expect(k8sClient.Status().Update(ctx, app)).To(Succeed()) + } + }) + + AfterEach(func() { + app := &v1beta2.SparkApplication{} + Expect(errors.IsNotFound(k8sClient.Get(ctx, key, app))).To(BeTrue()) + }) + 
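+		// The application below is expired because TerminationTime (two minutes in
+		// the past) plus TimeToLiveSeconds (60s) is already behind time.Now(), so
+		// reconciling it is expected to delete the application.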
+ It("Should delete expired SparkApplication", func() { + By("Set TimeToLiveSeconds and make the SparkApplication expired") + app := &v1beta2.SparkApplication{} + Expect(k8sClient.Get(ctx, key, app)).To(Succeed()) + app.Spec.TimeToLiveSeconds = util.Int64Ptr(60) + Expect(k8sClient.Update(ctx, app)).To(Succeed()) + app.Status.TerminationTime = metav1.NewTime(time.Now().Add(-2 * time.Minute)) + Expect(k8sClient.Status().Update(ctx, app)).To(Succeed()) + + By("Reconciling the expired SparkApplication") + reconciler := sparkapplication.NewReconciler( + nil, + k8sClient.Scheme(), + k8sClient, + nil, + nil, + sparkapplication.Options{Namespaces: []string{appNamespace}}, + ) + result, err := reconciler.Reconcile(ctx, reconcile.Request{NamespacedName: key}) + Expect(err).NotTo(HaveOccurred()) + Expect(result.Requeue).To(BeFalse()) + }) + }) +}) diff --git a/pkg/controller/sparkapplication/driveringress.go b/internal/controller/sparkapplication/driveringress.go similarity index 65% rename from pkg/controller/sparkapplication/driveringress.go rename to internal/controller/sparkapplication/driveringress.go index 08dab31468..982ee8b03e 100644 --- a/pkg/controller/sparkapplication/driveringress.go +++ b/internal/controller/sparkapplication/driveringress.go @@ -19,26 +19,25 @@ package sparkapplication import ( "context" "fmt" - "github.com/golang/glog" "net/url" "regexp" - apiv1 "k8s.io/api/core/v1" - extensions "k8s.io/api/extensions/v1beta1" + corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" networkingv1 "k8s.io/api/networking/v1" + "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - clientset "k8s.io/client-go/kubernetes" - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - "github.com/kubeflow/spark-operator/pkg/config" + "github.com/kubeflow/spark-operator/api/v1beta2" + "github.com/kubeflow/spark-operator/pkg/common" "github.com/kubeflow/spark-operator/pkg/util" ) // SparkService encapsulates information about the driver UI service. 
type SparkService struct { serviceName string - serviceType apiv1.ServiceType + serviceType corev1.ServiceType servicePort int32 servicePortName string targetPort intstr.IntOrString @@ -56,8 +55,8 @@ type SparkIngress struct { ingressTLS []networkingv1.IngressTLS } -var ingressAppNameURLRegex = regexp.MustCompile("{{\\s*[$]appName\\s*}}") -var ingressAppNamespaceURLRegex = regexp.MustCompile("{{\\s*[$]appNamespace\\s*}}") +var ingressAppNameURLRegex = regexp.MustCompile(`{{\s*[$]appName\s*}}`) +var ingressAppNamespaceURLRegex = regexp.MustCompile(`{{\s*[$]appNamespace\s*}}`) func getDriverIngressURL(ingressURLFormat string, appName string, appNamespace string) (*url.URL, error) { ingressURL := ingressAppNamespaceURLRegex.ReplaceAllString(ingressAppNameURLRegex.ReplaceAllString(ingressURLFormat, appName), appNamespace) @@ -75,21 +74,20 @@ func getDriverIngressURL(ingressURLFormat string, appName string, appNamespace s return parsedURL, nil } -func createDriverIngress(app *v1beta2.SparkApplication, driverIngressConfiguration *v1beta2.DriverIngressConfiguration, service SparkService, ingressURL *url.URL, ingressClassName string, kubeClient clientset.Interface) (*SparkIngress, error) { +func (r *Reconciler) createDriverIngress(app *v1beta2.SparkApplication, driverIngressConfiguration *v1beta2.DriverIngressConfiguration, service SparkService, ingressURL *url.URL, ingressClassName string) (*SparkIngress, error) { if driverIngressConfiguration.ServicePort == nil { return nil, fmt.Errorf("cannot create Driver Ingress for application %s/%s due to empty ServicePort on driverIngressConfiguration", app.Namespace, app.Name) } ingressName := fmt.Sprintf("%s-ing-%d", app.Name, *driverIngressConfiguration.ServicePort) if util.IngressCapabilities.Has("networking.k8s.io/v1") { - return createDriverIngress_v1(app, service, ingressName, ingressURL, ingressClassName, kubeClient) - } else { - return createDriverIngress_legacy(app, service, ingressName, ingressURL, kubeClient) + return r.createDriverIngressV1(app, service, ingressName, ingressURL, ingressClassName) } + return r.createDriverIngressLegacy(app, service, ingressName, ingressURL) } -func createDriverIngress_v1(app *v1beta2.SparkApplication, service SparkService, ingressName string, ingressURL *url.URL, ingressClassName string, kubeClient clientset.Interface) (*SparkIngress, error) { - ingressResourceAnnotations := getIngressResourceAnnotations(app) - ingressTlsHosts := getIngressTlsHosts(app) +func (r *Reconciler) createDriverIngressV1(app *v1beta2.SparkApplication, service SparkService, ingressName string, ingressURL *url.URL, ingressClassName string) (*SparkIngress, error) { + ingressResourceAnnotations := util.GetWebUIIngressAnnotations(app) + ingressTLSHosts := util.GetWebUIIngressTLS(app) ingressURLPath := ingressURL.Path // If we're serving on a subpath, we need to ensure we create capture groups @@ -99,12 +97,12 @@ func createDriverIngress_v1(app *v1beta2.SparkApplication, service SparkService, implementationSpecific := networkingv1.PathTypeImplementationSpecific - ingress := networkingv1.Ingress{ + ingress := &networkingv1.Ingress{ ObjectMeta: metav1.ObjectMeta{ Name: ingressName, Namespace: app.Namespace, - Labels: getResourceLabels(app), - OwnerReferences: []metav1.OwnerReference{*getOwnerReference(app)}, + Labels: util.GetResourceLabels(app), + OwnerReferences: []metav1.OwnerReference{util.GetOwnerReference(app)}, }, Spec: networkingv1.IngressSpec{ Rules: []networkingv1.IngressRule{{ @@ -140,53 +138,52 @@ func createDriverIngress_v1(app 
*v1beta2.SparkApplication, service SparkService, } ingress.ObjectMeta.Annotations["nginx.ingress.kubernetes.io/rewrite-target"] = "/$2" } - if len(ingressTlsHosts) != 0 { - ingress.Spec.TLS = ingressTlsHosts + if len(ingressTLSHosts) != 0 { + ingress.Spec.TLS = ingressTLSHosts } if len(ingressClassName) != 0 { ingress.Spec.IngressClassName = &ingressClassName } - glog.Infof("Creating an Ingress %s for the Spark UI for application %s", ingress.Name, app.Name) - _, err := kubeClient.NetworkingV1().Ingresses(ingress.Namespace).Create(context.TODO(), &ingress, metav1.CreateOptions{}) - if err != nil { - return nil, err + logger.Info("Creating networking.v1/Ingress for SparkApplication web UI", "name", app.Name, "namespace", app.Namespace, "ingressName", ingress.Name) + if err := r.client.Create(context.TODO(), ingress); err != nil { + return nil, fmt.Errorf("failed to create ingress %s/%s: %v", ingress.Namespace, ingress.Name, err) } return &SparkIngress{ ingressName: ingress.Name, ingressURL: ingressURL, ingressClassName: ingressClassName, annotations: ingress.Annotations, - ingressTLS: ingressTlsHosts, + ingressTLS: ingressTLSHosts, }, nil } -func createDriverIngress_legacy(app *v1beta2.SparkApplication, service SparkService, ingressName string, ingressURL *url.URL, kubeClient clientset.Interface) (*SparkIngress, error) { - ingressResourceAnnotations := getIngressResourceAnnotations(app) - // var ingressTlsHosts networkingv1.IngressTLS[] - // That we convert later for extensionsv1beta1, but return as is in SparkIngress - ingressTlsHosts := getIngressTlsHosts(app) +func (r *Reconciler) createDriverIngressLegacy(app *v1beta2.SparkApplication, service SparkService, ingressName string, ingressURL *url.URL) (*SparkIngress, error) { + ingressResourceAnnotations := util.GetWebUIIngressAnnotations(app) + // var ingressTLSHosts networkingv1.IngressTLS[] + // That we convert later for extensionsv1beta1, but return as is in SparkIngress. + ingressTLSHosts := util.GetWebUIIngressTLS(app) ingressURLPath := ingressURL.Path - // If we're serving on a subpath, we need to ensure we create capture groups + // If we're serving on a subpath, we need to ensure we create capture groups. 
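+	// The appended `(/|$)(.*)` capture groups pair with the
+	// `nginx.ingress.kubernetes.io/rewrite-target: /$2` annotation set below, so a
+	// request under the subpath is rewritten to the path the driver service expects.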
if ingressURLPath != "" && ingressURLPath != "/" { ingressURLPath = ingressURLPath + "(/|$)(.*)" } - ingress := extensions.Ingress{ + ingress := &extensionsv1beta1.Ingress{ ObjectMeta: metav1.ObjectMeta{ Name: ingressName, Namespace: app.Namespace, - Labels: getResourceLabels(app), - OwnerReferences: []metav1.OwnerReference{*getOwnerReference(app)}, + Labels: util.GetResourceLabels(app), + OwnerReferences: []metav1.OwnerReference{util.GetOwnerReference(app)}, }, - Spec: extensions.IngressSpec{ - Rules: []extensions.IngressRule{{ + Spec: extensionsv1beta1.IngressSpec{ + Rules: []extensionsv1beta1.IngressRule{{ Host: ingressURL.Host, - IngressRuleValue: extensions.IngressRuleValue{ - HTTP: &extensions.HTTPIngressRuleValue{ - Paths: []extensions.HTTPIngressPath{{ - Backend: extensions.IngressBackend{ + IngressRuleValue: extensionsv1beta1.IngressRuleValue{ + HTTP: &extensionsv1beta1.HTTPIngressRuleValue{ + Paths: []extensionsv1beta1.HTTPIngressPath{{ + Backend: extensionsv1beta1.IngressBackend{ ServiceName: service.serviceName, ServicePort: intstr.IntOrString{ Type: intstr.Int, @@ -212,52 +209,51 @@ func createDriverIngress_legacy(app *v1beta2.SparkApplication, service SparkServ } ingress.ObjectMeta.Annotations["nginx.ingress.kubernetes.io/rewrite-target"] = "/$2" } - if len(ingressTlsHosts) != 0 { - ingress.Spec.TLS = convertIngressTlsHostsToLegacy(ingressTlsHosts) + if len(ingressTLSHosts) != 0 { + ingress.Spec.TLS = convertIngressTLSHostsToLegacy(ingressTLSHosts) } - glog.Infof("Creating an extensions/v1beta1 Ingress %s for application %s", ingress.Name, app.Name) - _, err := kubeClient.ExtensionsV1beta1().Ingresses(ingress.Namespace).Create(context.TODO(), &ingress, metav1.CreateOptions{}) - if err != nil { - return nil, err + logger.Info("Creating extensions.v1beta1/Ingress for SparkApplication web UI", app.Name, "namespace", app.Namespace, "ingressName", ingress.Name) + if err := r.client.Create(context.TODO(), ingress); err != nil { + return nil, fmt.Errorf("failed to create ingress %s/%s: %v", ingress.Namespace, ingress.Name, err) } return &SparkIngress{ ingressName: ingress.Name, ingressURL: ingressURL, annotations: ingress.Annotations, - ingressTLS: ingressTlsHosts, + ingressTLS: ingressTLSHosts, }, nil } -func convertIngressTlsHostsToLegacy(ingressTlsHosts []networkingv1.IngressTLS) []extensions.IngressTLS { - var ingressTlsHosts_legacy []extensions.IngressTLS - for _, ingressTlsHost := range ingressTlsHosts { - ingressTlsHosts_legacy = append(ingressTlsHosts_legacy, extensions.IngressTLS{ - Hosts: ingressTlsHost.Hosts, - SecretName: ingressTlsHost.SecretName, +func convertIngressTLSHostsToLegacy(ingressTLSHosts []networkingv1.IngressTLS) []extensionsv1beta1.IngressTLS { + var ingressTLSHostsLegacy []extensionsv1beta1.IngressTLS + for _, ingressTLSHost := range ingressTLSHosts { + ingressTLSHostsLegacy = append(ingressTLSHostsLegacy, extensionsv1beta1.IngressTLS{ + Hosts: ingressTLSHost.Hosts, + SecretName: ingressTLSHost.SecretName, }) } - return ingressTlsHosts_legacy + return ingressTLSHostsLegacy } -func createDriverIngressService( +func (r *Reconciler) createDriverIngressService( app *v1beta2.SparkApplication, portName string, port int32, targetPort int32, serviceName string, - serviceType apiv1.ServiceType, + serviceType corev1.ServiceType, serviceAnnotations map[string]string, serviceLabels map[string]string, - kubeClient clientset.Interface) (*SparkService, error) { - service := &apiv1.Service{ +) (*SparkService, error) { + service := &corev1.Service{ ObjectMeta: 
metav1.ObjectMeta{ Name: serviceName, Namespace: app.Namespace, - Labels: getResourceLabels(app), - OwnerReferences: []metav1.OwnerReference{*getOwnerReference(app)}, + Labels: util.GetResourceLabels(app), + OwnerReferences: []metav1.OwnerReference{util.GetOwnerReference(app)}, }, - Spec: apiv1.ServiceSpec{ - Ports: []apiv1.ServicePort{ + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ { Name: portName, Port: port, @@ -268,26 +264,30 @@ func createDriverIngressService( }, }, Selector: map[string]string{ - config.SparkAppNameLabel: app.Name, - config.SparkRoleLabel: config.SparkDriverRole, + common.LabelSparkAppName: app.Name, + common.LabelSparkRole: common.SparkRoleDriver, }, Type: serviceType, }, } + if len(serviceLabels) != 0 { + service.ObjectMeta.Labels = serviceLabels + } + if len(serviceAnnotations) != 0 { service.ObjectMeta.Annotations = serviceAnnotations } - if len(serviceLabels) != 0 { - glog.Infof("Creating a service labels %s for the Driver Ingress: %v", service.Name, &serviceLabels) - service.ObjectMeta.Labels = serviceLabels - } + if err := r.client.Create(context.TODO(), service); err != nil { + if !errors.IsAlreadyExists(err) { + return nil, err + } - glog.Infof("Creating a service %s for the Driver Ingress for application %s", service.Name, app.Name) - service, err := kubeClient.CoreV1().Services(app.Namespace).Create(context.TODO(), service, metav1.CreateOptions{}) - if err != nil { - return nil, err + // Update the service if it already exists. + if err := r.client.Update(context.TODO(), service); err != nil { + return nil, err + } } return &SparkService{ @@ -305,7 +305,7 @@ func createDriverIngressService( func getDriverIngressServicePort(driverIngressConfiguration *v1beta2.DriverIngressConfiguration) (int32, error) { port := driverIngressConfiguration.ServicePort if port == nil { - return 0, fmt.Errorf("servie port is nil on driver ingress configuration") + return 0, fmt.Errorf("service port is nil on driver ingress configuration") } return *port, nil } @@ -326,11 +326,11 @@ func getDriverIngressServiceName(app *v1beta2.SparkApplication, port int32) stri return fmt.Sprintf("%s-driver-%d", app.Name, port) } -func getDriverIngressServiceType(driverIngressConfiguration *v1beta2.DriverIngressConfiguration) apiv1.ServiceType { +func getDriverIngressServiceType(driverIngressConfiguration *v1beta2.DriverIngressConfiguration) corev1.ServiceType { if driverIngressConfiguration.ServiceType != nil { return *driverIngressConfiguration.ServiceType } - return apiv1.ServiceTypeClusterIP + return corev1.ServiceTypeClusterIP } func getDriverIngressServiceAnnotations(driverIngressConfiguration *v1beta2.DriverIngressConfiguration) map[string]string { @@ -353,10 +353,10 @@ func getDriverIngressServiceLabels(driverIngressConfiguration *v1beta2.DriverIng return serviceLabels } -func createDriverIngressServiceFromConfiguration( +func (r *Reconciler) createDriverIngressServiceFromConfiguration( app *v1beta2.SparkApplication, driverIngressConfiguration *v1beta2.DriverIngressConfiguration, - kubeClient clientset.Interface) (*SparkService, error) { +) (*SparkService, error) { portName := getDriverIngressServicePortName(driverIngressConfiguration) port, err := getDriverIngressServicePort(driverIngressConfiguration) if err != nil { @@ -366,5 +366,5 @@ func createDriverIngressServiceFromConfiguration( serviceType := getDriverIngressServiceType(driverIngressConfiguration) serviceAnnotations := getDriverIngressServiceAnnotations(driverIngressConfiguration) serviceLabels := 
getDriverIngressServiceLabels(driverIngressConfiguration) - return createDriverIngressService(app, portName, port, port, serviceName, serviceType, serviceAnnotations, serviceLabels, kubeClient) + return r.createDriverIngressService(app, portName, port, port, serviceName, serviceType, serviceAnnotations, serviceLabels) } diff --git a/internal/controller/sparkapplication/driveringress_test.go b/internal/controller/sparkapplication/driveringress_test.go new file mode 100644 index 0000000000..498ecc330a --- /dev/null +++ b/internal/controller/sparkapplication/driveringress_test.go @@ -0,0 +1,713 @@ +/* +Copyright 2024 spark-operator contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sparkapplication + +// func TestCreateDriverIngressService(t *testing.T) { +// type testcase struct { +// name string +// app *v1beta2.SparkApplication +// expectedServices []SparkService +// expectedSelector map[string]string +// expectError bool +// } +// testFn := func(test testcase, t *testing.T) { +// fakeClient := fake.NewSimpleClientset() +// util.IngressCapabilities = map[string]bool{"networking.k8s.io/v1": true} +// if len(test.expectedServices) != len(test.app.Spec.DriverIngressOptions) { +// t.Errorf("%s: size of test.expectedServices (%d) and test.app.Spec.DriverIngressOptions (%d) is different for %s", +// test.name, len(test.expectedServices), len(test.app.Spec.DriverIngressOptions), test.app.Name) +// } +// for i, driverIngressConfiguration := range test.app.Spec.DriverIngressOptions { +// sparkService, err := createDriverIngressServiceFromConfiguration(test.app, &driverIngressConfiguration, fakeClient) +// if err != nil { +// if test.expectError { +// return +// } +// t.Fatal(err) +// } +// expectedService := test.expectedServices[i] +// if sparkService.serviceName != expectedService.serviceName { +// t.Errorf("%s: for service name wanted %s got %s", test.name, expectedService.serviceName, sparkService.serviceName) +// } +// service, err := fakeClient.CoreV1(). +// Services(test.app.Namespace). 
+// Get(context.TODO(), sparkService.serviceName, metav1.GetOptions{}) +// if err != nil { +// if test.expectError { +// return +// } +// t.Fatal(err) +// } +// if service.Labels[common.SparkAppNameLabel] != test.app.Name { +// t.Errorf("%s: service of app %s has the wrong labels", test.name, test.app.Name) +// } +// if !reflect.DeepEqual(test.expectedSelector, service.Spec.Selector) { +// t.Errorf("%s: for label selector wanted %s got %s", test.name, test.expectedSelector, service.Spec.Selector) +// } +// if service.Spec.Type != expectedService.serviceType { +// t.Errorf("%s: for service type wanted %s got %s", test.name, expectedService.serviceType, service.Spec.Type) +// } +// if len(service.Spec.Ports) != 1 { +// t.Errorf("%s: wanted a single port got %d ports", test.name, len(service.Spec.Ports)) +// } +// port := service.Spec.Ports[0] +// if port.Port != expectedService.servicePort { +// t.Errorf("%s: unexpected port wanted %d got %d", test.name, expectedService.servicePort, port.Port) +// } +// if port.Name != expectedService.servicePortName { +// t.Errorf("%s: unexpected port name wanted %s got %s", test.name, expectedService.servicePortName, port.Name) +// } +// serviceAnnotations := service.ObjectMeta.Annotations +// if !reflect.DeepEqual(serviceAnnotations, expectedService.serviceAnnotations) { +// t.Errorf("%s: unexpected annotations wanted %s got %s", test.name, expectedService.serviceAnnotations, serviceAnnotations) +// } +// serviceLabels := service.ObjectMeta.Labels +// if !reflect.DeepEqual(serviceLabels, expectedService.serviceLabels) { +// t.Errorf("%s: unexpected labels wanted %s got %s", test.name, expectedService.serviceLabels, serviceLabels) +// } +// } +// } +// serviceNameFormat := "%s-driver-%d" +// portNameFormat := "driver-ing-%d" +// app1 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo1", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ +// { +// ServicePort: util.Int32Ptr(8888), +// }, +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-1", +// ExecutionAttempts: 1, +// }, +// } +// app2 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo2", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ +// { +// ServicePort: util.Int32Ptr(8888), +// }, +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-2", +// ExecutionAttempts: 2, +// }, +// } +// app3 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo3", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ +// { +// ServicePort: nil, +// }, +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-3", +// }, +// } +// var appPort int32 = 80 +// app4 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo4", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ +// { +// ServicePort: &appPort, +// }, +// }, +// SparkConf: map[string]string{ +// sparkUIPortConfigurationKey: "4041", +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-3", +// }, +// } +// var 
serviceTypeNodePort apiv1.ServiceType = apiv1.ServiceTypeNodePort +// app5 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo5", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ +// { +// ServicePort: util.Int32Ptr(8888), +// ServiceType: &serviceTypeNodePort, +// }, +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-2", +// ExecutionAttempts: 2, +// }, +// } +// appPortName := "http-spark-test" +// app6 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo6", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ +// { +// ServicePort: &appPort, +// ServicePortName: &appPortName, +// }, +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-6", +// }, +// } +// app7 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo7", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ +// { +// ServicePort: util.Int32Ptr(8888), +// ServiceAnnotations: map[string]string{ +// "key": "value", +// }, +// }, +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-7", +// ExecutionAttempts: 1, +// }, +// } +// app8 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo8", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ +// { +// ServicePort: util.Int32Ptr(8888), +// ServiceLabels: map[string]string{ +// "sparkoperator.k8s.io/app-name": "foo8", +// "key": "value", +// }, +// }, +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-8", +// ExecutionAttempts: 1, +// }, +// } +// testcases := []testcase{ +// { +// name: "service with custom serviceport and serviceport and target port are same", +// app: app1, +// expectedServices: []SparkService{ +// { +// serviceName: fmt.Sprintf(serviceNameFormat, app1.GetName(), *app1.Spec.DriverIngressOptions[0].ServicePort), +// serviceType: apiv1.ServiceTypeClusterIP, +// servicePortName: fmt.Sprintf(portNameFormat, *app1.Spec.DriverIngressOptions[0].ServicePort), +// servicePort: *app1.Spec.DriverIngressOptions[0].ServicePort, +// serviceLabels: map[string]string{ +// "sparkoperator.k8s.io/app-name": "foo1", +// }, +// targetPort: intstr.IntOrString{ +// Type: intstr.Int, +// IntVal: int32(*app1.Spec.DriverIngressOptions[0].ServicePort), +// }, +// }, +// }, +// expectedSelector: map[string]string{ +// common.SparkAppNameLabel: "foo1", +// common.SparkRoleLabel: common.SparkDriverRole, +// }, +// expectError: false, +// }, +// { +// name: "service with default port", +// app: app2, +// expectedServices: []SparkService{ +// { +// serviceName: fmt.Sprintf(serviceNameFormat, app2.GetName(), *app2.Spec.DriverIngressOptions[0].ServicePort), +// serviceType: apiv1.ServiceTypeClusterIP, +// servicePortName: fmt.Sprintf(portNameFormat, *app2.Spec.DriverIngressOptions[0].ServicePort), +// servicePort: int32(*app2.Spec.DriverIngressOptions[0].ServicePort), +// serviceLabels: map[string]string{ +// "sparkoperator.k8s.io/app-name": "foo2", +// }, +// }, +// }, +// expectedSelector: map[string]string{ +// common.SparkAppNameLabel: 
"foo2", +// common.SparkRoleLabel: common.SparkDriverRole, +// }, +// expectError: false, +// }, +// { +// name: "service with custom serviceport and serviceport and target port are different", +// app: app4, +// expectedServices: []SparkService{ +// { +// serviceName: fmt.Sprintf(serviceNameFormat, app4.GetName(), *app4.Spec.DriverIngressOptions[0].ServicePort), +// serviceType: apiv1.ServiceTypeClusterIP, +// servicePortName: fmt.Sprintf(portNameFormat, *app4.Spec.DriverIngressOptions[0].ServicePort), +// servicePort: 80, +// serviceLabels: map[string]string{ +// "sparkoperator.k8s.io/app-name": "foo4", +// }, +// targetPort: intstr.IntOrString{ +// Type: intstr.Int, +// IntVal: int32(4041), +// }, +// }, +// }, +// expectedSelector: map[string]string{ +// common.SparkAppNameLabel: "foo4", +// common.SparkRoleLabel: common.SparkDriverRole, +// }, +// expectError: false, +// }, +// { +// name: "service with custom servicetype", +// app: app5, +// expectedServices: []SparkService{ +// { +// serviceName: fmt.Sprintf(serviceNameFormat, app5.GetName(), *app5.Spec.DriverIngressOptions[0].ServicePort), +// serviceType: apiv1.ServiceTypeNodePort, +// servicePortName: fmt.Sprintf(portNameFormat, *app5.Spec.DriverIngressOptions[0].ServicePort), +// servicePort: *app5.Spec.DriverIngressOptions[0].ServicePort, +// serviceLabels: map[string]string{ +// "sparkoperator.k8s.io/app-name": "foo5", +// }, +// }, +// }, +// expectedSelector: map[string]string{ +// common.SparkAppNameLabel: "foo5", +// common.SparkRoleLabel: common.SparkDriverRole, +// }, +// expectError: false, +// }, +// { +// name: "service with custom serviceportname", +// app: app6, +// expectedServices: []SparkService{ +// { +// serviceName: fmt.Sprintf(serviceNameFormat, app6.GetName(), *app6.Spec.DriverIngressOptions[0].ServicePort), +// serviceType: apiv1.ServiceTypeClusterIP, +// servicePortName: "http-spark-test", +// servicePort: int32(80), +// serviceLabels: map[string]string{ +// "sparkoperator.k8s.io/app-name": "foo6", +// }, +// }, +// }, +// expectedSelector: map[string]string{ +// common.SparkAppNameLabel: "foo6", +// common.SparkRoleLabel: common.SparkDriverRole, +// }, +// expectError: false, +// }, +// { +// name: "service with annotation", +// app: app7, +// expectedServices: []SparkService{ +// { +// serviceName: fmt.Sprintf(serviceNameFormat, app7.GetName(), *app7.Spec.DriverIngressOptions[0].ServicePort), +// serviceType: apiv1.ServiceTypeClusterIP, +// servicePortName: fmt.Sprintf(portNameFormat, *app7.Spec.DriverIngressOptions[0].ServicePort), +// servicePort: *app7.Spec.DriverIngressOptions[0].ServicePort, +// serviceAnnotations: map[string]string{ +// "key": "value", +// }, +// serviceLabels: map[string]string{ +// "sparkoperator.k8s.io/app-name": "foo7", +// }, +// targetPort: intstr.IntOrString{ +// Type: intstr.Int, +// IntVal: int32(4041), +// }, +// }, +// }, +// expectedSelector: map[string]string{ +// common.SparkAppNameLabel: "foo7", +// common.SparkRoleLabel: common.SparkDriverRole, +// }, +// expectError: false, +// }, +// { +// name: "service with custom labels", +// app: app8, +// expectedServices: []SparkService{ +// { +// serviceName: fmt.Sprintf(serviceNameFormat, app8.GetName(), *app8.Spec.DriverIngressOptions[0].ServicePort), +// serviceType: apiv1.ServiceTypeClusterIP, +// servicePortName: fmt.Sprintf(portNameFormat, *app8.Spec.DriverIngressOptions[0].ServicePort), +// servicePort: *app8.Spec.DriverIngressOptions[0].ServicePort, +// serviceLabels: map[string]string{ +// 
"sparkoperator.k8s.io/app-name": "foo8", +// "key": "value", +// }, +// targetPort: intstr.IntOrString{ +// Type: intstr.Int, +// IntVal: int32(4041), +// }, +// }, +// }, +// expectedSelector: map[string]string{ +// common.SparkAppNameLabel: "foo8", +// common.SparkRoleLabel: common.SparkDriverRole, +// }, +// expectError: false, +// }, +// { +// name: "service with bad port configurations", +// app: app3, +// expectError: true, +// expectedServices: []SparkService{{}}, +// }, +// } +// for _, test := range testcases { +// testFn(test, t) +// } +// } + +// func TestCreateDriverIngress(t *testing.T) { +// type testcase struct { +// name string +// app *v1beta2.SparkApplication +// expectedIngresses []SparkIngress +// expectError bool +// } + +// testFn := func(test testcase, t *testing.T, ingressURLFormat string, ingressClassName string) { +// fakeClient := fake.NewSimpleClientset() +// if len(test.expectedIngresses) != len(test.app.Spec.DriverIngressOptions) { +// t.Errorf("%s: size of test.expectedIngresses (%d) and test.app.Spec.DriverIngressOptions (%d) is different for %s", +// test.name, len(test.expectedIngresses), len(test.app.Spec.DriverIngressOptions), test.app.Name) +// } +// for i, driverIngressConfiguration := range test.app.Spec.DriverIngressOptions { +// sparkService, err := createDriverIngressServiceFromConfiguration(test.app, &driverIngressConfiguration, fakeClient) +// if err != nil { +// t.Fatal(err) +// } +// ingressURL, err := getDriverIngressURL(ingressURLFormat, test.app.Name, test.app.Namespace) +// if err != nil { +// t.Fatal(err) +// } +// sparkIngress, err := createDriverIngress(test.app, &driverIngressConfiguration, *sparkService, ingressURL, ingressClassName, fakeClient) +// if err != nil { +// if test.expectError { +// return +// } +// t.Fatal(err) +// } +// expectedIngress := test.expectedIngresses[i] +// if sparkIngress.ingressName != expectedIngress.ingressName { +// t.Errorf("Ingress name wanted %s got %s", expectedIngress.ingressName, sparkIngress.ingressName) +// } +// if sparkIngress.ingressURL.String() != expectedIngress.ingressURL.String() { +// t.Errorf("Ingress URL wanted %s got %s", expectedIngress.ingressURL, sparkIngress.ingressURL) +// } +// ingress, err := fakeClient.NetworkingV1().Ingresses(test.app.Namespace). 
+// Get(context.TODO(), sparkIngress.ingressName, metav1.GetOptions{}) +// if err != nil { +// t.Fatal(err) +// } +// if len(ingress.Annotations) != 0 { +// for key, value := range ingress.Annotations { +// if expectedIngress.annotations[key] != ingress.Annotations[key] { +// t.Errorf("Expected annotation: %s=%s but found : %s=%s", key, value, key, ingress.Annotations[key]) +// } +// } +// } +// if len(ingress.Spec.TLS) != 0 { +// for _, ingressTls := range ingress.Spec.TLS { +// if ingressTls.Hosts[0] != expectedIngress.ingressTLS[0].Hosts[0] { +// t.Errorf("Expected ingressTls host: %s but found : %s", expectedIngress.ingressTLS[0].Hosts[0], ingressTls.Hosts[0]) +// } +// if ingressTls.SecretName != expectedIngress.ingressTLS[0].SecretName { +// t.Errorf("Expected ingressTls secretName: %s but found : %s", expectedIngress.ingressTLS[0].SecretName, ingressTls.SecretName) +// } +// } +// } +// if ingress.Labels[common.SparkAppNameLabel] != test.app.Name { +// t.Errorf("Ingress of app %s has the wrong labels", test.app.Name) +// } + +// if len(ingress.Spec.Rules) != 1 { +// t.Errorf("No Ingress rules found.") +// } +// ingressRule := ingress.Spec.Rules[0] +// // If we have a path, then the ingress adds capture groups +// if ingressRule.IngressRuleValue.HTTP.Paths[0].Path != "" && ingressRule.IngressRuleValue.HTTP.Paths[0].Path != "/" { +// expectedIngress.ingressURL.Path = expectedIngress.ingressURL.Path + "(/|$)(.*)" +// } +// if ingressRule.Host+ingressRule.IngressRuleValue.HTTP.Paths[0].Path != expectedIngress.ingressURL.Host+expectedIngress.ingressURL.Path { +// t.Errorf("Ingress of app %s has the wrong host %s", ingressRule.Host+ingressRule.IngressRuleValue.HTTP.Paths[0].Path, expectedIngress.ingressURL.Host+expectedIngress.ingressURL.Path) +// } + +// if len(ingressRule.IngressRuleValue.HTTP.Paths) != 1 { +// t.Errorf("No Ingress paths found.") +// } +// ingressPath := ingressRule.IngressRuleValue.HTTP.Paths[0] +// if ingressPath.Backend.Service.Name != sparkService.serviceName { +// t.Errorf("Service name wanted %s got %s", sparkService.serviceName, ingressPath.Backend.Service.Name) +// } +// if *ingressPath.PathType != networkingv1.PathTypeImplementationSpecific { +// t.Errorf("PathType wanted %s got %s", networkingv1.PathTypeImplementationSpecific, *ingressPath.PathType) +// } +// if ingressPath.Backend.Service.Port.Number != sparkService.servicePort { +// t.Errorf("Service port wanted %v got %v", sparkService.servicePort, ingressPath.Backend.Service.Port.Number) +// } +// } +// } + +// ingressNameFormat := "%s-ing-%d" +// var appPort int32 = 80 +// app1 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ +// { +// ServicePort: &appPort, +// }, +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-1", +// DriverInfo: v1beta2.DriverInfo{ +// WebUIServiceName: "blah-service", +// }, +// }, +// } +// app2 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ +// { +// ServicePort: &appPort, +// IngressAnnotations: map[string]string{ +// "kubernetes.io/ingress.class": "nginx", +// "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", +// }, +// }, +// }, +// }, +// Status: 
v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-1", +// DriverInfo: v1beta2.DriverInfo{ +// WebUIServiceName: "blah-service", +// }, +// }, +// } +// app3 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ +// { +// ServicePort: &appPort, +// IngressAnnotations: map[string]string{ +// "kubernetes.io/ingress.class": "nginx", +// "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", +// }, +// IngressTLS: []networkingv1.IngressTLS{ +// {Hosts: []string{"host1", "host2"}, SecretName: "secret"}, +// }, +// }, +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-1", +// DriverInfo: v1beta2.DriverInfo{ +// WebUIServiceName: "blah-service", +// }, +// }, +// } +// app4 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ +// { +// ServicePort: &appPort, +// IngressAnnotations: map[string]string{ +// "kubernetes.io/ingress.class": "nginx", +// }, +// IngressTLS: []networkingv1.IngressTLS{ +// {Hosts: []string{"host1", "host2"}, SecretName: ""}, +// }, +// }, +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-1", +// DriverInfo: v1beta2.DriverInfo{ +// WebUIServiceName: "blah-service", +// }, +// }, +// } + +// testcases := []testcase{ +// { +// name: "simple ingress object", +// app: app1, +// expectedIngresses: []SparkIngress{ +// { +// ingressName: fmt.Sprintf(ingressNameFormat, app1.GetName(), *app1.Spec.DriverIngressOptions[0].ServicePort), +// ingressURL: parseURLAndAssertError(app1.GetName()+".ingress.clusterName.com", t), +// }, +// }, +// expectError: false, +// }, +// { +// name: "ingress with annotations and without tls configuration", +// app: app2, +// expectedIngresses: []SparkIngress{ +// { +// ingressName: fmt.Sprintf(ingressNameFormat, app2.GetName(), *app2.Spec.DriverIngressOptions[0].ServicePort), +// ingressURL: parseURLAndAssertError(app2.GetName()+".ingress.clusterName.com", t), +// annotations: map[string]string{ +// "kubernetes.io/ingress.class": "nginx", +// "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", +// }, +// }, +// }, +// expectError: false, +// }, +// { +// name: "ingress with annotations and tls configuration", +// app: app3, +// expectedIngresses: []SparkIngress{ +// { +// ingressName: fmt.Sprintf(ingressNameFormat, app3.GetName(), *app3.Spec.DriverIngressOptions[0].ServicePort), +// ingressURL: parseURLAndAssertError(app3.GetName()+".ingress.clusterName.com", t), +// annotations: map[string]string{ +// "kubernetes.io/ingress.class": "nginx", +// "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", +// }, +// ingressTLS: []networkingv1.IngressTLS{ +// {Hosts: []string{"host1", "host2"}, SecretName: "secret"}, +// }, +// }, +// }, +// expectError: false, +// }, +// { +// name: "ingress with incomplete list of annotations", +// app: app4, +// expectedIngresses: []SparkIngress{ +// { +// ingressName: fmt.Sprintf(ingressNameFormat, app4.GetName(), *app4.Spec.DriverIngressOptions[0].ServicePort), +// ingressURL: parseURLAndAssertError(app3.GetName()+".ingress.clusterName.com", t), +// annotations: map[string]string{ +// "kubernetes.io/ingress.class": "nginx", +// 
"nginx.ingress.kubernetes.io/force-ssl-redirect": "true", +// }, +// ingressTLS: []networkingv1.IngressTLS{ +// {Hosts: []string{"host1", "host2"}, SecretName: ""}, +// }, +// }, +// }, +// expectError: true, +// }, +// } + +// for _, test := range testcases { +// testFn(test, t, "{{$appName}}.ingress.clusterName.com", "") +// } + +// testcases = []testcase{ +// { +// name: "simple ingress object with ingress URL Format with path", +// app: app1, +// expectedIngresses: []SparkIngress{ +// { +// ingressName: fmt.Sprintf(ingressNameFormat, app1.GetName(), *app1.Spec.DriverIngressOptions[0].ServicePort), +// ingressURL: parseURLAndAssertError("ingress.clusterName.com/"+app1.GetNamespace()+"/"+app1.GetName(), t), +// annotations: map[string]string{ +// "nginx.ingress.kubernetes.io/rewrite-target": "/$2", +// }, +// }, +// }, +// expectError: false, +// }, +// } + +// for _, test := range testcases { +// testFn(test, t, "ingress.clusterName.com/{{$appNamespace}}/{{$appName}}", "") +// } + +// testcases = []testcase{ +// { +// name: "simple ingress object with ingressClassName set", +// app: app1, +// expectedIngresses: []SparkIngress{ +// { +// ingressName: fmt.Sprintf(ingressNameFormat, app1.GetName(), *app1.Spec.DriverIngressOptions[0].ServicePort), +// ingressURL: parseURLAndAssertError(app1.GetName()+".ingress.clusterName.com", t), +// ingressClassName: "nginx", +// }, +// }, +// expectError: false, +// }, +// } +// for _, test := range testcases { +// testFn(test, t, "{{$appName}}.ingress.clusterName.com", "nginx") +// } +// } diff --git a/internal/controller/sparkapplication/event_filter.go b/internal/controller/sparkapplication/event_filter.go new file mode 100644 index 0000000000..3fe49ee133 --- /dev/null +++ b/internal/controller/sparkapplication/event_filter.go @@ -0,0 +1,207 @@ +/* +Copyright 2024 The kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sparkapplication + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + "github.com/kubeflow/spark-operator/api/v1beta2" + "github.com/kubeflow/spark-operator/pkg/util" +) + +// sparkPodEventFilter filters Spark pod events. +type sparkPodEventFilter struct { + namespaces map[string]bool +} + +// sparkPodEventFilter implements the predicate.Predicate interface. +var _ predicate.Predicate = &sparkPodEventFilter{} + +// newSparkPodEventFilter creates a new SparkPodEventFilter instance. +func newSparkPodEventFilter(namespaces []string) *sparkPodEventFilter { + nsMap := make(map[string]bool) + for _, ns := range namespaces { + nsMap[ns] = true + } + + return &sparkPodEventFilter{ + namespaces: nsMap, + } +} + +// Create implements predicate.Predicate. 
+func (f *sparkPodEventFilter) Create(e event.CreateEvent) bool { + pod, ok := e.Object.(*corev1.Pod) + if !ok { + return false + } + + return f.filter(pod) +} + +// Update implements predicate.Predicate. +func (f *sparkPodEventFilter) Update(e event.UpdateEvent) bool { + oldPod, ok := e.ObjectOld.(*corev1.Pod) + if !ok { + return false + } + + newPod, ok := e.ObjectNew.(*corev1.Pod) + if !ok { + return false + } + + if newPod.Status.Phase == oldPod.Status.Phase { + return false + } + + return f.filter(newPod) +} + +// Delete implements predicate.Predicate. +func (f *sparkPodEventFilter) Delete(e event.DeleteEvent) bool { + pod, ok := e.Object.(*corev1.Pod) + if !ok { + return false + } + + return f.filter(pod) +} + +// Generic implements predicate.Predicate. +func (f *sparkPodEventFilter) Generic(e event.GenericEvent) bool { + pod, ok := e.Object.(*corev1.Pod) + if !ok { + return false + } + + return f.filter(pod) +} + +func (f *sparkPodEventFilter) filter(pod *corev1.Pod) bool { + if !util.IsLaunchedBySparkOperator(pod) { + return false + } + + return f.namespaces[metav1.NamespaceAll] || f.namespaces[pod.Namespace] +} + +type EventFilter struct { + client client.Client + recorder record.EventRecorder + namespaces map[string]bool +} + +var _ predicate.Predicate = &EventFilter{} + +func NewSparkApplicationEventFilter(client client.Client, recorder record.EventRecorder, namespaces []string) *EventFilter { + nsMap := make(map[string]bool) + for _, ns := range namespaces { + nsMap[ns] = true + } + + return &EventFilter{ + client: client, + recorder: recorder, + namespaces: nsMap, + } +} + +// Create implements predicate.Predicate. +func (f *EventFilter) Create(e event.CreateEvent) bool { + app, ok := e.Object.(*v1beta2.SparkApplication) + if !ok { + return false + } + + return f.filter(app) +} + +// Update implements predicate.Predicate. +func (f *EventFilter) Update(e event.UpdateEvent) bool { + oldApp, ok := e.ObjectOld.(*v1beta2.SparkApplication) + if !ok { + return false + } + + newApp, ok := e.ObjectNew.(*v1beta2.SparkApplication) + if !ok { + return false + } + + if !f.filter(newApp) { + return false + } + + if oldApp.ResourceVersion == newApp.ResourceVersion && !util.IsExpired(newApp) && !util.ShouldRetry(newApp) { + return false + } + + // The spec has changed. This is currently best effort as we can potentially miss updates + // and end up in an inconsistent state. + if !equality.Semantic.DeepEqual(oldApp.Spec, newApp.Spec) { + // Force-set the application status to Invalidating which handles clean-up and application re-run. + newApp.Status.AppState.State = v1beta2.ApplicationStateInvalidating + logger.Info("Updating SparkApplication status", "name", newApp.Name, "namespace", newApp.Namespace, " oldState", oldApp.Status.AppState.State, "newState", newApp.Status.AppState.State) + if err := f.client.Status().Update(context.TODO(), newApp); err != nil { + logger.Error(err, "Failed to update application status", "application", newApp.Name) + f.recorder.Eventf( + newApp, + corev1.EventTypeWarning, + "SparkApplicationSpecUpdateFailed", + "Failed to update spec for SparkApplication %s: %v", + newApp.Name, + err, + ) + return false + } + } + + return true +} + +// Delete implements predicate.Predicate. +func (f *EventFilter) Delete(e event.DeleteEvent) bool { + app, ok := e.Object.(*v1beta2.SparkApplication) + if !ok { + return false + } + + return f.filter(app) +} + +// Generic implements predicate.Predicate. 
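+// Generic events carry no old/new object pair, so they are subject only to
+// the namespace filter, like creates and deletes.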
+func (f *EventFilter) Generic(e event.GenericEvent) bool { + app, ok := e.Object.(*v1beta2.SparkApplication) + if !ok { + return false + } + + return f.filter(app) +} + +func (f *EventFilter) filter(app *v1beta2.SparkApplication) bool { + return f.namespaces[metav1.NamespaceAll] || f.namespaces[app.Namespace] +} diff --git a/internal/controller/sparkapplication/event_handler.go b/internal/controller/sparkapplication/event_handler.go new file mode 100644 index 0000000000..0e2ee58964 --- /dev/null +++ b/internal/controller/sparkapplication/event_handler.go @@ -0,0 +1,220 @@ +/* +Copyright 2024 The kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sparkapplication + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + + "github.com/kubeflow/spark-operator/api/v1beta2" + "github.com/kubeflow/spark-operator/internal/metrics" + "github.com/kubeflow/spark-operator/pkg/common" + "github.com/kubeflow/spark-operator/pkg/util" +) + +// SparkPodEventHandler watches Spark pods and update the SparkApplication objects accordingly. +type SparkPodEventHandler struct { + client client.Client + metrics *metrics.SparkExecutorMetrics +} + +// SparkPodEventHandler implements handler.EventHandler. +var _ handler.EventHandler = &SparkPodEventHandler{} + +// NewSparkPodEventHandler creates a new sparkPodEventHandler instance. +func NewSparkPodEventHandler(client client.Client, metrics *metrics.SparkExecutorMetrics) *SparkPodEventHandler { + handler := &SparkPodEventHandler{ + client: client, + metrics: metrics, + } + return handler +} + +// Create implements handler.EventHandler. +func (h *SparkPodEventHandler) Create(ctx context.Context, event event.CreateEvent, queue workqueue.RateLimitingInterface) { + pod, ok := event.Object.(*corev1.Pod) + if !ok { + return + } + logger.Info("Spark pod created", "name", pod.Name, "namespace", pod.Namespace, "phase", pod.Status.Phase) + h.enqueueSparkAppForUpdate(ctx, pod, queue) + + if h.metrics != nil && util.IsExecutorPod(pod) { + h.metrics.HandleSparkExecutorCreate(pod) + } +} + +// Update implements handler.EventHandler. 
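+// Phase transitions are the only pod updates worth reacting to here: updates
+// where the phase is unchanged are dropped below before any enqueueing or
+// executor-metrics handling happens.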
+func (h *SparkPodEventHandler) Update(ctx context.Context, event event.UpdateEvent, queue workqueue.RateLimitingInterface) { + oldPod, ok := event.ObjectOld.(*corev1.Pod) + if !ok { + return + } + + newPod, ok := event.ObjectNew.(*corev1.Pod) + if !ok { + return + } + + if newPod.Status.Phase == oldPod.Status.Phase { + return + } + + logger.Info("Spark pod updated", "name", newPod.Name, "namespace", newPod.Namespace, "oldPhase", oldPod.Status.Phase, "newPhase", newPod.Status.Phase) + h.enqueueSparkAppForUpdate(ctx, newPod, queue) + + if h.metrics != nil && util.IsExecutorPod(oldPod) && util.IsExecutorPod(newPod) { + h.metrics.HandleSparkExecutorUpdate(oldPod, newPod) + } +} + +// Delete implements handler.EventHandler. +func (h *SparkPodEventHandler) Delete(ctx context.Context, event event.DeleteEvent, queue workqueue.RateLimitingInterface) { + pod, ok := event.Object.(*corev1.Pod) + if !ok { + return + } + + logger.Info("Spark pod deleted", "name", pod.Name, "namespace", pod.Namespace, "phase", pod.Status.Phase) + h.enqueueSparkAppForUpdate(ctx, pod, queue) + + if h.metrics != nil && util.IsExecutorPod(pod) { + h.metrics.HandleSparkExecutorDelete(pod) + } +} + +// Generic implements handler.EventHandler. +func (h *SparkPodEventHandler) Generic(ctx context.Context, event event.GenericEvent, queue workqueue.RateLimitingInterface) { + pod, ok := event.Object.(*corev1.Pod) + if !ok { + return + } + + logger.Info("Spark pod generic event ", "name", pod.Name, "namespace", pod.Namespace, "phase", pod.Status.Phase) + h.enqueueSparkAppForUpdate(ctx, pod, queue) +} + +func (h *SparkPodEventHandler) enqueueSparkAppForUpdate(ctx context.Context, pod *corev1.Pod, queue workqueue.RateLimitingInterface) { + name := util.GetAppName(pod) + if name == "" { + return + } + namespace := pod.Namespace + key := types.NamespacedName{ + Namespace: namespace, + Name: name, + } + + app := &v1beta2.SparkApplication{} + if submissionID, ok := pod.Labels[common.LabelSubmissionID]; ok { + if err := h.client.Get(ctx, key, app); err != nil { + return + } + if app.Status.SubmissionID != submissionID { + return + } + } + + // Do not enqueue SparkApplication in invalidating state when driver pod get deleted. + if util.GetApplicationState(app) == v1beta2.ApplicationStateInvalidating { + return + } + + queue.AddRateLimited(ctrl.Request{NamespacedName: key}) +} + +// EventHandler watches SparkApplication events. +type EventHandler struct { + metrics *metrics.SparkApplicationMetrics +} + +var _ handler.EventHandler = &EventHandler{} + +// NewSparkApplicationEventHandler creates a new SparkApplicationEventHandler instance. +func NewSparkApplicationEventHandler(metrics *metrics.SparkApplicationMetrics) *EventHandler { + return &EventHandler{ + metrics: metrics, + } +} + +// Create implements handler.EventHandler. +func (h *EventHandler) Create(ctx context.Context, event event.CreateEvent, queue workqueue.RateLimitingInterface) { + app, ok := event.Object.(*v1beta2.SparkApplication) + if !ok { + return + } + + logger.Info("SparkApplication created", "name", app.Name, "namespace", app.Namespace, "state", app.Status.AppState.State) + queue.AddRateLimited(ctrl.Request{NamespacedName: types.NamespacedName{Name: app.Name, Namespace: app.Namespace}}) + + if h.metrics != nil { + h.metrics.HandleSparkApplicationCreate(app) + } +} + +// Update implements handler.EventHandler. 
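+// Unlike the pod handler above, every application update is enqueued here;
+// dropping no-op updates (same resource version, not expired, no retry due)
+// is left to the EventFilter predicate.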
+func (h *EventHandler) Update(ctx context.Context, event event.UpdateEvent, queue workqueue.RateLimitingInterface) { + oldApp, ok := event.ObjectOld.(*v1beta2.SparkApplication) + if !ok { + return + } + + newApp, ok := event.ObjectNew.(*v1beta2.SparkApplication) + if !ok { + return + } + + logger.Info("SparkApplication updated", "name", oldApp.Name, "namespace", oldApp.Namespace, "oldState", oldApp.Status.AppState.State, "newState", newApp.Status.AppState.State) + queue.AddRateLimited(ctrl.Request{NamespacedName: types.NamespacedName{Name: newApp.Name, Namespace: newApp.Namespace}}) + + if h.metrics != nil { + h.metrics.HandleSparkApplicationUpdate(oldApp, newApp) + } +} + +// Delete implements handler.EventHandler. +func (h *EventHandler) Delete(ctx context.Context, event event.DeleteEvent, queue workqueue.RateLimitingInterface) { + app, ok := event.Object.(*v1beta2.SparkApplication) + if !ok { + return + } + + logger.Info("SparkApplication deleted", "name", app.Name, "namespace", app.Namespace, "state", app.Status.AppState.State) + queue.AddRateLimited(ctrl.Request{NamespacedName: types.NamespacedName{Name: app.Name, Namespace: app.Namespace}}) + + if h.metrics != nil { + h.metrics.HandleSparkApplicationDelete(app) + } +} + +// Generic implements handler.EventHandler. +func (h *EventHandler) Generic(ctx context.Context, event event.GenericEvent, queue workqueue.RateLimitingInterface) { + app, ok := event.Object.(*v1beta2.SparkApplication) + if !ok { + return + } + + logger.Info("SparkApplication generic event", "name", app.Name, "namespace", app.Namespace, "state", app.Status.AppState.State) + queue.AddRateLimited(ctrl.Request{NamespacedName: types.NamespacedName{Name: app.Name, Namespace: app.Namespace}}) +} diff --git a/pkg/controller/sparkapplication/monitoring_config.go b/internal/controller/sparkapplication/monitoring_config.go similarity index 55% rename from pkg/controller/sparkapplication/monitoring_config.go rename to internal/controller/sparkapplication/monitoring_config.go index ea88326b16..a4ef7b454e 100644 --- a/pkg/controller/sparkapplication/monitoring_config.go +++ b/internal/controller/sparkapplication/monitoring_config.go @@ -22,51 +22,42 @@ import ( "github.com/golang/glog" corev1 "k8s.io/api/core/v1" - apiErrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clientset "k8s.io/client-go/kubernetes" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/retry" + "sigs.k8s.io/controller-runtime/pkg/client" - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - "github.com/kubeflow/spark-operator/pkg/config" + "github.com/kubeflow/spark-operator/api/v1beta2" + "github.com/kubeflow/spark-operator/pkg/common" + "github.com/kubeflow/spark-operator/pkg/util" ) -const ( - metricsPropertiesKey = "metrics.properties" - prometheusConfigKey = "prometheus.yaml" - prometheusScrapeAnnotation = "prometheus.io/scrape" - prometheusPortAnnotation = "prometheus.io/port" - prometheusPathAnnotation = "prometheus.io/path" -) - -func configPrometheusMonitoring(app *v1beta2.SparkApplication, kubeClient clientset.Interface) error { - port := config.DefaultPrometheusJavaAgentPort +func configPrometheusMonitoring(app *v1beta2.SparkApplication, client client.Client) error { + port := common.DefaultPrometheusJavaAgentPort if app.Spec.Monitoring.Prometheus.Port != nil { port = *app.Spec.Monitoring.Prometheus.Port } // If one or both of the metricsPropertiesFile and Prometheus.ConfigFile are not 
set - if !app.HasMetricsPropertiesFile() || !app.HasPrometheusConfigFile() { - glog.V(2).Infof("Creating a ConfigMap for metrics and Prometheus configurations.") - configMapName := config.GetPrometheusConfigMapName(app) + if !util.HasMetricsPropertiesFile(app) || !util.HasPrometheusConfigFile(app) { + logger.V(1).Info("Creating a ConfigMap for metrics and Prometheus configurations") + configMapName := util.GetPrometheusConfigMapName(app) configMap := buildPrometheusConfigMap(app, configMapName) - retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { - cm, err := kubeClient.CoreV1().ConfigMaps(app.Namespace).Get(context.TODO(), configMapName, metav1.GetOptions{}) - if apiErrors.IsNotFound(err) { - _, createErr := kubeClient.CoreV1().ConfigMaps(app.Namespace).Create(context.TODO(), configMap, metav1.CreateOptions{}) - return createErr - } - if err != nil { + key := types.NamespacedName{Namespace: configMap.Namespace, Name: configMap.Name} + if retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { + cm := &corev1.ConfigMap{} + if err := client.Get(context.TODO(), key, cm); err != nil { + if errors.IsNotFound(err) { + return client.Create(context.TODO(), configMap) + } return err } - cm.Data = configMap.Data - _, updateErr := kubeClient.CoreV1().ConfigMaps(app.Namespace).Update(context.TODO(), cm, metav1.UpdateOptions{}) - return updateErr - }) - - if retryErr != nil { - return fmt.Errorf("failed to apply %s in namespace %s: %v", configMapName, app.Namespace, retryErr) + return client.Update(context.TODO(), cm) + }); retryErr != nil { + logger.Error(retryErr, "Failed to create/update Prometheus ConfigMap for SparkApplication", "name", app.Name, "ConfigMap name", configMap.Name, "namespace", app.Namespace) + return retryErr } } @@ -76,10 +67,10 @@ func configPrometheusMonitoring(app *v1beta2.SparkApplication, kubeClient client "-javaagent:%s=%d:%s/%s", app.Spec.Monitoring.Prometheus.JmxExporterJar, port, - config.PrometheusConfigMapMountPath, - prometheusConfigKey) + common.PrometheusConfigMapMountPath, + common.PrometheusConfigKey) - if app.HasPrometheusConfigFile() { + if util.HasPrometheusConfigFile(app) { configFile := *app.Spec.Monitoring.Prometheus.ConfigFile glog.V(2).Infof("Overriding the default Prometheus configuration with config file %s in the Spark image.", configFile) javaOption = fmt.Sprintf("-javaagent:%s=%d:%s", app.Spec.Monitoring.Prometheus.JmxExporterJar, @@ -88,14 +79,14 @@ func configPrometheusMonitoring(app *v1beta2.SparkApplication, kubeClient client /* work around for push gateway issue: https://github.com/prometheus/pushgateway/issues/97 */ metricNamespace := fmt.Sprintf("%s.%s", app.Namespace, app.Name) - metricConf := fmt.Sprintf("%s/%s", config.PrometheusConfigMapMountPath, metricsPropertiesKey) + metricConf := fmt.Sprintf("%s/%s", common.PrometheusConfigMapMountPath, common.MetricsPropertiesKey) if app.Spec.SparkConf == nil { app.Spec.SparkConf = make(map[string]string) } app.Spec.SparkConf["spark.metrics.namespace"] = metricNamespace app.Spec.SparkConf["spark.metrics.conf"] = metricConf - if app.HasMetricsPropertiesFile() { + if util.HasMetricsPropertiesFile(app) { app.Spec.SparkConf["spark.metrics.conf"] = *app.Spec.Monitoring.MetricsPropertiesFile } @@ -103,9 +94,9 @@ func configPrometheusMonitoring(app *v1beta2.SparkApplication, kubeClient client if app.Spec.Driver.Annotations == nil { app.Spec.Driver.Annotations = make(map[string]string) } - app.Spec.Driver.Annotations[prometheusScrapeAnnotation] = "true" - 
app.Spec.Driver.Annotations[prometheusPortAnnotation] = fmt.Sprintf("%d", port) - app.Spec.Driver.Annotations[prometheusPathAnnotation] = "/metrics" + app.Spec.Driver.Annotations[common.PrometheusScrapeAnnotation] = "true" + app.Spec.Driver.Annotations[common.PrometheusPortAnnotation] = fmt.Sprintf("%d", port) + app.Spec.Driver.Annotations[common.PrometheusPathAnnotation] = "/metrics" if app.Spec.Driver.JavaOptions == nil { app.Spec.Driver.JavaOptions = &javaOption @@ -117,9 +108,9 @@ func configPrometheusMonitoring(app *v1beta2.SparkApplication, kubeClient client if app.Spec.Executor.Annotations == nil { app.Spec.Executor.Annotations = make(map[string]string) } - app.Spec.Executor.Annotations[prometheusScrapeAnnotation] = "true" - app.Spec.Executor.Annotations[prometheusPortAnnotation] = fmt.Sprintf("%d", port) - app.Spec.Executor.Annotations[prometheusPathAnnotation] = "/metrics" + app.Spec.Executor.Annotations[common.PrometheusScrapeAnnotation] = "true" + app.Spec.Executor.Annotations[common.PrometheusPortAnnotation] = fmt.Sprintf("%d", port) + app.Spec.Executor.Annotations[common.PrometheusPathAnnotation] = "/metrics" if app.Spec.Executor.JavaOptions == nil { app.Spec.Executor.JavaOptions = &javaOption @@ -134,27 +125,27 @@ func configPrometheusMonitoring(app *v1beta2.SparkApplication, kubeClient client func buildPrometheusConfigMap(app *v1beta2.SparkApplication, prometheusConfigMapName string) *corev1.ConfigMap { configMapData := make(map[string]string) - if !app.HasMetricsPropertiesFile() { - metricsProperties := config.DefaultMetricsProperties + if !util.HasMetricsPropertiesFile(app) { + metricsProperties := common.DefaultMetricsProperties if app.Spec.Monitoring.MetricsProperties != nil { metricsProperties = *app.Spec.Monitoring.MetricsProperties } - configMapData[metricsPropertiesKey] = metricsProperties + configMapData[common.MetricsPropertiesKey] = metricsProperties } - if !app.HasPrometheusConfigFile() { - prometheusConfig := config.DefaultPrometheusConfiguration + if !util.HasPrometheusConfigFile(app) { + prometheusConfig := common.DefaultPrometheusConfiguration if app.Spec.Monitoring.Prometheus.Configuration != nil { prometheusConfig = *app.Spec.Monitoring.Prometheus.Configuration } - configMapData[prometheusConfigKey] = prometheusConfig + configMapData[common.PrometheusConfigKey] = prometheusConfig } return &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: prometheusConfigMapName, Namespace: app.Namespace, - OwnerReferences: []metav1.OwnerReference{*getOwnerReference(app)}, + OwnerReferences: []metav1.OwnerReference{util.GetOwnerReference(app)}, }, Data: configMapData, } diff --git a/internal/controller/sparkapplication/monitoring_config_test.go b/internal/controller/sparkapplication/monitoring_config_test.go new file mode 100644 index 0000000000..2b83bb1417 --- /dev/null +++ b/internal/controller/sparkapplication/monitoring_config_test.go @@ -0,0 +1,255 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package sparkapplication_test + +// func TestConfigPrometheusMonitoring(t *testing.T) { +// type testcase struct { +// app *v1beta2.SparkApplication +// metricsProperties string +// metricsPropertiesFile string +// prometheusConfig string +// port string +// driverJavaOptions string +// executorJavaOptions string +// } + +// fakeClient := fake.NewSimpleClientset() +// testFn := func(test testcase, t *testing.T) { +// err := configPrometheusMonitoring(test.app, fakeClient) +// if err != nil { +// t.Errorf("failed to configure Prometheus monitoring: %v", err) +// } + +// configMapName := test.app.GetPrometheusConfigMapName() +// configMap, err := fakeClient.CoreV1().ConfigMaps(test.app.Namespace).Get(context.TODO(), configMapName, metav1.GetOptions{}) +// if err != nil { +// t.Errorf("failed to get ConfigMap %s: %v", configMapName, err) +// } + +// if test.app.Spec.Monitoring.Prometheus.ConfigFile == nil && +// test.app.Spec.Monitoring.MetricsPropertiesFile == nil && +// len(configMap.Data) != 2 { +// t.Errorf("expected %d data items got %d", 2, len(configMap.Data)) +// } + +// if test.app.Spec.Monitoring.Prometheus.ConfigFile != nil && +// test.app.Spec.Monitoring.MetricsPropertiesFile == nil && +// len(configMap.Data) != 1 { +// t.Errorf("expected %d data items got %d", 1, len(configMap.Data)) +// } + +// if test.app.Spec.Monitoring.Prometheus.ConfigFile == nil && +// test.app.Spec.Monitoring.MetricsPropertiesFile != nil && +// len(configMap.Data) != 1 { +// t.Errorf("expected %d data items got %d", 1, len(configMap.Data)) +// } + +// if test.app.Spec.Monitoring.MetricsPropertiesFile == nil && configMap.Data[common.MetricsPropertiesKey] != test.metricsProperties { +// t.Errorf("metrics.properties expected %s got %s", test.metricsProperties, configMap.Data[common.MetricsPropertiesKey]) +// } + +// if test.app.Spec.Monitoring.Prometheus.ConfigFile == nil && configMap.Data[common.PrometheusConfigKey] != test.prometheusConfig { +// t.Errorf("prometheus.yaml expected %s got %s", test.prometheusConfig, configMap.Data[common.PrometheusConfigKey]) +// } + +// if test.app.Spec.Monitoring.Prometheus.ConfigFile == nil && configMap.Data[common.PrometheusConfigKey] != test.prometheusConfig { +// t.Errorf("prometheus.yaml expected %s got %s", test.prometheusConfig, configMap.Data[common.PrometheusConfigKey]) +// } + +// if test.app.Spec.Monitoring.ExposeDriverMetrics { +// if len(test.app.Spec.Driver.Annotations) != 3 { +// t.Errorf("expected %d driver annotations got %d", 3, len(test.app.Spec.Driver.Annotations)) +// } +// if test.app.Spec.Driver.Annotations[common.PrometheusPortAnnotation] != test.port { +// t.Errorf("java agent port expected %s got %s", test.port, test.app.Spec.Driver.Annotations[common.PrometheusPortAnnotation]) +// } + +// if *test.app.Spec.Driver.JavaOptions != test.driverJavaOptions { +// t.Errorf("driver Java options expected %s got %s", test.driverJavaOptions, *test.app.Spec.Driver.JavaOptions) +// } +// } + +// if test.app.Spec.Monitoring.ExposeExecutorMetrics { +// if len(test.app.Spec.Executor.Annotations) != 3 { +// t.Errorf("expected %d driver annotations got %d", 3, len(test.app.Spec.Executor.Annotations)) +// } +// if test.app.Spec.Executor.Annotations[common.PrometheusPortAnnotation] != test.port { +// t.Errorf("java agent port expected %s got %s", test.port, test.app.Spec.Executor.Annotations[common.PrometheusPortAnnotation]) +// } + +// if *test.app.Spec.Executor.JavaOptions != test.executorJavaOptions { +// t.Errorf("driver Java options expected %s got %s", 
test.executorJavaOptions, *test.app.Spec.Executor.JavaOptions) +// } +// } + +// if test.app.Spec.Monitoring.MetricsPropertiesFile != nil { +// if test.app.Spec.SparkConf["spark.metrics.conf"] != test.metricsPropertiesFile { +// t.Errorf("expected sparkConf %s got %s", test.metricsPropertiesFile, test.app.Spec.SparkConf["spark.metrics.conf"]) +// } +// } +// } + +// testcases := []testcase{ +// { +// app: &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "app1", +// Namespace: "default", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// Monitoring: &v1beta2.MonitoringSpec{ +// ExposeDriverMetrics: true, +// ExposeExecutorMetrics: true, +// Prometheus: &v1beta2.PrometheusSpec{ +// JmxExporterJar: "/prometheus/exporter.jar", +// }, +// }, +// }, +// }, +// metricsProperties: common.DefaultMetricsProperties, +// prometheusConfig: common.DefaultPrometheusConfiguration, +// port: fmt.Sprintf("%d", common.DefaultPrometheusJavaAgentPort), +// driverJavaOptions: "-javaagent:/prometheus/exporter.jar=8090:/etc/metrics/conf/prometheus.yaml", +// executorJavaOptions: "-javaagent:/prometheus/exporter.jar=8090:/etc/metrics/conf/prometheus.yaml", +// }, +// { +// app: &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "app2", +// Namespace: "default", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// Driver: v1beta2.DriverSpec{ +// JavaOptions: util.StringPtr("-XX:+PrintGCDetails -XX:+PrintGCTimeStamps"), +// }, +// Executor: v1beta2.ExecutorSpec{ +// JavaOptions: util.StringPtr("-XX:+PrintGCDetails -XX:+PrintGCTimeStamps"), +// }, +// Monitoring: &v1beta2.MonitoringSpec{ +// ExposeDriverMetrics: true, +// ExposeExecutorMetrics: true, +// MetricsProperties: util.StringPtr("testcase2dummy"), +// Prometheus: &v1beta2.PrometheusSpec{ +// JmxExporterJar: "/prometheus/exporter.jar", +// Port: util.Int32Ptr(8091), +// Configuration: util.StringPtr("testcase2dummy"), +// }, +// }, +// }, +// }, +// metricsProperties: "testcase2dummy", +// prometheusConfig: "testcase2dummy", +// port: "8091", +// driverJavaOptions: "-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -javaagent:/prometheus/exporter.jar=8091:/etc/metrics/conf/prometheus.yaml", +// executorJavaOptions: "-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -javaagent:/prometheus/exporter.jar=8091:/etc/metrics/conf/prometheus.yaml", +// }, +// { +// app: &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "app2", +// Namespace: "default", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// Driver: v1beta2.DriverSpec{ +// JavaOptions: util.StringPtr("-XX:+PrintGCDetails -XX:+PrintGCTimeStamps"), +// }, +// Executor: v1beta2.ExecutorSpec{ +// JavaOptions: util.StringPtr("-XX:+PrintGCDetails -XX:+PrintGCTimeStamps"), +// }, +// Monitoring: &v1beta2.MonitoringSpec{ +// ExposeDriverMetrics: true, +// ExposeExecutorMetrics: true, +// MetricsProperties: util.StringPtr("testcase3dummy"), +// Prometheus: &v1beta2.PrometheusSpec{ +// JmxExporterJar: "/prometheus/exporter.jar", +// Port: util.Int32Ptr(8091), +// ConfigFile: util.StringPtr("testcase3dummy.yaml"), +// }, +// }, +// }, +// }, +// metricsProperties: "testcase3dummy", +// port: "8091", +// driverJavaOptions: "-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -javaagent:/prometheus/exporter.jar=8091:testcase3dummy.yaml", +// executorJavaOptions: "-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -javaagent:/prometheus/exporter.jar=8091:testcase3dummy.yaml", +// }, +// { +// app: &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "app2", +// 
Namespace: "default", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// Driver: v1beta2.DriverSpec{ +// JavaOptions: util.StringPtr("-XX:+PrintGCDetails -XX:+PrintGCTimeStamps"), +// }, +// Executor: v1beta2.ExecutorSpec{ +// JavaOptions: util.StringPtr("-XX:+PrintGCDetails -XX:+PrintGCTimeStamps"), +// }, +// Monitoring: &v1beta2.MonitoringSpec{ +// ExposeDriverMetrics: true, +// ExposeExecutorMetrics: true, +// MetricsPropertiesFile: util.StringPtr("/testcase4dummy/metrics.properties"), +// Prometheus: &v1beta2.PrometheusSpec{ +// JmxExporterJar: "/prometheus/exporter.jar", +// Port: util.Int32Ptr(8091), +// ConfigFile: util.StringPtr("testcase4dummy.yaml"), +// }, +// }, +// }, +// }, +// metricsPropertiesFile: "/testcase4dummy/metrics.properties", +// port: "8091", +// driverJavaOptions: "-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -javaagent:/prometheus/exporter.jar=8091:testcase4dummy.yaml", +// executorJavaOptions: "-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -javaagent:/prometheus/exporter.jar=8091:testcase4dummy.yaml", +// }, +// { +// app: &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "app2", +// Namespace: "default", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// Driver: v1beta2.DriverSpec{ +// JavaOptions: util.StringPtr("-XX:+PrintGCDetails -XX:+PrintGCTimeStamps"), +// }, +// Executor: v1beta2.ExecutorSpec{ +// JavaOptions: util.StringPtr("-XX:+PrintGCDetails -XX:+PrintGCTimeStamps"), +// }, +// Monitoring: &v1beta2.MonitoringSpec{ +// ExposeDriverMetrics: true, +// ExposeExecutorMetrics: true, +// MetricsPropertiesFile: util.StringPtr("/testcase5dummy/metrics.properties"), +// Prometheus: &v1beta2.PrometheusSpec{ +// JmxExporterJar: "/prometheus/exporter.jar", +// Port: util.Int32Ptr(8091), +// }, +// }, +// }, +// }, +// metricsPropertiesFile: "/testcase5dummy/metrics.properties", +// prometheusConfig: common.DefaultPrometheusConfiguration, +// port: "8091", +// driverJavaOptions: "-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -javaagent:/prometheus/exporter.jar=8091:/etc/metrics/conf/prometheus.yaml", +// executorJavaOptions: "-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -javaagent:/prometheus/exporter.jar=8091:/etc/metrics/conf/prometheus.yaml", +// }, +// } + +// for _, test := range testcases { +// testFn(test, t) +// } +// } diff --git a/internal/controller/sparkapplication/submission.go b/internal/controller/sparkapplication/submission.go new file mode 100644 index 0000000000..318bb6e4f1 --- /dev/null +++ b/internal/controller/sparkapplication/submission.go @@ -0,0 +1,1023 @@ +/* +Copyright 2017 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sparkapplication + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/kubeflow/spark-operator/api/v1beta2" + "github.com/kubeflow/spark-operator/pkg/common" + "github.com/kubeflow/spark-operator/pkg/util" +) + +// submission includes information of a Spark application to be submitted. 
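+//
+// A rough sketch of the flow (values hypothetical): for an application
+// "spark-pi" in namespace "default" running in cluster mode,
+// buildSparkSubmitArgs below yields arguments along the lines of
+//
+//	--master k8s://https://<apiserver> --deploy-mode cluster --name spark-pi
+//	--conf spark.kubernetes.namespace=default ... <main application file>
+//
+// which runSparkSubmit then hands to $SPARK_HOME/bin/spark-submit.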
+type submission struct {
+	namespace string
+	name      string
+	args      []string
+}
+
+func newSubmission(args []string, app *v1beta2.SparkApplication) *submission {
+	return &submission{
+		namespace: app.Namespace,
+		name:      app.Name,
+		args:      args,
+	}
+}
+
+func runSparkSubmit(submission *submission) (bool, error) {
+	sparkHome, present := os.LookupEnv(common.EnvSparkHome)
+	if !present {
+		return false, fmt.Errorf("env %s is not specified", common.EnvSparkHome)
+	}
+	command := filepath.Join(sparkHome, "bin", "spark-submit")
+	cmd := exec.Command(command, submission.args...)
+	_, err := cmd.Output()
+	if err != nil {
+		var errorMsg string
+		if exitErr, ok := err.(*exec.ExitError); ok {
+			errorMsg = string(exitErr.Stderr)
+		}
+		// The driver pod of the application already exists.
+		if strings.Contains(errorMsg, common.ErrorCodePodAlreadyExists) {
+			return false, fmt.Errorf("driver pod already exists")
+		}
+		if errorMsg != "" {
+			return false, fmt.Errorf("failed to run spark-submit: %s", errorMsg)
+		}
+		return false, fmt.Errorf("failed to run spark-submit: %v", err)
+	}
+	return true, nil
+}
+
+// buildSparkSubmitArgs builds the arguments for spark-submit.
+func buildSparkSubmitArgs(app *v1beta2.SparkApplication) ([]string, error) {
+	optionFuncs := []sparkSubmitOptionFunc{
+		masterOption,
+		deployModeOption,
+		mainClassOption,
+		nameOption,
+		dependenciesOption,
+		namespaceOption,
+		imageOption,
+		pythonVersionOption,
+		memoryOverheadFactorOption,
+		submissionWaitAppCompletionOption,
+		sparkConfOption,
+		hadoopConfOption,
+		driverPodNameOption,
+		driverConfOption,
+		driverSecretOption,
+		driverEnvOption,
+		driverVolumeMountsOption,
+		executorConfOption,
+		executorSecretOption,
+		executorVolumeMountsOption,
+		executorEnvOption,
+		nodeSelectorOption,
+		dynamicAllocationOption,
+		proxyUserOption,
+		mainApplicationFileOption,
+		applicationOption,
+	}
+
+	var args []string
+	for _, optionFunc := range optionFuncs {
+		option, err := optionFunc(app)
+		if err != nil {
+			return nil, err
+		}
+		args = append(args, option...)
+	}
+
+	return args, nil
+}
+
+type sparkSubmitOptionFunc func(*v1beta2.SparkApplication) ([]string, error)
+
+func masterOption(_ *v1beta2.SparkApplication) ([]string, error) {
+	masterURL, err := util.GetMasterURL()
+	if err != nil {
+		return nil, fmt.Errorf("failed to get master URL: %v", err)
+	}
+	args := []string{
+		"--master",
+		masterURL,
+	}
+	return args, nil
+}
+
+func deployModeOption(app *v1beta2.SparkApplication) ([]string, error) {
+	args := []string{
+		"--deploy-mode",
+		string(app.Spec.Mode),
+	}
+	return args, nil
+}
+
+func mainClassOption(app *v1beta2.SparkApplication) ([]string, error) {
+	if app.Spec.MainClass == nil {
+		return nil, nil
+	}
+	args := []string{
+		"--class",
+		*app.Spec.MainClass,
+	}
+	return args, nil
+}
+
+func nameOption(app *v1beta2.SparkApplication) ([]string, error) {
+	args := []string{"--name", app.Name}
+	return args, nil
+}
+
+func namespaceOption(app *v1beta2.SparkApplication) ([]string, error) {
+	args := []string{
+		"--conf",
+		fmt.Sprintf("%s=%s", common.SparkKubernetesNamespace, app.Namespace),
+	}
+	return args, nil
+}
+
+func driverPodNameOption(app *v1beta2.SparkApplication) ([]string, error) {
+	args := []string{
+		"--conf",
+		fmt.Sprintf("%s=%s", common.SparkKubernetesDriverPodName, util.GetDriverPodName(app)),
+	}
+	return args, nil
+}
+
+func dependenciesOption(app *v1beta2.SparkApplication) ([]string, error) {
+	var args []string
+
+	if len(app.Spec.Deps.Jars) > 0 {
+		args = append(args, "--jars", strings.Join(app.Spec.Deps.Jars, ","))
+	}
+
+	if len(app.Spec.Deps.Packages) > 0 {
+		args = append(args, "--packages", strings.Join(app.Spec.Deps.Packages, ","))
+	}
+
+	if len(app.Spec.Deps.ExcludePackages) > 0 {
+		args = append(args, "--exclude-packages", strings.Join(app.Spec.Deps.ExcludePackages, ","))
+	}
+
+	if len(app.Spec.Deps.Repositories) > 0 {
+		args = append(args, "--repositories", strings.Join(app.Spec.Deps.Repositories, ","))
+	}
+
+	if len(app.Spec.Deps.PyFiles) > 0 {
+		args = append(args, "--py-files", strings.Join(app.Spec.Deps.PyFiles, ","))
+	}
+
+	if len(app.Spec.Deps.Files) > 0 {
+		args = append(args, "--files", strings.Join(app.Spec.Deps.Files, ","))
+	}
+
+	return args, nil
+}
+
+func imageOption(app *v1beta2.SparkApplication) ([]string, error) {
+	var args []string
+	if app.Spec.Image == nil || *app.Spec.Image == "" {
+		return nil, nil
+	}
+	args = append(args,
+		"--conf",
+		fmt.Sprintf("%s=%s", common.SparkKubernetesContainerImage, *app.Spec.Image),
+	)
+
+	// Return the args collected so far when the pull policy or pull secrets
+	// are unset; returning nil here would silently drop the image conf.
+	if app.Spec.ImagePullPolicy == nil || *app.Spec.ImagePullPolicy == "" {
+		return args, nil
+	}
+	args = append(args,
+		"--conf",
+		fmt.Sprintf("%s=%s", common.SparkKubernetesContainerImagePullPolicy, *app.Spec.ImagePullPolicy),
+	)
+
+	if len(app.Spec.ImagePullSecrets) == 0 {
+		return args, nil
+	}
+	secrets := strings.Join(app.Spec.ImagePullSecrets, ",")
+	args = append(args,
+		"--conf",
+		fmt.Sprintf("%s=%s", common.SparkKubernetesContainerImagePullSecrets, secrets),
+	)
+
+	return args, nil
+}
+
+func pythonVersionOption(app *v1beta2.SparkApplication) ([]string, error) {
+	if app.Spec.PythonVersion == nil || *app.Spec.PythonVersion == "" {
+		return nil, nil
+	}
+	args := []string{
+		"--conf",
+		fmt.Sprintf("%s=%s", common.SparkKubernetesPysparkPythonVersion, *app.Spec.PythonVersion),
+	}
+	return args, nil
+}
+
+func memoryOverheadFactorOption(app *v1beta2.SparkApplication) ([]string, error) {
+	if app.Spec.MemoryOverheadFactor == nil || *app.Spec.MemoryOverheadFactor == "" {
+		return nil, nil
+	}
+	args := []string{
+		"--conf",
+		fmt.Sprintf("%s=%s",
common.SparkKubernetesMemoryOverheadFactor, *app.Spec.MemoryOverheadFactor), + } + return args, nil +} + +func submissionWaitAppCompletionOption(_ *v1beta2.SparkApplication) ([]string, error) { + // spark-submit triggered by Spark operator should never wait for app completion + args := []string{ + "--conf", + fmt.Sprintf("%s=false", common.SparkKubernetesSubmissionWaitAppCompletion), + } + return args, nil +} + +func sparkConfOption(app *v1beta2.SparkApplication) ([]string, error) { + if app.Spec.SparkConf == nil { + return nil, nil + } + var args []string + // Add Spark configuration properties. + for key, value := range app.Spec.SparkConf { + // Configuration property for the driver pod name has already been set. + if key != common.SparkKubernetesDriverPodName { + args = append(args, "--conf", fmt.Sprintf("%s=%s", key, value)) + } + } + return args, nil +} + +func hadoopConfOption(app *v1beta2.SparkApplication) ([]string, error) { + if app.Spec.HadoopConf == nil { + return nil, nil + } + var args []string + // Add Hadoop configuration properties. + for key, value := range app.Spec.HadoopConf { + args = append(args, "--conf", fmt.Sprintf("spark.hadoop.%s=%s", key, value)) + } + return args, nil +} + +func nodeSelectorOption(app *v1beta2.SparkApplication) ([]string, error) { + var args []string + for key, value := range app.Spec.NodeSelector { + property := fmt.Sprintf(common.SparkKubernetesNodeSelectorTemplate, key) + args = append(args, "--conf", fmt.Sprintf("%s=%s", property, value)) + } + return args, nil +} + +func driverConfOption(app *v1beta2.SparkApplication) ([]string, error) { + var args []string + var property string + + property = fmt.Sprintf(common.SparkKubernetesDriverLabelTemplate, common.LabelSparkAppName) + args = append(args, "--conf", fmt.Sprintf("%s=%s", property, app.Name)) + + property = fmt.Sprintf(common.SparkKubernetesDriverLabelTemplate, common.LabelLaunchedBySparkOperator) + args = append(args, "--conf", fmt.Sprintf("%s=%s", property, "true")) + + property = fmt.Sprintf(common.SparkKubernetesDriverLabelTemplate, common.LabelSubmissionID) + args = append(args, "--conf", fmt.Sprintf("%s=%s", property, app.Status.SubmissionID)) + + if app.Spec.Driver.Image != nil && *app.Spec.Driver.Image != "" { + args = append(args, "--conf", + fmt.Sprintf("%s=%s", common.SparkKubernetesDriverContainerImage, *app.Spec.Driver.Image)) + } else if app.Spec.Image != nil && *app.Spec.Image != "" { + args = append(args, "--conf", + fmt.Sprintf("%s=%s", common.SparkKubernetesDriverContainerImage, *app.Spec.Image)) + } else { + return nil, fmt.Errorf("driver container image is not specified") + } + + if app.Spec.Driver.Cores != nil { + args = append(args, "--conf", + fmt.Sprintf("%s=%d", common.SparkDriverCores, *app.Spec.Driver.Cores)) + } + + if app.Spec.Driver.CoreRequest != nil { + args = append(args, "--conf", + fmt.Sprintf("%s=%s", common.SparkKubernetesDriverRequestCores, *app.Spec.Driver.CoreRequest)) + } + + if app.Spec.Driver.CoreLimit != nil { + args = append(args, "--conf", + fmt.Sprintf("%s=%s", common.SparkKubernetesDriverLimitCores, *app.Spec.Driver.CoreLimit)) + } + + if app.Spec.Driver.Memory != nil { + args = append(args, "--conf", + fmt.Sprintf("%s=%s", common.SparkDriverMemory, *app.Spec.Driver.Memory)) + } + + if app.Spec.Driver.MemoryOverhead != nil { + args = append(args, "--conf", + fmt.Sprintf("%s=%s", common.SparkDriverMemoryOverhead, *app.Spec.Driver.MemoryOverhead)) + } + + if app.Spec.Driver.ServiceAccount != nil { + args = append(args, "--conf", + 
fmt.Sprintf("%s=%s", + common.SparkKubernetesAuthenticateDriverServiceAccountName, *app.Spec.Driver.ServiceAccount), + ) + } + + if app.Spec.Driver.JavaOptions != nil { + args = append(args, "--conf", + fmt.Sprintf("%s=%s", common.SparkDriverExtraJavaOptions, *app.Spec.Driver.JavaOptions)) + } + + if app.Spec.Driver.KubernetesMaster != nil { + args = append(args, "--conf", + fmt.Sprintf("%s=%s", common.SparkKubernetesDriverMaster, *app.Spec.Driver.KubernetesMaster)) + } + + // Populate SparkApplication labels to driver pod + for key, value := range app.Labels { + property = fmt.Sprintf(common.SparkKubernetesDriverLabelTemplate, key) + args = append(args, "--conf", fmt.Sprintf("%s=%s", property, value)) + } + + for key, value := range app.Spec.Driver.Labels { + property = fmt.Sprintf(common.SparkKubernetesDriverLabelTemplate, key) + args = append(args, "--conf", fmt.Sprintf("%s=%s", property, value)) + } + + for key, value := range app.Spec.Driver.Annotations { + property = fmt.Sprintf(common.SparkKubernetesDriverAnnotationTemplate, key) + args = append(args, "--conf", fmt.Sprintf("%s=%s", property, value)) + } + + for key, value := range app.Spec.Driver.ServiceLabels { + property = fmt.Sprintf(common.SparkKubernetesDriverServiceLabelTemplate, key) + args = append(args, "--conf", fmt.Sprintf("%s=%s", property, value)) + } + + for key, value := range app.Spec.Driver.ServiceAnnotations { + property = fmt.Sprintf(common.SparkKubernetesDriverServiceAnnotationTemplate, key) + args = append(args, "--conf", fmt.Sprintf("%s=%s", property, value)) + } + + for key, value := range app.Spec.Driver.EnvSecretKeyRefs { + property = fmt.Sprintf(common.SparkKubernetesDriverSecretKeyRefTemplate, key) + args = append(args, "--conf", fmt.Sprintf("%s=%s:%s", property, value.Name, value.Key)) + } + + return args, nil +} + +// driverSecretOption returns a list of spark-submit arguments for mounting secrets to driver pod. 
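+//
+// For example (a sketch, assuming the usual values of the templates and
+// constants in the common package), a secret
+// {Name: "gcp-sa", Path: "/mnt/secrets", Type: GCPServiceAccount} expands to
+// roughly
+//
+//	--conf spark.kubernetes.driver.secrets.gcp-sa=/mnt/secrets
+//	--conf spark.kubernetes.driverEnv.GOOGLE_APPLICATION_CREDENTIALS=/mnt/secrets/key.json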
+func driverSecretOption(app *v1beta2.SparkApplication) ([]string, error) { + var args []string + for _, secret := range app.Spec.Driver.Secrets { + property := fmt.Sprintf(common.SparkKubernetesDriverSecretsTemplate, secret.Name) + args = append(args, "--conf", fmt.Sprintf("%s=%s", property, secret.Path)) + if secret.Type == v1beta2.SecretTypeGCPServiceAccount { + property := fmt.Sprintf(common.SparkKubernetesDriverEnvTemplate, common.EnvGoogleApplicationCredentials) + conf := fmt.Sprintf("%s=%s", property, filepath.Join(secret.Path, common.ServiceAccountJSONKeyFileName)) + args = append(args, "--conf", conf) + } else if secret.Type == v1beta2.SecretTypeHadoopDelegationToken { + property := fmt.Sprintf(common.SparkKubernetesDriverEnvTemplate, common.EnvHadoopTokenFileLocation) + conf := fmt.Sprintf("%s=%s", property, filepath.Join(secret.Path, common.HadoopDelegationTokenFileName)) + args = append(args, "--conf", conf) + } + } + return args, nil +} + +func driverVolumeMountsOption(app *v1beta2.SparkApplication) ([]string, error) { + volumes := util.GetLocalVolumes(app) + if volumes == nil { + return nil, nil + } + + volumeMounts := util.GetDriverLocalVolumeMounts(app) + if volumeMounts == nil { + return nil, nil + } + + args := []string{} + for _, volumeMount := range volumeMounts { + volumeName := volumeMount.Name + volume, ok := volumes[volumeName] + if !ok { + return args, fmt.Errorf("volume %s not found", volumeName) + } + + var volumeType string + switch { + case volume.EmptyDir != nil: + volumeType = common.VolumeTypeEmptyDir + case volume.HostPath != nil: + volumeType = common.VolumeTypeHostPath + case volume.NFS != nil: + volumeType = common.VolumeTypeNFS + case volume.PersistentVolumeClaim != nil: + volumeType = common.VolumeTypePersistentVolumeClaim + default: + return nil, fmt.Errorf("unsupported volume type") + } + + if volumeMount.MountPath != "" { + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesDriverVolumesMountPathTemplate, + volumeType, + volumeName, + ), + volumeMount.MountPath, + ), + ) + } + + if volumeMount.SubPath != "" { + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesDriverVolumesMountSubPathTemplate, + volumeType, + volumeName, + ), + volumeMount.SubPath, + ), + ) + } + + if volumeMount.ReadOnly { + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesDriverVolumesMountReadOnlyTemplate, + volumeType, + volumeName, + ), + "true", + ), + ) + } + + switch volumeType { + case common.VolumeTypeEmptyDir: + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesDriverVolumesOptionsTemplate, + common.VolumeTypeEmptyDir, + volume.Name, + "sizeLimit", + ), + volume.EmptyDir.SizeLimit.String(), + ), + ) + case common.VolumeTypeHostPath: + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesDriverVolumesOptionsTemplate, + common.VolumeTypeHostPath, + volume.Name, + "path", + ), + volume.HostPath.Path, + ), + ) + + if volume.HostPath.Type != nil { + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesDriverVolumesOptionsTemplate, + common.VolumeTypeHostPath, + volume.Name, + "type", + ), + *volume.HostPath.Type, + ), + ) + } + + case common.VolumeTypeNFS: + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesDriverVolumesOptionsTemplate, + 
common.VolumeTypeNFS, + volume.Name, + "path", + ), + volume.NFS.Path, + ), + ) + + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesDriverVolumesOptionsTemplate, + common.VolumeTypeNFS, + volume.Name, + "server", + ), + volume.NFS.Server, + ), + ) + + if volume.NFS.ReadOnly { + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesDriverVolumesOptionsTemplate, + common.VolumeTypeNFS, + volume.Name, + "readOnly", + ), + "true", + ), + ) + } + + case common.VolumeTypePersistentVolumeClaim: + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesDriverVolumesOptionsTemplate, + common.VolumeTypePersistentVolumeClaim, + volume.Name, + "claimName", + ), + volume.PersistentVolumeClaim.ClaimName, + ), + ) + + if volume.PersistentVolumeClaim.ReadOnly { + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesDriverVolumesOptionsTemplate, + common.VolumeTypePersistentVolumeClaim, + volume.Name, + "readOnly", + ), + "true", + ), + ) + } + } + } + return args, nil +} + +// driverEnvOption returns a list of spark-submit arguments for configuring driver environment variables. +func driverEnvOption(app *v1beta2.SparkApplication) ([]string, error) { + var args []string + for key, value := range app.Spec.Driver.EnvVars { + property := fmt.Sprintf(common.SparkKubernetesDriverEnvTemplate, key) + args = append(args, "--conf", fmt.Sprintf("%s=%s", property, value)) + } + return args, nil +} + +func executorConfOption(app *v1beta2.SparkApplication) ([]string, error) { + var args []string + var property string + + property = fmt.Sprintf(common.SparkKubernetesExecutorLabelTemplate, common.LabelSparkAppName) + args = append(args, "--conf", fmt.Sprintf("%s=%s", property, app.Name)) + + property = fmt.Sprintf(common.SparkKubernetesExecutorLabelTemplate, common.LabelLaunchedBySparkOperator) + args = append(args, "--conf", fmt.Sprintf("%s=%s", property, "true")) + + property = fmt.Sprintf(common.SparkKubernetesExecutorLabelTemplate, common.LabelSubmissionID) + args = append(args, "--conf", fmt.Sprintf("%s=%s", property, app.Status.SubmissionID)) + + if app.Spec.Executor.Instances != nil { + args = append(args, "--conf", + fmt.Sprintf("%s=%d", common.SparkExecutorInstances, *app.Spec.Executor.Instances)) + } + + if app.Spec.Executor.Image != nil && *app.Spec.Executor.Image != "" { + args = append(args, "--conf", + fmt.Sprintf("%s=%s", common.SparkKubernetesExecutorContainerImage, *app.Spec.Executor.Image)) + } else if app.Spec.Image != nil && *app.Spec.Image != "" { + args = append(args, "--conf", + fmt.Sprintf("%s=%s", common.SparkKubernetesExecutorContainerImage, *app.Spec.Image)) + } else { + return nil, fmt.Errorf("executor container image is not specified") + } + + if app.Spec.Executor.Cores != nil { + // Property "spark.executor.cores" does not allow float values. 
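+	// Cores is an int32 pointer, so %d is safe here; fractional CPU requests
+	// go through CoreRequest (spark.kubernetes.executor.request.cores) below.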
+ args = append(args, "--conf", + fmt.Sprintf("%s=%d", common.SparkExecutorCores, *app.Spec.Executor.Cores)) + } + if app.Spec.Executor.CoreRequest != nil { + args = append(args, "--conf", + fmt.Sprintf("%s=%s", common.SparkKubernetesExecutorRequestCores, *app.Spec.Executor.CoreRequest)) + } + if app.Spec.Executor.CoreLimit != nil { + args = append(args, "--conf", + fmt.Sprintf("%s=%s", common.SparkKubernetesExecutorLimitCores, *app.Spec.Executor.CoreLimit)) + } + if app.Spec.Executor.Memory != nil { + args = append(args, "--conf", + fmt.Sprintf("%s=%s", common.SparkExecutorMemory, *app.Spec.Executor.Memory)) + } + if app.Spec.Executor.MemoryOverhead != nil { + args = append(args, "--conf", + fmt.Sprintf("%s=%s", common.SparkExecutorMemoryOverhead, *app.Spec.Executor.MemoryOverhead)) + } + + if app.Spec.Executor.ServiceAccount != nil { + args = append(args, "--conf", + fmt.Sprintf("%s=%s", common.SparkKubernetesAuthenticateExecutorServiceAccountName, *app.Spec.Executor.ServiceAccount)) + } + + if app.Spec.Executor.DeleteOnTermination != nil { + args = append(args, "--conf", + fmt.Sprintf("%s=%t", common.SparkKubernetesExecutorDeleteOnTermination, *app.Spec.Executor.DeleteOnTermination)) + } + + // Populate SparkApplication labels to executor pod + for key, value := range app.Labels { + property := fmt.Sprintf(common.SparkKubernetesExecutorLabelTemplate, key) + args = append(args, "--conf", fmt.Sprintf("%s=%s", property, value)) + } + for key, value := range app.Spec.Executor.Labels { + property := fmt.Sprintf(common.SparkKubernetesExecutorLabelTemplate, key) + args = append(args, "--conf", fmt.Sprintf("%s=%s", property, value)) + } + + for key, value := range app.Spec.Executor.Annotations { + property := fmt.Sprintf(common.SparkKubernetesExecutorAnnotationTemplate, key) + args = append(args, "--conf", fmt.Sprintf("%s=%s", property, value)) + } + + for key, value := range app.Spec.Executor.EnvSecretKeyRefs { + property := fmt.Sprintf(common.SparkKubernetesExecutorSecretKeyRefTemplate, key) + args = append(args, "--conf", fmt.Sprintf("%s=%s:%s", property, value.Name, value.Key)) + } + + if app.Spec.Executor.JavaOptions != nil { + args = append(args, "--conf", fmt.Sprintf("%s=%s", common.SparkExecutorExtraJavaOptions, *app.Spec.Executor.JavaOptions)) + } + + return args, nil +} + +func executorSecretOption(app *v1beta2.SparkApplication) ([]string, error) { + var args []string + for _, secret := range app.Spec.Executor.Secrets { + property := fmt.Sprintf(common.SparkKubernetesExecutorSecretsTemplate, secret.Name) + args = append(args, "--conf", fmt.Sprintf("%s=%s", property, secret.Path)) + switch secret.Type { + case v1beta2.SecretTypeGCPServiceAccount: + property := fmt.Sprintf(common.SparkKubernetesDriverEnvTemplate, common.EnvGoogleApplicationCredentials) + args = append(args, "--conf", fmt.Sprintf("%s=%s", property, + filepath.Join(secret.Path, common.ServiceAccountJSONKeyFileName))) + case v1beta2.SecretTypeHadoopDelegationToken: + property := fmt.Sprintf(common.SparkKubernetesDriverEnvTemplate, common.EnvHadoopTokenFileLocation) + args = append(args, "--conf", fmt.Sprintf("%s=%s", property, + filepath.Join(secret.Path, common.HadoopDelegationTokenFileName))) + } + } + return args, nil +} + +func executorVolumeMountsOption(app *v1beta2.SparkApplication) ([]string, error) { + volumes := util.GetLocalVolumes(app) + if volumes == nil { + return nil, nil + } + + volumeMounts := util.GetExecutorLocalVolumeMounts(app) + if volumeMounts == nil { + return nil, nil + } + + args := []string{} + 
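+	// Mirrors driverVolumeMountsOption above: each mount expands into
+	// spark.kubernetes.executor.volumes.[type].[name].mount.* conf keys,
+	// plus per-type options (path, server, claimName, sizeLimit, ...).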
for _, volumeMount := range volumeMounts { + volumeName := volumeMount.Name + volume, ok := volumes[volumeName] + if !ok { + return args, fmt.Errorf("volume %s not found", volumeName) + } + + var volumeType string + switch { + case volume.EmptyDir != nil: + volumeType = common.VolumeTypeEmptyDir + case volume.HostPath != nil: + volumeType = common.VolumeTypeHostPath + case volume.NFS != nil: + volumeType = common.VolumeTypeNFS + case volume.PersistentVolumeClaim != nil: + volumeType = common.VolumeTypePersistentVolumeClaim + default: + return nil, fmt.Errorf("unsupported volume type") + } + + if volumeMount.MountPath != "" { + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesExecutorVolumesMountPathTemplate, + volumeType, + volumeName, + ), + volumeMount.MountPath, + ), + ) + } + + if volumeMount.SubPath != "" { + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesExecutorVolumesMountSubPathTemplate, + volumeType, + volumeName, + ), + volumeMount.SubPath, + ), + ) + } + + if volumeMount.ReadOnly { + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesExecutorVolumesMountReadOnlyTemplate, + volumeType, + volumeName, + ), + "true", + ), + ) + } + switch volumeType { + case common.VolumeTypeEmptyDir: + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesExecutorVolumesOptionsTemplate, + common.VolumeTypeEmptyDir, + volume.Name, + "sizeLimit", + ), + volume.EmptyDir.SizeLimit.String(), + ), + ) + case common.VolumeTypeHostPath: + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesExecutorVolumesOptionsTemplate, + common.VolumeTypeHostPath, + volume.Name, + "path", + ), + volume.HostPath.Path, + ), + ) + + if volume.HostPath.Type != nil { + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesExecutorVolumesOptionsTemplate, + common.VolumeTypeHostPath, + volume.Name, + "type", + ), + *volume.HostPath.Type, + ), + ) + } + + case common.VolumeTypeNFS: + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesExecutorVolumesOptionsTemplate, + common.VolumeTypeNFS, + volume.Name, + "path", + ), + volume.NFS.Path, + ), + ) + + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesExecutorVolumesOptionsTemplate, + common.VolumeTypeNFS, + volume.Name, + "server", + ), + volume.NFS.Server, + ), + ) + + if volume.NFS.ReadOnly { + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesExecutorVolumesOptionsTemplate, + common.VolumeTypeNFS, + volume.Name, + "readOnly", + ), + "true", + ), + ) + } + + case common.VolumeTypePersistentVolumeClaim: + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesExecutorVolumesOptionsTemplate, + common.VolumeTypePersistentVolumeClaim, + volume.Name, + "claimName", + ), + volume.PersistentVolumeClaim.ClaimName, + ), + ) + + if volume.PersistentVolumeClaim.ReadOnly { + args = append( + args, + "--conf", + fmt.Sprintf( + "%s=%s", + fmt.Sprintf( + common.SparkKubernetesExecutorVolumesOptionsTemplate, + common.VolumeTypePersistentVolumeClaim, + volume.Name, + "readOnly", + ), + "true", + ), + ) + } + } + } + return args, nil +} + +func executorEnvOption(app *v1beta2.SparkApplication) ([]string, error) { + var 
+
+func executorEnvOption(app *v1beta2.SparkApplication) ([]string, error) {
+	var args []string
+	for key, value := range app.Spec.Executor.EnvVars {
+		property := fmt.Sprintf(common.SparkExecutorEnvTemplate, key)
+		args = append(args, "--conf", fmt.Sprintf("%s=%s", property, value))
+	}
+	return args, nil
+}
+
+func dynamicAllocationOption(app *v1beta2.SparkApplication) ([]string, error) {
+	if app.Spec.DynamicAllocation == nil || !app.Spec.DynamicAllocation.Enabled {
+		return nil, nil
+	}
+
+	var args []string
+	dynamicAllocation := app.Spec.DynamicAllocation
+	args = append(args, "--conf",
+		fmt.Sprintf("%s=true", common.SparkDynamicAllocationEnabled))
+
+	// Turn on shuffle tracking whenever dynamic allocation is enabled, so that
+	// executors can be removed without relying on an external shuffle service.
+	args = append(args, "--conf",
+		fmt.Sprintf("%s=true", common.SparkDynamicAllocationShuffleTrackingEnabled))
+
+	if dynamicAllocation.InitialExecutors != nil {
+		args = append(args, "--conf",
+			fmt.Sprintf("%s=%d", common.SparkDynamicAllocationInitialExecutors, *dynamicAllocation.InitialExecutors))
+	}
+	if dynamicAllocation.MinExecutors != nil {
+		args = append(args, "--conf",
+			fmt.Sprintf("%s=%d", common.SparkDynamicAllocationMinExecutors, *dynamicAllocation.MinExecutors))
+	}
+	if dynamicAllocation.MaxExecutors != nil {
+		args = append(args, "--conf",
+			fmt.Sprintf("%s=%d", common.SparkDynamicAllocationMaxExecutors, *dynamicAllocation.MaxExecutors))
+	}
+	if dynamicAllocation.ShuffleTrackingTimeout != nil {
+		args = append(args, "--conf",
+			fmt.Sprintf("%s=%d", common.SparkDynamicAllocationShuffleTrackingTimeout, *dynamicAllocation.ShuffleTrackingTimeout))
+	}
+
+	return args, nil
+}
+
+func proxyUserOption(app *v1beta2.SparkApplication) ([]string, error) {
+	if app.Spec.ProxyUser == nil || *app.Spec.ProxyUser == "" {
+		return nil, nil
+	}
+	args := []string{
+		"--proxy-user",
+		*app.Spec.ProxyUser,
+	}
+	return args, nil
+}
+
+func mainApplicationFileOption(app *v1beta2.SparkApplication) ([]string, error) {
+	if app.Spec.MainApplicationFile == nil {
+		return nil, nil
+	}
+	args := []string{*app.Spec.MainApplicationFile}
+	return args, nil
+}
+
+// applicationOption returns the application arguments.
+func applicationOption(app *v1beta2.SparkApplication) ([]string, error) {
+	return app.Spec.Arguments, nil
+}
diff --git a/internal/controller/sparkapplication/submission_test.go b/internal/controller/sparkapplication/submission_test.go
new file mode 100644
index 0000000000..878438f84d
--- /dev/null
+++ b/internal/controller/sparkapplication/submission_test.go
@@ -0,0 +1,696 @@
+/*
+Copyright 2017 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package sparkapplication + +// import ( +// "fmt" +// "os" +// "reflect" +// "sort" +// "strconv" +// "testing" + +// "github.com/google/uuid" +// "github.com/stretchr/testify/assert" +// corev1 "k8s.io/api/core/v1" +// "k8s.io/apimachinery/pkg/api/resource" +// metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// "github.com/kubeflow/spark-operator/api/v1beta2" +// "github.com/kubeflow/spark-operator/pkg/common" +// "github.com/kubeflow/spark-operator/pkg/util" +// ) + +// const ( +// VolumeMountPathTemplate = "spark.kubernetes.%s.volumes.%s.%s.mount.path=%s" +// VolumeMountOptionPathTemplate = "spark.kubernetes.%s.volumes.%s.%s.options.%s=%s" +// SparkDriverLabelAnnotationTemplate = "spark.kubernetes.driver.label.sparkoperator.k8s.io/%s=%s" +// SparkDriverLabelTemplate = "spark.kubernetes.driver.label.%s=%s" +// SparkDriverServiceLabelTemplate = "spark.kubernetes.driver.service.label.%s=%s" +// SparkExecutorLabelAnnotationTemplate = "spark.kubernetes.executor.label.sparkoperator.k8s.io/%s=%s" +// SparkExecutorLabelTemplate = "spark.kubernetes.executor.label.%s=%s" +// ) + +// func TestAddLocalDir_HostPath(t *testing.T) { +// volumes := []corev1.Volume{ +// { +// Name: "spark-local-dir-1", +// VolumeSource: corev1.VolumeSource{ +// HostPath: &corev1.HostPathVolumeSource{ +// Path: "/tmp/mnt", +// }, +// }, +// }, +// } + +// volumeMounts := []corev1.VolumeMount{ +// { +// Name: "spark-local-dir-1", +// MountPath: "/tmp/mnt-1", +// }, +// } + +// app := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "spark-test", +// UID: "spark-test-1", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// Volumes: volumes, +// Driver: v1beta2.DriverSpec{ +// SparkPodSpec: v1beta2.SparkPodSpec{ +// VolumeMounts: volumeMounts, +// }, +// }, +// }, +// } + +// localDirOptions, err := addLocalDirConfOptions(app) +// if err != nil { +// t.Fatal(err) +// } + +// assert.Equal(t, 0, len(app.Spec.Volumes)) +// assert.Equal(t, 0, len(app.Spec.Driver.VolumeMounts)) +// assert.Equal(t, 2, len(localDirOptions)) +// assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "driver", "hostPath", volumes[0].Name, volumeMounts[0].MountPath), localDirOptions[0]) +// assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "driver", "hostPath", volumes[0].Name, "path", volumes[0].HostPath.Path), localDirOptions[1]) +// } + +// func TestAddLocalDir_PVC(t *testing.T) { +// volumes := []corev1.Volume{ +// { +// Name: "spark-local-dir-1", +// VolumeSource: corev1.VolumeSource{ +// PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ +// ClaimName: "/tmp/mnt-1", +// }, +// }, +// }, +// } + +// volumeMounts := []corev1.VolumeMount{ +// { +// Name: "spark-local-dir-1", +// MountPath: "/tmp/mnt-1", +// }, +// } + +// app := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "spark-test", +// UID: "spark-test-1", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// Volumes: volumes, +// Driver: v1beta2.DriverSpec{ +// SparkPodSpec: v1beta2.SparkPodSpec{ +// VolumeMounts: volumeMounts, +// }, +// }, +// }, +// } + +// localDirOptions, err := addLocalDirConfOptions(app) +// if err != nil { +// t.Fatal(err) +// } + +// assert.Equal(t, 0, len(app.Spec.Volumes)) +// assert.Equal(t, 0, len(app.Spec.Driver.VolumeMounts)) +// assert.Equal(t, 2, len(localDirOptions)) +// assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "driver", "persistentVolumeClaim", volumes[0].Name, volumeMounts[0].MountPath), localDirOptions[0]) +// assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, 
"driver", "persistentVolumeClaim", volumes[0].Name, "claimName", volumes[0].PersistentVolumeClaim.ClaimName), localDirOptions[1]) +// } + +// func TestAddLocalDir_MixedVolumes(t *testing.T) { +// volumes := []corev1.Volume{ +// { +// Name: "spark-local-dir-1", +// VolumeSource: corev1.VolumeSource{ +// HostPath: &corev1.HostPathVolumeSource{ +// Path: "/tmp/mnt-1", +// }, +// }, +// }, +// { +// Name: "log-dir", +// VolumeSource: corev1.VolumeSource{ +// HostPath: &corev1.HostPathVolumeSource{ +// Path: "/var/log/spark", +// }, +// }, +// }, +// } + +// volumeMounts := []corev1.VolumeMount{ +// { +// Name: "spark-local-dir-1", +// MountPath: "/tmp/mnt-1", +// }, +// { +// Name: "log-dir", +// MountPath: "/var/log/spark", +// }, +// } + +// app := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "spark-test", +// UID: "spark-test-1", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// Volumes: volumes, +// Driver: v1beta2.DriverSpec{ +// SparkPodSpec: v1beta2.SparkPodSpec{ +// VolumeMounts: volumeMounts, +// }, +// }, +// }, +// } + +// localDirOptions, err := addLocalDirConfOptions(app) +// if err != nil { +// t.Fatal(err) +// } + +// assert.Equal(t, 1, len(app.Spec.Volumes)) +// assert.Equal(t, 1, len(app.Spec.Driver.VolumeMounts)) +// assert.Equal(t, 2, len(localDirOptions)) +// assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "driver", "hostPath", volumes[0].Name, volumeMounts[0].MountPath), localDirOptions[0]) +// assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "driver", "hostPath", volumes[0].Name, "path", volumes[0].HostPath.Path), localDirOptions[1]) +// } + +// func TestAddLocalDir_MultipleScratchVolumes(t *testing.T) { +// volumes := []corev1.Volume{ +// { +// Name: "spark-local-dir-1", +// VolumeSource: corev1.VolumeSource{ +// HostPath: &corev1.HostPathVolumeSource{ +// Path: "/tmp/mnt-1", +// }, +// }, +// }, +// { +// Name: "spark-local-dir-2", +// VolumeSource: corev1.VolumeSource{ +// HostPath: &corev1.HostPathVolumeSource{ +// Path: "/tmp/mnt-2", +// }, +// }, +// }, +// } + +// volumeMounts := []corev1.VolumeMount{ +// { +// Name: "spark-local-dir-1", +// MountPath: "/tmp/mnt-1", +// }, +// { +// Name: "spark-local-dir-2", +// MountPath: "/tmp/mnt-2", +// }, +// } + +// app := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "spark-test", +// UID: "spark-test-1", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// Volumes: volumes, +// Driver: v1beta2.DriverSpec{ +// SparkPodSpec: v1beta2.SparkPodSpec{ +// VolumeMounts: volumeMounts, +// }, +// }, +// }, +// } + +// localDirOptions, err := addLocalDirConfOptions(app) +// if err != nil { +// t.Fatal(err) +// } + +// assert.Equal(t, 0, len(app.Spec.Volumes)) +// assert.Equal(t, 0, len(app.Spec.Driver.VolumeMounts)) +// assert.Equal(t, 4, len(localDirOptions)) +// assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "driver", "hostPath", volumes[0].Name, volumeMounts[0].MountPath), localDirOptions[0]) +// assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "driver", "hostPath", volumes[0].Name, "path", volumes[0].HostPath.Path), localDirOptions[1]) +// assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "driver", "hostPath", volumes[1].Name, volumeMounts[1].MountPath), localDirOptions[2]) +// assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "driver", "hostPath", volumes[1].Name, "path", volumes[1].HostPath.Path), localDirOptions[3]) +// } + +// func TestAddLocalDir_Executor(t *testing.T) { +// volumes := []corev1.Volume{ +// { +// Name: 
"spark-local-dir-1", +// VolumeSource: corev1.VolumeSource{ +// HostPath: &corev1.HostPathVolumeSource{ +// Path: "/tmp/mnt", +// }, +// }, +// }, +// } + +// volumeMounts := []corev1.VolumeMount{ +// { +// Name: "spark-local-dir-1", +// MountPath: "/tmp/mnt-1", +// }, +// } + +// app := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "spark-test", +// UID: "spark-test-1", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// Volumes: volumes, +// Executor: v1beta2.ExecutorSpec{ +// SparkPodSpec: v1beta2.SparkPodSpec{ +// VolumeMounts: volumeMounts, +// }, +// }, +// }, +// } + +// localDirOptions, err := addLocalDirConfOptions(app) +// if err != nil { +// t.Fatal(err) +// } + +// assert.Equal(t, 0, len(app.Spec.Volumes)) +// assert.Equal(t, 0, len(app.Spec.Executor.VolumeMounts)) +// assert.Equal(t, 2, len(localDirOptions)) +// assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "executor", "hostPath", volumes[0].Name, volumeMounts[0].MountPath), localDirOptions[0]) +// assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "executor", "hostPath", volumes[0].Name, "path", volumes[0].HostPath.Path), localDirOptions[1]) +// } + +// func TestAddLocalDir_Driver_Executor(t *testing.T) { +// volumes := []corev1.Volume{ +// { +// Name: "spark-local-dir-1", +// VolumeSource: corev1.VolumeSource{ +// HostPath: &corev1.HostPathVolumeSource{ +// Path: "/tmp/mnt", +// }, +// }, +// }, +// { +// Name: "test-volume", +// VolumeSource: corev1.VolumeSource{ +// HostPath: &corev1.HostPathVolumeSource{ +// Path: "/tmp/test", +// }, +// }, +// }, +// } + +// volumeMounts := []corev1.VolumeMount{ +// { +// Name: "spark-local-dir-1", +// MountPath: "/tmp/mnt-1", +// }, +// { +// Name: "test-volume", +// MountPath: "/tmp/test", +// }, +// } + +// app := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "spark-test", +// UID: "spark-test-1", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// Volumes: volumes, +// Driver: v1beta2.DriverSpec{ +// SparkPodSpec: v1beta2.SparkPodSpec{ +// VolumeMounts: volumeMounts, +// }, +// }, +// Executor: v1beta2.ExecutorSpec{ +// SparkPodSpec: v1beta2.SparkPodSpec{ +// VolumeMounts: volumeMounts, +// }, +// }, +// }, +// } + +// localDirOptions, err := addLocalDirConfOptions(app) +// if err != nil { +// t.Fatal(err) +// } + +// assert.Equal(t, 1, len(app.Spec.Volumes)) +// assert.Equal(t, 1, len(app.Spec.Driver.VolumeMounts)) +// assert.Equal(t, 1, len(app.Spec.Executor.VolumeMounts)) +// assert.Equal(t, 4, len(localDirOptions)) +// assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "driver", "hostPath", volumes[0].Name, volumeMounts[0].MountPath), localDirOptions[0]) +// assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "driver", "hostPath", volumes[0].Name, "path", volumes[0].HostPath.Path), localDirOptions[1]) +// assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "executor", "hostPath", volumes[0].Name, volumeMounts[0].MountPath), localDirOptions[2]) +// assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "executor", "hostPath", volumes[0].Name, "path", volumes[0].HostPath.Path), localDirOptions[3]) +// } + +// func TestAddEmptyDir_Driver_Executor_WithSizeLimit(t *testing.T) { +// sizeLimit := resource.MustParse("5Gi") +// volumes := []corev1.Volume{ +// { +// Name: "spark-local-dir-1", +// VolumeSource: corev1.VolumeSource{ +// EmptyDir: &corev1.EmptyDirVolumeSource{ +// SizeLimit: &sizeLimit, +// }, +// }, +// }, +// } + +// volumeMounts := []corev1.VolumeMount{ +// { +// Name: "spark-local-dir-1", +// 
MountPath: "/tmp/mnt-1", +// }, +// } + +// app := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "spark-test", +// UID: "spark-test-1", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// Volumes: volumes, +// Driver: v1beta2.DriverSpec{ +// SparkPodSpec: v1beta2.SparkPodSpec{ +// VolumeMounts: volumeMounts, +// }, +// }, +// Executor: v1beta2.ExecutorSpec{ +// SparkPodSpec: v1beta2.SparkPodSpec{ +// VolumeMounts: volumeMounts, +// }, +// }, +// }, +// } + +// localDirOptions, err := addLocalDirConfOptions(app) +// if err != nil { +// t.Fatal(err) +// } + +// assert.Equal(t, 0, len(app.Spec.Volumes)) +// assert.Equal(t, 0, len(app.Spec.Driver.VolumeMounts)) +// assert.Equal(t, 0, len(app.Spec.Executor.VolumeMounts)) +// assert.Equal(t, 4, len(localDirOptions)) +// assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "driver", "emptyDir", volumes[0].Name, volumeMounts[0].MountPath), localDirOptions[0]) +// assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "driver", "emptyDir", volumes[0].Name, "sizeLimit", volumes[0].EmptyDir.SizeLimit.String()), localDirOptions[1]) +// assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "executor", "emptyDir", volumes[0].Name, volumeMounts[0].MountPath), localDirOptions[2]) +// assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "executor", "emptyDir", volumes[0].Name, "sizeLimit", volumes[0].EmptyDir.SizeLimit.String()), localDirOptions[3]) +// } + +// func TestPopulateLabels_Driver_Executor(t *testing.T) { +// const ( +// AppLabelKey = "app-label-key" +// AppLabelValue = "app-label-value" +// DriverLabelKey = "driver-label-key" +// DriverLabelValue = "driver-label-key" +// DriverServiceLabelKey = "driver-svc-label-key" +// DriverServiceLabelValue = "driver-svc-label-value" +// ExecutorLabelKey = "executor-label-key" +// ExecutorLabelValue = "executor-label-key" +// ) + +// app := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "spark-test", +// UID: "spark-test-1", +// Labels: map[string]string{AppLabelKey: AppLabelValue}, +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// Driver: v1beta2.DriverSpec{ +// ServiceLabels: map[string]string{DriverServiceLabelKey: DriverServiceLabelValue}, +// SparkPodSpec: v1beta2.SparkPodSpec{ +// Labels: map[string]string{DriverLabelKey: DriverLabelValue}, +// }, +// }, +// Executor: v1beta2.ExecutorSpec{ +// SparkPodSpec: v1beta2.SparkPodSpec{ +// Labels: map[string]string{ExecutorLabelKey: ExecutorLabelValue}, +// }, +// }, +// }, +// } + +// submissionID := uuid.New().String() +// driverOptions, err := addDriverConfOptions(app, submissionID) +// if err != nil { +// t.Fatal(err) +// } +// assert.Equal(t, 6, len(driverOptions)) +// sort.Strings(driverOptions) +// expectedDriverLabels := []string{ +// fmt.Sprintf(SparkDriverLabelAnnotationTemplate, "launched-by-spark-operator", strconv.FormatBool(true)), +// fmt.Sprintf(SparkDriverLabelAnnotationTemplate, "app-name", "spark-test"), +// fmt.Sprintf(SparkDriverLabelAnnotationTemplate, "submission-id", submissionID), +// fmt.Sprintf(SparkDriverLabelTemplate, AppLabelKey, AppLabelValue), +// fmt.Sprintf(SparkDriverLabelTemplate, DriverLabelKey, DriverLabelValue), +// fmt.Sprintf(SparkDriverServiceLabelTemplate, DriverServiceLabelKey, DriverServiceLabelValue), +// } +// sort.Strings(expectedDriverLabels) + +// if !reflect.DeepEqual(expectedDriverLabels, driverOptions) { +// t.Errorf("Executor labels: wanted %+q got %+q", expectedDriverLabels, driverOptions) +// } + +// executorOptions, err := 
addExecutorConfOptions(app, submissionID) +// sort.Strings(executorOptions) +// if err != nil { +// t.Fatal(err) +// } +// assert.Equal(t, 5, len(executorOptions)) +// expectedExecutorLabels := []string{ +// fmt.Sprintf(SparkExecutorLabelAnnotationTemplate, "app-name", "spark-test"), +// fmt.Sprintf(SparkExecutorLabelAnnotationTemplate, "launched-by-spark-operator", strconv.FormatBool(true)), +// fmt.Sprintf(SparkExecutorLabelAnnotationTemplate, "submission-id", submissionID), +// fmt.Sprintf(SparkExecutorLabelTemplate, AppLabelKey, AppLabelValue), +// fmt.Sprintf(SparkExecutorLabelTemplate, ExecutorLabelKey, ExecutorLabelValue), +// } +// sort.Strings(expectedExecutorLabels) + +// if !reflect.DeepEqual(expectedExecutorLabels, executorOptions) { +// t.Errorf("Executor labels: wanted %+q got %+q", expectedExecutorLabels, executorOptions) +// } +// } + +// func TestPopulateLabelsOverride_Driver_Executor(t *testing.T) { +// const ( +// AppLabelKey = "app-label-key" +// AppLabelValue = "app-label-value" +// DriverLabelKey = "driver-label-key" +// DriverLabelValue = "driver-label-key" +// DriverAppLabelOverride = "driver-app-label-override" +// ExecutorLabelKey = "executor-label-key" +// ExecutorLabelValue = "executor-label-key" +// ExecutorAppLabelOverride = "executor-app-label-override" +// ) + +// app := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "spark-test", +// UID: "spark-test-1", +// Labels: map[string]string{AppLabelKey: AppLabelValue}, +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// Driver: v1beta2.DriverSpec{ +// SparkPodSpec: v1beta2.SparkPodSpec{ +// Labels: map[string]string{DriverLabelKey: DriverLabelValue, AppLabelKey: DriverAppLabelOverride}, +// }, +// }, +// Executor: v1beta2.ExecutorSpec{ +// SparkPodSpec: v1beta2.SparkPodSpec{ +// Labels: map[string]string{ExecutorLabelKey: ExecutorLabelValue, AppLabelKey: ExecutorAppLabelOverride}, +// }, +// }, +// }, +// } + +// submissionID := uuid.New().String() +// driverOptions, err := addDriverConfOptions(app, submissionID) +// if err != nil { +// t.Fatal(err) +// } +// sort.Strings(driverOptions) +// assert.Equal(t, 5, len(driverOptions)) +// expectedDriverLabels := []string{ +// fmt.Sprintf(SparkDriverLabelTemplate, AppLabelKey, DriverAppLabelOverride), +// fmt.Sprintf(SparkDriverLabelTemplate, DriverLabelKey, DriverLabelValue), +// fmt.Sprintf(SparkDriverLabelAnnotationTemplate, "app-name", "spark-test"), +// fmt.Sprintf(SparkDriverLabelAnnotationTemplate, "launched-by-spark-operator", strconv.FormatBool(true)), +// fmt.Sprintf(SparkDriverLabelAnnotationTemplate, "submission-id", submissionID), +// } +// sort.Strings(expectedDriverLabels) + +// if !reflect.DeepEqual(expectedDriverLabels, driverOptions) { +// t.Errorf("Executor labels: wanted %+q got %+q", expectedDriverLabels, driverOptions) +// } + +// executorOptions, err := addExecutorConfOptions(app, submissionID) +// if err != nil { +// t.Fatal(err) +// } +// sort.Strings(executorOptions) +// assert.Equal(t, 5, len(executorOptions)) +// expectedExecutorLabels := []string{ +// fmt.Sprintf(SparkExecutorLabelTemplate, AppLabelKey, ExecutorAppLabelOverride), +// fmt.Sprintf(SparkExecutorLabelTemplate, ExecutorLabelKey, ExecutorLabelValue), +// fmt.Sprintf(SparkExecutorLabelAnnotationTemplate, "launched-by-spark-operator", strconv.FormatBool(true)), +// fmt.Sprintf(SparkExecutorLabelAnnotationTemplate, "app-name", "spark-test"), +// fmt.Sprintf(SparkExecutorLabelAnnotationTemplate, "submission-id", submissionID), +// } +// 
sort.Strings(expectedExecutorLabels) + +// if !reflect.DeepEqual(expectedExecutorLabels, executorOptions) { +// t.Errorf("Executor labels: wanted %+q got %+q", expectedExecutorLabels, executorOptions) +// } +// } + +// func TestDynamicAllocationOptions(t *testing.T) { +// app := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "spark-test", +// UID: "spark-test-1", +// }, +// Spec: v1beta2.SparkApplicationSpec{}, +// } +// options := addDynamicAllocationConfOptions(app) +// assert.Equal(t, 0, len(options)) + +// app = &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "spark-test", +// UID: "spark-test-1", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// DynamicAllocation: &v1beta2.DynamicAllocation{ +// Enabled: true, +// InitialExecutors: util.Int32Ptr(2), +// MinExecutors: util.Int32Ptr(0), +// MaxExecutors: util.Int32Ptr(10), +// ShuffleTrackingTimeout: util.Int64Ptr(6000000), +// }, +// }, +// } + +// options = addDynamicAllocationConfOptions(app) +// assert.Equal(t, 6, len(options)) +// assert.Equal(t, fmt.Sprintf("%s=true", common.SparkDynamicAllocationEnabled), options[0]) +// assert.Equal(t, fmt.Sprintf("%s=true", common.SparkDynamicAllocationShuffleTrackingEnabled), options[1]) +// assert.Equal(t, fmt.Sprintf("%s=2", common.SparkDynamicAllocationInitialExecutors), options[2]) +// assert.Equal(t, fmt.Sprintf("%s=0", common.SparkDynamicAllocationMinExecutors), options[3]) +// assert.Equal(t, fmt.Sprintf("%s=10", common.SparkDynamicAllocationMaxExecutors), options[4]) +// assert.Equal(t, fmt.Sprintf("%s=6000000", common.SparkDynamicAllocationShuffleTrackingTimeout), options[5]) +// } + +// func TestProxyUserArg(t *testing.T) { +// const ( +// host = "localhost" +// port = "6443" +// ) + +// if err := os.Setenv(common.EnvKubernetesServiceHost, host); err != nil { +// t.Fatal(err) +// } +// if err := os.Setenv(common.EnvKubernetesServicePort, port); err != nil { +// t.Fatal(err) +// } + +// app := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "spark-test", +// UID: "spark-test-1", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// Mode: v1beta2.ClusterMode, +// ProxyUser: util.StringPtr("foo"), +// }, +// } + +// submissionID := uuid.New().String() +// driverPodName := app.GetDriverPodName() +// args, err := buildSubmissionCommandArgs(app, driverPodName, submissionID) +// if err != nil { +// t.Fatal(err) +// } + +// assert.Equal(t, "--master", args[0]) +// assert.Equal(t, fmt.Sprintf("k8s://https://%s:%s", host, port), args[1]) +// assert.Equal(t, "--deploy-mode", args[2]) +// assert.Equal(t, string(v1beta2.ClusterMode), args[3]) +// assert.Equal(t, "--proxy-user", args[4]) +// assert.Equal(t, "foo", args[5]) +// } + +// func Test_getMasterURL(t *testing.T) { +// setEnv := func(host string, port string) { +// if err := os.Setenv(common.EnvKubernetesServiceHost, host); err != nil { +// t.Fatal(err) +// } +// if err := os.Setenv(common.EnvKubernetesServicePort, port); err != nil { +// t.Fatal(err) +// } +// } + +// tests := []struct { +// name string +// host string +// port string +// want string +// wantErr assert.ErrorAssertionFunc +// }{ +// { +// name: "should return a valid master url when IPv4 address is used", +// host: "localhost", +// port: "6443", +// want: "k8s://https://localhost:6443", +// wantErr: assert.NoError, +// }, +// { +// name: "should return a valid master url when IPv6 address is used", +// host: "::1", +// port: "6443", +// want: "k8s://https://[::1]:6443", +// wantErr: assert.NoError, +// 
},
+// 		{
+// 			name:    "should throw an error when the host is empty",
+// 			host:    "",
+// 			port:    "6443",
+// 			want:    "",
+// 			wantErr: assert.Error,
+// 		},
+// 	}
+// 	for _, tt := range tests {
+// 		t.Run(tt.name, func(t *testing.T) {
+// 			setEnv(tt.host, tt.port)
+// 			got, err := getMasterURL()
+// 			if !tt.wantErr(t, err, "getMasterURL()") {
+// 				return
+// 			}
+// 			assert.Equalf(t, tt.want, got, "getMasterURL()")
+// 		})
+// 	}
+// }
diff --git a/internal/controller/sparkapplication/suite_test.go b/internal/controller/sparkapplication/suite_test.go
new file mode 100644
index 0000000000..02ce4c260b
--- /dev/null
+++ b/internal/controller/sparkapplication/suite_test.go
@@ -0,0 +1,94 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sparkapplication_test
+
+import (
+	"fmt"
+	"path/filepath"
+	"runtime"
+	"testing"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+
+	"k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/rest"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/envtest"
+	"sigs.k8s.io/controller-runtime/pkg/log"
+	"sigs.k8s.io/controller-runtime/pkg/log/zap"
+
+	"github.com/kubeflow/spark-operator/api/v1beta1"
+	"github.com/kubeflow/spark-operator/api/v1beta2"
+	// +kubebuilder:scaffold:imports
+)
+
+// These tests use Ginkgo (a BDD-style Go testing framework). Refer to
+// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
+
+var cfg *rest.Config
+var k8sClient client.Client
+var testEnv *envtest.Environment
+
+func TestControllers(t *testing.T) {
+	RegisterFailHandler(Fail)
+
+	RunSpecs(t, "Controller Suite")
+}
+
+var _ = BeforeSuite(func() {
+	log.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
+
+	By("bootstrapping test environment")
+	testEnv = &envtest.Environment{
+		CRDDirectoryPaths:     []string{filepath.Join("..", "..", "..", "config", "crd", "bases")},
+		ErrorIfCRDPathMissing: true,
+
+		// BinaryAssetsDirectory is only required if you want to run the tests
+		// directly, without calling the makefile target `test`. If it is not set,
+		// envtest falls back to the default path defined in controller-runtime,
+		// which is /usr/local/kubebuilder/. Note that the required binaries must
+		// be set up under the bin directory to run the tests directly; running
+		// `make test` sets them up and uses them automatically.
+		BinaryAssetsDirectory: filepath.Join("..", "..", "..", "bin", "k8s",
+			fmt.Sprintf("1.29.3-%s-%s", runtime.GOOS, runtime.GOARCH)),
+	}
+
+	var err error
+	// cfg is defined in this file globally.
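+	// envtest starts a local control plane (kube-apiserver and etcd) from the
+	// binaries above; Start returns a *rest.Config for that ephemeral cluster.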
+ cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + err = v1beta2.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + err = v1beta1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + // +kubebuilder:scaffold:scheme + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + +}) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) diff --git a/internal/controller/sparkapplication/validator.go b/internal/controller/sparkapplication/validator.go new file mode 100644 index 0000000000..0cb9850785 --- /dev/null +++ b/internal/controller/sparkapplication/validator.go @@ -0,0 +1,44 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sparkapplication + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +type Validator struct{} + +// Validator implements admission.CustomValidator. +var _ admission.CustomValidator = &Validator{} + +// ValidateCreate implements admission.CustomValidator. +func (s *Validator) ValidateCreate(_ context.Context, _ runtime.Object) (admission.Warnings, error) { + return nil, nil +} + +// ValidateDelete implements admission.CustomValidator. +func (s *Validator) ValidateDelete(_ context.Context, _ runtime.Object) (admission.Warnings, error) { + return nil, nil +} + +// ValidateUpdate implements admission.CustomValidator. +func (s *Validator) ValidateUpdate(_ context.Context, _ runtime.Object, _ runtime.Object) (admission.Warnings, error) { + return nil, nil +} diff --git a/internal/controller/sparkapplication/web_ui.go b/internal/controller/sparkapplication/web_ui.go new file mode 100644 index 0000000000..284b4b6f4e --- /dev/null +++ b/internal/controller/sparkapplication/web_ui.go @@ -0,0 +1,92 @@ +/* +Copyright 2017 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package sparkapplication
+
+import (
+	"fmt"
+	"net/url"
+	"strconv"
+
+	"github.com/kubeflow/spark-operator/api/v1beta2"
+	"github.com/kubeflow/spark-operator/pkg/common"
+	"github.com/kubeflow/spark-operator/pkg/util"
+)
+
+func (r *Reconciler) createWebUIService(app *v1beta2.SparkApplication) (*SparkService, error) {
+	portName := getWebUIServicePortName(app)
+	port, err := getWebUIServicePort(app)
+	if err != nil {
+		return nil, fmt.Errorf("invalid Spark UI servicePort: %w", err)
+	}
+
+	targetPort, err := getWebUITargetPort(app)
+	if err != nil {
+		return nil, fmt.Errorf("invalid Spark UI targetPort: %w", err)
+	}
+
+	serviceName := util.GetDefaultUIServiceName(app)
+	serviceType := util.GetWebUIServiceType(app)
+	serviceLabels := util.GetWebUIServiceLabels(app)
+	serviceAnnotations := util.GetWebUIServiceAnnotations(app)
+
+	return r.createDriverIngressService(app, portName, port, targetPort, serviceName, serviceType, serviceAnnotations, serviceLabels)
+}
+
+func (r *Reconciler) createWebUIIngress(app *v1beta2.SparkApplication, service SparkService, ingressURL *url.URL, ingressClassName string) (*SparkIngress, error) {
+	ingressName := util.GetDefaultUIIngressName(app)
+	if util.IngressCapabilities.Has("networking.k8s.io/v1") {
+		return r.createDriverIngressV1(app, service, ingressName, ingressURL, ingressClassName)
+	}
+	return r.createDriverIngressLegacy(app, service, ingressName, ingressURL)
+}
+
+func getWebUIServicePortName(app *v1beta2.SparkApplication) string {
+	if app.Spec.SparkUIOptions == nil {
+		return common.DefaultSparkWebUIPortName
+	}
+	portName := app.Spec.SparkUIOptions.ServicePortName
+	if portName != nil {
+		return *portName
+	}
+	return common.DefaultSparkWebUIPortName
+}
+
+func getWebUIServicePort(app *v1beta2.SparkApplication) (int32, error) {
+	if app.Spec.SparkUIOptions == nil {
+		return getWebUITargetPort(app)
+	}
+	port := app.Spec.SparkUIOptions.ServicePort
+	if port != nil {
+		return *port, nil
+	}
+	return common.DefaultSparkWebUIPort, nil
+}
+
+// getWebUITargetPort attempts to get the Spark web UI port from configuration property spark.ui.port
+// in Spec.SparkConf if it is present, otherwise the default port is returned.
+// An error is returned if the property is present but cannot be parsed as a port number.
+// Note that we don't attempt to get the port from Spec.SparkConfigMap.
+func getWebUITargetPort(app *v1beta2.SparkApplication) (int32, error) {
+	portStr, ok := app.Spec.SparkConf[common.SparkUIPortKey]
+	if !ok {
+		return common.DefaultSparkWebUIPort, nil
+	}
+	port, err := strconv.Atoi(portStr)
+	if err != nil {
+		return common.DefaultSparkWebUIPort, fmt.Errorf("failed to parse %s %q: %w", common.SparkUIPortKey, portStr, err)
+	}
+	return int32(port), nil
+}
diff --git a/internal/controller/sparkapplication/web_ui_test.go b/internal/controller/sparkapplication/web_ui_test.go
new file mode 100644
index 0000000000..a2d1566f33
--- /dev/null
+++ b/internal/controller/sparkapplication/web_ui_test.go
@@ -0,0 +1,655 @@
+/*
+Copyright 2017 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package sparkapplication + +// func TestCreateSparkUIService(t *testing.T) { +// type testcase struct { +// name string +// app *v1beta2.SparkApplication +// expectedService SparkService +// expectedSelector map[string]string +// expectError bool +// } +// testFn := func(test testcase, t *testing.T) { +// fakeClient := fake.NewSimpleClientset() +// util.IngressCapabilities = map[string]bool{"networking.k8s.io/v1": true} +// sparkService, err := createSparkUIService(test.app, fakeClient) +// if err != nil { +// if test.expectError { +// return +// } +// t.Fatal(err) +// } +// if sparkService.serviceName != test.expectedService.serviceName { +// t.Errorf("%s: for service name wanted %s got %s", test.name, test.expectedService.serviceName, sparkService.serviceName) +// } +// service, err := fakeClient.CoreV1(). +// Services(test.app.Namespace). +// Get(context.TODO(), sparkService.serviceName, metav1.GetOptions{}) +// if err != nil { +// if test.expectError { +// return +// } +// t.Fatal(err) +// } +// if service.Labels[common.SparkAppNameLabel] != test.app.Name { +// t.Errorf("%s: service of app %s has the wrong labels", test.name, test.app.Name) +// } +// if !reflect.DeepEqual(test.expectedSelector, service.Spec.Selector) { +// t.Errorf("%s: for label selector wanted %s got %s", test.name, test.expectedSelector, service.Spec.Selector) +// } +// if service.Spec.Type != test.expectedService.serviceType { +// t.Errorf("%s: for service type wanted %s got %s", test.name, test.expectedService.serviceType, service.Spec.Type) +// } +// if len(service.Spec.Ports) != 1 { +// t.Errorf("%s: wanted a single port got %d ports", test.name, len(service.Spec.Ports)) +// } +// port := service.Spec.Ports[0] +// if port.Port != test.expectedService.servicePort { +// t.Errorf("%s: unexpected port wanted %d got %d", test.name, test.expectedService.servicePort, port.Port) +// } +// if port.Name != test.expectedService.servicePortName { +// t.Errorf("%s: unexpected port name wanted %s got %s", test.name, test.expectedService.servicePortName, port.Name) +// } +// serviceAnnotations := service.ObjectMeta.Annotations +// if !reflect.DeepEqual(serviceAnnotations, test.expectedService.serviceAnnotations) { +// t.Errorf("%s: unexpected annotations wanted %s got %s", test.name, test.expectedService.serviceAnnotations, serviceAnnotations) +// } +// serviceLabels := service.ObjectMeta.Labels +// if !reflect.DeepEqual(serviceLabels, test.expectedService.serviceLabels) { +// t.Errorf("%s: unexpected labels wanted %s got %s", test.name, test.expectedService.serviceLabels, serviceLabels) +// } +// } +// defaultPort := defaultSparkWebUIPort +// defaultPortName := defaultSparkWebUIPortName +// app1 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo1", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// SparkConf: map[string]string{ +// sparkUIPortConfigurationKey: "4041", +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-1", +// ExecutionAttempts: 1, +// }, +// } +// app2 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo2", +// Namespace: "default", +// UID: "foo-123", +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-2", +// ExecutionAttempts: 2, +// }, +// } +// app3 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo3", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// 
SparkConf: map[string]string{ +// sparkUIPortConfigurationKey: "4041x", +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-3", +// }, +// } +// var appPort int32 = 80 +// app4 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo4", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// SparkUIOptions: &v1beta2.SparkUIConfiguration{ +// ServicePort: &appPort, +// IngressAnnotations: nil, +// IngressTLS: nil, +// }, +// SparkConf: map[string]string{ +// sparkUIPortConfigurationKey: "4041", +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-3", +// }, +// } +// var serviceTypeNodePort apiv1.ServiceType = apiv1.ServiceTypeNodePort +// app5 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo5", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// SparkUIOptions: &v1beta2.SparkUIConfiguration{ +// ServiceType: &serviceTypeNodePort, +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-2", +// ExecutionAttempts: 2, +// }, +// } +// appPortName := "http-spark-test" +// app6 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo6", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// SparkUIOptions: &v1beta2.SparkUIConfiguration{ +// ServicePort: &appPort, +// ServicePortName: &appPortName, +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-6", +// }, +// } +// app7 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo7", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// SparkUIOptions: &v1beta2.SparkUIConfiguration{ +// ServiceAnnotations: map[string]string{ +// "key": "value", +// }, +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-7", +// ExecutionAttempts: 1, +// }, +// } +// app8 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo8", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// SparkUIOptions: &v1beta2.SparkUIConfiguration{ +// ServiceLabels: map[string]string{ +// "sparkoperator.k8s.io/app-name": "foo8", +// "key": "value", +// }, +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-8", +// ExecutionAttempts: 1, +// }, +// } +// testcases := []testcase{ +// { +// name: "service with custom serviceport and serviceport and target port are same", +// app: app1, +// expectedService: SparkService{ +// serviceName: fmt.Sprintf("%s-ui-svc", app1.GetName()), +// serviceType: apiv1.ServiceTypeClusterIP, +// servicePortName: defaultPortName, +// servicePort: 4041, +// serviceLabels: map[string]string{ +// "sparkoperator.k8s.io/app-name": "foo1", +// }, +// targetPort: intstr.IntOrString{ +// Type: intstr.Int, +// IntVal: int32(4041), +// }, +// }, +// expectedSelector: map[string]string{ +// common.SparkAppNameLabel: "foo1", +// common.SparkRoleLabel: common.SparkDriverRole, +// }, +// expectError: false, +// }, +// { +// name: "service with default port", +// app: app2, +// expectedService: SparkService{ +// serviceName: fmt.Sprintf("%s-ui-svc", app2.GetName()), +// serviceType: apiv1.ServiceTypeClusterIP, +// servicePortName: defaultPortName, +// servicePort: int32(defaultPort), +// serviceLabels: map[string]string{ +// 
"sparkoperator.k8s.io/app-name": "foo2", +// }, +// }, +// expectedSelector: map[string]string{ +// common.SparkAppNameLabel: "foo2", +// common.SparkRoleLabel: common.SparkDriverRole, +// }, +// expectError: false, +// }, +// { +// name: "service with custom serviceport and serviceport and target port are different", +// app: app4, +// expectedService: SparkService{ +// serviceName: fmt.Sprintf("%s-ui-svc", app4.GetName()), +// serviceType: apiv1.ServiceTypeClusterIP, +// servicePortName: defaultPortName, +// servicePort: 80, +// serviceLabels: map[string]string{ +// "sparkoperator.k8s.io/app-name": "foo4", +// }, +// targetPort: intstr.IntOrString{ +// Type: intstr.Int, +// IntVal: int32(4041), +// }, +// }, +// expectedSelector: map[string]string{ +// common.SparkAppNameLabel: "foo4", +// common.SparkRoleLabel: common.SparkDriverRole, +// }, +// expectError: false, +// }, +// { +// name: "service with custom servicetype", +// app: app5, +// expectedService: SparkService{ +// serviceName: fmt.Sprintf("%s-ui-svc", app5.GetName()), +// serviceType: apiv1.ServiceTypeNodePort, +// servicePortName: defaultPortName, +// servicePort: int32(defaultPort), +// serviceLabels: map[string]string{ +// "sparkoperator.k8s.io/app-name": "foo5", +// }, +// }, +// expectedSelector: map[string]string{ +// common.SparkAppNameLabel: "foo5", +// common.SparkRoleLabel: common.SparkDriverRole, +// }, +// expectError: false, +// }, +// { +// name: "service with custom serviceportname", +// app: app6, +// expectedService: SparkService{ +// serviceName: fmt.Sprintf("%s-ui-svc", app6.GetName()), +// serviceType: apiv1.ServiceTypeClusterIP, +// servicePortName: "http-spark-test", +// servicePort: int32(80), +// serviceLabels: map[string]string{ +// "sparkoperator.k8s.io/app-name": "foo6", +// }, +// }, +// expectedSelector: map[string]string{ +// common.SparkAppNameLabel: "foo6", +// common.SparkRoleLabel: common.SparkDriverRole, +// }, +// expectError: false, +// }, +// { +// name: "service with annotation", +// app: app7, +// expectedService: SparkService{ +// serviceName: fmt.Sprintf("%s-ui-svc", app7.GetName()), +// serviceType: apiv1.ServiceTypeClusterIP, +// servicePortName: defaultPortName, +// servicePort: defaultPort, +// serviceAnnotations: map[string]string{ +// "key": "value", +// }, +// serviceLabels: map[string]string{ +// "sparkoperator.k8s.io/app-name": "foo7", +// }, +// targetPort: intstr.IntOrString{ +// Type: intstr.Int, +// IntVal: int32(4041), +// }, +// }, +// expectedSelector: map[string]string{ +// common.SparkAppNameLabel: "foo7", +// common.SparkRoleLabel: common.SparkDriverRole, +// }, +// expectError: false, +// }, +// { +// name: "service with custom labels", +// app: app8, +// expectedService: SparkService{ +// serviceName: fmt.Sprintf("%s-ui-svc", app8.GetName()), +// serviceType: apiv1.ServiceTypeClusterIP, +// servicePortName: defaultPortName, +// servicePort: defaultPort, +// serviceLabels: map[string]string{ +// "sparkoperator.k8s.io/app-name": "foo8", +// "key": "value", +// }, +// targetPort: intstr.IntOrString{ +// Type: intstr.Int, +// IntVal: int32(4041), +// }, +// }, +// expectedSelector: map[string]string{ +// common.SparkAppNameLabel: "foo8", +// common.SparkRoleLabel: common.SparkDriverRole, +// }, +// expectError: false, +// }, +// { +// name: "service with bad port configurations", +// app: app3, +// expectError: true, +// }, +// } +// for _, test := range testcases { +// testFn(test, t) +// } +// } + +// func TestCreateSparkUIIngress(t *testing.T) { +// type testcase 
struct { +// name string +// app *v1beta2.SparkApplication +// expectedIngress SparkIngress +// expectError bool +// } + +// testFn := func(test testcase, t *testing.T, ingressURLFormat string, ingressClassName string) { +// fakeClient := fake.NewSimpleClientset() +// sparkService, err := createSparkUIService(test.app, fakeClient) +// if err != nil { +// t.Fatal(err) +// } +// ingressURL, err := getDriverIngressURL(ingressURLFormat, test.app.Name, test.app.Namespace) +// if err != nil { +// t.Fatal(err) +// } +// sparkIngress, err := createSparkUIIngress(test.app, *sparkService, ingressURL, ingressClassName, fakeClient) +// if err != nil { +// if test.expectError { +// return +// } +// t.Fatal(err) +// } +// if sparkIngress.ingressName != test.expectedIngress.ingressName { +// t.Errorf("Ingress name wanted %s got %s", test.expectedIngress.ingressName, sparkIngress.ingressName) +// } +// if sparkIngress.ingressURL.String() != test.expectedIngress.ingressURL.String() { +// t.Errorf("Ingress URL wanted %s got %s", test.expectedIngress.ingressURL, sparkIngress.ingressURL) +// } +// ingress, err := fakeClient.NetworkingV1().Ingresses(test.app.Namespace). +// Get(context.TODO(), sparkIngress.ingressName, metav1.GetOptions{}) +// if err != nil { +// t.Fatal(err) +// } +// if len(ingress.Annotations) != 0 { +// for key, value := range ingress.Annotations { +// if test.expectedIngress.annotations[key] != ingress.Annotations[key] { +// t.Errorf("Expected annotation: %s=%s but found : %s=%s", key, value, key, ingress.Annotations[key]) +// } +// } +// } +// if len(ingress.Spec.TLS) != 0 { +// for _, ingressTls := range ingress.Spec.TLS { +// if ingressTls.Hosts[0] != test.expectedIngress.ingressTLS[0].Hosts[0] { +// t.Errorf("Expected ingressTls host: %s but found : %s", test.expectedIngress.ingressTLS[0].Hosts[0], ingressTls.Hosts[0]) +// } +// if ingressTls.SecretName != test.expectedIngress.ingressTLS[0].SecretName { +// t.Errorf("Expected ingressTls secretName: %s but found : %s", test.expectedIngress.ingressTLS[0].SecretName, ingressTls.SecretName) +// } +// } +// } +// if ingress.Labels[common.SparkAppNameLabel] != test.app.Name { +// t.Errorf("Ingress of app %s has the wrong labels", test.app.Name) +// } + +// if len(ingress.Spec.Rules) != 1 { +// t.Errorf("No Ingress rules found.") +// } +// ingressRule := ingress.Spec.Rules[0] +// // If we have a path, then the ingress adds capture groups +// if ingressRule.IngressRuleValue.HTTP.Paths[0].Path != "" && ingressRule.IngressRuleValue.HTTP.Paths[0].Path != "/" { +// test.expectedIngress.ingressURL.Path = test.expectedIngress.ingressURL.Path + "(/|$)(.*)" +// } +// if ingressRule.Host+ingressRule.IngressRuleValue.HTTP.Paths[0].Path != test.expectedIngress.ingressURL.Host+test.expectedIngress.ingressURL.Path { + +// t.Errorf("Ingress of app %s has the wrong host %s", ingressRule.Host+ingressRule.IngressRuleValue.HTTP.Paths[0].Path, test.expectedIngress.ingressURL.Host+test.expectedIngress.ingressURL.Path) +// } + +// if len(ingressRule.IngressRuleValue.HTTP.Paths) != 1 { +// t.Errorf("No Ingress paths found.") +// } +// ingressPath := ingressRule.IngressRuleValue.HTTP.Paths[0] +// if ingressPath.Backend.Service.Name != sparkService.serviceName { +// t.Errorf("Service name wanted %s got %s", sparkService.serviceName, ingressPath.Backend.Service.Name) +// } +// if *ingressPath.PathType != networkingv1.PathTypeImplementationSpecific { +// t.Errorf("PathType wanted %s got %s", networkingv1.PathTypeImplementationSpecific, *ingressPath.PathType) +// } +// 
if ingressPath.Backend.Service.Port.Number != sparkService.servicePort { +// t.Errorf("Service port wanted %v got %v", sparkService.servicePort, ingressPath.Backend.Service.Port.Number) +// } +// } + +// var appPort int32 = 80 +// app1 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo", +// Namespace: "default", +// UID: "foo-123", +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-1", +// DriverInfo: v1beta2.DriverInfo{ +// WebUIServiceName: "blah-service", +// }, +// }, +// } +// app2 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// SparkUIOptions: &v1beta2.SparkUIConfiguration{ +// ServicePort: &appPort, +// IngressAnnotations: map[string]string{ +// "kubernetes.io/ingress.class": "nginx", +// "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", +// }, +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-1", +// DriverInfo: v1beta2.DriverInfo{ +// WebUIServiceName: "blah-service", +// }, +// }, +// } +// app3 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// SparkUIOptions: &v1beta2.SparkUIConfiguration{ +// ServicePort: &appPort, +// IngressAnnotations: map[string]string{ +// "kubernetes.io/ingress.class": "nginx", +// "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", +// }, +// IngressTLS: []networkingv1.IngressTLS{ +// {Hosts: []string{"host1", "host2"}, SecretName: "secret"}, +// }, +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-1", +// DriverInfo: v1beta2.DriverInfo{ +// WebUIServiceName: "blah-service", +// }, +// }, +// } +// app4 := &v1beta2.SparkApplication{ +// ObjectMeta: metav1.ObjectMeta{ +// Name: "foo", +// Namespace: "default", +// UID: "foo-123", +// }, +// Spec: v1beta2.SparkApplicationSpec{ +// SparkUIOptions: &v1beta2.SparkUIConfiguration{ +// ServicePort: &appPort, +// IngressAnnotations: map[string]string{ +// "kubernetes.io/ingress.class": "nginx", +// }, +// IngressTLS: []networkingv1.IngressTLS{ +// {Hosts: []string{"host1", "host2"}, SecretName: ""}, +// }, +// }, +// }, +// Status: v1beta2.SparkApplicationStatus{ +// SparkApplicationID: "foo-1", +// DriverInfo: v1beta2.DriverInfo{ +// WebUIServiceName: "blah-service", +// }, +// }, +// } + +// testcases := []testcase{ +// { +// name: "simple ingress object", +// app: app1, +// expectedIngress: SparkIngress{ +// ingressName: fmt.Sprintf("%s-ui-ingress", app1.GetName()), +// ingressURL: parseURLAndAssertError(app1.GetName()+".ingress.clusterName.com", t), +// }, +// expectError: false, +// }, +// { +// name: "ingress with annotations and without tls configuration", +// app: app2, +// expectedIngress: SparkIngress{ +// ingressName: fmt.Sprintf("%s-ui-ingress", app2.GetName()), +// ingressURL: parseURLAndAssertError(app2.GetName()+".ingress.clusterName.com", t), +// annotations: map[string]string{ +// "kubernetes.io/ingress.class": "nginx", +// "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", +// }, +// }, +// expectError: false, +// }, +// { +// name: "ingress with annotations and tls configuration", +// app: app3, +// expectedIngress: SparkIngress{ +// ingressName: fmt.Sprintf("%s-ui-ingress", app3.GetName()), +// ingressURL: parseURLAndAssertError(app3.GetName()+".ingress.clusterName.com", t), +// 
annotations: map[string]string{ +// "kubernetes.io/ingress.class": "nginx", +// "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", +// }, +// ingressTLS: []networkingv1.IngressTLS{ +// {Hosts: []string{"host1", "host2"}, SecretName: "secret"}, +// }, +// }, +// expectError: false, +// }, +// { +// name: "ingress with incomplete list of annotations", +// app: app4, +// expectedIngress: SparkIngress{ +// ingressName: fmt.Sprintf("%s-ui-ingress", app4.GetName()), +// ingressURL: parseURLAndAssertError(app3.GetName()+".ingress.clusterName.com", t), +// annotations: map[string]string{ +// "kubernetes.io/ingress.class": "nginx", +// "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", +// }, +// ingressTLS: []networkingv1.IngressTLS{ +// {Hosts: []string{"host1", "host2"}, SecretName: ""}, +// }, +// }, +// expectError: true, +// }, +// } + +// for _, test := range testcases { +// testFn(test, t, "{{$appName}}.ingress.clusterName.com", "") +// } + +// testcases = []testcase{ +// { +// name: "simple ingress object with ingress URL Format with path", +// app: app1, +// expectedIngress: SparkIngress{ +// ingressName: fmt.Sprintf("%s-ui-ingress", app1.GetName()), +// ingressURL: parseURLAndAssertError("ingress.clusterName.com/"+app1.GetNamespace()+"/"+app1.GetName(), t), +// annotations: map[string]string{ +// "nginx.ingress.kubernetes.io/rewrite-target": "/$2", +// }, +// }, +// expectError: false, +// }, +// } + +// for _, test := range testcases { +// testFn(test, t, "ingress.clusterName.com/{{$appNamespace}}/{{$appName}}", "") +// } + +// testcases = []testcase{ +// { +// name: "simple ingress object with ingressClassName set", +// app: app1, +// expectedIngress: SparkIngress{ +// ingressName: fmt.Sprintf("%s-ui-ingress", app1.GetName()), +// ingressURL: parseURLAndAssertError(app1.GetName()+".ingress.clusterName.com", t), +// ingressClassName: "nginx", +// }, +// expectError: false, +// }, +// } +// for _, test := range testcases { +// testFn(test, t, "{{$appName}}.ingress.clusterName.com", "nginx") +// } +// } + +// func parseURLAndAssertError(testURL string, t *testing.T) *url.URL { +// fallbackURL, _ := url.Parse("http://example.com") +// parsedURL, err := url.Parse(testURL) +// if err != nil { +// t.Errorf("failed to parse the url: %s", testURL) +// return fallbackURL +// } +// if parsedURL.Scheme == "" { +// //url does not contain any scheme, adding http:// so url.Parse can function correctly +// parsedURL, err = url.Parse("http://" + testURL) +// if err != nil { +// t.Errorf("failed to parse the url: %s", testURL) +// return fallbackURL +// } +// } +// return parsedURL +// } diff --git a/internal/controller/validatingwebhookconfiguration/controller.go b/internal/controller/validatingwebhookconfiguration/controller.go new file mode 100644 index 0000000000..7c641da06c --- /dev/null +++ b/internal/controller/validatingwebhookconfiguration/controller.go @@ -0,0 +1,100 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package validatingwebhookconfiguration
+
+import (
+	"context"
+	"fmt"
+
+	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
+	"k8s.io/apimachinery/pkg/types"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/builder"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+	"github.com/kubeflow/spark-operator/pkg/certificate"
+)
+
+var (
+	logger = ctrl.Log.WithName("")
+)
+
+// Reconciler reconciles a ValidatingWebhookConfiguration object.
+type Reconciler struct {
+	client       client.Client
+	certProvider *certificate.Provider
+	name         string
+}
+
+// Reconciler implements the reconcile.Reconciler interface.
+var _ reconcile.Reconciler = &Reconciler{}
+
+// NewReconciler creates a new Reconciler instance.
+func NewReconciler(client client.Client, certProvider *certificate.Provider, name string) *Reconciler {
+	return &Reconciler{
+		client:       client,
+		certProvider: certProvider,
+		name:         name,
+	}
+}
+
+func (r *Reconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error {
+	return ctrl.NewControllerManagedBy(mgr).
+		Named("validating-webhook-configuration-controller").
+		Watches(
+			&admissionregistrationv1.ValidatingWebhookConfiguration{},
+			NewEventHandler(),
+			builder.WithPredicates(
+				NewEventFilter(r.name),
+			),
+		).
+		WithOptions(options).
+		Complete(r)
+}
+
+// Reconcile implements reconcile.Reconciler.
+func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	logger.Info("Updating CA bundle of ValidatingWebhookConfiguration", "name", req.Name)
+	if err := r.updateValidatingWebhookConfiguration(ctx, req.NamespacedName); err != nil {
+		// Requeue and retry on failure.
+		return ctrl.Result{Requeue: true}, nil
+	}
+	return ctrl.Result{}, nil
+}
+
+func (r *Reconciler) updateValidatingWebhookConfiguration(ctx context.Context, key types.NamespacedName) error {
+	webhook := &admissionregistrationv1.ValidatingWebhookConfiguration{}
+	if err := r.client.Get(ctx, key, webhook); err != nil {
+		return fmt.Errorf("failed to get validating webhook configuration %v: %v", key, err)
+	}
+
+	caBundle, err := r.certProvider.CACert()
+	if err != nil {
+		return fmt.Errorf("failed to get CA certificate: %v", err)
+	}
+
+	newWebhook := webhook.DeepCopy()
+	for i := range newWebhook.Webhooks {
+		newWebhook.Webhooks[i].ClientConfig.CABundle = caBundle
+	}
+	if err := r.client.Update(ctx, newWebhook); err != nil {
+		return fmt.Errorf("failed to update validating webhook configuration %v: %v", key, err)
+	}
+
+	return nil
+}
diff --git a/internal/controller/validatingwebhookconfiguration/event_filter.go b/internal/controller/validatingwebhookconfiguration/event_filter.go
new file mode 100644
index 0000000000..d78076e4b7
--- /dev/null
+++ b/internal/controller/validatingwebhookconfiguration/event_filter.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package validatingwebhookconfiguration + +import ( + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +// EventFilter filters events for the ValidatingWebhookConfiguration. +type EventFilter struct { + name string +} + +func NewEventFilter(name string) *EventFilter { + return &EventFilter{ + name: name, + } +} + +// ValidatingWebhookConfigurationEventFilter implements predicate.Predicate interface. +var _ predicate.Predicate = &EventFilter{} + +// Create implements predicate.Predicate. +func (f *EventFilter) Create(e event.CreateEvent) bool { + return e.Object.GetName() == f.name +} + +// Update implements predicate.Predicate. +func (f *EventFilter) Update(e event.UpdateEvent) bool { + return e.ObjectOld.GetName() == f.name +} + +// Delete implements predicate.Predicate. +func (f *EventFilter) Delete(event.DeleteEvent) bool { + return false +} + +// Generic implements predicate.Predicate. +func (f *EventFilter) Generic(event.GenericEvent) bool { + return false +} diff --git a/internal/controller/validatingwebhookconfiguration/event_handler.go b/internal/controller/validatingwebhookconfiguration/event_handler.go new file mode 100644 index 0000000000..a35a473078 --- /dev/null +++ b/internal/controller/validatingwebhookconfiguration/event_handler.go @@ -0,0 +1,102 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validatingwebhookconfiguration + +import ( + "context" + + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" +) + +// EventHandler handles ValidatingWebhookConfiguration events. +type EventHandler struct{} + +var _ handler.EventHandler = &EventHandler{} + +// NewEventHandler creates a new ValidatingWebhookConfigurationEventHandler instance. +func NewEventHandler() *EventHandler { + return &EventHandler{} +} + +// Create implements handler.EventHandler. +func (h *EventHandler) Create(ctx context.Context, event event.CreateEvent, queue workqueue.RateLimitingInterface) { + vwc, ok := event.Object.(*admissionregistrationv1.ValidatingWebhookConfiguration) + if !ok { + return + } + logger.Info("ValidatingWebhookConfiguration created", "name", vwc.Name) + key := types.NamespacedName{ + Namespace: vwc.Namespace, + Name: vwc.Name, + } + queue.AddRateLimited(ctrl.Request{NamespacedName: key}) +} + +// Update implements handler.EventHandler. 
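+// The handler below funnels every event that survives the name filter into
+// the same work-queue key, so bursts of Create/Update/Delete events for the
+// operator's configuration collapse into rate-limited reconciles of a single
+// object. A hedged sketch of the resulting request (the configuration name is
+// illustrative, not part of this patch):
+//
+//	// event for ValidatingWebhookConfiguration "spark-operator-webhook"
+//	req := ctrl.Request{NamespacedName: types.NamespacedName{
+//		Name: "spark-operator-webhook", // cluster-scoped, so Namespace stays empty
+//	}}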
+func (h *EventHandler) Update(ctx context.Context, event event.UpdateEvent, queue workqueue.RateLimitingInterface) { + oldWebhook, ok := event.ObjectOld.(*admissionregistrationv1.ValidatingWebhookConfiguration) + if !ok { + return + } + newWebhook, ok := event.ObjectNew.(*admissionregistrationv1.ValidatingWebhookConfiguration) + if !ok { + return + } + if newWebhook.ResourceVersion == oldWebhook.ResourceVersion { + return + } + + logger.Info("ValidatingWebhookConfiguration updated", "name", newWebhook.Name, "namespace", newWebhook.Namespace) + key := types.NamespacedName{ + Namespace: newWebhook.Namespace, + Name: newWebhook.Name, + } + queue.AddRateLimited(ctrl.Request{NamespacedName: key}) +} + +// Delete implements handler.EventHandler. +func (h *EventHandler) Delete(ctx context.Context, event event.DeleteEvent, queue workqueue.RateLimitingInterface) { + vwc, ok := event.Object.(*admissionregistrationv1.ValidatingWebhookConfiguration) + if !ok { + return + } + logger.Info("ValidatingWebhookConfiguration deleted", "name", vwc.Name, "namespace", vwc.Namespace) + key := types.NamespacedName{ + Namespace: vwc.Namespace, + Name: vwc.Name, + } + queue.AddRateLimited(ctrl.Request{NamespacedName: key}) +} + +// Generic implements handler.EventHandler. +func (h *EventHandler) Generic(ctx context.Context, event event.GenericEvent, queue workqueue.RateLimitingInterface) { + vwc, ok := event.Object.(*admissionregistrationv1.ValidatingWebhookConfiguration) + if !ok { + return + } + logger.Info("ValidatingWebhookConfiguration generic event", "name", vwc.Name, "namespace", vwc.Namespace) + key := types.NamespacedName{ + Namespace: vwc.Namespace, + Name: vwc.Name, + } + queue.AddRateLimited(ctrl.Request{NamespacedName: key}) +} diff --git a/internal/metrics/metrcis.go b/internal/metrics/metrcis.go new file mode 100644 index 0000000000..8250107550 --- /dev/null +++ b/internal/metrics/metrcis.go @@ -0,0 +1,23 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import "sigs.k8s.io/controller-runtime/pkg/log" + +var ( + logger = log.Log.WithName("") +) diff --git a/internal/metrics/sparkapplication_metrics.go b/internal/metrics/sparkapplication_metrics.go new file mode 100644 index 0000000000..5a52a55d62 --- /dev/null +++ b/internal/metrics/sparkapplication_metrics.go @@ -0,0 +1,386 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package metrics + +import ( + "fmt" + "time" + + "github.com/prometheus/client_golang/prometheus" + "sigs.k8s.io/controller-runtime/pkg/metrics" + + "github.com/kubeflow/spark-operator/api/v1beta2" + "github.com/kubeflow/spark-operator/pkg/common" + "github.com/kubeflow/spark-operator/pkg/util" +) + +type SparkApplicationMetrics struct { + prefix string + labels []string + jobStartLatencyBuckets []float64 + + count *prometheus.CounterVec + submitCount *prometheus.CounterVec + failedSubmissionCount *prometheus.CounterVec + runningCount *prometheus.GaugeVec + successCount *prometheus.CounterVec + failureCount *prometheus.CounterVec + + successExecutionTimeSeconds *prometheus.SummaryVec + failureExecutionTimeSeconds *prometheus.SummaryVec + + startLatencySeconds *prometheus.SummaryVec + startLatencySecondsHistogram *prometheus.HistogramVec +} + +func NewSparkApplicationMetrics(prefix string, labels []string, jobStartLatencyBuckets []float64) *SparkApplicationMetrics { + validLabels := make([]string, 0, len(labels)) + for _, label := range labels { + validLabel := util.CreateValidMetricNameLabel("", label) + validLabels = append(validLabels, validLabel) + } + + return &SparkApplicationMetrics{ + prefix: prefix, + labels: validLabels, + jobStartLatencyBuckets: jobStartLatencyBuckets, + + count: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: util.CreateValidMetricNameLabel(prefix, common.MetricSparkApplicationCount), + Help: "Total number of SparkApplication", + }, + validLabels, + ), + submitCount: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: util.CreateValidMetricNameLabel(prefix, common.MetricSparkApplicationSubmitCount), + Help: "Total number of submitted SparkApplication", + }, + validLabels, + ), + failedSubmissionCount: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: util.CreateValidMetricNameLabel(prefix, common.MetricSparkApplicationFailedSubmissionCount), + Help: "Total number of failed SparkApplication submission", + }, + validLabels, + ), + runningCount: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: util.CreateValidMetricNameLabel(prefix, common.MetricSparkApplicationRunningCount), + Help: "Total number of running SparkApplication", + }, + validLabels, + ), + successCount: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: util.CreateValidMetricNameLabel(prefix, common.MetricSparkApplicationSuccessCount), + Help: "Total number of successful SparkApplication", + }, + validLabels, + ), + failureCount: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: util.CreateValidMetricNameLabel(prefix, common.MetricSparkApplicationFailureCount), + Help: "Total number of failed SparkApplication", + }, + validLabels, + ), + successExecutionTimeSeconds: prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: util.CreateValidMetricNameLabel(prefix, common.MetricSparkApplicationSuccessExecutionTimeSeconds), + }, + validLabels, + ), + failureExecutionTimeSeconds: prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: util.CreateValidMetricNameLabel(prefix, common.MetricSparkApplicationFailureExecutionTimeSeconds), + }, + validLabels, + ), + startLatencySeconds: prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: util.CreateValidMetricNameLabel(prefix, common.MetricSparkApplicationStartLatencySeconds), + Help: "Spark App Start Latency via the Operator", + }, + validLabels, + ), + startLatencySecondsHistogram: prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: util.CreateValidMetricNameLabel(prefix, 
common.MetricSparkApplicationStartLatencySecondsHistogram), + Help: "Spark App Start Latency counts in buckets via the Operator", + Buckets: jobStartLatencyBuckets, + }, + validLabels, + ), + } +} + +func (m *SparkApplicationMetrics) Register() { + if err := metrics.Registry.Register(m.count); err != nil { + logger.Error(err, "Failed to register spark application metric", "name", common.MetricSparkApplicationCount) + } + if err := metrics.Registry.Register(m.submitCount); err != nil { + logger.Error(err, "Failed to register spark application metric", "name", common.MetricSparkApplicationSubmitCount) + } + if err := metrics.Registry.Register(m.failedSubmissionCount); err != nil { + logger.Error(err, "Failed to register spark application metric", "name", common.MetricSparkApplicationFailedSubmissionCount) + } + if err := metrics.Registry.Register(m.runningCount); err != nil { + logger.Error(err, "Failed to register spark application metric", "name", common.MetricSparkApplicationRunningCount) + } + if err := metrics.Registry.Register(m.successCount); err != nil { + logger.Error(err, "Failed to register spark application metric", "name", common.MetricSparkApplicationSuccessCount) + } + if err := metrics.Registry.Register(m.failureCount); err != nil { + logger.Error(err, "Failed to register spark application metric", "name", common.MetricSparkApplicationFailureCount) + } + if err := metrics.Registry.Register(m.successExecutionTimeSeconds); err != nil { + logger.Error(err, "Failed to register spark application metric", "name", common.MetricSparkApplicationSuccessExecutionTimeSeconds) + } + if err := metrics.Registry.Register(m.failureExecutionTimeSeconds); err != nil { + logger.Error(err, "Failed to register spark application metric", "name", common.MetricSparkApplicationFailureExecutionTimeSeconds) + } + if err := metrics.Registry.Register(m.startLatencySeconds); err != nil { + logger.Error(err, "Failed to register spark application metric", "name", common.MetricSparkApplicationStartLatencySeconds) + } + if err := metrics.Registry.Register(m.startLatencySecondsHistogram); err != nil { + logger.Error(err, "Failed to register spark application metric", "name", common.MetricSparkApplicationStartLatencySecondsHistogram) + } +} + +func (m *SparkApplicationMetrics) HandleSparkApplicationCreate(app *v1beta2.SparkApplication) { + state := util.GetApplicationState(app) + + switch state { + case v1beta2.ApplicationStateNew: + m.incCount(app) + case v1beta2.ApplicationStateSubmitted: + m.incSubmitCount(app) + case v1beta2.ApplicationStateFailedSubmission: + m.incFailedSubmissionCount(app) + case v1beta2.ApplicationStateRunning: + m.incRunningCount(app) + case v1beta2.ApplicationStateFailed: + m.incFailureCount(app) + case v1beta2.ApplicationStateCompleted: + m.incSuccessCount(app) + } +} + +func (m *SparkApplicationMetrics) HandleSparkApplicationUpdate(oldApp *v1beta2.SparkApplication, newApp *v1beta2.SparkApplication) { + oldState := util.GetApplicationState(oldApp) + newState := util.GetApplicationState(newApp) + if newState == oldState { + return + } + + switch oldState { + case v1beta2.ApplicationStateRunning: + m.decRunningCount(oldApp) + } + + switch newState { + case v1beta2.ApplicationStateNew: + m.incCount(newApp) + case v1beta2.ApplicationStateSubmitted: + m.incSubmitCount(newApp) + case v1beta2.ApplicationStateFailedSubmission: + m.incFailedSubmissionCount(newApp) + case v1beta2.ApplicationStateRunning: + m.incRunningCount(newApp) + m.observeStartLatencySeconds(newApp) + case 
v1beta2.ApplicationStateCompleted: + m.incSuccessCount(newApp) + m.observeSuccessExecutionTimeSeconds(newApp) + case v1beta2.ApplicationStateFailed: + m.incFailureCount(newApp) + m.observeFailureExecutionTimeSeconds(newApp) + } +} + +func (m *SparkApplicationMetrics) HandleSparkApplicationDelete(app *v1beta2.SparkApplication) { + state := util.GetApplicationState(app) + + switch state { + case v1beta2.ApplicationStateRunning: + m.decRunningCount(app) + } +} + +func (m *SparkApplicationMetrics) incCount(app *v1beta2.SparkApplication) { + labels := m.getMetricLabels(app) + counter, err := m.count.GetMetricWith(labels) + if err != nil { + logger.Error(err, "Failed to collect metric for SparkApplication", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationCount, "labels", labels) + return + } + + counter.Inc() + logger.V(1).Info("Increased spark application count", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationCount, "labels", labels) +} + +func (m *SparkApplicationMetrics) incSubmitCount(app *v1beta2.SparkApplication) { + labels := m.getMetricLabels(app) + counter, err := m.submitCount.GetMetricWith(labels) + if err != nil { + logger.Error(err, "Failed to collect metric for SparkApplication", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationSubmitCount, "labels", labels) + return + } + + counter.Inc() + logger.V(1).Info("Increased spark application submit count", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationSubmitCount, "labels", labels) +} + +func (m *SparkApplicationMetrics) incFailedSubmissionCount(app *v1beta2.SparkApplication) { + labels := m.getMetricLabels(app) + counter, err := m.failedSubmissionCount.GetMetricWith(labels) + if err != nil { + logger.Error(err, "Failed to collect metric for SparkApplication", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationFailedSubmissionCount, "labels", labels) + return + } + + counter.Inc() + logger.V(1).Info("Increased spark application failed submission count", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationFailedSubmissionCount, "labels", labels) +} + +func (m *SparkApplicationMetrics) incRunningCount(app *v1beta2.SparkApplication) { + labels := m.getMetricLabels(app) + gauge, err := m.runningCount.GetMetricWith(labels) + if err != nil { + logger.Error(err, "Failed to collect metric for SparkApplication", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationRunningCount, "labels", labels) + return + } + + gauge.Inc() + logger.V(1).Info("Increased spark application running count", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationRunningCount, "labels", labels) +} + +func (m *SparkApplicationMetrics) decRunningCount(app *v1beta2.SparkApplication) { + labels := m.getMetricLabels(app) + gauge, err := m.runningCount.GetMetricWith(labels) + if err != nil { + logger.Error(err, "Failed to collect metric for SparkApplication", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationRunningCount, "labels", labels) + return + } + + gauge.Dec() + logger.V(1).Info("Decreased SparkApplication running count", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationRunningCount, "labels", labels) +} + +func (m *SparkApplicationMetrics) incSuccessCount(app *v1beta2.SparkApplication) { + labels := m.getMetricLabels(app) 
+	counter, err := m.successCount.GetMetricWith(labels)
+	if err != nil {
+		logger.Error(err, "Failed to collect metric for SparkApplication", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationSuccessCount, "labels", labels)
+		return
+	}
+
+	counter.Inc()
+	logger.V(1).Info("Increased spark application success count", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationSuccessCount, "labels", labels)
+}
+
+func (m *SparkApplicationMetrics) incFailureCount(app *v1beta2.SparkApplication) {
+	labels := m.getMetricLabels(app)
+	counter, err := m.failureCount.GetMetricWith(labels)
+	if err != nil {
+		logger.Error(err, "Failed to collect metric for SparkApplication", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationFailureCount, "labels", labels)
+		return
+	}
+
+	counter.Inc()
+	logger.V(1).Info("Increased spark application failure count", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationFailureCount, "labels", labels)
+}
+
+func (m *SparkApplicationMetrics) observeSuccessExecutionTimeSeconds(app *v1beta2.SparkApplication) {
+	labels := m.getMetricLabels(app)
+	observer, err := m.successExecutionTimeSeconds.GetMetricWith(labels)
+	if err != nil {
+		logger.Error(err, "Failed to collect metric for SparkApplication", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationSuccessExecutionTimeSeconds, "labels", labels)
+		// Return early to avoid calling Observe on a nil observer.
+		return
+	}
+
+	if app.Status.LastSubmissionAttemptTime.IsZero() || app.Status.TerminationTime.IsZero() {
+		err := fmt.Errorf("last submission attempt time or termination time is zero")
+		logger.Error(err, "Failed to collect metric for SparkApplication", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationSuccessExecutionTimeSeconds, "labels", labels)
+		return
+	}
+	duration := app.Status.TerminationTime.Sub(app.Status.LastSubmissionAttemptTime.Time)
+	observer.Observe(duration.Seconds())
+	logger.V(1).Info("Observed spark application success execution time seconds", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationSuccessExecutionTimeSeconds, "labels", labels, "value", duration.Seconds())
+}
+
+func (m *SparkApplicationMetrics) observeFailureExecutionTimeSeconds(app *v1beta2.SparkApplication) {
+	labels := m.getMetricLabels(app)
+	observer, err := m.failureExecutionTimeSeconds.GetMetricWith(labels)
+	if err != nil {
+		logger.Error(err, "Failed to collect metric for SparkApplication", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationFailureExecutionTimeSeconds, "labels", labels)
+		// Return early to avoid calling Observe on a nil observer.
+		return
+	}
+
+	if app.Status.LastSubmissionAttemptTime.IsZero() || app.Status.TerminationTime.IsZero() {
+		err := fmt.Errorf("last submission attempt time or termination time is zero")
+		logger.Error(err, "Failed to collect metric for SparkApplication", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationFailureExecutionTimeSeconds, "labels", labels)
+		return
+	}
+	duration := app.Status.TerminationTime.Sub(app.Status.LastSubmissionAttemptTime.Time)
+	observer.Observe(duration.Seconds())
+	logger.V(1).Info("Observed spark application failure execution time seconds", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationFailureExecutionTimeSeconds, "labels", labels, "value", duration.Seconds())
+}
+
+func (m *SparkApplicationMetrics) observeStartLatencySeconds(app *v1beta2.SparkApplication) {
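+	// Start latency runs from the CR's creation to its first transition into
+	// the RUNNING state, and is recorded only for the first execution attempt
+	// so retries do not skew the distribution. Illustrative example (times
+	// invented): an application created at 12:00:00 whose driver reaches
+	// RUNNING at 12:00:42 is observed as 42s by both the summary and the
+	// histogram below.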
+	// Only export the spark application start latency seconds metric for the first time.
+	if app.Status.ExecutionAttempts != 1 {
+		return
+	}
+
+	labels := m.getMetricLabels(app)
+	latency := time.Since(app.CreationTimestamp.Time)
+	if observer, err := m.startLatencySeconds.GetMetricWith(labels); err != nil {
+		logger.Error(err, "Failed to collect metric for SparkApplication", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationStartLatencySeconds, "labels", labels)
+	} else {
+		observer.Observe(latency.Seconds())
+		logger.V(1).Info("Observed spark application start latency seconds", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationStartLatencySeconds, "labels", labels, "value", latency.Seconds())
+	}
+
+	if histogram, err := m.startLatencySecondsHistogram.GetMetricWith(labels); err != nil {
+		logger.Error(err, "Failed to collect metric for SparkApplication", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationStartLatencySecondsHistogram, "labels", labels)
+	} else {
+		histogram.Observe(latency.Seconds())
+		logger.V(1).Info("Observed spark application start latency seconds", "name", app.Name, "namespace", app.Namespace, "metric", common.MetricSparkApplicationStartLatencySecondsHistogram, "labels", labels, "value", latency.Seconds())
+	}
+}
+
+func (m *SparkApplicationMetrics) getMetricLabels(app *v1beta2.SparkApplication) map[string]string {
+	// Convert the application's labels to valid metric label names.
+	validLabels := make(map[string]string)
+	for key, val := range app.Labels {
+		newKey := util.CreateValidMetricNameLabel(m.prefix, key)
+		validLabels[newKey] = val
+	}
+
+	metricLabels := make(map[string]string)
+	for _, label := range m.labels {
+		if _, ok := validLabels[label]; ok {
+			metricLabels[label] = validLabels[label]
+		} else if label == "namespace" {
+			metricLabels[label] = app.Namespace
+		} else {
+			metricLabels[label] = "Unknown"
+		}
+	}
+	return metricLabels
+}
diff --git a/internal/metrics/sparkpod_metrics.go b/internal/metrics/sparkpod_metrics.go
new file mode 100644
index 0000000000..91edc96b96
--- /dev/null
+++ b/internal/metrics/sparkpod_metrics.go
@@ -0,0 +1,191 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+	corev1 "k8s.io/api/core/v1"
+	"sigs.k8s.io/controller-runtime/pkg/metrics"
+
+	"github.com/kubeflow/spark-operator/api/v1beta2"
+	"github.com/kubeflow/spark-operator/pkg/common"
+	"github.com/kubeflow/spark-operator/pkg/util"
+)
+
+type SparkExecutorMetrics struct {
+	prefix string
+	labels []string
+
+	runningCount *prometheus.GaugeVec
+	successCount *prometheus.CounterVec
+	failureCount *prometheus.CounterVec
+}
+
+func NewSparkExecutorMetrics(prefix string, labels []string) *SparkExecutorMetrics {
+	validLabels := make([]string, 0, len(labels))
+	for _, label := range labels {
+		validLabel := util.CreateValidMetricNameLabel("", label)
+		validLabels = append(validLabels, validLabel)
+	}
+
+	return &SparkExecutorMetrics{
+		prefix: prefix,
+		// Store the sanitized label names; getMetricLabels matches against these.
+		labels: validLabels,
+
+		runningCount: prometheus.NewGaugeVec(
+			prometheus.GaugeOpts{
+				Name: util.CreateValidMetricNameLabel(prefix, common.MetricSparkExecutorRunningCount),
+				Help: "Total number of running Spark executors",
+			},
+			validLabels,
+		),
+		successCount: prometheus.NewCounterVec(
+			prometheus.CounterOpts{
+				Name: util.CreateValidMetricNameLabel(prefix, common.MetricSparkExecutorSuccessCount),
+				Help: "Total number of successful Spark executors",
+			},
+			validLabels,
+		),
+		failureCount: prometheus.NewCounterVec(
+			prometheus.CounterOpts{
+				Name: util.CreateValidMetricNameLabel(prefix, common.MetricSparkExecutorFailureCount),
+				Help: "Total number of failed Spark executors",
+			},
+			validLabels,
+		),
+	}
+}
+
+func (m *SparkExecutorMetrics) Register() {
+	if err := metrics.Registry.Register(m.runningCount); err != nil {
+		logger.Error(err, "Failed to register spark executor metric", "name", common.MetricSparkExecutorRunningCount)
+	}
+	if err := metrics.Registry.Register(m.successCount); err != nil {
+		logger.Error(err, "Failed to register spark executor metric", "name", common.MetricSparkExecutorSuccessCount)
+	}
+	if err := metrics.Registry.Register(m.failureCount); err != nil {
+		logger.Error(err, "Failed to register spark executor metric", "name", common.MetricSparkExecutorFailureCount)
+	}
+}
+
+func (m *SparkExecutorMetrics) HandleSparkExecutorCreate(pod *corev1.Pod) {
+	state := util.GetExecutorState(pod)
+	switch state {
+	case v1beta2.ExecutorStateRunning:
+		m.incRunningCount(pod)
+	}
+}
+
+func (m *SparkExecutorMetrics) HandleSparkExecutorUpdate(oldPod, newPod *corev1.Pod) {
+	oldState := util.GetExecutorState(oldPod)
+	newState := util.GetExecutorState(newPod)
+	if newState == oldState {
+		return
+	}
+
+	switch oldState {
+	case v1beta2.ExecutorStateRunning:
+		m.decRunningCount(oldPod)
+	}
+
+	switch newState {
+	case v1beta2.ExecutorStateRunning:
+		m.incRunningCount(newPod)
+	case v1beta2.ExecutorStateCompleted:
+		m.incSuccessCount(newPod)
+	case v1beta2.ExecutorStateFailed:
+		m.incFailureCount(newPod)
+	}
+}
+
+func (m *SparkExecutorMetrics) HandleSparkExecutorDelete(pod *corev1.Pod) {
+	state := util.GetExecutorState(pod)
+
+	switch state {
+	case v1beta2.ExecutorStateRunning:
+		m.decRunningCount(pod)
+	}
+}
+
+func (m *SparkExecutorMetrics) incRunningCount(pod *corev1.Pod) {
+	labels := m.getMetricLabels(pod)
+	runningCount, err := m.runningCount.GetMetricWith(labels)
+	if err != nil {
+		logger.Error(err, "Failed to collect metric for Spark executor", "name", pod.Name, "namespace", pod.Namespace, "metric", common.MetricSparkExecutorRunningCount, "labels", labels)
+		return
+	}
+
+	runningCount.Inc()
+	logger.V(1).Info("Increased Spark executor running count", "name", pod.Name, "namespace", pod.Namespace, "metric", common.MetricSparkExecutorRunningCount, "labels", labels)
+}
+
+func (m *SparkExecutorMetrics) decRunningCount(pod *corev1.Pod) {
+	labels := m.getMetricLabels(pod)
+	runningCount, err := m.runningCount.GetMetricWith(labels)
+	if err != nil {
+		logger.Error(err, "Failed to collect metric for Spark executor", "name", pod.Name, "namespace", pod.Namespace, "metric", common.MetricSparkExecutorRunningCount, "labels", labels)
+		return
+	}
+
+	runningCount.Dec()
+	logger.V(1).Info("Decreased Spark executor running count", "name", pod.Name, "namespace", pod.Namespace, "metric", common.MetricSparkExecutorRunningCount, "labels", labels)
+}
+
+func (m *SparkExecutorMetrics) incSuccessCount(pod *corev1.Pod) {
+	labels := m.getMetricLabels(pod)
+	successCount, err := m.successCount.GetMetricWith(labels)
+	if err != nil {
+		logger.Error(err, "Failed to collect metric for Spark executor", "name", pod.Name, "namespace", pod.Namespace, "metric", common.MetricSparkExecutorSuccessCount, "labels", labels)
+		return
+	}
+
+	successCount.Inc()
+	logger.V(1).Info("Increased Spark executor success count", "name", pod.Name, "namespace", pod.Namespace, "metric", common.MetricSparkExecutorSuccessCount, "labels", labels)
+}
+
+func (m *SparkExecutorMetrics) incFailureCount(pod *corev1.Pod) {
+	labels := m.getMetricLabels(pod)
+	failureCount, err := m.failureCount.GetMetricWith(labels)
+	if err != nil {
+		logger.Error(err, "Failed to collect metric for Spark executor", "name", pod.Name, "namespace", pod.Namespace, "metric", common.MetricSparkExecutorFailureCount, "labels", labels)
+		return
+	}
+
+	failureCount.Inc()
+	logger.V(1).Info("Increased Spark executor failure count", "name", pod.Name, "namespace", pod.Namespace, "metric", common.MetricSparkExecutorFailureCount, "labels", labels)
+}
+
+func (m *SparkExecutorMetrics) getMetricLabels(pod *corev1.Pod) map[string]string {
+	// Convert the pod's labels to valid metric label names.
+	validLabels := make(map[string]string)
+	for key, val := range pod.Labels {
+		newKey := util.CreateValidMetricNameLabel("", key)
+		validLabels[newKey] = val
+	}
+
+	metricLabels := make(map[string]string)
+	for _, label := range m.labels {
+		if _, ok := validLabels[label]; ok {
+			metricLabels[label] = validLabels[label]
+		} else if label == "namespace" {
+			metricLabels[label] = pod.Namespace
+		} else {
+			metricLabels[label] = "Unknown"
+		}
+	}
+	return metricLabels
+}
diff --git a/internal/scheduler/registry.go b/internal/scheduler/registry.go
new file mode 100644
index 0000000000..caa4a939eb
--- /dev/null
+++ b/internal/scheduler/registry.go
@@ -0,0 +1,75 @@
+/*
+Copyright 2019 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package scheduler
+
+import (
+	"fmt"
+	"sync"
+)
+
+var registry *Registry
+
+// Registry is a registry of scheduler factories.
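+//
+// A hedged usage sketch (volcano.Factory is the concrete Factory introduced
+// later in this patch; restConfig and app are assumed to exist in the caller):
+//
+//	registry := scheduler.GetRegistry()
+//	_ = registry.Register(common.VolcanoSchedulerName, volcano.Factory)
+//	sched, err := registry.GetScheduler(common.VolcanoSchedulerName, &volcano.Config{RestConfig: restConfig})
+//	if err == nil && sched.ShouldSchedule(app) {
+//		_ = sched.Schedule(app)
+//	}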
+type Registry struct { + factories map[string]Factory + + mu sync.Mutex +} + +func GetRegistry() *Registry { + if registry == nil { + registry = &Registry{ + factories: make(map[string]Factory), + } + } + return registry +} + +func (r *Registry) GetScheduler(name string, config Config) (Interface, error) { + r.mu.Lock() + defer r.mu.Unlock() + + factory, exists := r.factories[name] + if !exists { + return nil, fmt.Errorf("scheduler %s not found", name) + } + + return factory(config) +} + +// RegisterScheduler registers a scheduler to the manager. +func (r *Registry) Register(name string, factory Factory) error { + r.mu.Lock() + defer r.mu.Unlock() + + if _, ok := r.factories[name]; ok { + return fmt.Errorf("scheduler %s is already registered", name) + } + + r.factories[name] = factory + logger.Info("Registered scheduler", "name", name) + return nil +} + +// GetRegisteredSchedulerNames gets the registered scheduler names. +func (r *Registry) GetRegisteredSchedulerNames() []string { + var names []string + for name := range r.factories { + names = append(names, name) + } + return names +} diff --git a/internal/scheduler/scheduler.go b/internal/scheduler/scheduler.go new file mode 100644 index 0000000000..2ab2f07dd3 --- /dev/null +++ b/internal/scheduler/scheduler.go @@ -0,0 +1,41 @@ +/* +Copyright 2019 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheduler + +import ( + "sigs.k8s.io/controller-runtime/pkg/log" + + "github.com/kubeflow/spark-operator/api/v1beta2" +) + +var ( + logger = log.Log.WithName("") +) + +// Interface defines the interface of a batch scheduler. +type Interface interface { + Name() string + ShouldSchedule(app *v1beta2.SparkApplication) bool + Schedule(app *v1beta2.SparkApplication) error + Cleanup(app *v1beta2.SparkApplication) error +} + +// Config defines the configuration of a batch scheduler. +type Config interface{} + +// Factory defines the factory of a batch scheduler. +type Factory func(config Config) (Interface, error) diff --git a/internal/scheduler/volcano/scheduler.go b/internal/scheduler/volcano/scheduler.go new file mode 100644 index 0000000000..d75912f1cb --- /dev/null +++ b/internal/scheduler/volcano/scheduler.go @@ -0,0 +1,229 @@ +/* +Copyright 2019 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package volcano + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/log" + "volcano.sh/apis/pkg/apis/scheduling/v1beta1" + volcanoclientset "volcano.sh/apis/pkg/client/clientset/versioned" + + "github.com/kubeflow/spark-operator/api/v1beta2" + "github.com/kubeflow/spark-operator/internal/scheduler" + "github.com/kubeflow/spark-operator/pkg/common" + "github.com/kubeflow/spark-operator/pkg/util" +) + +var ( + logger = log.Log.WithName("") +) + +// Scheduler is a batch scheduler that uses Volcano to schedule Spark applications. +type Scheduler struct { + extensionClient apiextensionsclientset.Interface + volcanoClient volcanoclientset.Interface +} + +// Scheduler implements scheduler.Interface. +var _ scheduler.Interface = &Scheduler{} + +// Config defines the configurations of Volcano scheduler. +type Config struct { + RestConfig *rest.Config +} + +// Config implements scheduler.Config. +var _ scheduler.Config = &Config{} + +// Factory creates a new VolcanoScheduler instance. +func Factory(config scheduler.Config) (scheduler.Interface, error) { + c, ok := config.(*Config) + if !ok { + return nil, fmt.Errorf("failed to get volcano scheduler config") + } + + extensionClient, err := apiextensionsclientset.NewForConfig(c.RestConfig) + if err != nil { + return nil, fmt.Errorf("failed to initialize k8s extension client: %v", err) + } + + if _, err := extensionClient.ApiextensionsV1().CustomResourceDefinitions().Get( + context.TODO(), + common.VolcanoPodGroupName, + metav1.GetOptions{}, + ); err != nil { + // For backward compatibility check v1beta1 API version of CustomResourceDefinitions + if _, err := extensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get( + context.TODO(), + common.VolcanoPodGroupName, + metav1.GetOptions{}, + ); err != nil { + return nil, fmt.Errorf("CRD PodGroup does not exist: %v", err) + } + } + + volcanoClient, err := volcanoclientset.NewForConfig(c.RestConfig) + if err != nil { + return nil, fmt.Errorf("failed to initialize volcano client: %v", err) + } + + scheduler := &Scheduler{ + extensionClient: extensionClient, + volcanoClient: volcanoClient, + } + return scheduler, nil +} + +// Name implements batchscheduler.Interface. +func (s *Scheduler) Name() string { + return common.VolcanoSchedulerName +} + +// ShouldSchedule implements batchscheduler.Interface. +func (s *Scheduler) ShouldSchedule(_ *v1beta2.SparkApplication) bool { + // There is no additional requirement for volcano scheduler + return true +} + +// Schedule implements batchscheduler.Interface. +func (s *Scheduler) Schedule(app *v1beta2.SparkApplication) error { + if app.ObjectMeta.Annotations == nil { + app.ObjectMeta.Annotations = make(map[string]string) + } + if app.Spec.Driver.Annotations == nil { + app.Spec.Driver.Annotations = make(map[string]string) + } + if app.Spec.Executor.Annotations == nil { + app.Spec.Executor.Annotations = make(map[string]string) + } + + switch app.Spec.Mode { + case v1beta2.DeployModeClient: + return s.syncPodGroupInClientMode(app) + case v1beta2.DeployModeCluster: + return s.syncPodGroupInClusterMode(app) + } + return nil +} + +// Cleanup implements batchscheduler.Interface. 
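+//
+// After Schedule succeeds in cluster mode, both the driver and executor pod
+// specs carry volcano's PodGroup annotation (v1beta1.KubeGroupNameAnnotationKey).
+// Expected shape for an application named "spark-pi", following getPodGroupName
+// in util.go (a sketch, not asserted by this patch):
+//
+//	app.Spec.Driver.Annotations["scheduling.k8s.io/group-name"]   // "spark-spark-pi-pg"
+//	app.Spec.Executor.Annotations["scheduling.k8s.io/group-name"] // "spark-spark-pi-pg"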
+func (s *Scheduler) Cleanup(app *v1beta2.SparkApplication) error { + name := getPodGroupName(app) + namespace := app.Namespace + if err := s.volcanoClient.SchedulingV1beta1().PodGroups(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}); err != nil && !errors.IsNotFound(err) { + return err + } + logger.Info("Deleted PodGroup", "name", name, "namespace", namespace) + return nil +} + +func (s *Scheduler) syncPodGroupInClientMode(app *v1beta2.SparkApplication) error { + // We only care about the executor pods in client mode + if _, ok := app.Spec.Executor.Annotations[v1beta1.KubeGroupNameAnnotationKey]; !ok { + totalResource := util.GetExecutorRequestResource(app) + + if app.Spec.BatchSchedulerOptions != nil && len(app.Spec.BatchSchedulerOptions.Resources) > 0 { + totalResource = app.Spec.BatchSchedulerOptions.Resources + } + if err := s.syncPodGroup(app, 1, totalResource); err == nil { + app.Spec.Executor.Annotations[v1beta1.KubeGroupNameAnnotationKey] = getPodGroupName(app) + } else { + return err + } + } + return nil +} + +func (s *Scheduler) syncPodGroupInClusterMode(app *v1beta2.SparkApplication) error { + // We need mark both driver and executor when submitting. + // In cluster mode, the initial size of PodGroup is set to 1 in order to schedule driver pod first. + if _, ok := app.Spec.Driver.Annotations[v1beta1.KubeGroupNameAnnotationKey]; !ok { + // Both driver and executor resource will be considered. + totalResource := util.SumResourceList([]corev1.ResourceList{util.GetDriverRequestResource(app), util.GetExecutorRequestResource(app)}) + if app.Spec.BatchSchedulerOptions != nil && len(app.Spec.BatchSchedulerOptions.Resources) > 0 { + totalResource = app.Spec.BatchSchedulerOptions.Resources + } + + if err := s.syncPodGroup(app, 1, totalResource); err != nil { + return err + } + app.Spec.Driver.Annotations[v1beta1.KubeGroupNameAnnotationKey] = getPodGroupName(app) + app.Spec.Executor.Annotations[v1beta1.KubeGroupNameAnnotationKey] = getPodGroupName(app) + } + return nil +} + +func (s *Scheduler) syncPodGroup(app *v1beta2.SparkApplication, size int32, minResource corev1.ResourceList) error { + var err error + var pg *v1beta1.PodGroup + name := getPodGroupName(app) + namespace := app.Namespace + + if pg, err = s.volcanoClient.SchedulingV1beta1().PodGroups(namespace).Get(context.TODO(), name, metav1.GetOptions{}); err != nil { + if !errors.IsNotFound(err) { + return err + } + + podGroup := v1beta1.PodGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(app, v1beta2.SchemeGroupVersion.WithKind("SparkApplication")), + }, + }, + Spec: v1beta1.PodGroupSpec{ + MinMember: size, + MinResources: &minResource, + }, + Status: v1beta1.PodGroupStatus{ + Phase: v1beta1.PodGroupPending, + }, + } + + if app.Spec.BatchSchedulerOptions != nil { + // Update pod group queue if it's specified in Spark Application + if app.Spec.BatchSchedulerOptions.Queue != nil { + podGroup.Spec.Queue = *app.Spec.BatchSchedulerOptions.Queue + } + // Update pod group priorityClassName if it's specified in Spark Application + if app.Spec.BatchSchedulerOptions.PriorityClassName != nil { + podGroup.Spec.PriorityClassName = *app.Spec.BatchSchedulerOptions.PriorityClassName + } + } + _, err = s.volcanoClient.SchedulingV1beta1().PodGroups(namespace).Create(context.TODO(), &podGroup, metav1.CreateOptions{}) + } else { + if pg.Spec.MinMember != size { + pg.Spec.MinMember = size + _, err = 
s.volcanoClient.SchedulingV1beta1().PodGroups(namespace).Update(context.TODO(), pg, metav1.UpdateOptions{}) + } + } + + if err != nil { + return fmt.Errorf("failed to sync PodGroup with error: %s. Abandon schedule pods via volcano", err) + } + logger.Info("Created PodGroup", "name", name, "namespace", namespace) + + return nil +} diff --git a/pkg/batchscheduler/volcano/volcano_scheduler_test.go b/internal/scheduler/volcano/scheduler_test.go similarity index 80% rename from pkg/batchscheduler/volcano/volcano_scheduler_test.go rename to internal/scheduler/volcano/scheduler_test.go index 1587ef1065..fb6b8caed5 100644 --- a/pkg/batchscheduler/volcano/volcano_scheduler_test.go +++ b/internal/scheduler/volcano/scheduler_test.go @@ -14,32 +14,32 @@ See the License for the specific language governing permissions and limitations under the License. */ -package volcano +package volcano_test import ( "testing" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + "github.com/kubeflow/spark-operator/api/v1beta2" + "github.com/kubeflow/spark-operator/pkg/util" ) func TestGetDriverResource(t *testing.T) { - var oneCore int32 = 1 oneCoreStr := "1" oneGB := "1024m" twoCoresStr := "2" - result := v1.ResourceList{} - result[v1.ResourceCPU] = resource.MustParse("1") - result[v1.ResourceMemory] = resource.MustParse("2048m") + result := corev1.ResourceList{} + result[corev1.ResourceCPU] = resource.MustParse("1") + result[corev1.ResourceMemory] = resource.MustParse("2048m") - testcases := []struct { + testCases := []struct { Name string app v1beta2.SparkApplication - result v1.ResourceList + result corev1.ResourceList }{ { Name: "Validate Core and memory", @@ -74,10 +74,10 @@ func TestGetDriverResource(t *testing.T) { }, } - for _, testcase := range testcases { - t.Run(testcase.Name, func(t *testing.T) { - r := getDriverRequestResource(&testcase.app) - for name, quantity := range testcase.result { + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + r := util.GetDriverRequestResource(&tc.app) + for name, quantity := range tc.result { if actual, ok := r[name]; !ok { t.Errorf("expecting driver pod to have resource %s, while get none", name) } else { @@ -99,14 +99,14 @@ func TestGetExecutorResource(t *testing.T) { twoCores := int32(2) instances := int32(2) - result := v1.ResourceList{} - result[v1.ResourceCPU] = resource.MustParse("2") - result[v1.ResourceMemory] = resource.MustParse("4096m") + result := corev1.ResourceList{} + result[corev1.ResourceCPU] = resource.MustParse("2") + result[corev1.ResourceMemory] = resource.MustParse("4096m") - testcases := []struct { + testCases := []struct { Name string app v1beta2.SparkApplication - result v1.ResourceList + result corev1.ResourceList }{ { Name: "Validate Core and memory", @@ -159,10 +159,10 @@ func TestGetExecutorResource(t *testing.T) { }, } - for _, testcase := range testcases { - t.Run(testcase.Name, func(t *testing.T) { - r := getExecutorRequestResource(&testcase.app) - for name, quantity := range testcase.result { + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + r := util.GetExecutorRequestResource(&tc.app) + for name, quantity := range tc.result { if actual, ok := r[name]; !ok { t.Errorf("expecting executor pod to have resource %s, while get none", name) } else { diff --git a/pkg/util/array_flag.go b/internal/scheduler/volcano/util.go similarity index 70% rename from pkg/util/array_flag.go rename 
to internal/scheduler/volcano/util.go index 730db6975e..75fe54c3b0 100644 --- a/pkg/util/array_flag.go +++ b/internal/scheduler/volcano/util.go @@ -1,5 +1,5 @@ /* -Copyright 2018 Google LLC +Copyright 2024 The Kubeflow authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,17 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. */ -package util +package volcano -import "fmt" +import ( + "fmt" -type ArrayFlags []string + "github.com/kubeflow/spark-operator/api/v1beta2" +) -func (a *ArrayFlags) String() string { - return fmt.Sprint(*a) -} - -func (a *ArrayFlags) Set(value string) error { - *a = append(*a, value) - return nil +func getPodGroupName(app *v1beta2.SparkApplication) string { + return fmt.Sprintf("spark-%s-pg", app.Name) } diff --git a/pkg/webhook/doc.go b/internal/webhook/doc.go similarity index 100% rename from pkg/webhook/doc.go rename to internal/webhook/doc.go diff --git a/internal/webhook/resourcequota.go b/internal/webhook/resourcequota.go new file mode 100644 index 0000000000..5e51098462 --- /dev/null +++ b/internal/webhook/resourcequota.go @@ -0,0 +1,259 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhook + +import ( + "fmt" + "math" + "regexp" + "strconv" + "strings" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + + "github.com/kubeflow/spark-operator/api/v1beta2" + "github.com/kubeflow/spark-operator/pkg/common" + "github.com/kubeflow/spark-operator/pkg/util" +) + +var javaStringSuffixes = map[string]int64{ + "b": 1, + "kb": 1 << 10, + "k": 1 << 10, + "mb": 1 << 20, + "m": 1 << 20, + "gb": 1 << 30, + "g": 1 << 30, + "tb": 1 << 40, + "t": 1 << 40, + "pb": 1 << 50, + "p": 1 << 50, +} + +var javaStringPattern = regexp.MustCompile(`([0-9]+)([a-z]+)?`) +var javaFractionStringPattern = regexp.MustCompile(`([0-9]+\.[0-9]+)([a-z]+)?`) + +// getResourceList returns the resource requests of the given SparkApplication. +func getResourceList(app *v1beta2.SparkApplication) (corev1.ResourceList, error) { + coresRequests, err := getCoresRequests(app) + if err != nil { + return nil, err + } + + coresLimits, err := getCoresLimits(app) + if err != nil { + return nil, err + } + + memoryRequests, err := getMemoryRequests(app) + if err != nil { + return nil, err + } + + memoryLimits, err := getMemoryLimits(app) + if err != nil { + return nil, err + } + + resourceList := util.SumResourceList([]corev1.ResourceList{ + coresRequests, + coresLimits, + memoryRequests, + memoryLimits, + }) + + return resourceList, nil +} + +func getCoresRequests(app *v1beta2.SparkApplication) (corev1.ResourceList, error) { + // Calculate driver cores requests. + driverCoresRequests, err := getSparkPodCoresRequests(&app.Spec.Driver.SparkPodSpec, 1) + if err != nil { + return nil, err + } + + // Calculate executor cores requests. 
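+	// For example (illustrative numbers): a driver with Cores=1 plus an
+	// executor spec with Cores=2 and Instances=3 contributes
+	// 1000m + 3*2000m = 7000m to both the cpu and requests.cpu entries,
+	// since getSparkPodCoresRequests below sets both keys.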
+ var replicas int64 = 1 + if app.Spec.Executor.Instances != nil { + replicas = int64(*app.Spec.Executor.Instances) + } + executorCoresRequests, err := getSparkPodCoresRequests(&app.Spec.Executor.SparkPodSpec, replicas) + if err != nil { + return nil, err + } + + return util.SumResourceList([]corev1.ResourceList{driverCoresRequests, executorCoresRequests}), nil +} + +func getSparkPodCoresRequests(podSpec *v1beta2.SparkPodSpec, replicas int64) (corev1.ResourceList, error) { + var milliCores int64 + if podSpec.Cores != nil { + milliCores = int64(*podSpec.Cores) * 1000 + } else { + milliCores = common.DefaultCPUMilliCores + } + resourceList := corev1.ResourceList{ + corev1.ResourceCPU: *resource.NewMilliQuantity(milliCores*replicas, resource.DecimalSI), + corev1.ResourceRequestsCPU: *resource.NewMilliQuantity(milliCores*replicas, resource.DecimalSI), + } + return resourceList, nil +} + +func getCoresLimits(app *v1beta2.SparkApplication) (corev1.ResourceList, error) { + // Calculate driver cores limits. + driverCoresLimits, err := getSparkPodCoresLimits(&app.Spec.Driver.SparkPodSpec, 1) + if err != nil { + return nil, err + } + + // Calculate executor cores requests. + var replicas int64 = 1 + if app.Spec.Executor.Instances != nil { + replicas = int64(*app.Spec.Executor.Instances) + } + executorCoresLimits, err := getSparkPodCoresLimits(&app.Spec.Executor.SparkPodSpec, replicas) + if err != nil { + return nil, err + } + + return util.SumResourceList([]corev1.ResourceList{driverCoresLimits, executorCoresLimits}), nil +} + +func getSparkPodCoresLimits(podSpec *v1beta2.SparkPodSpec, replicas int64) (corev1.ResourceList, error) { + var milliCores int64 + if podSpec.CoreLimit != nil { + quantity, err := resource.ParseQuantity(*podSpec.CoreLimit) + if err != nil { + return nil, err + } + milliCores = quantity.MilliValue() + } else if podSpec.Cores != nil { + milliCores = int64(*podSpec.Cores) * 1000 + } else { + milliCores = common.DefaultCPUMilliCores + } + resourceList := corev1.ResourceList{ + corev1.ResourceLimitsCPU: *resource.NewMilliQuantity(milliCores*replicas, resource.DecimalSI), + } + return resourceList, nil +} + +func getMemoryRequests(app *v1beta2.SparkApplication) (corev1.ResourceList, error) { + // If memory overhead factor is set, use it. Otherwise, use the default value. + var memoryOverheadFactor float64 + if app.Spec.MemoryOverheadFactor != nil { + parsed, err := strconv.ParseFloat(*app.Spec.MemoryOverheadFactor, 64) + if err != nil { + return nil, err + } + memoryOverheadFactor = parsed + } else if app.Spec.Type == v1beta2.SparkApplicationTypeJava { + memoryOverheadFactor = common.DefaultJVMMemoryOverheadFactor + } else { + memoryOverheadFactor = common.DefaultNonJVMMemoryOverheadFactor + } + + // Calculate driver pod memory requests. + driverResourceList, err := getSparkPodMemoryRequests(&app.Spec.Driver.SparkPodSpec, memoryOverheadFactor, 1) + if err != nil { + return nil, err + } + + // Calculate executor pod memory requests. 
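+	// For example (illustrative, assuming common.MinMemoryOverhead is the
+	// usual 384Mi Spark floor): an executor with Memory="1g", no explicit
+	// MemoryOverhead, and the default JVM factor 0.1 receives
+	// max(0.1*1024Mi, 384Mi) = 384Mi of overhead, so each replica adds
+	// 1408Mi to both the memory and requests.memory entries.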
+ var replicas int64 = 1 + if app.Spec.Executor.Instances != nil { + replicas = int64(*app.Spec.Executor.Instances) + } + executorResourceList, err := getSparkPodMemoryRequests(&app.Spec.Executor.SparkPodSpec, memoryOverheadFactor, replicas) + if err != nil { + return nil, err + } + + return util.SumResourceList([]corev1.ResourceList{driverResourceList, executorResourceList}), nil +} + +func getSparkPodMemoryRequests(podSpec *v1beta2.SparkPodSpec, memoryOverheadFactor float64, replicas int64) (corev1.ResourceList, error) { + var memoryBytes, memoryOverheadBytes int64 + if podSpec.Memory != nil { + parsed, err := parseJavaMemoryString(*podSpec.Memory) + if err != nil { + return nil, err + } + memoryBytes = parsed + } + + if podSpec.MemoryOverhead != nil { + parsed, err := parseJavaMemoryString(*podSpec.MemoryOverhead) + if err != nil { + return nil, err + } + memoryOverheadBytes = parsed + } else { + memoryOverheadBytes = int64(math.Max(float64(memoryBytes)*memoryOverheadFactor, common.MinMemoryOverhead)) + } + + resourceList := corev1.ResourceList{ + corev1.ResourceMemory: *resource.NewQuantity((memoryBytes+memoryOverheadBytes)*replicas, resource.BinarySI), + corev1.ResourceRequestsMemory: *resource.NewQuantity((memoryBytes+memoryOverheadBytes)*replicas, resource.BinarySI), + } + return resourceList, nil +} + +// For Spark pod, memory requests and limits are the same. +func getMemoryLimits(app *v1beta2.SparkApplication) (corev1.ResourceList, error) { + return getMemoryRequests(app) +} + +// Logic copied from https://github.com/apache/spark/blob/5264164a67df498b73facae207eda12ee133be7d/common/network-common/src/main/java/org/apache/spark/network/util/JavaUtils.java#L276 +func parseJavaMemoryString(s string) (int64, error) { + lower := strings.ToLower(s) + if matches := javaStringPattern.FindStringSubmatch(lower); matches != nil { + value, err := strconv.ParseInt(matches[1], 10, 64) + if err != nil { + return 0, err + } + suffix := matches[2] + if multiplier, present := javaStringSuffixes[suffix]; present { + return multiplier * value, nil + } + } else if matches = javaFractionStringPattern.FindStringSubmatch(lower); matches != nil { + value, err := strconv.ParseFloat(matches[1], 64) + if err != nil { + return 0, err + } + suffix := matches[2] + if multiplier, present := javaStringSuffixes[suffix]; present { + return int64(float64(multiplier) * value), nil + } + } + return 0, fmt.Errorf("could not parse string '%s' as a Java-style memory value. Examples: 100kb, 1.5mb, 1g", s) +} + +// Check whether the resource list will satisfy the resource quota. +func validateResourceQuota(resourceList corev1.ResourceList, resourceQuota corev1.ResourceQuota) bool { + for key, quantity := range resourceList { + if _, ok := resourceQuota.Status.Hard[key]; !ok { + continue + } + quantity.Add(resourceQuota.Status.Used[key]) + if quantity.Cmp(resourceQuota.Spec.Hard[key]) > 0 { + return false + } + } + return true +} diff --git a/internal/webhook/resourcequota_test.go b/internal/webhook/resourcequota_test.go new file mode 100644 index 0000000000..285e4841f5 --- /dev/null +++ b/internal/webhook/resourcequota_test.go @@ -0,0 +1,41 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+import (
+	"testing"
+)
+
+func assertMemory(memoryString string, expectedBytes int64, t *testing.T) {
+	m, err := parseJavaMemoryString(memoryString)
+	if err != nil {
+		t.Error(err)
+		return
+	}
+	if m != expectedBytes {
+		t.Errorf("%s: expected %v bytes, got %v bytes", memoryString, expectedBytes, m)
+		return
+	}
+}
+
+func TestJavaMemoryString(t *testing.T) {
+	assertMemory("1b", 1, t)
+	assertMemory("100k", 100*1024, t)
+	assertMemory("1gb", 1024*1024*1024, t)
+	assertMemory("10TB", 10*1024*1024*1024*1024, t)
+	assertMemory("10PB", 10*1024*1024*1024*1024*1024, t)
+}
diff --git a/internal/webhook/scheduledsparkapplication_defaulter.go b/internal/webhook/scheduledsparkapplication_defaulter.go
new file mode 100644
index 0000000000..afdf4304e6
--- /dev/null
+++ b/internal/webhook/scheduledsparkapplication_defaulter.go
@@ -0,0 +1,49 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+import (
+	"context"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
+
+	"github.com/kubeflow/spark-operator/api/v1beta2"
+)
+
+// +kubebuilder:webhook:admissionReviewVersions=v1,failurePolicy=fail,groups=sparkoperator.k8s.io,matchPolicy=Exact,mutating=true,name=mutate-scheduledsparkapplication.sparkoperator.k8s.io,path=/mutate-sparkoperator-k8s-io-v1beta2-scheduledsparkapplication,reinvocationPolicy=Never,resources=scheduledsparkapplications,sideEffects=None,verbs=create;update,versions=v1beta2,webhookVersions=v1
+
+// ScheduledSparkApplicationDefaulter sets default values for a ScheduledSparkApplication.
+type ScheduledSparkApplicationDefaulter struct{}
+
+// NewScheduledSparkApplicationDefaulter creates a new ScheduledSparkApplicationDefaulter instance.
+func NewScheduledSparkApplicationDefaulter() *ScheduledSparkApplicationDefaulter {
+	return &ScheduledSparkApplicationDefaulter{}
+}
+
+// ScheduledSparkApplicationDefaulter implements admission.CustomDefaulter.
+var _ admission.CustomDefaulter = &ScheduledSparkApplicationDefaulter{}
+
+// Default implements admission.CustomDefaulter.
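+//
+// A hedged wiring sketch using controller-runtime's webhook builder (manager
+// construction happens outside this hunk):
+//
+//	err := ctrl.NewWebhookManagedBy(mgr).
+//		For(&v1beta2.ScheduledSparkApplication{}).
+//		WithDefaulter(NewScheduledSparkApplicationDefaulter()).
+//		Complete()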
diff --git a/internal/webhook/scheduledsparkapplication_validator.go b/internal/webhook/scheduledsparkapplication_validator.go
new file mode 100644
index 0000000000..4f3f19b9c2
--- /dev/null
+++ b/internal/webhook/scheduledsparkapplication_validator.go
@@ -0,0 +1,80 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+import (
+	"context"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
+
+	"github.com/kubeflow/spark-operator/api/v1beta2"
+)
+
+// NOTE: The 'path' attribute must follow a specific pattern and should not be modified directly here.
+// Modifying the path to an invalid value causes API server errors, as the API server will fail to locate the webhook.
+// +kubebuilder:webhook:admissionReviewVersions=v1,failurePolicy=fail,groups=sparkoperator.k8s.io,matchPolicy=Exact,mutating=false,name=validate-scheduledsparkapplication.sparkoperator.k8s.io,path=/validate-sparkoperator-k8s-io-v1beta2-scheduledsparkapplication,reinvocationPolicy=Never,resources=scheduledsparkapplications,sideEffects=None,verbs=create;update,versions=v1beta2,webhookVersions=v1
+
+// ScheduledSparkApplicationValidator validates ScheduledSparkApplications on create, update and delete.
+type ScheduledSparkApplicationValidator struct{}
+
+// NewScheduledSparkApplicationValidator creates a new ScheduledSparkApplicationValidator instance.
+func NewScheduledSparkApplicationValidator() *ScheduledSparkApplicationValidator {
+	return &ScheduledSparkApplicationValidator{}
+}
+
+var _ admission.CustomValidator = &ScheduledSparkApplicationValidator{}
+
+// ValidateCreate implements admission.CustomValidator.
+func (v *ScheduledSparkApplicationValidator) ValidateCreate(ctx context.Context, obj runtime.Object) (warnings admission.Warnings, err error) {
+	app, ok := obj.(*v1beta2.ScheduledSparkApplication)
+	if !ok {
+		return nil, nil
+	}
+	logger.Info("Validating ScheduledSparkApplication create", "name", app.Name, "namespace", app.Namespace)
+	if err := v.validate(app); err != nil {
+		return nil, err
+	}
+	return nil, nil
+}
+
+// ValidateUpdate implements admission.CustomValidator.
+func (v *ScheduledSparkApplicationValidator) ValidateUpdate(ctx context.Context, oldObj runtime.Object, newObj runtime.Object) (warnings admission.Warnings, err error) {
+	newApp, ok := newObj.(*v1beta2.ScheduledSparkApplication)
+	if !ok {
+		return nil, nil
+	}
+	logger.Info("Validating ScheduledSparkApplication update", "name", newApp.Name, "namespace", newApp.Namespace)
+	if err := v.validate(newApp); err != nil {
+		return nil, err
+	}
+	return nil, nil
+}
+
+// ValidateDelete implements admission.CustomValidator.
+func (v *ScheduledSparkApplicationValidator) ValidateDelete(ctx context.Context, obj runtime.Object) (warnings admission.Warnings, err error) {
+	app, ok := obj.(*v1beta2.ScheduledSparkApplication)
+	if !ok {
+		return nil, nil
+	}
+	logger.Info("Validating ScheduledSparkApplication delete", "name", app.Name, "namespace", app.Namespace)
+	return nil, nil
+}
+
+func (v *ScheduledSparkApplicationValidator) validate(_ *v1beta2.ScheduledSparkApplication) error {
+	// TODO: implement validate logic
+	return nil
+}
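The `validate` hook above is still a TODO. One plausible first check, sketched here under the assumption that a cron parser such as github.com/robfig/cron/v3 is acceptable (this is not what the PR implements), would be to reject unparseable schedules at admission time instead of at reconcile time:

```go
package main

import (
	"fmt"

	"github.com/robfig/cron/v3"
)

// validateSchedule rejects schedules that the cron parser cannot understand,
// including descriptor forms like "@every 10m". Hypothetical helper, not part
// of this patch.
func validateSchedule(schedule string) error {
	if _, err := cron.ParseStandard(schedule); err != nil {
		return fmt.Errorf("invalid cron schedule %q: %v", schedule, err)
	}
	return nil
}

func main() {
	fmt.Println(validateSchedule("@every 10m")) // nil
	fmt.Println(validateSchedule("not-a-cron")) // error
}
```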
diff --git a/internal/webhook/sparkapplication_defaulter.go b/internal/webhook/sparkapplication_defaulter.go
new file mode 100644
index 0000000000..661ecf708a
--- /dev/null
+++ b/internal/webhook/sparkapplication_defaulter.go
@@ -0,0 +1,125 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+import (
+	"context"
+	"strconv"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
+
+	"github.com/kubeflow/spark-operator/api/v1beta2"
+	"github.com/kubeflow/spark-operator/pkg/common"
+	"github.com/kubeflow/spark-operator/pkg/util"
+)
+
+// +kubebuilder:webhook:admissionReviewVersions=v1,failurePolicy=fail,groups=sparkoperator.k8s.io,matchPolicy=Exact,mutating=true,name=mutate-sparkapplication.sparkoperator.k8s.io,path=/mutate-sparkoperator-k8s-io-v1beta2-sparkapplication,reinvocationPolicy=Never,resources=sparkapplications,sideEffects=None,verbs=create;update,versions=v1beta2,webhookVersions=v1
+
+// SparkApplicationDefaulter sets default values for a SparkApplication.
+type SparkApplicationDefaulter struct{}
+
+// NewSparkApplicationDefaulter creates a new SparkApplicationDefaulter instance.
+func NewSparkApplicationDefaulter() *SparkApplicationDefaulter {
+	return &SparkApplicationDefaulter{}
+}
+
+// SparkApplicationDefaulter implements admission.CustomDefaulter.
+var _ admission.CustomDefaulter = &SparkApplicationDefaulter{}
+
+// Default implements admission.CustomDefaulter.
+func (d *SparkApplicationDefaulter) Default(ctx context.Context, obj runtime.Object) error {
+	app, ok := obj.(*v1beta2.SparkApplication)
+	if !ok {
+		return nil
+	}
+
+	// Only set the default values for SparkApplications in the new or invalidating state.
+	state := util.GetApplicationState(app)
+	if state != v1beta2.ApplicationStateNew && state != v1beta2.ApplicationStateInvalidating {
+		return nil
+	}
+
+	logger.Info("Defaulting SparkApplication", "name", app.Name, "namespace", app.Namespace, "state", state)
+	defaultSparkApplication(app)
+	return nil
+}
+
+// defaultSparkApplication sets default values for certain fields of a SparkApplication.
+func defaultSparkApplication(app *v1beta2.SparkApplication) {
+	if app.Spec.Mode == "" {
+		app.Spec.Mode = v1beta2.DeployModeCluster
+	}
+
+	if app.Spec.RestartPolicy.Type == "" {
+		app.Spec.RestartPolicy.Type = v1beta2.RestartPolicyNever
+	}
+
+	if app.Spec.RestartPolicy.Type != v1beta2.RestartPolicyNever {
+		if app.Spec.RestartPolicy.OnSubmissionFailureRetryInterval == nil {
+			app.Spec.RestartPolicy.OnSubmissionFailureRetryInterval = util.Int64Ptr(5)
+		}
+		if app.Spec.RestartPolicy.OnFailureRetryInterval == nil {
+			app.Spec.RestartPolicy.OnFailureRetryInterval = util.Int64Ptr(5)
+		}
+	}
+
+	defaultDriverSpec(app)
+	defaultExecutorSpec(app)
+}
+
+func defaultDriverSpec(app *v1beta2.SparkApplication) {
+	if app.Spec.Driver.Cores == nil {
+		if app.Spec.SparkConf == nil || app.Spec.SparkConf[common.SparkDriverCores] == "" {
+			app.Spec.Driver.Cores = util.Int32Ptr(1)
+		}
+	}
+
+	if app.Spec.Driver.Memory == nil {
+		if app.Spec.SparkConf == nil || app.Spec.SparkConf[common.SparkDriverMemory] == "" {
+			app.Spec.Driver.Memory = util.StringPtr("1g")
+		}
+	}
+}
+
+func defaultExecutorSpec(app *v1beta2.SparkApplication) {
+	if app.Spec.Executor.Cores == nil {
+		if app.Spec.SparkConf == nil || app.Spec.SparkConf[common.SparkExecutorCores] == "" {
+			app.Spec.Executor.Cores = util.Int32Ptr(1)
+		}
+	}
+
+	if app.Spec.Executor.Memory == nil {
+		if app.Spec.SparkConf == nil || app.Spec.SparkConf[common.SparkExecutorMemory] == "" {
+			app.Spec.Executor.Memory = util.StringPtr("1g")
+		}
+	}
+
+	if app.Spec.Executor.Instances == nil {
+		// Check whether dynamic allocation is enabled in the application spec.
+		enableDynamicAllocation := app.Spec.DynamicAllocation != nil && app.Spec.DynamicAllocation.Enabled
+		// Check whether dynamic allocation is enabled in the spark conf.
+		if !enableDynamicAllocation && app.Spec.SparkConf != nil {
+			if dynamicConf, _ := strconv.ParseBool(app.Spec.SparkConf[common.SparkDynamicAllocationEnabled]); dynamicConf {
+				enableDynamicAllocation = true
+			}
+		}
+		// Default to a single executor instance only when dynamic allocation is
+		// disabled and the instance count is not already set in the spark conf.
+		if !enableDynamicAllocation && (app.Spec.SparkConf == nil || app.Spec.SparkConf[common.SparkExecutorInstances] == "") {
+			app.Spec.Executor.Instances = util.Int32Ptr(1)
+		}
+	}
+}
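A quick test-style sketch of the defaulting rules above (same package, so it can call `defaultSparkApplication` directly); the expected values follow the v1beta2 constants used in this patch, and the test name is hypothetical:

```go
package webhook

import (
	"testing"

	"github.com/kubeflow/spark-operator/api/v1beta2"
)

func TestDefaultSparkApplication_FillsCoreDefaults(t *testing.T) {
	app := &v1beta2.SparkApplication{}
	defaultSparkApplication(app)

	if app.Spec.Mode != v1beta2.DeployModeCluster {
		t.Errorf("expected cluster mode, got %q", app.Spec.Mode)
	}
	if app.Spec.RestartPolicy.Type != v1beta2.RestartPolicyNever {
		t.Errorf("expected Never restart policy, got %q", app.Spec.RestartPolicy.Type)
	}
	if app.Spec.Driver.Cores == nil || *app.Spec.Driver.Cores != 1 {
		t.Error("expected driver cores to default to 1")
	}
	if app.Spec.Executor.Memory == nil || *app.Spec.Executor.Memory != "1g" {
		t.Error("expected executor memory to default to 1g")
	}
}
```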
diff --git a/internal/webhook/sparkapplication_validator.go b/internal/webhook/sparkapplication_validator.go
new file mode 100644
index 0000000000..7b1fd41085
--- /dev/null
+++ b/internal/webhook/sparkapplication_validator.go
@@ -0,0 +1,173 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+import (
+	"context"
+	"fmt"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/equality"
+	"k8s.io/apimachinery/pkg/runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
+
+	"github.com/kubeflow/spark-operator/api/v1beta2"
+	"github.com/kubeflow/spark-operator/pkg/util"
+)
+
+// NOTE: The 'path' attribute must follow a specific pattern and should not be modified directly here.
+// Modifying the path to an invalid value causes API server errors, as the API server will fail to locate the webhook.
+// +kubebuilder:webhook:admissionReviewVersions=v1,failurePolicy=fail,groups=sparkoperator.k8s.io,matchPolicy=Exact,mutating=false,name=validate-sparkapplication.sparkoperator.k8s.io,path=/validate-sparkoperator-k8s-io-v1beta2-sparkapplication,reinvocationPolicy=Never,resources=sparkapplications,sideEffects=None,verbs=create;update,versions=v1beta2,webhookVersions=v1
+
+// SparkApplicationValidator validates SparkApplications on create, update and delete.
+type SparkApplicationValidator struct {
+	client client.Client
+
+	enableResourceQuotaEnforcement bool
+}
+
+// NewSparkApplicationValidator creates a new SparkApplicationValidator instance.
+func NewSparkApplicationValidator(client client.Client, enableResourceQuotaEnforcement bool) *SparkApplicationValidator {
+	return &SparkApplicationValidator{
+		client: client,
+
+		enableResourceQuotaEnforcement: enableResourceQuotaEnforcement,
+	}
+}
+
+var _ admission.CustomValidator = &SparkApplicationValidator{}
+
+// ValidateCreate implements admission.CustomValidator.
+func (v *SparkApplicationValidator) ValidateCreate(ctx context.Context, obj runtime.Object) (warnings admission.Warnings, err error) {
+	app, ok := obj.(*v1beta2.SparkApplication)
+	if !ok {
+		return nil, nil
+	}
+	logger.Info("Validating SparkApplication create", "name", app.Name, "namespace", app.Namespace, "state", util.GetApplicationState(app))
+	if err := v.validateSpec(ctx, app); err != nil {
+		return nil, err
+	}
+
+	if v.enableResourceQuotaEnforcement {
+		if err := v.validateResourceUsage(ctx, app); err != nil {
+			return nil, err
+		}
+	}
+
+	return nil, nil
+}
+
+// ValidateUpdate implements admission.CustomValidator.
+func (v *SparkApplicationValidator) ValidateUpdate(ctx context.Context, oldObj runtime.Object, newObj runtime.Object) (warnings admission.Warnings, err error) {
+	oldApp, ok := oldObj.(*v1beta2.SparkApplication)
+	if !ok {
+		return nil, nil
+	}
+
+	newApp, ok := newObj.(*v1beta2.SparkApplication)
+	if !ok {
+		return nil, nil
+	}
+
+	logger.Info("Validating SparkApplication update", "name", newApp.Name, "namespace", newApp.Namespace)
+
+	// Skip validating when spec does not change.
+	if equality.Semantic.DeepEqual(oldApp.Spec, newApp.Spec) {
+		return nil, nil
+	}
+
+	if err := v.validateSpec(ctx, newApp); err != nil {
+		return nil, err
+	}
+
+	// Validate SparkApplication resource usage when resource quota enforcement is enabled.
+	if v.enableResourceQuotaEnforcement {
+		if err := v.validateResourceUsage(ctx, newApp); err != nil {
+			return nil, err
+		}
+	}
+
+	return nil, nil
+}
+
+// ValidateDelete implements admission.CustomValidator.
+func (v *SparkApplicationValidator) ValidateDelete(ctx context.Context, obj runtime.Object) (warnings admission.Warnings, err error) {
+	app, ok := obj.(*v1beta2.SparkApplication)
+	if !ok {
+		return nil, nil
+	}
+	logger.Info("Validating SparkApplication delete", "name", app.Name, "namespace", app.Namespace, "state", util.GetApplicationState(app))
+	return nil, nil
+}
+
+func (v *SparkApplicationValidator) validateSpec(_ context.Context, app *v1beta2.SparkApplication) error {
+	logger.V(1).Info("Validating SparkApplication spec", "name", app.Name, "namespace", app.Namespace, "state", util.GetApplicationState(app))
+
+	if app.Spec.NodeSelector != nil && (app.Spec.Driver.NodeSelector != nil || app.Spec.Executor.NodeSelector != nil) {
+		return fmt.Errorf("node selector cannot be defined at both the SparkApplication level and the Driver/Executor level")
+	}
+
+	servicePorts := make(map[int32]bool)
+	ingressURLFormats := make(map[string]bool)
+	for _, item := range app.Spec.DriverIngressOptions {
+		if item.ServicePort == nil {
+			return fmt.Errorf("DriverIngressOptions has nil ServicePort")
+		}
+		if servicePorts[*item.ServicePort] {
+			return fmt.Errorf("DriverIngressOptions has duplicate ServicePort: %d", *item.ServicePort)
+		}
+		servicePorts[*item.ServicePort] = true
+
+		if item.IngressURLFormat == "" {
+			return fmt.Errorf("DriverIngressOptions has empty IngressURLFormat")
+		}
+		if ingressURLFormats[item.IngressURLFormat] {
+			return fmt.Errorf("DriverIngressOptions has duplicate IngressURLFormat: %s", item.IngressURLFormat)
+		}
+		ingressURLFormats[item.IngressURLFormat] = true
+	}
+
+	return nil
+}
+
+func (v *SparkApplicationValidator) validateResourceUsage(ctx context.Context, app *v1beta2.SparkApplication) error {
+	logger.V(1).Info("Validating SparkApplication resource usage", "name", app.Name, "namespace", app.Namespace, "state", util.GetApplicationState(app))
+
+	requests, err := getResourceList(app)
+	if err != nil {
+		return fmt.Errorf("failed to calculate resource requests: %v", err)
+	}
+
+	resourceQuotaList := &corev1.ResourceQuotaList{}
+	if err := v.client.List(ctx, resourceQuotaList, client.InNamespace(app.Namespace)); err != nil {
+		return fmt.Errorf("failed to list resource quotas: %v", err)
+	}
+
+	for _, resourceQuota := range resourceQuotaList.Items {
+		// Scope selectors are not currently supported; ignore any ResourceQuota that does not match everything.
+		// TODO: Add support for scope selectors.
+		if resourceQuota.Spec.ScopeSelector != nil || len(resourceQuota.Spec.Scopes) > 0 {
+			continue
+		}
+
+		if !validateResourceQuota(requests, resourceQuota) {
+			return fmt.Errorf("failed to validate resource quota \"%s/%s\"", resourceQuota.Namespace, resourceQuota.Name)
+		}
+	}
+
+	return nil
+}
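The quota check behind `validateResourceQuota` admits an application only if, for every resource the quota tracks, used + requested stays within the hard limit. A worked example of that comparison, with illustrative quantities (the real values come from the ResourceQuota's `status.used` and `spec.hard`):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// fits mirrors the per-resource comparison: add the already-used amount to the
// new request and check it against the hard limit.
func fits(requested, used, hard resource.Quantity) bool {
	requested.Add(used)
	return requested.Cmp(hard) <= 0
}

func main() {
	requested := resource.MustParse("3Gi") // the application's total memory request
	used := resource.MustParse("6Gi")      // already consumed in the namespace
	hard := resource.MustParse("8Gi")      // quota limit

	fmt.Println(fits(requested, used, hard)) // false: 6Gi + 3Gi > 8Gi
}
```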
diff --git a/internal/webhook/sparkpod_defaulter.go b/internal/webhook/sparkpod_defaulter.go
new file mode 100644
index 0000000000..c7a7a858cf
--- /dev/null
+++ b/internal/webhook/sparkpod_defaulter.go
@@ -0,0 +1,732 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
+
+	"github.com/kubeflow/spark-operator/api/v1beta2"
+	"github.com/kubeflow/spark-operator/pkg/common"
+	"github.com/kubeflow/spark-operator/pkg/util"
+)
+
+const (
+	maxNameLength = 63
+)
+
+// +kubebuilder:webhook:admissionReviewVersions=v1,failurePolicy=fail,groups="",matchPolicy=Exact,mutating=true,name=mutate-pod.sparkoperator.k8s.io,path=/mutate--v1-pod,reinvocationPolicy=Never,resources=pods,sideEffects=None,verbs=create;update,versions=v1,webhookVersions=v1
+
+// SparkPodDefaulter defaults Spark pods.
+type SparkPodDefaulter struct {
+	client             client.Client
+	sparkJobNamespaces map[string]bool
+}
+
+// SparkPodDefaulter implements admission.CustomDefaulter.
+var _ admission.CustomDefaulter = &SparkPodDefaulter{}
+
+// NewSparkPodDefaulter creates a new SparkPodDefaulter instance.
+func NewSparkPodDefaulter(client client.Client, sparkJobNamespaces []string) *SparkPodDefaulter {
+	m := make(map[string]bool)
+	for _, ns := range sparkJobNamespaces {
+		m[ns] = true
+	}
+
+	return &SparkPodDefaulter{
+		client:             client,
+		sparkJobNamespaces: m,
+	}
+}
+
+// Default implements admission.CustomDefaulter.
+func (d *SparkPodDefaulter) Default(ctx context.Context, obj runtime.Object) error {
+	pod, ok := obj.(*corev1.Pod)
+	if !ok {
+		return nil
+	}
+
+	namespace := pod.Namespace
+	if !d.isSparkJobNamespace(namespace) {
+		return nil
+	}
+
+	appName := pod.Labels[common.LabelSparkAppName]
+	if appName == "" {
+		return nil
+	}
+
+	app := &v1beta2.SparkApplication{}
+	if err := d.client.Get(ctx, types.NamespacedName{Name: appName, Namespace: namespace}, app); err != nil {
+		return fmt.Errorf("failed to get SparkApplication %s/%s: %v", namespace, appName, err)
+	}
+
+	logger.Info("Mutating Spark pod", "name", pod.Name, "namespace", namespace, "phase", pod.Status.Phase)
+	if err := mutateSparkPod(pod, app); err != nil {
+		logger.Info("Denying Spark pod", "name", pod.Name, "namespace", namespace, "errorMessage", err.Error())
+		return fmt.Errorf("failed to mutate Spark pod: %v", err)
+	}
+
+	return nil
+}
+
+func (d *SparkPodDefaulter) isSparkJobNamespace(ns string) bool {
+	return d.sparkJobNamespaces[ns]
+}
+
+type mutateSparkPodOption func(pod *corev1.Pod, app *v1beta2.SparkApplication) error
+
+func mutateSparkPod(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	options := []mutateSparkPodOption{
+		addOwnerReference,
+		addEnvVars,
+		addEnvFrom,
+		addHadoopConfigMap,
+		addSparkConfigMap,
+		addGeneralConfigMaps,
+		addVolumes,
+		addContainerPorts,
+		addHostNetwork,
+		addHostAliases,
+		addInitContainers,
+		addSidecarContainers,
+		addDNSConfig,
+		addPriorityClassName,
+		addSchedulerName,
+		addNodeSelectors,
+		addAffinity,
+		addTolerations,
+		addGPU,
+		addPrometheusConfig,
+		addContainerSecurityContext,
+		addPodSecurityContext,
+		addTerminationGracePeriodSeconds,
+		addPodLifeCycleConfig,
+		addShareProcessNamespace,
+	}
+
+	for _, option := range options {
+		if err := option(pod, app); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func addOwnerReference(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	if !util.IsDriverPod(pod) {
+		return nil
+	}
+	ownerReference := util.GetOwnerReference(app)
+	pod.ObjectMeta.OwnerReferences = append(pod.ObjectMeta.OwnerReferences, ownerReference)
+	return nil
+}
+
+func addVolumes(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	volumes := app.Spec.Volumes
+
+	volumeMap := make(map[string]corev1.Volume)
+	for _, v := range volumes {
+		volumeMap[v.Name] = v
+	}
+
+	var volumeMounts []corev1.VolumeMount
+	if util.IsDriverPod(pod) {
+		volumeMounts = app.Spec.Driver.VolumeMounts
+	} else if util.IsExecutorPod(pod) {
+		volumeMounts = app.Spec.Executor.VolumeMounts
+	}
+
+	addedVolumeMap := make(map[string]corev1.Volume)
+	for _, m := range volumeMounts {
+		// Skip adding localDirVolumes.
+		if strings.HasPrefix(m.Name, common.SparkLocalDirVolumePrefix) {
+			continue
+		}
+
+		if v, ok := volumeMap[m.Name]; ok {
+			if _, ok := addedVolumeMap[m.Name]; !ok {
+				_ = addVolume(pod, v)
+				addedVolumeMap[m.Name] = v
+			}
+			_ = addVolumeMount(pod, m)
+		}
+	}
+	return nil
+}
+
+func addVolume(pod *corev1.Pod, volume corev1.Volume) error {
+	pod.Spec.Volumes = append(pod.Spec.Volumes, volume)
+	return nil
+}
+
+func addVolumeMount(pod *corev1.Pod, mount corev1.VolumeMount) error {
+	i := findContainer(pod)
+	if i < 0 {
+		logger.Info("Skipping volume mount as Spark container was not found in pod", "volumeMount", mount.Name, "pod", pod.Name)
+		return nil
+	}
+
+	pod.Spec.Containers[i].VolumeMounts = append(pod.Spec.Containers[i].VolumeMounts, mount)
+	return nil
+}
+
+func addEnvVars(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	i := findContainer(pod)
+	if util.IsDriverPod(pod) {
+		if len(app.Spec.Driver.Env) == 0 {
+			return nil
+		} else if i < 0 {
+			return fmt.Errorf("failed to add envs as driver container not found")
+		}
+		pod.Spec.Containers[i].Env = append(pod.Spec.Containers[i].Env, app.Spec.Driver.Env...)
+	} else if util.IsExecutorPod(pod) {
+		if len(app.Spec.Executor.Env) == 0 {
+			return nil
+		} else if i < 0 {
+			return fmt.Errorf("failed to add envs as executor container not found")
+		}
+		pod.Spec.Containers[i].Env = append(pod.Spec.Containers[i].Env, app.Spec.Executor.Env...)
+	}
+	return nil
+}
+
+func addEnvFrom(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	var envFrom []corev1.EnvFromSource
+	if util.IsDriverPod(pod) {
+		envFrom = app.Spec.Driver.EnvFrom
+	} else if util.IsExecutorPod(pod) {
+		envFrom = app.Spec.Executor.EnvFrom
+	}
+
+	i := findContainer(pod)
+	if i < 0 {
+		return fmt.Errorf("not able to add EnvFrom as Spark container was not found in pod")
+	}
+
+	pod.Spec.Containers[i].EnvFrom = append(pod.Spec.Containers[i].EnvFrom, envFrom...)
+	return nil
+}
+
+func addEnvironmentVariable(pod *corev1.Pod, name, value string) error {
+	i := findContainer(pod)
+	if i < 0 {
+		return fmt.Errorf("not able to add environment variable as Spark container was not found")
+	}
+
+	pod.Spec.Containers[i].Env = append(pod.Spec.Containers[i].Env, corev1.EnvVar{
+		Name:  name,
+		Value: value,
+	})
+	return nil
+}
+
+func addSparkConfigMap(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	if app.Spec.SparkConfigMap == nil {
+		return nil
+	}
+
+	if err := addConfigMapVolume(pod, *app.Spec.SparkConfigMap, common.SparkConfigMapVolumeName); err != nil {
+		return err
+	}
+
+	if err := addConfigMapVolumeMount(pod, common.SparkConfigMapVolumeName, common.DefaultSparkConfDir); err != nil {
+		return err
+	}
+
+	if err := addEnvironmentVariable(pod, common.EnvSparkConfDir, common.DefaultSparkConfDir); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func addHadoopConfigMap(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	if app.Spec.HadoopConfigMap == nil {
+		return nil
+	}
+
+	if err := addConfigMapVolume(pod, *app.Spec.HadoopConfigMap, common.HadoopConfigMapVolumeName); err != nil {
+		return err
+	}
+
+	if err := addConfigMapVolumeMount(pod, common.HadoopConfigMapVolumeName, common.DefaultHadoopConfDir); err != nil {
+		return err
+	}
+
+	if err := addEnvironmentVariable(pod, common.EnvHadoopConfDir, common.DefaultHadoopConfDir); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func addGeneralConfigMaps(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	var configMaps []v1beta2.NamePath
+	if util.IsDriverPod(pod) {
+		configMaps = app.Spec.Driver.ConfigMaps
+	} else if util.IsExecutorPod(pod) {
+		configMaps = app.Spec.Executor.ConfigMaps
+	}
+
+	for _, namePath := range configMaps {
+		volumeName := namePath.Name + "-vol"
+		if len(volumeName) > maxNameLength {
+			volumeName = volumeName[0:maxNameLength]
+			logger.Info(fmt.Sprintf("ConfigMap volume name is too long. Truncating to length %d. Result: %s.", maxNameLength, volumeName))
+		}
+		if err := addConfigMapVolume(pod, namePath.Name, volumeName); err != nil {
+			return err
+		}
+		if err := addConfigMapVolumeMount(pod, volumeName, namePath.Path); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func addPrometheusConfig(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	// Skip if Prometheus Monitoring is not enabled or an in-container ConfigFile is used,
+	// in which cases a Prometheus ConfigMap won't be created.
+	if !util.PrometheusMonitoringEnabled(app) || (util.HasMetricsPropertiesFile(app) && util.HasPrometheusConfigFile(app)) {
+		return nil
+	}
+
+	if util.IsDriverPod(pod) && !util.ExposeDriverMetrics(app) {
+		return nil
+	}
+	if util.IsExecutorPod(pod) && !util.ExposeExecutorMetrics(app) {
+		return nil
+	}
+
+	name := util.GetPrometheusConfigMapName(app)
+	volumeName := name + "-vol"
+	mountPath := common.PrometheusConfigMapMountPath
+	promPort := common.DefaultPrometheusJavaAgentPort
+	if app.Spec.Monitoring.Prometheus.Port != nil {
+		promPort = *app.Spec.Monitoring.Prometheus.Port
+	}
+	promProtocol := common.DefaultPrometheusPortProtocol
+	promPortName := common.DefaultPrometheusPortName
+	if app.Spec.Monitoring.Prometheus.PortName != nil {
+		promPortName = *app.Spec.Monitoring.Prometheus.PortName
+	}
+	if err := addConfigMapVolume(pod, name, volumeName); err != nil {
+		return fmt.Errorf("failed to mount volume %s in path %s: %v", volumeName, mountPath, err)
+	}
+	if err := addConfigMapVolumeMount(pod, volumeName, mountPath); err != nil {
+		return fmt.Errorf("failed to mount volume %s in path %s: %v", volumeName, mountPath, err)
+	}
+	if err := addContainerPort(pod, promPort, promProtocol, promPortName); err != nil {
+		return fmt.Errorf("failed to expose port %d to scrape metrics outside the pod: %v", promPort, err)
+	}
+	return nil
+}
+
+func addContainerPorts(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	var ports []v1beta2.Port
+
+	if util.IsDriverPod(pod) {
+		ports = app.Spec.Driver.Ports
+	} else if util.IsExecutorPod(pod) {
+		ports = app.Spec.Executor.Ports
+	}
+
+	for _, p := range ports {
+		if err := addContainerPort(pod, p.ContainerPort, p.Protocol, p.Name); err != nil {
+			logger.Error(err, "failed to expose container port", "name", p.Name)
+			continue
+		}
+	}
+	return nil
+}
+
+func addContainerPort(pod *corev1.Pod, port int32, protocol string, portName string) error {
+	i := findContainer(pod)
+	if i < 0 {
+		return fmt.Errorf("not able to add containerPort %d as Spark container was not found in pod", port)
+	}
+
+	containerPort := corev1.ContainerPort{
+		Name:          portName,
+		ContainerPort: port,
+		Protocol:      corev1.Protocol(protocol),
+	}
+	pod.Spec.Containers[i].Ports = append(pod.Spec.Containers[i].Ports, containerPort)
+	return nil
+}
+
+func addConfigMapVolume(pod *corev1.Pod, configMapName string, configMapVolumeName string) error {
+	volume := corev1.Volume{
+		Name: configMapVolumeName,
+		VolumeSource: corev1.VolumeSource{
+			ConfigMap: &corev1.ConfigMapVolumeSource{
+				LocalObjectReference: corev1.LocalObjectReference{
+					Name: configMapName,
+				},
+			},
+		},
+	}
+	return addVolume(pod, volume)
+}
+
+func addConfigMapVolumeMount(pod *corev1.Pod, configMapVolumeName string, mountPath string) error {
+	mount := corev1.VolumeMount{
+		Name:      configMapVolumeName,
+		ReadOnly:  true,
+		MountPath: mountPath,
+	}
+	return addVolumeMount(pod, mount)
+}
+
+func addAffinity(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	var affinity *corev1.Affinity
+	if util.IsDriverPod(pod) {
+		affinity = app.Spec.Driver.Affinity
+	} else if util.IsExecutorPod(pod) {
+		affinity = app.Spec.Executor.Affinity
+	}
+	if affinity == nil {
+		return nil
+	}
+	pod.Spec.Affinity = affinity.DeepCopy()
+	return nil
+}
+
+func addTolerations(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	var tolerations []corev1.Toleration
+	if util.IsDriverPod(pod) {
+		tolerations = app.Spec.Driver.SparkPodSpec.Tolerations
+	} else if util.IsExecutorPod(pod) {
+		tolerations = app.Spec.Executor.SparkPodSpec.Tolerations
+	}
+
+	if pod.Spec.Tolerations == nil {
+		pod.Spec.Tolerations = []corev1.Toleration{}
+	}
+
+	pod.Spec.Tolerations = append(pod.Spec.Tolerations, tolerations...)
+	return nil
+}
+
+func addNodeSelectors(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	var nodeSelector map[string]string
+	if util.IsDriverPod(pod) {
+		nodeSelector = app.Spec.Driver.NodeSelector
+	} else if util.IsExecutorPod(pod) {
+		nodeSelector = app.Spec.Executor.NodeSelector
+	}
+
+	if pod.Spec.NodeSelector == nil {
+		pod.Spec.NodeSelector = make(map[string]string)
+	}
+
+	for k, v := range nodeSelector {
+		pod.Spec.NodeSelector[k] = v
+	}
+	return nil
+}
+
+func addDNSConfig(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	var dnsConfig *corev1.PodDNSConfig
+	if util.IsDriverPod(pod) {
+		dnsConfig = app.Spec.Driver.DNSConfig
+	} else if util.IsExecutorPod(pod) {
+		dnsConfig = app.Spec.Executor.DNSConfig
+	}
+
+	if dnsConfig != nil {
+		pod.Spec.DNSConfig = dnsConfig
+	}
+	return nil
+}
+
+func addSchedulerName(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	var schedulerName *string
+	// NOTE: Prefer the `BatchScheduler` field if the application spec has it configured.
+	if app.Spec.BatchScheduler != nil {
+		schedulerName = app.Spec.BatchScheduler
+	} else if util.IsDriverPod(pod) {
+		schedulerName = app.Spec.Driver.SchedulerName
+	} else if util.IsExecutorPod(pod) {
+		schedulerName = app.Spec.Executor.SchedulerName
+	}
+
+	if schedulerName == nil || *schedulerName == "" {
+		return nil
+	}
+
+	pod.Spec.SchedulerName = *schedulerName
+	return nil
+}
+
+func addPriorityClassName(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	var priorityClassName *string
+	if app.Spec.BatchSchedulerOptions != nil {
+		priorityClassName = app.Spec.BatchSchedulerOptions.PriorityClassName
+	}
+
+	if priorityClassName != nil && *priorityClassName != "" {
+		pod.Spec.PriorityClassName = *priorityClassName
+		if pod.Spec.Priority != nil {
+			pod.Spec.Priority = nil
+		}
+		if pod.Spec.PreemptionPolicy != nil {
+			pod.Spec.PreemptionPolicy = nil
+		}
+	}
+	return nil
+}
+
+func addPodSecurityContext(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	var securityContext *corev1.PodSecurityContext
+	if util.IsDriverPod(pod) {
+		securityContext = app.Spec.Driver.PodSecurityContext
+	} else if util.IsExecutorPod(pod) {
+		securityContext = app.Spec.Executor.PodSecurityContext
+	}
+
+	if securityContext != nil {
+		pod.Spec.SecurityContext = securityContext
+	}
+	return nil
+}
+
+func addContainerSecurityContext(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	i := findContainer(pod)
+	if util.IsDriverPod(pod) {
+		if i < 0 {
+			return fmt.Errorf("driver container not found in pod")
+		}
+		if app.Spec.Driver.SecurityContext == nil {
+			return nil
+		}
+		pod.Spec.Containers[i].SecurityContext = app.Spec.Driver.SecurityContext
+	} else if util.IsExecutorPod(pod) {
+		if i < 0 {
+			return fmt.Errorf("executor container not found in pod")
+		}
+		if app.Spec.Executor.SecurityContext == nil {
+			return nil
+		}
+		pod.Spec.Containers[i].SecurityContext = app.Spec.Executor.SecurityContext
+	}
+	return nil
+}
+
+func addSidecarContainers(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	var sidecars []corev1.Container
+	if util.IsDriverPod(pod) {
+		sidecars = app.Spec.Driver.Sidecars
+	} else if util.IsExecutorPod(pod) {
+		sidecars = app.Spec.Executor.Sidecars
+	}
+
+	for _, sidecar := range sidecars {
+		if !hasContainer(pod, &sidecar) {
+			pod.Spec.Containers = append(pod.Spec.Containers, *sidecar.DeepCopy())
+		}
+	}
+	return nil
+}
+
+func addInitContainers(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	var initContainers []corev1.Container
+	if util.IsDriverPod(pod) {
+		initContainers = app.Spec.Driver.InitContainers
+	} else if util.IsExecutorPod(pod) {
+		initContainers = app.Spec.Executor.InitContainers
+	}
+
+	if pod.Spec.InitContainers == nil {
+		pod.Spec.InitContainers = []corev1.Container{}
+	}
+
+	for _, container := range initContainers {
+		if !hasInitContainer(pod, &container) {
+			pod.Spec.InitContainers = append(pod.Spec.InitContainers, *container.DeepCopy())
+		}
+	}
+	return nil
+}
+
+func addGPU(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	var gpu *v1beta2.GPUSpec
+	if util.IsDriverPod(pod) {
+		gpu = app.Spec.Driver.GPU
+	}
+	if util.IsExecutorPod(pod) {
+		gpu = app.Spec.Executor.GPU
+	}
+	if gpu == nil {
+		return nil
+	}
+	if gpu.Name == "" {
+		logger.V(1).Info(fmt.Sprintf("Please specify GPU resource name, such as: nvidia.com/gpu, amd.com/gpu etc. Current gpu spec: %+v", gpu))
+		return nil
+	}
+	if gpu.Quantity <= 0 {
+		logger.V(1).Info(fmt.Sprintf("GPU Quantity must be positive. Current gpu spec: %+v", gpu))
+		return nil
+	}
+
+	i := findContainer(pod)
+	if i < 0 {
+		return fmt.Errorf("not able to add GPU as Spark container was not found in pod %s", pod.Name)
+	}
+	if pod.Spec.Containers[i].Resources.Limits == nil {
+		pod.Spec.Containers[i].Resources.Limits = make(corev1.ResourceList)
+	}
+	pod.Spec.Containers[i].Resources.Limits[corev1.ResourceName(gpu.Name)] = *resource.NewQuantity(gpu.Quantity, resource.DecimalSI)
+	return nil
+}
+
+func addHostNetwork(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	var hostNetwork *bool
+	if util.IsDriverPod(pod) {
+		hostNetwork = app.Spec.Driver.HostNetwork
+	}
+	if util.IsExecutorPod(pod) {
+		hostNetwork = app.Spec.Executor.HostNetwork
+	}
+
+	if hostNetwork == nil || !*hostNetwork {
+		return nil
+	}
+
+	// For pods with hostNetwork, explicitly set the DNS policy to "ClusterFirstWithHostNet".
+	// Detail: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
+	pod.Spec.HostNetwork = true
+	pod.Spec.DNSPolicy = corev1.DNSClusterFirstWithHostNet
+	return nil
+}
+
+func addTerminationGracePeriodSeconds(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	var gracePeriodSeconds *int64
+	if util.IsDriverPod(pod) {
+		gracePeriodSeconds = app.Spec.Driver.TerminationGracePeriodSeconds
+	} else if util.IsExecutorPod(pod) {
+		gracePeriodSeconds = app.Spec.Executor.TerminationGracePeriodSeconds
+	}
+
+	if gracePeriodSeconds == nil {
+		return nil
+	}
+
+	pod.Spec.TerminationGracePeriodSeconds = gracePeriodSeconds
+	return nil
+}
+
+func addPodLifeCycleConfig(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	var lifeCycle *corev1.Lifecycle
+	var containerName string
+	if util.IsDriverPod(pod) {
+		lifeCycle = app.Spec.Driver.Lifecycle
+		containerName = common.SparkDriverContainerName
+	} else if util.IsExecutorPod(pod) {
+		lifeCycle = app.Spec.Executor.Lifecycle
+		containerName = common.SparkExecutorContainerName
+	}
+	if lifeCycle == nil {
+		return nil
+	}
+
+	i := 0
+	// Find the Spark container in the pod.
+	for ; i < len(pod.Spec.Containers); i++ {
+		if pod.Spec.Containers[i].Name == containerName {
+			break
+		}
+	}
+	if i == len(pod.Spec.Containers) {
+		logger.Info("Spark container not found in pod", "container", containerName, "pod", pod.Name)
+		return nil
+	}
+
+	pod.Spec.Containers[i].Lifecycle = lifeCycle
+	return nil
+}
+
+func addHostAliases(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	var hostAliases []corev1.HostAlias
+	if util.IsDriverPod(pod) {
+		hostAliases = app.Spec.Driver.HostAliases
+	} else if util.IsExecutorPod(pod) {
+		hostAliases = app.Spec.Executor.HostAliases
+	}
+
+	pod.Spec.HostAliases = append(pod.Spec.HostAliases, hostAliases...)
+	return nil
+}
+
+func addShareProcessNamespace(pod *corev1.Pod, app *v1beta2.SparkApplication) error {
+	var shareProcessNamespace *bool
+	if util.IsDriverPod(pod) {
+		shareProcessNamespace = app.Spec.Driver.ShareProcessNamespace
+	} else if util.IsExecutorPod(pod) {
+		shareProcessNamespace = app.Spec.Executor.ShareProcessNamespace
+	}
+
+	if shareProcessNamespace == nil || !*shareProcessNamespace {
+		return nil
+	}
+
+	pod.Spec.ShareProcessNamespace = shareProcessNamespace
+	return nil
+}
+
+func findContainer(pod *corev1.Pod) int {
+	var candidateContainerNames []string
+	if util.IsDriverPod(pod) {
+		candidateContainerNames = append(candidateContainerNames, common.SparkDriverContainerName)
+	} else if util.IsExecutorPod(pod) {
+		// Spark 3.x changed the default executor container name, so we need to include both.
+		candidateContainerNames = append(candidateContainerNames, common.SparkExecutorContainerName, common.Spark3DefaultExecutorContainerName)
+	}
+
+	if len(candidateContainerNames) == 0 {
+		return -1
+	}
+
+	for i := 0; i < len(pod.Spec.Containers); i++ {
+		for _, name := range candidateContainerNames {
+			if pod.Spec.Containers[i].Name == name {
+				return i
+			}
+		}
+	}
+	return -1
+}
+
+func hasContainer(pod *corev1.Pod, container *corev1.Container) bool {
+	for _, c := range pod.Spec.Containers {
+		if container.Name == c.Name && container.Image == c.Image {
+			return true
+		}
+	}
+	return false
+}
+
+func hasInitContainer(pod *corev1.Pod, container *corev1.Container) bool {
+	for _, c := range pod.Spec.InitContainers {
+		if container.Name == c.Name && container.Image == c.Image {
+			return true
+		}
+	}
+	return false
+}
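The defaulter above runs every mutation as an entry in a slice of option functions, aborting admission on the first error. A stripped-down sketch of that pattern in isolation (the names here are illustrative, not part of the patch):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// mutateOption mirrors mutateSparkPodOption with the app argument elided.
type mutateOption func(pod *corev1.Pod) error

// apply runs each option in order; the first failure rejects the whole mutation.
func apply(pod *corev1.Pod, options ...mutateOption) error {
	for _, option := range options {
		if err := option(pod); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	pod := &corev1.Pod{}
	err := apply(pod,
		func(p *corev1.Pod) error { p.Spec.SchedulerName = "volcano"; return nil },
		func(p *corev1.Pod) error {
			if p.Spec.SchedulerName == "" {
				return fmt.Errorf("scheduler not set")
			}
			return nil
		},
	)
	fmt.Println(err, pod.Spec.SchedulerName)
}
```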
diff --git a/pkg/webhook/patch_test.go b/internal/webhook/sparkpod_defaulter_test.go
similarity index 76%
rename from pkg/webhook/patch_test.go
rename to internal/webhook/sparkpod_defaulter_test.go
index 99f821f37c..f81eac24b1 100644
--- a/pkg/webhook/patch_test.go
+++ b/internal/webhook/sparkpod_defaulter_test.go
@@ -17,19 +17,16 @@
 package webhook
 
 import (
-	"encoding/json"
 	"fmt"
 	"testing"
 
-	jsonpatch "github.com/evanphx/json-patch"
 	"github.com/stretchr/testify/assert"
-	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
-	"github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-	"github.com/kubeflow/spark-operator/pkg/config"
+	"github.com/kubeflow/spark-operator/api/v1beta2"
+	"github.com/kubeflow/spark-operator/pkg/common"
 )
 
 func TestPatchSparkPod_OwnerReference(t *testing.T) {
@@ -44,14 +41,14 @@
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "spark-driver",
 			Labels: map[string]string{
-				config.SparkRoleLabel:               config.SparkDriverRole,
-				config.LaunchedBySparkOperatorLabel: "true",
+				common.LabelSparkRole:               common.SparkRoleDriver,
+				common.LabelLaunchedBySparkOperator: "true",
 			},
 		},
 		Spec: corev1.PodSpec{
 			Containers: []corev1.Container{
 				{
-					Name:  config.SparkDriverContainerName,
+					Name:  common.SparkDriverContainerName,
 					Image: "spark-driver:latest",
 				},
 			},
@@ -63,7 +60,7 @@ func TestPatchSparkPod_OwnerReference(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	assert.Equal(t, 1, len(modifiedPod.OwnerReferences))
+	assert.Len(t, modifiedPod.OwnerReferences, 1)
 
 	// Test patching a pod with existing OwnerReference and Volume.
 	pod.OwnerReferences = append(pod.OwnerReferences, metav1.OwnerReference{Name: "owner-reference1"})
@@ -72,7 +69,7 @@ func TestPatchSparkPod_OwnerReference(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	assert.Equal(t, 2, len(modifiedPod.OwnerReferences))
+	assert.Len(t, modifiedPod.OwnerReferences, 2)
 }
 
 func TestPatchSparkPod_Local_Volumes(t *testing.T) {
@@ -121,14 +118,14 @@ func TestPatchSparkPod_Local_Volumes(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "spark-driver",
 			Labels: map[string]string{
-				config.SparkRoleLabel:               config.SparkDriverRole,
-				config.LaunchedBySparkOperatorLabel: "true",
+				common.LabelSparkRole:               common.SparkRoleDriver,
+				common.LabelLaunchedBySparkOperator: "true",
 			},
 		},
 		Spec: corev1.PodSpec{
 			Containers: []corev1.Container{
 				{
-					Name:  config.SparkDriverContainerName,
+					Name:  common.SparkDriverContainerName,
 					Image: "spark-driver:latest",
 				},
 			},
@@ -141,7 +138,7 @@ func TestPatchSparkPod_Local_Volumes(t *testing.T) {
 	}
 
 	// local volume will not be added by webhook
-	assert.Equal(t, 0, len(modifiedPod.Spec.Volumes))
+	assert.Empty(t, modifiedPod.Spec.Volumes)
 }
 
 func TestPatchSparkPod_Volumes_Subpath(t *testing.T) {
@@ -184,14 +181,14 @@ func TestPatchSparkPod_Volumes_Subpath(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "spark-driver",
 			Labels: map[string]string{
-				config.SparkRoleLabel:               config.SparkDriverRole,
-				config.LaunchedBySparkOperatorLabel: "true",
+				common.LabelSparkRole:               common.SparkRoleDriver,
+				common.LabelLaunchedBySparkOperator: "true",
 			},
 		},
 		Spec: corev1.PodSpec{
 			Containers: []corev1.Container{
 				{
-					Name:  config.SparkDriverContainerName,
+					Name:  common.SparkDriverContainerName,
 					Image: "spark-driver:latest",
 				},
 			},
@@ -204,9 +201,9 @@ func TestPatchSparkPod_Volumes_Subpath(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	assert.Equal(t, 1, len(modifiedPod.Spec.Volumes))
+	assert.Len(t, modifiedPod.Spec.Volumes, 1)
 	assert.Equal(t, app.Spec.Volumes[0], modifiedPod.Spec.Volumes[0])
-	assert.Equal(t, 2, len(modifiedPod.Spec.Containers[0].VolumeMounts))
+	assert.Len(t, modifiedPod.Spec.Containers[0].VolumeMounts, 2)
 	assert.Equal(t, app.Spec.Driver.VolumeMounts[0], modifiedPod.Spec.Containers[0].VolumeMounts[0])
 	assert.Equal(t, app.Spec.Driver.VolumeMounts[1], modifiedPod.Spec.Containers[0].VolumeMounts[1])
 }
@@ -255,14 +252,14 @@ func TestPatchSparkPod_Volumes(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "spark-driver",
 			Labels: map[string]string{
-				config.SparkRoleLabel:               config.SparkDriverRole,
-				config.LaunchedBySparkOperatorLabel: "true",
+				common.LabelSparkRole:               common.SparkRoleDriver,
+				common.LabelLaunchedBySparkOperator: "true",
 			},
 		},
 		Spec: corev1.PodSpec{
 			Containers: []corev1.Container{
 				{
-					Name:  config.SparkDriverContainerName,
+					Name:  common.SparkDriverContainerName,
 					Image: "spark-driver:latest",
 				},
 			},
@@ -275,10 +272,10 @@ func TestPatchSparkPod_Volumes(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	assert.Equal(t, 2, len(modifiedPod.Spec.Volumes))
+	assert.Len(t, modifiedPod.Spec.Volumes, 2)
 	assert.Equal(t, app.Spec.Volumes[0], modifiedPod.Spec.Volumes[0])
 	assert.Equal(t, app.Spec.Volumes[1], modifiedPod.Spec.Volumes[1])
-	assert.Equal(t, 2, len(modifiedPod.Spec.Containers[0].VolumeMounts))
+	assert.Len(t, modifiedPod.Spec.Containers[0].VolumeMounts, 2)
 	assert.Equal(t, app.Spec.Driver.VolumeMounts[0], modifiedPod.Spec.Containers[0].VolumeMounts[0])
 	assert.Equal(t, app.Spec.Driver.VolumeMounts[1], modifiedPod.Spec.Containers[0].VolumeMounts[1])
 
@@ -293,10 +290,10 @@ func TestPatchSparkPod_Volumes(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	assert.Equal(t, 3, len(modifiedPod.Spec.Volumes))
+	assert.Len(t, modifiedPod.Spec.Volumes, 3)
 	assert.Equal(t, app.Spec.Volumes[0], modifiedPod.Spec.Volumes[1])
 	assert.Equal(t, app.Spec.Volumes[1], modifiedPod.Spec.Volumes[2])
-	assert.Equal(t, 3, len(modifiedPod.Spec.Containers[0].VolumeMounts))
+	assert.Len(t, modifiedPod.Spec.Containers[0].VolumeMounts, 3)
 	assert.Equal(t, app.Spec.Driver.VolumeMounts[0], modifiedPod.Spec.Containers[0].VolumeMounts[1])
 	assert.Equal(t, app.Spec.Driver.VolumeMounts[1], modifiedPod.Spec.Containers[0].VolumeMounts[2])
 }
@@ -315,7 +312,7 @@ func TestPatchSparkPod_Affinity(t *testing.T) {
 				RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{
 					{
 						LabelSelector: &metav1.LabelSelector{
-							MatchLabels: map[string]string{config.SparkRoleLabel: config.SparkDriverRole},
+							MatchLabels: map[string]string{common.LabelSparkRole: common.SparkRoleDriver},
 						},
 						TopologyKey: "kubernetes.io/hostname",
 					},
@@ -331,14 +328,14 @@ func TestPatchSparkPod_Affinity(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "spark-driver",
 			Labels: map[string]string{
-				config.SparkRoleLabel:               config.SparkDriverRole,
-				config.LaunchedBySparkOperatorLabel: "true",
+				common.LabelSparkRole:               common.SparkRoleDriver,
+				common.LabelLaunchedBySparkOperator: "true",
 			},
 		},
 		Spec: corev1.PodSpec{
 			Containers: []corev1.Container{
 				{
-					Name:  config.SparkDriverContainerName,
+					Name:  common.SparkDriverContainerName,
 					Image: "spark-driver:latest",
 				},
 			},
@@ -351,9 +348,8 @@ func TestPatchSparkPod_Affinity(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	assert.True(t, modifiedPod.Spec.Affinity != nil)
-	assert.Equal(t, 1,
-		len(modifiedPod.Spec.Affinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution))
+	assert.NotNil(t, modifiedPod.Spec.Affinity)
+	assert.Len(t, modifiedPod.Spec.Affinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution, 1)
 	assert.Equal(t, "kubernetes.io/hostname",
 		modifiedPod.Spec.Affinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution[0].TopologyKey)
 }
@@ -380,14 +376,14 @@ func TestPatchSparkPod_ConfigMaps(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "spark-driver",
 			Labels: map[string]string{
-				config.SparkRoleLabel:               config.SparkDriverRole,
-				config.LaunchedBySparkOperatorLabel: "true",
+				common.LabelSparkRole:               common.SparkRoleDriver,
+				common.LabelLaunchedBySparkOperator: "true",
 			},
 		},
 		Spec: corev1.PodSpec{
 			Containers: []corev1.Container{
 				{
-					Name:  config.SparkDriverContainerName,
+					Name:  common.SparkDriverContainerName,
 					Image: "spark-driver:latest",
 				},
 			},
@@ -399,12 +395,12 @@ func TestPatchSparkPod_ConfigMaps(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	assert.Equal(t, 2, len(modifiedPod.Spec.Volumes))
+	assert.Len(t, modifiedPod.Spec.Volumes, 2)
 	assert.Equal(t, "foo-vol", modifiedPod.Spec.Volumes[0].Name)
-	assert.True(t, modifiedPod.Spec.Volumes[0].ConfigMap != nil)
+	assert.NotNil(t, modifiedPod.Spec.Volumes[0].ConfigMap)
 	assert.Equal(t, "bar-vol", modifiedPod.Spec.Volumes[1].Name)
-	assert.True(t, modifiedPod.Spec.Volumes[1].ConfigMap != nil)
-	assert.Equal(t, 2, len(modifiedPod.Spec.Containers[0].VolumeMounts))
+	assert.NotNil(t, modifiedPod.Spec.Volumes[1].ConfigMap)
+	assert.Len(t, modifiedPod.Spec.Containers[0].VolumeMounts, 2)
 	assert.Equal(t, "/path/to/foo", modifiedPod.Spec.Containers[0].VolumeMounts[0].MountPath)
 	assert.Equal(t, "/path/to/bar", modifiedPod.Spec.Containers[0].VolumeMounts[1].MountPath)
 }
@@ -425,14 +421,14 @@ func TestPatchSparkPod_SparkConfigMap(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "spark-driver",
 			Labels: map[string]string{
-				config.SparkRoleLabel:               config.SparkDriverRole,
-				config.LaunchedBySparkOperatorLabel: "true",
+				common.LabelSparkRole:               common.SparkRoleDriver,
+				common.LabelLaunchedBySparkOperator: "true",
 			},
 		},
 		Spec: corev1.PodSpec{
 			Containers: []corev1.Container{
 				{
-					Name:  config.SparkDriverContainerName,
+					Name:  common.SparkDriverContainerName,
 					Image: "spark-driver:latest",
 				},
 			},
@@ -444,13 +440,13 @@ func TestPatchSparkPod_SparkConfigMap(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	assert.Equal(t, 1, len(modifiedPod.Spec.Volumes))
-	assert.Equal(t, config.SparkConfigMapVolumeName, modifiedPod.Spec.Volumes[0].Name)
-	assert.True(t, modifiedPod.Spec.Volumes[0].ConfigMap != nil)
-	assert.Equal(t, 1, len(modifiedPod.Spec.Containers[0].VolumeMounts))
-	assert.Equal(t, config.DefaultSparkConfDir, modifiedPod.Spec.Containers[0].VolumeMounts[0].MountPath)
-	assert.Equal(t, 1, len(modifiedPod.Spec.Containers[0].Env))
-	assert.Equal(t, config.DefaultSparkConfDir, modifiedPod.Spec.Containers[0].Env[0].Value)
+	assert.Len(t, modifiedPod.Spec.Volumes, 1)
+	assert.Equal(t, common.SparkConfigMapVolumeName, modifiedPod.Spec.Volumes[0].Name)
+	assert.NotNil(t, modifiedPod.Spec.Volumes[0].ConfigMap)
+	assert.Len(t, modifiedPod.Spec.Containers[0].VolumeMounts, 1)
+	assert.Equal(t, common.DefaultSparkConfDir, modifiedPod.Spec.Containers[0].VolumeMounts[0].MountPath)
+	assert.Len(t, modifiedPod.Spec.Containers[0].Env, 1)
+	assert.Equal(t, common.DefaultSparkConfDir, modifiedPod.Spec.Containers[0].Env[0].Value)
 }
 
 func TestPatchSparkPod_HadoopConfigMap(t *testing.T) {
@@ -469,14 +465,14 @@ func TestPatchSparkPod_HadoopConfigMap(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "spark-driver",
 			Labels: map[string]string{
-				config.SparkRoleLabel:               config.SparkDriverRole,
-				config.LaunchedBySparkOperatorLabel: "true",
+				common.LabelSparkRole:               common.SparkRoleDriver,
+				common.LabelLaunchedBySparkOperator: "true",
 			},
 		},
 		Spec: corev1.PodSpec{
 			Containers: []corev1.Container{
 				{
-					Name:  config.SparkDriverContainerName,
+					Name:  common.SparkDriverContainerName,
 					Image: "spark-driver:latest",
 				},
 			},
@@ -488,75 +484,75 @@ func TestPatchSparkPod_HadoopConfigMap(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	assert.Equal(t, 1, len(modifiedPod.Spec.Volumes))
-	assert.Equal(t, config.HadoopConfigMapVolumeName, modifiedPod.Spec.Volumes[0].Name)
-	assert.True(t, modifiedPod.Spec.Volumes[0].ConfigMap != nil)
-	assert.Equal(t, 1, len(modifiedPod.Spec.Containers[0].VolumeMounts))
-	assert.Equal(t, config.DefaultHadoopConfDir, modifiedPod.Spec.Containers[0].VolumeMounts[0].MountPath)
-	assert.Equal(t, 1, len(modifiedPod.Spec.Containers[0].Env))
-	assert.Equal(t, config.DefaultHadoopConfDir, modifiedPod.Spec.Containers[0].Env[0].Value)
+	assert.Len(t, modifiedPod.Spec.Volumes, 1)
+	assert.Equal(t, common.HadoopConfigMapVolumeName, modifiedPod.Spec.Volumes[0].Name)
+	assert.NotNil(t, modifiedPod.Spec.Volumes[0].ConfigMap)
+	assert.Len(t, modifiedPod.Spec.Containers[0].VolumeMounts, 1)
+	assert.Equal(t, common.DefaultHadoopConfDir, modifiedPod.Spec.Containers[0].VolumeMounts[0].MountPath)
+	assert.Len(t, modifiedPod.Spec.Containers[0].Env, 1)
+	assert.Equal(t, common.DefaultHadoopConfDir, modifiedPod.Spec.Containers[0].Env[0].Value)
 }
 
-func TestPatchSparkPod_PrometheusConfigMaps(t *testing.T) {
-	var appPort int32 = 9999
-	appPortName := "jmx-exporter"
-	app := &v1beta2.SparkApplication{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: "spark-test",
-			UID:  "spark-test-1",
-		},
-		Spec: v1beta2.SparkApplicationSpec{
-			Monitoring: &v1beta2.MonitoringSpec{
-				Prometheus: &v1beta2.PrometheusSpec{
-					JmxExporterJar: "",
-					Port:           &appPort,
-					PortName:       &appPortName,
-					ConfigFile:     nil,
-					Configuration:  nil,
-				},
-				ExposeDriverMetrics: true,
-			},
-		},
-	}
-
-	pod := &corev1.Pod{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: "spark-driver",
-			Labels: map[string]string{
-				config.SparkRoleLabel:               config.SparkDriverRole,
-				config.LaunchedBySparkOperatorLabel: "true",
-			},
-		},
-		Spec: corev1.PodSpec{
-			Containers: []corev1.Container{
-				{
-					Name:  config.SparkDriverContainerName,
-					Image: "spark-driver:latest",
-				},
-			},
-		},
-	}
-
-	modifiedPod, err := getModifiedPod(pod, app)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	expectedConfigMapName := config.GetPrometheusConfigMapName(app)
-	expectedVolumeName := expectedConfigMapName + "-vol"
-	expectedContainerPort := *app.Spec.Monitoring.Prometheus.Port
-	expectedContainerPortName := *app.Spec.Monitoring.Prometheus.PortName
-	assert.Equal(t, 1, len(modifiedPod.Spec.Volumes))
-	assert.Equal(t, expectedVolumeName, modifiedPod.Spec.Volumes[0].Name)
-	assert.True(t, modifiedPod.Spec.Volumes[0].ConfigMap != nil)
-	assert.Equal(t, expectedConfigMapName, modifiedPod.Spec.Volumes[0].ConfigMap.Name)
-	assert.Equal(t, 1, len(modifiedPod.Spec.Containers[0].VolumeMounts))
-	assert.Equal(t, expectedVolumeName, modifiedPod.Spec.Containers[0].VolumeMounts[0].Name)
-	assert.Equal(t, config.PrometheusConfigMapMountPath, modifiedPod.Spec.Containers[0].VolumeMounts[0].MountPath)
-	assert.Equal(t, expectedContainerPort, modifiedPod.Spec.Containers[0].Ports[0].ContainerPort)
-	assert.Equal(t, expectedContainerPortName, modifiedPod.Spec.Containers[0].Ports[0].Name)
-	assert.Equal(t, corev1.Protocol(config.DefaultPrometheusPortProtocol), modifiedPod.Spec.Containers[0].Ports[0].Protocol)
-}
+// func TestPatchSparkPod_PrometheusConfigMaps(t *testing.T) {
+// 	var appPort int32 = 9999
+// 	appPortName := "jmx-exporter"
+// 	app := &v1beta2.SparkApplication{
+// 		ObjectMeta: metav1.ObjectMeta{
+// 			Name: "spark-test",
+// 			UID:  "spark-test-1",
+// 		},
+// 		Spec: v1beta2.SparkApplicationSpec{
+// 			Monitoring: &v1beta2.MonitoringSpec{
+// 				Prometheus: &v1beta2.PrometheusSpec{
+// 					JmxExporterJar: "",
+// 					Port:           &appPort,
+// 					PortName:       &appPortName,
+// 					ConfigFile:     nil,
+// 					Configuration:  nil,
+// 				},
+// 				ExposeDriverMetrics: true,
+// 			},
+// 		},
+// 	}

+// 	pod := &corev1.Pod{
+// 		ObjectMeta: metav1.ObjectMeta{
+// 			Name: "spark-driver",
+// 			Labels: map[string]string{
+// 				common.LabelSparkRole:               common.SparkRoleDriver,
+// 				common.LabelLaunchedBySparkOperator: "true",
+// 			},
+// 		},
+// 		Spec: corev1.PodSpec{
+// 			Containers: []corev1.Container{
+// 				{
+// 					Name:  common.SparkDriverContainerName,
+// 					Image: "spark-driver:latest",
+// 				},
+// 			},
+// 		},
+// 	}

+// 	modifiedPod, err := getModifiedPod(pod, app)
+// 	if err != nil {
+// 		t.Fatal(err)
+// 	}

+// 	expectedConfigMapName := GetPrometheusConfigMapName(app)
+// 	expectedVolumeName := expectedConfigMapName + "-vol"
+// 	expectedContainerPort := *app.Spec.Monitoring.Prometheus.Port
+// 	expectedContainerPortName := *app.Spec.Monitoring.Prometheus.PortName
+// 	assert.Len(t, modifiedPod.Spec.Volumes, 1)
+// 	assert.Equal(t, expectedVolumeName, modifiedPod.Spec.Volumes[0].Name)
+// 	assert.NotNil(t, modifiedPod.Spec.Volumes[0].ConfigMap)
+// 	assert.Equal(t, expectedConfigMapName, modifiedPod.Spec.Volumes[0].ConfigMap.Name)
+// 	assert.Len(t, modifiedPod.Spec.Containers[0].VolumeMounts, 1)
+// 	assert.Equal(t, expectedVolumeName, modifiedPod.Spec.Containers[0].VolumeMounts[0].Name)
+// 	assert.Equal(t, common.PrometheusConfigMapMountPath, modifiedPod.Spec.Containers[0].VolumeMounts[0].MountPath)
+// 	assert.Equal(t, expectedContainerPort, modifiedPod.Spec.Containers[0].Ports[0].ContainerPort)
+// 	assert.Equal(t, expectedContainerPortName, modifiedPod.Spec.Containers[0].Ports[0].Name)
+// 	assert.Equal(t, corev1.Protocol(common.DefaultPrometheusPortProtocol), modifiedPod.Spec.Containers[0].Ports[0].Protocol)
+// }
 
 func TestPatchSparkPod_Tolerations(t *testing.T) {
@@ -591,14 +587,14 @@ func TestPatchSparkPod_Tolerations(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "spark-driver",
 			Labels: map[string]string{
-				config.SparkRoleLabel:               config.SparkDriverRole,
-				config.LaunchedBySparkOperatorLabel: "true",
+				common.LabelSparkRole:               common.SparkRoleDriver,
+				common.LabelLaunchedBySparkOperator: "true",
 			},
 		},
 		Spec: corev1.PodSpec{
 			Containers: []corev1.Container{
 				{
-					Name:  config.SparkDriverContainerName,
+					Name:  common.SparkDriverContainerName,
 					Image: "spark-driver:latest",
 				},
 			},
@@ -610,7 +606,7 @@ func TestPatchSparkPod_Tolerations(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	assert.Equal(t, 2, len(modifiedPod.Spec.Tolerations))
+	assert.Len(t, modifiedPod.Spec.Tolerations, 2)
 	assert.Equal(t, app.Spec.Driver.Tolerations[0], modifiedPod.Spec.Tolerations[0])
 	assert.Equal(t, app.Spec.Driver.Tolerations[1], modifiedPod.Spec.Tolerations[1])
 }
@@ -655,14 +651,14 @@ func TestPatchSparkPod_SecurityContext(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "spark-driver",
 			Labels: map[string]string{
-				config.SparkRoleLabel:               config.SparkDriverRole,
-				config.LaunchedBySparkOperatorLabel: "true",
+				common.LabelSparkRole:               common.SparkRoleDriver,
+				common.LabelLaunchedBySparkOperator: "true",
 			},
 		},
 		Spec: corev1.PodSpec{
 			Containers: []corev1.Container{
 				{
-					Name:  config.SparkDriverContainerName,
+					Name:  common.SparkDriverContainerName,
 					Image: "spark-driver:latest",
 				},
 			},
@@ -680,14 +676,14 @@ func TestPatchSparkPod_SecurityContext(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "spark-executor",
 			Labels: map[string]string{
-				config.SparkRoleLabel:               config.SparkExecutorRole,
-				config.LaunchedBySparkOperatorLabel: "true",
+				common.LabelSparkRole:               common.SparkRoleExecutor,
+				common.LabelLaunchedBySparkOperator: "true",
 			},
 		},
 		Spec: corev1.PodSpec{
 			Containers: []corev1.Container{
 				{
-					Name:  config.SparkExecutorContainerName,
+					Name:  common.SparkExecutorContainerName,
 					Image: "spark-executor:latest",
 				},
 			},
@@ -727,15 +723,15 @@ func TestPatchSparkPod_SchedulerName(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "spark-driver",
 			Labels: map[string]string{
-				config.SparkRoleLabel:               config.SparkDriverRole,
-				config.LaunchedBySparkOperatorLabel: "true",
+				common.LabelSparkRole:               common.SparkRoleDriver,
+				common.LabelLaunchedBySparkOperator: "true",
 			},
 		},
 		Spec: corev1.PodSpec{
 			SchedulerName: defaultScheduler,
 			Containers: []corev1.Container{
 				{
-					Name:  config.SparkDriverContainerName,
+					Name:  common.SparkDriverContainerName,
 					Image: "spark-driver:latest",
 				},
 			},
@@ -753,15 +749,15 @@ func TestPatchSparkPod_SchedulerName(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "spark-executor",
 			Labels: map[string]string{
-				config.SparkRoleLabel:               config.SparkExecutorRole,
-				config.LaunchedBySparkOperatorLabel: "true",
+				common.LabelSparkRole:               common.SparkRoleExecutor,
+				common.LabelLaunchedBySparkOperator: "true",
 			},
 		},
 		Spec: corev1.PodSpec{
 			SchedulerName: defaultScheduler,
 			Containers: []corev1.Container{
 				{
-					Name:  config.SparkExecutorContainerName,
+					Name:  common.SparkExecutorContainerName,
 					Image: "spark-executor:latest",
 				},
 			},
@@ -801,14 +797,14 @@ func TestPatchSparkPod_PriorityClassName(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "spark-driver",
 			Labels: map[string]string{
-				config.SparkRoleLabel:               config.SparkDriverRole,
-				config.LaunchedBySparkOperatorLabel: "true",
+				common.LabelSparkRole:               common.SparkRoleDriver,
+				common.LabelLaunchedBySparkOperator: "true",
 			},
 		},
 		Spec: corev1.PodSpec{
 			Containers: []corev1.Container{
 				{
-					Name:  config.SparkDriverContainerName,
+					Name:  common.SparkDriverContainerName,
 					Image: "spark-driver:latest",
 				},
 			},
@@ -822,20 +818,20 @@ func TestPatchSparkPod_PriorityClassName(t *testing.T) {
 	//Driver priorityClassName should be populated when specified
 	assert.Equal(t, priorityClassName, modifiedDriverPod.Spec.PriorityClassName)
 
-	var defaultPriority int32 = 0
+	var defaultPriority int32
 	var defaultPolicy corev1.PreemptionPolicy = corev1.PreemptLowerPriority
 	executorPod := &corev1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "spark-executor",
 			Labels: map[string]string{
-				config.SparkRoleLabel:               config.SparkExecutorRole,
-				config.LaunchedBySparkOperatorLabel: "true",
+				common.LabelSparkRole:               common.SparkRoleExecutor,
+				common.LabelLaunchedBySparkOperator: "true",
 			},
 		},
 		Spec: corev1.PodSpec{
 			Containers: []corev1.Container{
 				{
-					Name:  config.SparkExecutorContainerName,
+					Name:  common.SparkExecutorContainerName,
 					Image: "spark-executor:latest",
 				},
 			},
@@ -896,14 +892,14 @@ func TestPatchSparkPod_Sidecars(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "spark-driver",
 			Labels: map[string]string{
-				config.SparkRoleLabel:               config.SparkDriverRole,
-				config.LaunchedBySparkOperatorLabel: "true",
+				common.LabelSparkRole:               common.SparkRoleDriver,
+				common.LabelLaunchedBySparkOperator: "true",
 			},
 		},
 		Spec: corev1.PodSpec{
 			Containers: []corev1.Container{
 				{
-					Name:  config.SparkDriverContainerName,
+					Name:  common.SparkDriverContainerName,
 					Image: "spark-driver:latest",
 				},
 			},
@@ -914,7 +910,7 @@ func TestPatchSparkPod_Sidecars(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	assert.Equal(t, 3, len(modifiedDriverPod.Spec.Containers))
+	assert.Len(t, modifiedDriverPod.Spec.Containers, 3)
 	assert.Equal(t, "sidecar1", modifiedDriverPod.Spec.Containers[1].Name)
 	assert.Equal(t, "sidecar2", modifiedDriverPod.Spec.Containers[2].Name)
 
@@ -922,14 +918,14 @@ func TestPatchSparkPod_Sidecars(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "spark-executor",
 			Labels: map[string]string{
-				config.SparkRoleLabel:               config.SparkExecutorRole,
-				config.LaunchedBySparkOperatorLabel: "true",
+				common.LabelSparkRole:               common.SparkRoleExecutor,
+				common.LabelLaunchedBySparkOperator: "true",
 			},
 		},
 		Spec: corev1.PodSpec{
 			Containers: []corev1.Container{
 				{
-					Name:  config.SparkExecutorContainerName,
+					Name:  common.SparkExecutorContainerName,
 					Image: "spark-executor:latest",
 				},
 			},
@@ -940,7 +936,7 @@ func TestPatchSparkPod_Sidecars(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	assert.Equal(t, 3, len(modifiedExecutorPod.Spec.Containers))
+	assert.Len(t, modifiedExecutorPod.Spec.Containers, 3)
 	assert.Equal(t, "sidecar1", modifiedExecutorPod.Spec.Containers[1].Name)
 	assert.Equal(t, "sidecar2", modifiedExecutorPod.Spec.Containers[2].Name)
 }
@@ -987,14 +983,14 @@ func TestPatchSparkPod_InitContainers(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "spark-driver",
 			Labels: map[string]string{
-				config.SparkRoleLabel:               config.SparkDriverRole,
-				config.LaunchedBySparkOperatorLabel: "true",
+				common.LabelSparkRole:               common.SparkRoleDriver,
+				common.LabelLaunchedBySparkOperator: "true",
 			},
 		},
 		Spec: corev1.PodSpec{
 			Containers: []corev1.Container{
 				{
-					Name:  config.SparkDriverContainerName,
+					Name:  common.SparkDriverContainerName,
 					Image: "spark-driver:latest",
 				},
 			},
@@ -1005,7 +1001,7 @@ func TestPatchSparkPod_InitContainers(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	assert.Equal(t, 2, len(modifiedDriverPod.Spec.InitContainers))
+	assert.Len(t, modifiedDriverPod.Spec.InitContainers, 2)
 	assert.Equal(t, "init-container1", modifiedDriverPod.Spec.InitContainers[0].Name)
 	assert.Equal(t, "init-container2", modifiedDriverPod.Spec.InitContainers[1].Name)
 
@@ -1013,14 +1009,14 @@ func TestPatchSparkPod_InitContainers(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "spark-executor",
 			Labels: map[string]string{
-				config.SparkRoleLabel:               config.SparkExecutorRole,
-				config.LaunchedBySparkOperatorLabel: "true",
+				common.LabelSparkRole:               common.SparkRoleExecutor,
+				common.LabelLaunchedBySparkOperator: "true",
 			},
 		},
 		Spec: corev1.PodSpec{
 			Containers: []corev1.Container{
 				{
-					Name:  config.SparkExecutorContainerName,
+					Name:  common.SparkExecutorContainerName,
 					Image: "spark-executor:latest",
 				},
 			},
@@ -1031,7 +1027,7 @@ func TestPatchSparkPod_InitContainers(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	assert.Equal(t, 2, len(modifiedExecutorPod.Spec.InitContainers))
+	assert.Len(t, modifiedExecutorPod.Spec.InitContainers, 2)
 	assert.Equal(t, "init-container1", modifiedExecutorPod.Spec.InitContainers[0].Name)
 	assert.Equal(t, "init-container2", modifiedExecutorPod.Spec.InitContainers[1].Name)
 }
@@ -1065,14 +1061,14 @@ func TestPatchSparkPod_DNSConfig(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "spark-driver",
 			Labels: map[string]string{
-				config.SparkRoleLabel:               config.SparkDriverRole,
-				config.LaunchedBySparkOperatorLabel: "true",
+				common.LabelSparkRole:               common.SparkRoleDriver,
+				common.LabelLaunchedBySparkOperator: "true",
 			},
 		},
 		Spec: corev1.PodSpec{
 			Containers: []corev1.Container{
 				{
-					Name:  config.SparkDriverContainerName,
+					Name:  common.SparkDriverContainerName,
 					Image: "spark-driver:latest",
 				},
 			},
@@ -1090,14 +1086,14 @@ func TestPatchSparkPod_DNSConfig(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "spark-executor",
 			Labels: map[string]string{
-				config.SparkRoleLabel:               config.SparkExecutorRole,
-				config.LaunchedBySparkOperatorLabel: "true",
+				common.LabelSparkRole:               common.SparkRoleExecutor,
+				common.LabelLaunchedBySparkOperator: "true",
 			},
 		},
 		Spec: corev1.PodSpec{
 			Containers: []corev1.Container{
 				{
-					Name:  config.SparkExecutorContainerName,
+					Name:  common.SparkExecutorContainerName,
 					Image: "spark-executor:latest",
 				},
 			},
@@ -1138,14 +1134,14 @@ func TestPatchSparkPod_NodeSector(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "spark-driver",
 			Labels: map[string]string{
-				config.SparkRoleLabel:               config.SparkDriverRole,
-				config.LaunchedBySparkOperatorLabel: "true",
+				common.LabelSparkRole:               common.SparkRoleDriver,
+				common.LabelLaunchedBySparkOperator: "true",
 			},
 		},
 		Spec: corev1.PodSpec{
 			Containers: []corev1.Container{
 				{
-					Name:  config.SparkDriverContainerName,
+					Name:  common.SparkDriverContainerName,
 					Image: "spark-driver:latest",
 				},
 			},
@@ -1156,7 +1152,7 @@ func TestPatchSparkPod_NodeSector(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	assert.Equal(t, 2, len(modifiedDriverPod.Spec.NodeSelector))
+	assert.Len(t, modifiedDriverPod.Spec.NodeSelector, 2)
 	assert.Equal(t, "ssd", modifiedDriverPod.Spec.NodeSelector["disk"])
 	assert.Equal(t, "secondvalue", modifiedDriverPod.Spec.NodeSelector["secondkey"])
 
@@ -1164,14 +1160,14 @@ func TestPatchSparkPod_NodeSector(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "spark-executor",
 			Labels: map[string]string{
-				config.SparkRoleLabel:               config.SparkExecutorRole,
-				config.LaunchedBySparkOperatorLabel: "true",
+				common.LabelSparkRole:               common.SparkRoleExecutor,
+				common.LabelLaunchedBySparkOperator: "true",
 			},
 		},
 		Spec: corev1.PodSpec{
 			Containers: []corev1.Container{
 				{
-					Name:  config.SparkExecutorContainerName,
+					Name:  common.SparkExecutorContainerName,
 					Image: "spark-executor:latest",
 				},
 			},
@@ -1182,7 +1178,7 @@ func TestPatchSparkPod_NodeSector(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	assert.Equal(t, 2, len(modifiedExecutorPod.Spec.NodeSelector))
+	assert.Len(t, modifiedExecutorPod.Spec.NodeSelector, 2)
 	assert.Equal(t, "gpu", modifiedExecutorPod.Spec.NodeSelector["nodeType"])
 	assert.Equal(t, "secondvalue", modifiedExecutorPod.Spec.NodeSelector["secondkey"])
 }
@@ -1328,14 +1324,14 @@ func TestPatchSparkPod_GPU(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "spark-driver",
 			Labels: map[string]string{
-				config.SparkRoleLabel:               config.SparkDriverRole,
-				config.LaunchedBySparkOperatorLabel: "true",
+				common.LabelSparkRole:               common.SparkRoleDriver,
+				common.LabelLaunchedBySparkOperator: "true",
 			},
 		},
 		Spec: corev1.PodSpec{
 			Containers: []corev1.Container{
 				{
-					Name:  config.SparkDriverContainerName,
+					Name:  common.SparkDriverContainerName,
 					Image: "spark-driver:latest",
 				},
 			},
@@ -1355,14 +1351,14 @@ func TestPatchSparkPod_GPU(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "spark-executor",
 			Labels: map[string]string{
-				config.SparkRoleLabel:               config.SparkExecutorRole,
-				config.LaunchedBySparkOperatorLabel: "true",
+				common.LabelSparkRole:               common.SparkRoleExecutor,
+				common.LabelLaunchedBySparkOperator: "true",
 			},
 		},
 		Spec: corev1.PodSpec{
 			Containers: []corev1.Container{
 				{
-					Name:  config.SparkExecutorContainerName,
+					Name:  common.SparkExecutorContainerName,
 					Image: "spark-executor:latest",
 				},
 			},
@@ -1411,14 +1407,14 @@ func TestPatchSparkPod_HostNetwork(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "spark-driver",
 			Labels: map[string]string{
-				config.SparkRoleLabel:               config.SparkDriverRole,
-				config.LaunchedBySparkOperatorLabel: "true",
+				common.LabelSparkRole:               common.SparkRoleDriver,
+
common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkDriverContainerName, + Name: common.SparkDriverContainerName, Image: "spark-driver:latest", }, }, @@ -1430,23 +1426,23 @@ func TestPatchSparkPod_HostNetwork(t *testing.T) { t.Fatal(err) } if test == nil || *test == false { - assert.Equal(t, false, modifiedDriverPod.Spec.HostNetwork) + assert.False(t, modifiedDriverPod.Spec.HostNetwork) } else { - assert.Equal(t, true, modifiedDriverPod.Spec.HostNetwork) + assert.True(t, modifiedDriverPod.Spec.HostNetwork) assert.Equal(t, corev1.DNSClusterFirstWithHostNet, modifiedDriverPod.Spec.DNSPolicy) } executorPod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "spark-executor", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleExecutor, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkExecutorContainerName, + Name: common.SparkExecutorContainerName, Image: "spark-executor:latest", }, }, @@ -1458,9 +1454,9 @@ func TestPatchSparkPod_HostNetwork(t *testing.T) { t.Fatal(err) } if test == nil || *test == false { - assert.Equal(t, false, modifiedExecutorPod.Spec.HostNetwork) + assert.False(t, modifiedExecutorPod.Spec.HostNetwork) } else { - assert.Equal(t, true, modifiedExecutorPod.Spec.HostNetwork) + assert.True(t, modifiedExecutorPod.Spec.HostNetwork) assert.Equal(t, corev1.DNSClusterFirstWithHostNet, modifiedExecutorPod.Spec.DNSPolicy) } } @@ -1505,14 +1501,14 @@ func TestPatchSparkPod_Env(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-executor", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleExecutor, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkExecutorContainerName, + Name: common.SparkExecutorContainerName, Image: "spark-driver:latest", }, }, @@ -1523,14 +1519,14 @@ func TestPatchSparkPod_Env(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-driver", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleDriver, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkDriverContainerName, + Name: common.SparkDriverContainerName, Image: "spark-driver:latest", }, }, @@ -1542,20 +1538,20 @@ func TestPatchSparkPod_Env(t *testing.T) { t.Fatal(err) } - assert.Equal(t, 1, len(modifiedExecutorPod.Spec.Containers[0].Env)) + assert.Len(t, modifiedExecutorPod.Spec.Containers[0].Env, 1) assert.Equal(t, exeEnvKey, modifiedExecutorPod.Spec.Containers[0].Env[0].Name) assert.Equal(t, exeEnvVal, modifiedExecutorPod.Spec.Containers[0].Env[0].Value) - assert.True(t, modifiedExecutorPod.Spec.Containers[0].Env[0].ValueFrom == nil) + assert.Nil(t, modifiedExecutorPod.Spec.Containers[0].Env[0].ValueFrom) modifiedDriverPod, err := getModifiedPod(driverPod, app) if err != nil { t.Fatal(err) } - assert.Equal(t, 1, len(modifiedDriverPod.Spec.Containers[0].Env)) + assert.Len(t, modifiedDriverPod.Spec.Containers[0].Env, 1) assert.Equal(t, drvEnvKey, modifiedDriverPod.Spec.Containers[0].Env[0].Name) assert.Equal(t, drvEnvVal, 
modifiedDriverPod.Spec.Containers[0].Env[0].Value) - assert.True(t, modifiedDriverPod.Spec.Containers[0].Env[0].ValueFrom == nil) + assert.Nil(t, modifiedDriverPod.Spec.Containers[0].Env[0].ValueFrom) } func TestPatchSparkPod_EnvFrom(t *testing.T) { @@ -1615,14 +1611,14 @@ func TestPatchSparkPod_EnvFrom(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-driver", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleDriver, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkDriverContainerName, + Name: common.SparkDriverContainerName, Image: "spark-driver:latest", }, }, @@ -1633,14 +1629,14 @@ func TestPatchSparkPod_EnvFrom(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-executor", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleExecutor, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkExecutorContainerName, + Name: common.SparkExecutorContainerName, Image: "spark-driver:latest", }, }, @@ -1651,7 +1647,7 @@ func TestPatchSparkPod_EnvFrom(t *testing.T) { if err != nil { t.Fatal(err) } - assert.Equal(t, 2, len(modifiedDriverPod.Spec.Containers[0].EnvFrom)) + assert.Len(t, modifiedDriverPod.Spec.Containers[0].EnvFrom, 2) assert.Equal(t, configMapName, modifiedDriverPod.Spec.Containers[0].EnvFrom[0].ConfigMapRef.Name) assert.Equal(t, secretName, modifiedDriverPod.Spec.Containers[0].EnvFrom[1].SecretRef.Name) @@ -1659,7 +1655,7 @@ func TestPatchSparkPod_EnvFrom(t *testing.T) { if err != nil { t.Fatal(err) } - assert.Equal(t, 2, len(modifiedExecutorPod.Spec.Containers[0].EnvFrom)) + assert.Len(t, modifiedExecutorPod.Spec.Containers[0].EnvFrom, 2) assert.Equal(t, configMapName, modifiedExecutorPod.Spec.Containers[0].EnvFrom[0].ConfigMapRef.Name) assert.Equal(t, secretName, modifiedExecutorPod.Spec.Containers[0].EnvFrom[1].SecretRef.Name) } @@ -1694,14 +1690,14 @@ func TestPatchSparkPod_GracePeriodSeconds(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-driver", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleDriver, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkDriverContainerName, + Name: common.SparkDriverContainerName, Image: "spark-driver:latest", }, }, @@ -1713,7 +1709,7 @@ func TestPatchSparkPod_GracePeriodSeconds(t *testing.T) { t.Fatal(err) } if test == nil { - assert.True(t, modifiedDriverPod.Spec.TerminationGracePeriodSeconds == nil) + assert.Nil(t, modifiedDriverPod.Spec.TerminationGracePeriodSeconds) } else { assert.Equal(t, int64(60), *modifiedDriverPod.Spec.TerminationGracePeriodSeconds) } @@ -1722,14 +1718,14 @@ func TestPatchSparkPod_GracePeriodSeconds(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-executor", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleExecutor, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkExecutorContainerName, + Name: 
common.SparkExecutorContainerName, Image: "spark-executor:latest", }, }, @@ -1741,7 +1737,7 @@ func TestPatchSparkPod_GracePeriodSeconds(t *testing.T) { t.Fatal(err) } if test == nil { - assert.True(t, modifiedDriverPod.Spec.TerminationGracePeriodSeconds == nil) + assert.Nil(t, modifiedExecPod.Spec.TerminationGracePeriodSeconds) } else { assert.Equal(t, int64(60), *modifiedExecPod.Spec.TerminationGracePeriodSeconds) } @@ -1778,14 +1774,14 @@ func TestPatchSparkPod_Lifecycle(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-driver", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleDriver, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkDriverContainerName, + Name: common.SparkDriverContainerName, Image: "spark-driver:latest", }, }, @@ -1796,14 +1792,14 @@ func TestPatchSparkPod_Lifecycle(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-executor", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleExecutor, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkExecutorContainerName, + Name: common.SparkExecutorContainerName, Image: "spark-executor:latest", }, }, @@ -1822,31 +1818,12 @@ func TestPatchSparkPod_Lifecycle(t *testing.T) { assert.Equal(t, postStartTest, modifiedExecutorPod.Spec.Containers[0].Lifecycle.PostStart.Exec) } -func getModifiedPod(pod *corev1.Pod, app *v1beta2.SparkApplication) (*corev1.Pod, error) { - patchOps := patchSparkPod(pod.DeepCopy(), app) - patchBytes, err := json.Marshal(patchOps) - if err != nil { - return nil, err - } - patch, err := jsonpatch.DecodePatch(patchBytes) - if err != nil { - return nil, err - } - - original, err := json.Marshal(pod) - if err != nil { - return nil, err - } - modified, err := patch.Apply(original) - if err != nil { - return nil, err - } - modifiedPod := &corev1.Pod{} - if err := json.Unmarshal(modified, modifiedPod); err != nil { +func getModifiedPod(old *corev1.Pod, app *v1beta2.SparkApplication) (*corev1.Pod, error) { + newPod := old.DeepCopy() + if err := mutateSparkPod(newPod, app); err != nil { return nil, err } - - return modifiedPod, nil + return newPod, nil } func TestPatchSparkPod_HostAliases(t *testing.T) { @@ -1899,14 +1876,14 @@ func TestPatchSparkPod_HostAliases(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-driver", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleDriver, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkDriverContainerName, + Name: common.SparkDriverContainerName, Image: "spark-driver:latest", }, }, @@ -1917,7 +1894,7 @@ func TestPatchSparkPod_HostAliases(t *testing.T) { if err != nil { t.Fatal(err) } - assert.Equal(t, 4, len(modifiedDriverPod.Spec.HostAliases)) + assert.Len(t, modifiedDriverPod.Spec.HostAliases, 4) assert.Equal(t, "127.0.0.1", modifiedDriverPod.Spec.HostAliases[0].IP) assert.Equal(t, "192.168.0.1", modifiedDriverPod.Spec.HostAliases[1].IP) assert.Equal(t, "192.168.0.2", modifiedDriverPod.Spec.HostAliases[2].IP) @@ -1927,14 +1904,14 @@ func TestPatchSparkPod_HostAliases(t 
*testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-executor", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleExecutor, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkExecutorContainerName, + Name: common.SparkExecutorContainerName, Image: "spark-executor:latest", }, }, @@ -1945,7 +1922,7 @@ func TestPatchSparkPod_HostAliases(t *testing.T) { if err != nil { t.Fatal(err) } - assert.Equal(t, 2, len(modifiedExecutorPod.Spec.HostAliases)) + assert.Len(t, modifiedExecutorPod.Spec.HostAliases, 2) assert.Equal(t, "127.0.0.1", modifiedExecutorPod.Spec.HostAliases[0].IP) assert.Equal(t, "192.168.0.1", modifiedExecutorPod.Spec.HostAliases[1].IP) } @@ -1976,14 +1953,14 @@ func TestPatchSparkPod_Ports(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-driver", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleDriver, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkDriverContainerName, + Name: common.SparkDriverContainerName, Image: "spark-driver:latest", }, }, @@ -1995,7 +1972,7 @@ func TestPatchSparkPod_Ports(t *testing.T) { t.Fatal(err) } - assert.Equal(t, 2, len(modifiedDriverPod.Spec.Containers[0].Ports)) + assert.Len(t, modifiedDriverPod.Spec.Containers[0].Ports, 2) assert.Equal(t, "driverPort1", modifiedDriverPod.Spec.Containers[0].Ports[0].Name) assert.Equal(t, "driverPort2", modifiedDriverPod.Spec.Containers[0].Ports[1].Name) assert.Equal(t, int32(8080), modifiedDriverPod.Spec.Containers[0].Ports[0].ContainerPort) @@ -2005,14 +1982,14 @@ func TestPatchSparkPod_Ports(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-executor", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleExecutor, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkExecutorContainerName, + Name: common.SparkExecutorContainerName, Image: "spark-executor:latest", }, }, @@ -2023,7 +2000,7 @@ func TestPatchSparkPod_Ports(t *testing.T) { if err != nil { t.Fatal(err) } - assert.Equal(t, 2, len(modifiedExecutorPod.Spec.Containers[0].Ports)) + assert.Len(t, modifiedExecutorPod.Spec.Containers[0].Ports, 2) assert.Equal(t, "executorPort1", modifiedExecutorPod.Spec.Containers[0].Ports[0].Name) assert.Equal(t, "executorPort2", modifiedExecutorPod.Spec.Containers[0].Ports[1].Name) assert.Equal(t, int32(8082), modifiedExecutorPod.Spec.Containers[0].Ports[0].ContainerPort) @@ -2046,8 +2023,8 @@ func TestPatchSparkPod_ShareProcessNamespace(t *testing.T) { }, } - var shareProcessNamespaceTrue = true - var shareProcessNamespaceFalse = false + shareProcessNamespaceTrue := true + shareProcessNamespaceFalse := false tests := []*bool{ nil, &shareProcessNamespaceTrue, @@ -2055,21 +2032,20 @@ func TestPatchSparkPod_ShareProcessNamespace(t *testing.T) { } for _, test := range tests { - app.Spec.Driver.ShareProcessNamespace = test app.Spec.Executor.ShareProcessNamespace = test driverPod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "spark-driver", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - 
config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleDriver, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkDriverContainerName, + Name: common.SparkDriverContainerName, Image: "spark-driver:latest", }, }, @@ -2080,14 +2056,14 @@ func TestPatchSparkPod_ShareProcessNamespace(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "spark-executor", Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.LaunchedBySparkOperatorLabel: "true", + common.LabelSparkRole: common.SparkRoleExecutor, + common.LabelLaunchedBySparkOperator: "true", }, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { - Name: config.SparkExecutorContainerName, + Name: common.SparkExecutorContainerName, Image: "spark-executor:latest", }, }, @@ -2108,8 +2084,8 @@ func TestPatchSparkPod_ShareProcessNamespace(t *testing.T) { assert.Nil(t, modifiedDriverPod.Spec.ShareProcessNamespace) assert.Nil(t, modifiedExecutorPod.Spec.ShareProcessNamespace) } else { - assert.Equal(t, true, *modifiedDriverPod.Spec.ShareProcessNamespace) - assert.Equal(t, true, *modifiedExecutorPod.Spec.ShareProcessNamespace) + assert.True(t, *modifiedDriverPod.Spec.ShareProcessNamespace) + assert.True(t, *modifiedExecutorPod.Spec.ShareProcessNamespace) } } } diff --git a/internal/webhook/suite_test.go b/internal/webhook/suite_test.go new file mode 100644 index 0000000000..4ce5dc1fee --- /dev/null +++ b/internal/webhook/suite_test.go @@ -0,0 +1,150 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhook_test + +import ( + "context" + "crypto/tls" + "fmt" + "net" + "path/filepath" + "runtime" + "testing" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + admissionv1 "k8s.io/api/admission/v1" + "k8s.io/client-go/rest" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + "sigs.k8s.io/controller-runtime/pkg/webhook" + + "github.com/kubeflow/spark-operator/api/v1beta1" + "github.com/kubeflow/spark-operator/api/v1beta2" + "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/scheme" + // +kubebuilder:scaffold:imports +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
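+// Suite-level state shared by the specs: cfg holds the rest.Config for the envtest control plane started in BeforeSuite, k8sClient is the client the specs use against that API server, and cancel stops the manager goroutine before AfterSuite tears the test environment down.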
+ +var cfg *rest.Config +var k8sClient client.Client +var testEnv *envtest.Environment +var ctx context.Context +var cancel context.CancelFunc + +func TestWebhooks(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Webhook Suite") +} + +var _ = BeforeSuite(func() { + log.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + ctx, cancel = context.WithCancel(context.TODO()) + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crd", "bases")}, + ErrorIfCRDPathMissing: false, + + // The BinaryAssetsDirectory is only required if you want to run the tests directly + // without calling the makefile target test. If not informed, it will look for the + // default path defined in controller-runtime, which is /usr/local/kubebuilder/. + // Note that you must have the required binaries set up under the bin directory to perform + // the tests directly. When we run make test, it will be set up and used automatically. + BinaryAssetsDirectory: filepath.Join("..", "..", "..", "bin", "k8s", + fmt.Sprintf("1.29.3-%s-%s", runtime.GOOS, runtime.GOARCH)), + + WebhookInstallOptions: envtest.WebhookInstallOptions{ + Paths: []string{filepath.Join("..", "..", "config", "webhook")}, + }, + } + + var err error + // cfg is defined in this file globally. + cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + err = v1beta2.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + err = v1beta1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + // +kubebuilder:scaffold:scheme + + err = admissionv1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + + // start webhook server using Manager + webhookInstallOptions := &testEnv.WebhookInstallOptions + mgr, err := ctrl.NewManager(cfg, ctrl.Options{ + Scheme: scheme.Scheme, + WebhookServer: webhook.NewServer(webhook.Options{ + Host: webhookInstallOptions.LocalServingHost, + Port: webhookInstallOptions.LocalServingPort, + CertDir: webhookInstallOptions.LocalServingCertDir, + }), + LeaderElection: false, + Metrics: metricsserver.Options{BindAddress: "0"}, + }) + Expect(err).NotTo(HaveOccurred()) + + // err = (&v1beta2.SparkApplication{}).SetupWebhookWithManager(mgr) + // Expect(err).NotTo(HaveOccurred()) + + // +kubebuilder:scaffold:webhook + + go func() { + defer GinkgoRecover() + err = mgr.Start(ctx) + Expect(err).NotTo(HaveOccurred()) + }() + + // wait for the webhook server to get ready + dialer := &net.Dialer{Timeout: time.Second} + addrPort := fmt.Sprintf("%s:%d", webhookInstallOptions.LocalServingHost, webhookInstallOptions.LocalServingPort) + Eventually(func() error { + conn, err := tls.DialWithDialer(dialer, "tcp", addrPort, &tls.Config{InsecureSkipVerify: true}) + if err != nil { + return err + } + return conn.Close() + }).Should(Succeed()) + +}) + +var _ = AfterSuite(func() { + cancel() + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) diff --git a/internal/webhook/webhook.go b/internal/webhook/webhook.go new file mode 100644 index 0000000000..8a2088f7a6 --- /dev/null +++ b/internal/webhook/webhook.go @@ -0,0 +1,37 @@ +/* +Copyright 2024 The Kubeflow authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhook + +import ( + ctrl "sigs.k8s.io/controller-runtime" +) + +var ( + logger = ctrl.Log.WithName("") +) + +type Options struct { + SparkJobNamespaces []string + WebhookName string + WebhookPort int + WebhookSecretName string + WebhookSecretNamespace string + WebhookServiceName string + WebhookServiceNamespace string + WebhookMetricsBindAddress string + EnableResourceQuotaEnforcement bool +} diff --git a/main.go b/main.go deleted file mode 100644 index 58f6505b74..0000000000 --- a/main.go +++ /dev/null @@ -1,312 +0,0 @@ -/* -Copyright 2017 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -import ( - "context" - "flag" - "fmt" - "os" - "os/signal" - "strings" - "syscall" - "time" - - "github.com/golang/glog" - apiv1 "k8s.io/api/core/v1" - apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/informers" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/scheme" - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" - "k8s.io/client-go/tools/leaderelection" - "k8s.io/client-go/tools/leaderelection/resourcelock" - "k8s.io/client-go/tools/record" - "k8s.io/utils/clock" - - "github.com/kubeflow/spark-operator/pkg/batchscheduler" - crclientset "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned" - crinformers "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions" - operatorConfig "github.com/kubeflow/spark-operator/pkg/config" - "github.com/kubeflow/spark-operator/pkg/controller/scheduledsparkapplication" - "github.com/kubeflow/spark-operator/pkg/controller/sparkapplication" - "github.com/kubeflow/spark-operator/pkg/util" - "github.com/kubeflow/spark-operator/pkg/webhook" -) - -var ( - master = flag.String("master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.") - kubeConfig = flag.String("kubeConfig", "", "Path to a kube config. Only required if out-of-cluster.") - controllerThreads = flag.Int("controller-threads", 10, "Number of worker threads used by the SparkApplication controller.") - resyncInterval = flag.Int("resync-interval", 30, "Informer resync interval in seconds.") - namespace = flag.String("namespace", apiv1.NamespaceAll, "The Kubernetes namespace to manage. 
Will manage custom resource objects of the managed CRD types for the whole cluster if unset.") - labelSelectorFilter = flag.String("label-selector-filter", "", "A comma-separated list of key=value, or key labels to filter resources during watch and list based on the specified labels.") - enableWebhook = flag.Bool("enable-webhook", false, "Whether to enable the mutating admission webhook for admitting and patching Spark pods.") - webhookTimeout = flag.Int("webhook-timeout", 30, "Webhook Timeout in seconds before the webhook returns a timeout") - enableResourceQuotaEnforcement = flag.Bool("enable-resource-quota-enforcement", false, "Whether to enable ResourceQuota enforcement for SparkApplication resources. Requires the webhook to be enabled.") - ingressURLFormat = flag.String("ingress-url-format", "", "Ingress URL format.") - enableUIService = flag.Bool("enable-ui-service", true, "Enable Spark service UI.") - enableLeaderElection = flag.Bool("leader-election", false, "Enable Spark operator leader election.") - leaderElectionLockNamespace = flag.String("leader-election-lock-namespace", "spark-operator", "Namespace in which to create the ConfigMap for leader election.") - leaderElectionLockName = flag.String("leader-election-lock-name", "spark-operator-lock", "Name of the ConfigMap for leader election.") - leaderElectionLeaseDuration = flag.Duration("leader-election-lease-duration", 15*time.Second, "Leader election lease duration.") - leaderElectionRenewDeadline = flag.Duration("leader-election-renew-deadline", 14*time.Second, "Leader election renew deadline.") - leaderElectionRetryPeriod = flag.Duration("leader-election-retry-period", 4*time.Second, "Leader election retry period.") - enableBatchScheduler = flag.Bool("enable-batch-scheduler", false, fmt.Sprintf("Enable batch schedulers for pods' scheduling, the available batch schedulers are: (%s).", strings.Join(batchscheduler.GetRegisteredNames(), ","))) - enableMetrics = flag.Bool("enable-metrics", false, "Whether to enable the metrics endpoint.") - metricsPort = flag.String("metrics-port", "10254", "Port for the metrics endpoint.") - metricsEndpoint = flag.String("metrics-endpoint", "/metrics", "Metrics endpoint.") - metricsPrefix = flag.String("metrics-prefix", "", "Prefix for the metrics.") - ingressClassName = flag.String("ingress-class-name", "", "Set ingressClassName for ingress resources created.") - metricsLabels util.ArrayFlags - metricsJobStartLatencyBuckets util.HistogramBuckets = util.DefaultJobStartLatencyBuckets -) - -func main() { - flag.Var(&metricsLabels, "metrics-labels", "Labels for the metrics") - flag.Var(&metricsJobStartLatencyBuckets, "metrics-job-start-latency-buckets", - "Comma-separated boundary values (in seconds) for the job start latency histogram bucket; "+ - "it accepts any numerical values that can be parsed into a 64-bit floating point") - flag.Parse() - - // Create the client config. Use kubeConfig if given, otherwise assume in-cluster. 
- config, err := buildConfig(*master, *kubeConfig) - if err != nil { - glog.Fatal(err) - } - kubeClient, err := clientset.NewForConfig(config) - if err != nil { - glog.Fatal(err) - } - - signalCh := make(chan os.Signal, 1) - signal.Notify(signalCh, syscall.SIGINT, syscall.SIGTERM) - - stopCh := make(chan struct{}, 1) - startCh := make(chan struct{}, 1) - - if *enableLeaderElection { - podName := os.Getenv("POD_NAME") - hostname, err := os.Hostname() - if err != nil { - glog.Fatal(err) - } - broadcaster := record.NewBroadcaster() - source := apiv1.EventSource{Component: "spark-operator-leader-elector", Host: hostname} - recorder := broadcaster.NewRecorder(scheme.Scheme, source) - resourceLock := &resourcelock.LeaseLock{ - LeaseMeta: metav1.ObjectMeta{ - Namespace: *leaderElectionLockNamespace, - Name: *leaderElectionLockName, - }, - Client: kubeClient.CoordinationV1(), - LockConfig: resourcelock.ResourceLockConfig{ - Identity: podName, - EventRecorder: recorder, - }, - } - if err != nil { - glog.Fatal(err) - } - - electionCfg := leaderelection.LeaderElectionConfig{ - Lock: resourceLock, - LeaseDuration: *leaderElectionLeaseDuration, - RenewDeadline: *leaderElectionRenewDeadline, - RetryPeriod: *leaderElectionRetryPeriod, - Callbacks: leaderelection.LeaderCallbacks{ - OnStartedLeading: func(c context.Context) { - close(startCh) - }, - OnStoppedLeading: func() { - close(stopCh) - }, - }, - } - - elector, err := leaderelection.NewLeaderElector(electionCfg) - if err != nil { - glog.Fatal(err) - } - - go elector.Run(context.Background()) - } - - glog.Info("Starting the Spark Operator") - - crClient, err := crclientset.NewForConfig(config) - if err != nil { - glog.Fatal(err) - } - apiExtensionsClient, err := apiextensionsclient.NewForConfig(config) - if err != nil { - glog.Fatal(err) - } - - if err = util.InitializeIngressCapabilities(kubeClient); err != nil { - glog.Fatalf("Error retrieving Kubernetes cluster capabilities: %s", err.Error()) - } - - var batchSchedulerMgr *batchscheduler.SchedulerManager - if *enableBatchScheduler { - if !*enableWebhook { - glog.Fatal( - "failed to initialize the batch scheduler manager as it requires the webhook to be enabled") - } - batchSchedulerMgr = batchscheduler.NewSchedulerManager(config) - } - - crInformerFactory := buildCustomResourceInformerFactory(crClient) - podInformerFactory := buildPodInformerFactory(kubeClient) - - var metricConfig *util.MetricConfig - if *enableMetrics { - metricConfig = &util.MetricConfig{ - MetricsEndpoint: *metricsEndpoint, - MetricsPort: *metricsPort, - MetricsPrefix: *metricsPrefix, - MetricsLabels: metricsLabels, - MetricsJobStartLatencyBuckets: metricsJobStartLatencyBuckets, - } - - glog.Info("Enabling metrics collecting and exporting to Prometheus") - util.InitializeMetrics(metricConfig) - } - - applicationController := sparkapplication.NewController( - crClient, kubeClient, crInformerFactory, podInformerFactory, metricConfig, *namespace, *ingressURLFormat, *ingressClassName, batchSchedulerMgr, *enableUIService) - scheduledApplicationController := scheduledsparkapplication.NewController( - crClient, kubeClient, apiExtensionsClient, crInformerFactory, clock.RealClock{}) - - // Start the informer factory that in turn starts the informer. 
- go crInformerFactory.Start(stopCh) - go podInformerFactory.Start(stopCh) - - var hook *webhook.WebHook - if *enableWebhook { - var coreV1InformerFactory informers.SharedInformerFactory - if *enableResourceQuotaEnforcement { - coreV1InformerFactory = buildCoreV1InformerFactory(kubeClient) - } - var err error - // Don't deregister webhook on exit if leader election enabled (i.e. multiple webhooks running) - hook, err = webhook.New(kubeClient, crInformerFactory, *namespace, !*enableLeaderElection, *enableResourceQuotaEnforcement, coreV1InformerFactory, webhookTimeout) - if err != nil { - glog.Fatal(err) - } - - if *enableResourceQuotaEnforcement { - go coreV1InformerFactory.Start(stopCh) - } - - if err = hook.Start(stopCh); err != nil { - glog.Fatal(err) - } - } else if *enableResourceQuotaEnforcement { - glog.Fatal("Webhook must be enabled to use resource quota enforcement.") - } - - if *enableLeaderElection { - glog.Info("Waiting to be elected leader before starting application controller goroutines") - select { - case <-signalCh: - os.Exit(0) - case <-startCh: - } - } - - glog.Info("Starting application controller goroutines") - - if err = applicationController.Start(*controllerThreads, stopCh); err != nil { - glog.Fatal(err) - } - if err = scheduledApplicationController.Start(*controllerThreads, stopCh); err != nil { - glog.Fatal(err) - } - - select { - case <-signalCh: - close(stopCh) - case <-stopCh: - } - - glog.Info("Shutting down the Spark Operator") - applicationController.Stop() - scheduledApplicationController.Stop() - if *enableWebhook { - if err := hook.Stop(); err != nil { - glog.Fatal(err) - } - } -} - -func buildConfig(masterURL string, kubeConfig string) (*rest.Config, error) { - if kubeConfig != "" { - return clientcmd.BuildConfigFromFlags(masterURL, kubeConfig) - } - return rest.InClusterConfig() -} - -func buildCustomResourceInformerFactory(crClient crclientset.Interface) crinformers.SharedInformerFactory { - var factoryOpts []crinformers.SharedInformerOption - if *namespace != apiv1.NamespaceAll { - factoryOpts = append(factoryOpts, crinformers.WithNamespace(*namespace)) - } - if len(*labelSelectorFilter) > 0 { - tweakListOptionsFunc := func(options *metav1.ListOptions) { - options.LabelSelector = *labelSelectorFilter - } - factoryOpts = append(factoryOpts, crinformers.WithTweakListOptions(tweakListOptionsFunc)) - } - return crinformers.NewSharedInformerFactoryWithOptions( - crClient, - // resyncPeriod. Every resyncPeriod, all resources in the cache will re-trigger events. - time.Duration(*resyncInterval)*time.Second, - factoryOpts...) -} - -func buildPodInformerFactory(kubeClient clientset.Interface) informers.SharedInformerFactory { - var podFactoryOpts []informers.SharedInformerOption - if *namespace != apiv1.NamespaceAll { - podFactoryOpts = append(podFactoryOpts, informers.WithNamespace(*namespace)) - } - tweakListOptionsFunc := func(options *metav1.ListOptions) { - options.LabelSelector = fmt.Sprintf("%s,%s", operatorConfig.SparkRoleLabel, operatorConfig.LaunchedBySparkOperatorLabel) - if len(*labelSelectorFilter) > 0 { - options.LabelSelector = options.LabelSelector + "," + *labelSelectorFilter - } - } - podFactoryOpts = append(podFactoryOpts, informers.WithTweakListOptions(tweakListOptionsFunc)) - return informers.NewSharedInformerFactoryWithOptions(kubeClient, time.Duration(*resyncInterval)*time.Second, podFactoryOpts...) 
-} - -func buildCoreV1InformerFactory(kubeClient clientset.Interface) informers.SharedInformerFactory { - var coreV1FactoryOpts []informers.SharedInformerOption - if *namespace != apiv1.NamespaceAll { - coreV1FactoryOpts = append(coreV1FactoryOpts, informers.WithNamespace(*namespace)) - } - if len(*labelSelectorFilter) > 0 { - tweakListOptionsFunc := func(options *metav1.ListOptions) { - options.LabelSelector = *labelSelectorFilter - } - coreV1FactoryOpts = append(coreV1FactoryOpts, informers.WithTweakListOptions(tweakListOptionsFunc)) - } - return informers.NewSharedInformerFactoryWithOptions(kubeClient, time.Duration(*resyncInterval)*time.Second, coreV1FactoryOpts...) -} diff --git a/manifest/spark-application-rbac/kustomization.yaml b/manifest/spark-application-rbac/kustomization.yaml deleted file mode 100644 index 1e4e490c93..0000000000 --- a/manifest/spark-application-rbac/kustomization.yaml +++ /dev/null @@ -1,23 +0,0 @@ -# -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -namespace: spark - -resources: - - spark-application-rbac.yaml diff --git a/manifest/spark-application-rbac/spark-application-rbac.yaml b/manifest/spark-application-rbac/spark-application-rbac.yaml deleted file mode 100644 index 662f227d10..0000000000 --- a/manifest/spark-application-rbac/spark-application-rbac.yaml +++ /dev/null @@ -1,52 +0,0 @@ -# -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -apiVersion: v1 -kind: Namespace -metadata: - name: spark ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: spark - namespace: spark ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - namespace: spark - name: spark-role -rules: -- apiGroups: [""] - resources: ["pods"] - verbs: ["*"] -- apiGroups: [""] - resources: ["services"] - verbs: ["*"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: spark-role-binding - namespace: spark -subjects: -- kind: ServiceAccount - name: spark - namespace: spark -roleRef: - kind: Role - name: spark-role - apiGroup: rbac.authorization.k8s.io diff --git a/manifest/spark-operator-install/kustomization.yaml b/manifest/spark-operator-install/kustomization.yaml deleted file mode 100644 index 1d102d262e..0000000000 --- a/manifest/spark-operator-install/kustomization.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -namespace: spark-operator - -resources: - - spark-operator-rbac.yaml - - ../crds - - spark-operator.yaml diff --git a/manifest/spark-operator-install/spark-operator-rbac.yaml b/manifest/spark-operator-install/spark-operator-rbac.yaml deleted file mode 100644 index 71a053b487..0000000000 --- a/manifest/spark-operator-install/spark-operator-rbac.yaml +++ /dev/null @@ -1,97 +0,0 @@ -# -# Copyright 2017 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -apiVersion: v1 -kind: Namespace -metadata: - name: spark-operator ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: sparkoperator - namespace: spark-operator ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: sparkoperator -rules: -- apiGroups: [""] - resources: ["pods"] - verbs: ["*"] -- apiGroups: [""] - resources: ["configmaps"] - verbs: ["*"] -- apiGroups: [""] - resources: ["services", "secrets"] - verbs: ["create", "get", "delete"] -- apiGroups: ["extensions"] - resources: ["ingresses"] - verbs: ["create", "get", "delete"] -- apiGroups: [""] - resources: ["nodes"] - verbs: ["get"] -- apiGroups: [""] - resources: ["resourcequotas"] - verbs: ["get", "list", "watch"] -- apiGroups: [""] - resources: ["events"] - verbs: ["create", "update", "patch"] -- apiGroups: ["apiextensions.k8s.io"] - resources: ["customresourcedefinitions"] - verbs: ["create", "get", "update", "delete"] -- apiGroups: ["admissionregistration.k8s.io"] - resources: ["mutatingwebhookconfigurations", "validatingwebhookconfigurations"] - verbs: ["create", "get", "update", "delete"] -- apiGroups: ["sparkoperator.k8s.io"] - resources: ["sparkapplications", "scheduledsparkapplications", "sparkapplications/status", "scheduledsparkapplications/status", "sparkapplications/finalizers", "scheduledsparkapplications/finalizers"] - verbs: ["*"] -- apiGroups: ["scheduling.volcano.sh"] - resources: ["podgroups", "queues", "queues/status"] - verbs: ["get", "list", "watch", "create", "delete", "update"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: sparkoperator -subjects: - - kind: ServiceAccount - name: sparkoperator - namespace: spark-operator -roleRef: - kind: ClusterRole - name: sparkoperator - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - rbac.authorization.k8s.io/aggregate-to-admin: "true" - name: sparkoperator-aggregate-to-admin -rules: -- apiGroups: ["sparkoperator.k8s.io"] - resources: ["sparkapplications", "scheduledsparkapplications"] - verbs: - - create - - delete - - deletecollection - - get - - list - - patch - - update - - watch diff --git a/manifest/spark-operator-install/spark-operator.yaml b/manifest/spark-operator-install/spark-operator.yaml deleted file mode 100644 index b4b31d1ad6..0000000000 --- a/manifest/spark-operator-install/spark-operator.yaml +++ /dev/null @@ -1,45 +0,0 @@ -# -# Copyright 2017 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: sparkoperator - namespace: spark-operator - labels: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 - strategy: - type: Recreate - template: - metadata: - labels: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 - spec: - serviceAccountName: sparkoperator - containers: - - name: sparkoperator - image: gcr.io/spark-operator/spark-operator:v1beta2-1.3.0-3.1.1 - imagePullPolicy: Always - args: - - -logtostderr diff --git a/manifest/spark-operator-with-webhook-install/kustomization.yaml b/manifest/spark-operator-with-webhook-install/kustomization.yaml deleted file mode 100644 index ec3b237e7b..0000000000 --- a/manifest/spark-operator-with-webhook-install/kustomization.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -namespace: spark-operator - -resources: - - ../spark-operator-install - - spark-operator-webhook.yaml - -patchesStrategicMerge: - - spark-operator-patch.yaml diff --git a/manifest/spark-operator-with-webhook-install/spark-operator-patch.yaml b/manifest/spark-operator-with-webhook-install/spark-operator-patch.yaml deleted file mode 100644 index e752063c8e..0000000000 --- a/manifest/spark-operator-with-webhook-install/spark-operator-patch.yaml +++ /dev/null @@ -1,40 +0,0 @@ -# -# Copyright 2017 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -apiVersion: apps/v1 -kind: Deployment -metadata: - name: sparkoperator - labels: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 - namespace: spark-operator -spec: - template: - spec: - volumes: - - name: webhook-certs - secret: - secretName: spark-webhook-certs - containers: - - name: sparkoperator - args: - - -logtostderr - - -enable-webhook=true - - -v=2 - volumeMounts: - - name: webhook-certs - mountPath: /etc/webhook-certs - diff --git a/manifest/spark-operator-with-webhook-install/spark-operator-webhook.yaml b/manifest/spark-operator-with-webhook-install/spark-operator-webhook.yaml deleted file mode 100644 index eaad8660d0..0000000000 --- a/manifest/spark-operator-with-webhook-install/spark-operator-webhook.yaml +++ /dev/null @@ -1,53 +0,0 @@ -# -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -apiVersion: batch/v1 -kind: Job -metadata: - name: sparkoperator-init - namespace: spark-operator - labels: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 -spec: - backoffLimit: 3 - template: - metadata: - labels: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 - spec: - serviceAccountName: sparkoperator - restartPolicy: Never - containers: - - name: main - image: gcr.io/spark-operator/spark-operator:v1beta2-1.3.0-3.1.1 - imagePullPolicy: IfNotPresent - command: ["/usr/bin/gencerts.sh", "-p"] ---- -kind: Service -apiVersion: v1 -metadata: - name: spark-webhook - namespace: spark-operator -spec: - ports: - - port: 443 - targetPort: 8080 - name: webhook - selector: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v1beta2-1.3.0-3.1.1 diff --git a/manifest/spark-operator-with-webhook-install/spark-operator-with-webhook.yaml b/manifest/spark-operator-with-webhook-install/spark-operator-with-webhook.yaml deleted file mode 100644 index 5e4f318055..0000000000 --- a/manifest/spark-operator-with-webhook-install/spark-operator-with-webhook.yaml +++ /dev/null @@ -1,96 +0,0 @@ -# -# Copyright 2017 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: sparkoperator - namespace: spark-operator - labels: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v2.4.0-v1beta1 -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v2.4.0-v1beta1 - strategy: - type: Recreate - template: - metadata: - labels: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v2.4.0-v1beta1 - initializers: - pending: [] - spec: - serviceAccountName: sparkoperator - volumes: - - name: webhook-certs - secret: - secretName: spark-webhook-certs - containers: - - name: sparkoperator - image: gcr.io/spark-operator/spark-operator:v2.4.0-v1beta1-latest - imagePullPolicy: Always - volumeMounts: - - name: webhook-certs - mountPath: /etc/webhook-certs - ports: - - containerPort: 8080 - args: - - -logtostderr - - -enable-webhook=true - - -v=2 ---- -apiVersion: batch/v1 -kind: Job -metadata: - name: sparkoperator-init - namespace: spark-operator - labels: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v2.4.0-v1beta1 -spec: - backoffLimit: 3 - template: - metadata: - labels: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v2.4.0-v1beta1 - spec: - serviceAccountName: sparkoperator - restartPolicy: Never - containers: - - name: main - image: gcr.io/spark-operator/spark-operator:v2.4.0-v1beta1-latest - imagePullPolicy: IfNotPresent - command: ["/usr/bin/gencerts.sh", "-p"] ---- -kind: Service -apiVersion: v1 -metadata: - name: spark-webhook - namespace: spark-operator -spec: - ports: - - port: 443 - targetPort: 8080 - name: webhook - selector: - app.kubernetes.io/name: sparkoperator - app.kubernetes.io/version: v2.4.0-v1beta1 diff --git a/pkg/batchscheduler/scheduler_manager.go b/pkg/batchscheduler/scheduler_manager.go deleted file mode 100644 index 41ff744b0e..0000000000 --- a/pkg/batchscheduler/scheduler_manager.go +++ /dev/null @@ -1,80 +0,0 @@ -/* -Copyright 2019 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package batchscheduler - -import ( - "fmt" - "sync" - - "k8s.io/client-go/rest" - - "github.com/kubeflow/spark-operator/pkg/batchscheduler/interface" - "github.com/kubeflow/spark-operator/pkg/batchscheduler/volcano" -) - -type schedulerInitializeFunc func(config *rest.Config) (schedulerinterface.BatchScheduler, error) - -var schedulerContainers = map[string]schedulerInitializeFunc{ - volcano.GetPluginName(): volcano.New, -} - -func GetRegisteredNames() []string { - var pluginNames []string - for key := range schedulerContainers { - pluginNames = append(pluginNames, key) - } - return pluginNames -} - -type SchedulerManager struct { - sync.Mutex - config *rest.Config - plugins map[string]schedulerinterface.BatchScheduler -} - -func NewSchedulerManager(config *rest.Config) *SchedulerManager { - manager := SchedulerManager{ - config: config, - plugins: make(map[string]schedulerinterface.BatchScheduler), - } - return &manager -} - -func (batch *SchedulerManager) GetScheduler(schedulerName string) (schedulerinterface.BatchScheduler, error) { - iniFunc, registered := schedulerContainers[schedulerName] - if !registered { - return nil, fmt.Errorf("unregistered scheduler plugin %s", schedulerName) - } - - batch.Lock() - defer batch.Unlock() - - if plugin, existed := batch.plugins[schedulerName]; existed && plugin != nil { - return plugin, nil - } else if existed && plugin == nil { - return nil, fmt.Errorf( - "failed to get scheduler plugin %s, previous initialization has failed", schedulerName) - } else { - if plugin, err := iniFunc(batch.config); err != nil { - batch.plugins[schedulerName] = nil - return nil, err - } else { - batch.plugins[schedulerName] = plugin - return plugin, nil - } - } -} diff --git a/pkg/batchscheduler/volcano/volcano_scheduler.go b/pkg/batchscheduler/volcano/volcano_scheduler.go deleted file mode 100644 index a232784c7c..0000000000 --- a/pkg/batchscheduler/volcano/volcano_scheduler.go +++ /dev/null @@ -1,307 +0,0 @@ -/* -Copyright 2019 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package volcano - -import ( - "context" - "fmt" - - corev1 "k8s.io/api/core/v1" - apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/rest" - - "volcano.sh/apis/pkg/apis/scheduling/v1beta1" - volcanoclient "volcano.sh/apis/pkg/client/clientset/versioned" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - schedulerinterface "github.com/kubeflow/spark-operator/pkg/batchscheduler/interface" -) - -const ( - PodGroupName = "podgroups.scheduling.volcano.sh" -) - -type VolcanoBatchScheduler struct { - extensionClient apiextensionsclient.Interface - volcanoClient volcanoclient.Interface -} - -func GetPluginName() string { - return "volcano" -} - -func (v *VolcanoBatchScheduler) Name() string { - return GetPluginName() -} - -func (v *VolcanoBatchScheduler) ShouldSchedule(app *v1beta2.SparkApplication) bool { - //NOTE: There is no additional requirement for volcano scheduler - return true -} - -func (v *VolcanoBatchScheduler) DoBatchSchedulingOnSubmission(app *v1beta2.SparkApplication) error { - if app.Spec.Executor.Annotations == nil { - app.Spec.Executor.Annotations = make(map[string]string) - } - - if app.Spec.Driver.Annotations == nil { - app.Spec.Driver.Annotations = make(map[string]string) - } - - if app.Spec.Mode == v1beta2.ClientMode { - return v.syncPodGroupInClientMode(app) - } else if app.Spec.Mode == v1beta2.ClusterMode { - return v.syncPodGroupInClusterMode(app) - } - return nil -} - -func (v *VolcanoBatchScheduler) syncPodGroupInClientMode(app *v1beta2.SparkApplication) error { - // We only care about the executor pods in client mode - if _, ok := app.Spec.Executor.Annotations[v1beta1.KubeGroupNameAnnotationKey]; !ok { - totalResource := getExecutorRequestResource(app) - - if app.Spec.BatchSchedulerOptions != nil && len(app.Spec.BatchSchedulerOptions.Resources) > 0 { - totalResource = app.Spec.BatchSchedulerOptions.Resources - } - if err := v.syncPodGroup(app, 1, totalResource); err == nil { - app.Spec.Executor.Annotations[v1beta1.KubeGroupNameAnnotationKey] = v.getAppPodGroupName(app) - } else { - return err - } - } - return nil -} - -func (v *VolcanoBatchScheduler) syncPodGroupInClusterMode(app *v1beta2.SparkApplication) error { - //We need both mark Driver and Executor when submitting - //NOTE: In cluster mode, the initial size of PodGroup is set to 1 in order to schedule driver pod first. - if _, ok := app.Spec.Driver.Annotations[v1beta1.KubeGroupNameAnnotationKey]; !ok { - //Both driver and executor resource will be considered. 
- totalResource := sumResourceList([]corev1.ResourceList{getExecutorRequestResource(app), getDriverRequestResource(app)}) - - if app.Spec.BatchSchedulerOptions != nil && len(app.Spec.BatchSchedulerOptions.Resources) > 0 { - totalResource = app.Spec.BatchSchedulerOptions.Resources - } - if err := v.syncPodGroup(app, 1, totalResource); err == nil { - app.Spec.Executor.Annotations[v1beta1.KubeGroupNameAnnotationKey] = v.getAppPodGroupName(app) - app.Spec.Driver.Annotations[v1beta1.KubeGroupNameAnnotationKey] = v.getAppPodGroupName(app) - } else { - return err - } - } - return nil -} - -func (v *VolcanoBatchScheduler) getAppPodGroupName(app *v1beta2.SparkApplication) string { - return fmt.Sprintf("spark-%s-pg", app.Name) -} - -func (v *VolcanoBatchScheduler) syncPodGroup(app *v1beta2.SparkApplication, size int32, minResource corev1.ResourceList) error { - var ( - err error - pg *v1beta1.PodGroup - ) - podGroupName := v.getAppPodGroupName(app) - if pg, err = v.volcanoClient.SchedulingV1beta1().PodGroups(app.Namespace).Get(context.TODO(), podGroupName, metav1.GetOptions{}); err != nil { - if !errors.IsNotFound(err) { - return err - } - podGroup := v1beta1.PodGroup{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: app.Namespace, - Name: podGroupName, - OwnerReferences: []metav1.OwnerReference{ - *metav1.NewControllerRef(app, v1beta2.SchemeGroupVersion.WithKind("SparkApplication")), - }, - }, - Spec: v1beta1.PodGroupSpec{ - MinMember: size, - MinResources: &minResource, - }, - Status: v1beta1.PodGroupStatus{ - Phase: v1beta1.PodGroupPending, - }, - } - - if app.Spec.BatchSchedulerOptions != nil { - //Update pod group queue if it's specified in Spark Application - if app.Spec.BatchSchedulerOptions.Queue != nil { - podGroup.Spec.Queue = *app.Spec.BatchSchedulerOptions.Queue - } - //Update pod group priorityClassName if it's specified in Spark Application - if app.Spec.BatchSchedulerOptions.PriorityClassName != nil { - podGroup.Spec.PriorityClassName = *app.Spec.BatchSchedulerOptions.PriorityClassName - } - } - _, err = v.volcanoClient.SchedulingV1beta1().PodGroups(app.Namespace).Create(context.TODO(), &podGroup, metav1.CreateOptions{}) - } else { - if pg.Spec.MinMember != size { - pg.Spec.MinMember = size - _, err = v.volcanoClient.SchedulingV1beta1().PodGroups(app.Namespace).Update(context.TODO(), pg, metav1.UpdateOptions{}) - } - } - if err != nil { - return fmt.Errorf("failed to sync PodGroup with error: %s. 
Abandon schedule pods via volcano", err) - } - return nil -} - -func (v *VolcanoBatchScheduler) CleanupOnCompletion(app *v1beta2.SparkApplication) error { - podGroupName := v.getAppPodGroupName(app) - err := v.volcanoClient.SchedulingV1beta1().PodGroups(app.Namespace).Delete(context.TODO(), podGroupName, metav1.DeleteOptions{}) - if err != nil && !errors.IsNotFound(err) { - return err - } - return nil -} - -func New(config *rest.Config) (schedulerinterface.BatchScheduler, error) { - vkClient, err := volcanoclient.NewForConfig(config) - if err != nil { - return nil, fmt.Errorf("failed to initialize volcano client with error %v", err) - } - extClient, err := apiextensionsclient.NewForConfig(config) - if err != nil { - return nil, fmt.Errorf("failed to initialize k8s extension client with error %v", err) - } - - if _, err := extClient.ApiextensionsV1().CustomResourceDefinitions().Get( - context.TODO(), - PodGroupName, - metav1.GetOptions{}, - ); err != nil { - //For backward compatibility check v1beta1 API version of CustomResourceDefinitions - if _, err := extClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get( - context.TODO(), - PodGroupName, - metav1.GetOptions{}, - ); err != nil { - return nil, fmt.Errorf("podGroup CRD is required to exists in current cluster error: %s", err) - } - } - return &VolcanoBatchScheduler{ - extensionClient: extClient, - volcanoClient: vkClient, - }, nil -} - -func getExecutorRequestResource(app *v1beta2.SparkApplication) corev1.ResourceList { - minResource := corev1.ResourceList{} - - //CoreRequest correspond to executor's core request - if app.Spec.Executor.CoreRequest != nil { - if value, err := resource.ParseQuantity(*app.Spec.Executor.CoreRequest); err == nil { - minResource[corev1.ResourceCPU] = value - } - } - - //Use Core attribute if CoreRequest is empty - if app.Spec.Executor.Cores != nil { - if _, ok := minResource[corev1.ResourceCPU]; !ok { - if value, err := resource.ParseQuantity(fmt.Sprintf("%d", *app.Spec.Executor.Cores)); err == nil { - minResource[corev1.ResourceCPU] = value - } - } - } - - //CoreLimit correspond to executor's core limit, this attribute will be used only when core request is empty. 
- if app.Spec.Executor.CoreLimit != nil { - if _, ok := minResource[corev1.ResourceCPU]; !ok { - if value, err := resource.ParseQuantity(*app.Spec.Executor.CoreLimit); err == nil { - minResource[corev1.ResourceCPU] = value - } - } - } - - //Memory + MemoryOverhead correspond to executor's memory request - if app.Spec.Executor.Memory != nil { - if value, err := resource.ParseQuantity(*app.Spec.Executor.Memory); err == nil { - minResource[corev1.ResourceMemory] = value - } - } - if app.Spec.Executor.MemoryOverhead != nil { - if value, err := resource.ParseQuantity(*app.Spec.Executor.MemoryOverhead); err == nil { - if existing, ok := minResource[corev1.ResourceMemory]; ok { - existing.Add(value) - minResource[corev1.ResourceMemory] = existing - } - } - } - - resourceList := []corev1.ResourceList{{}} - for i := int32(0); i < *app.Spec.Executor.Instances; i++ { - resourceList = append(resourceList, minResource) - } - return sumResourceList(resourceList) -} - -func getDriverRequestResource(app *v1beta2.SparkApplication) corev1.ResourceList { - minResource := corev1.ResourceList{} - - //Cores correspond to driver's core request - if app.Spec.Driver.Cores != nil { - if value, err := resource.ParseQuantity(fmt.Sprintf("%d", *app.Spec.Driver.Cores)); err == nil { - minResource[corev1.ResourceCPU] = value - } - } - - //CoreLimit correspond to driver's core limit, this attribute will be used only when core request is empty. - if app.Spec.Driver.CoreLimit != nil { - if _, ok := minResource[corev1.ResourceCPU]; !ok { - if value, err := resource.ParseQuantity(*app.Spec.Driver.CoreLimit); err == nil { - minResource[corev1.ResourceCPU] = value - } - } - } - - //Memory + MemoryOverhead correspond to driver's memory request - if app.Spec.Driver.Memory != nil { - if value, err := resource.ParseQuantity(*app.Spec.Driver.Memory); err == nil { - minResource[corev1.ResourceMemory] = value - } - } - if app.Spec.Driver.MemoryOverhead != nil { - if value, err := resource.ParseQuantity(*app.Spec.Driver.MemoryOverhead); err == nil { - if existing, ok := minResource[corev1.ResourceMemory]; ok { - existing.Add(value) - minResource[corev1.ResourceMemory] = existing - } - } - } - - return minResource -} - -func sumResourceList(list []corev1.ResourceList) corev1.ResourceList { - totalResource := corev1.ResourceList{} - for _, l := range list { - for name, quantity := range l { - - if value, ok := totalResource[name]; !ok { - totalResource[name] = quantity.DeepCopy() - } else { - value.Add(quantity) - totalResource[name] = value - } - } - } - return totalResource -} diff --git a/pkg/certificate/certificate.go b/pkg/certificate/certificate.go new file mode 100644 index 0000000000..322faeb1ee --- /dev/null +++ b/pkg/certificate/certificate.go @@ -0,0 +1,307 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package certificate
+
+import (
+	"context"
+	"crypto/rsa"
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/pem"
+	"fmt"
+	"net"
+	"os"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/util/cert"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	"github.com/kubeflow/spark-operator/pkg/common"
+)
+
+const (
+	Organization = "spark-operator"
+)
+
+// Provider is a container of an X509 certificate and a corresponding key for the
+// webhook server, and a CA certificate for the API server to verify the server certificate.
+type Provider struct {
+	client     client.Client
+	commonName string
+	caKey      *rsa.PrivateKey
+	caCert     *x509.Certificate
+	serverKey  *rsa.PrivateKey
+	serverCert *x509.Certificate
+}
+
+// NewProvider creates a new Provider instance.
+func NewProvider(client client.Client, name, namespace string) *Provider {
+	commonName := fmt.Sprintf("%s.%s.svc", name, namespace)
+	certProvider := Provider{
+		client:     client,
+		commonName: commonName,
+	}
+	return &certProvider
+}
+
+// SyncSecret ensures that the secret with the given name and namespace contains the
+// generated certificates, creating and populating it if needed.
+func (cp *Provider) SyncSecret(ctx context.Context, name, namespace string) error {
+	secret := &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: namespace,
+		},
+	}
+	key := types.NamespacedName{
+		Name:      name,
+		Namespace: namespace,
+	}
+	if err := cp.client.Get(ctx, key, secret); err != nil {
+		if !errors.IsNotFound(err) {
+			return err
+		}
+		if err := cp.client.Create(ctx, secret); err != nil {
+			if errors.IsAlreadyExists(err) {
+				return err
+			}
+			return fmt.Errorf("failed to create secret: %v", err)
+		}
+	}
+
+	if len(secret.Data[common.CAKeyPem]) == 0 ||
+		len(secret.Data[common.CACertPem]) == 0 ||
+		len(secret.Data[common.ServerCertPem]) == 0 ||
+		len(secret.Data[common.ServerKeyPem]) == 0 {
+		if err := cp.Generate(); err != nil {
+			return fmt.Errorf("failed to generate certificate: %v", err)
+		}
+		if err := cp.updateSecret(ctx, secret); err != nil {
+			return err
+		}
+		return nil
+	}
+	return cp.parseSecret(secret)
+}
+
+// CAKey returns the PEM-encoded CA private key.
+func (cp *Provider) CAKey() ([]byte, error) {
+	if cp.caKey == nil {
+		return nil, fmt.Errorf("CA key is not set")
+	}
+	data := pem.EncodeToMemory(&pem.Block{
+		Type:  "RSA PRIVATE KEY",
+		Bytes: x509.MarshalPKCS1PrivateKey(cp.caKey),
+	})
+	return data, nil
+}
+
+// CACert returns the PEM-encoded CA certificate.
+func (cp *Provider) CACert() ([]byte, error) {
+	if cp.caCert == nil {
+		return nil, fmt.Errorf("CA certificate is not set")
+	}
+	data := pem.EncodeToMemory(&pem.Block{
+		Type:  "CERTIFICATE",
+		Bytes: cp.caCert.Raw,
+	})
+	return data, nil
+}
+
+// ServerKey returns the PEM-encoded server private key.
+func (cp *Provider) ServerKey() ([]byte, error) {
+	if cp.serverKey == nil {
+		return nil, fmt.Errorf("server key is not set")
+	}
+	data := pem.EncodeToMemory(&pem.Block{
+		Type:  "RSA PRIVATE KEY",
+		Bytes: x509.MarshalPKCS1PrivateKey(cp.serverKey),
+	})
+	return data, nil
+}
+
+// ServerCert returns the PEM-encoded server cert.
+func (cp *Provider) ServerCert() ([]byte, error) {
+	if cp.serverCert == nil {
+		return nil, fmt.Errorf("server cert is not set")
+	}
+	data := pem.EncodeToMemory(&pem.Block{
+		Type:  "CERTIFICATE",
+		Bytes: cp.serverCert.Raw,
+	})
+	return data, nil
+}
+
+// TLSConfig returns the TLS configuration.
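+//
+// A minimal usage sketch (hypothetical names, not part of this package; error
+// handling elided and assuming a configured controller-runtime client):
+//
+//	cp := NewProvider(k8sClient, "webhook-svc", "spark-operator")
+//	_ = cp.SyncSecret(ctx, "spark-operator-webhook-secret", "spark-operator")
+//	cfg, _ := cp.TLSConfig()
+//	server := &http.Server{Addr: ":9443", TLSConfig: cfg}
+//	_ = server.ListenAndServeTLS("", "") // key pair is taken from TLSConfig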
+func (cp *Provider) TLSConfig() (*tls.Config, error) {
+	keyPEMBlock, err := cp.ServerKey()
+	if err != nil {
+		return nil, fmt.Errorf("failed to get server key: %v", err)
+	}
+
+	certPEMBlock, err := cp.ServerCert()
+	if err != nil {
+		return nil, fmt.Errorf("failed to get server certificate: %v", err)
+	}
+
+	tlsCert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock)
+	if err != nil {
+		return nil, fmt.Errorf("failed to generate TLS certificate: %v", err)
+	}
+
+	cfg := &tls.Config{
+		Certificates: []tls.Certificate{tlsCert},
+	}
+	return cfg, nil
+}
+
+// WriteFile saves the certificate and key to the given path.
+func (cp *Provider) WriteFile(path, certName, keyName string) error {
+	if err := os.MkdirAll(path, 0755); err != nil {
+		return err
+	}
+	serverCert, err := cp.ServerCert()
+	if err != nil {
+		return err
+	}
+	serverKey, err := cp.ServerKey()
+	if err != nil {
+		return err
+	}
+	if err := os.WriteFile(path+"/"+certName, serverCert, 0600); err != nil {
+		return err
+	}
+	if err := os.WriteFile(path+"/"+keyName, serverKey, 0600); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (cp *Provider) Generate() error {
+	// Generate the CA private key
+	caKey, err := NewPrivateKey()
+	if err != nil {
+		return fmt.Errorf("failed to generate CA private key: %v", err)
+	}
+
+	// Generate self-signed CA certificate
+	caCfg := cert.Config{
+		CommonName:   cp.commonName,
+		Organization: []string{Organization},
+	}
+	caCert, err := cert.NewSelfSignedCACert(caCfg, caKey)
+	if err != nil {
+		return fmt.Errorf("failed to generate self-signed CA certificate: %v", err)
+	}
+
+	// Generate server private key
+	serverKey, err := NewPrivateKey()
+	if err != nil {
+		return fmt.Errorf("failed to generate server private key: %v", err)
+	}
+
+	// Generate signed server certificate
+	var ips []net.IP
+	dnsNames := []string{"localhost"}
+	hostIP := net.ParseIP(cp.commonName)
+	if hostIP.To4() != nil {
+		ips = append(ips, hostIP.To4())
+	} else {
+		dnsNames = append(dnsNames, cp.commonName)
+	}
+	serverCfg := cert.Config{
+		CommonName:   cp.commonName,
+		Organization: []string{Organization},
+		AltNames:     cert.AltNames{IPs: ips, DNSNames: dnsNames},
+	}
+	serverCert, err := NewSignedServerCert(serverCfg, caKey, caCert, serverKey)
+	if err != nil {
+		return fmt.Errorf("failed to generate signed server certificate: %v", err)
+	}
+
+	cp.caKey = caKey
+	cp.caCert = caCert
+	cp.serverKey = serverKey
+	cp.serverCert = serverCert
+	return nil
+}
+
+func (cp *Provider) parseSecret(secret *corev1.Secret) error {
+	if secret == nil {
+		return fmt.Errorf("secret is nil")
+	}
+	caKeyPem, _ := pem.Decode(secret.Data[common.CAKeyPem])
+	caCertPem, _ := pem.Decode(secret.Data[common.CACertPem])
+	serverKeyPem, _ := pem.Decode(secret.Data[common.ServerKeyPem])
+	serverCertPem, _ := pem.Decode(secret.Data[common.ServerCertPem])
+	if caKeyPem == nil || caCertPem == nil || serverKeyPem == nil || serverCertPem == nil {
+		return fmt.Errorf("failed to decode secret data to pem block")
+	}
+	caKey, err := x509.ParsePKCS1PrivateKey(caKeyPem.Bytes)
+	if err != nil {
+		return fmt.Errorf("failed to parse CA private key: %v", err)
+	}
+	caCert, err := x509.ParseCertificate(caCertPem.Bytes)
+	if err != nil {
+		return fmt.Errorf("failed to parse CA certificate: %v", err)
+	}
+	serverKey, err := x509.ParsePKCS1PrivateKey(serverKeyPem.Bytes)
+	if err != nil {
+		return fmt.Errorf("failed to parse server private key: %v", err)
+	}
+	serverCert, err := x509.ParseCertificate(serverCertPem.Bytes)
+	if err != nil {
+		return fmt.Errorf("failed to 
parse server certificate: %v", err) + } + cp.caKey = caKey + cp.caCert = caCert + cp.serverKey = serverKey + cp.serverCert = serverCert + return nil +} + +func (cp *Provider) updateSecret(ctx context.Context, secret *corev1.Secret) error { + caKey, err := cp.CAKey() + if err != nil { + return fmt.Errorf("failed to get CA key: %v", err) + } + caCert, err := cp.CACert() + if err != nil { + return fmt.Errorf("failed to get CA certificate: %v", err) + } + serverKey, err := cp.ServerKey() + if err != nil { + return fmt.Errorf("failed to get server key: %v", err) + } + serverCert, err := cp.ServerCert() + if err != nil { + return fmt.Errorf("failed to get server certificate: %v", err) + } + if secret.Data == nil { + secret.Data = make(map[string][]byte) + } + secret.Data[common.CAKeyPem] = caKey + secret.Data[common.CACertPem] = caCert + secret.Data[common.ServerKeyPem] = serverKey + secret.Data[common.ServerCertPem] = serverCert + if err := cp.client.Update(ctx, secret); err != nil { + return err + } + return nil +} diff --git a/pkg/certificate/certificate_test.go b/pkg/certificate/certificate_test.go new file mode 100644 index 0000000000..f0f4162794 --- /dev/null +++ b/pkg/certificate/certificate_test.go @@ -0,0 +1,175 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package certificate_test + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + "github.com/kubeflow/spark-operator/pkg/certificate" + "github.com/kubeflow/spark-operator/pkg/common" +) + +var _ = Describe("Certificate Provider", func() { + Context("Generate new certificates", func() { + secretName := "spark-operator-webhook-secret" + secretNamespace := "default" + + var cp *certificate.Provider + + BeforeEach(func() { + By("Creating a new cert provider") + cp = certificate.NewProvider(k8sClient, secretName, secretNamespace) + Expect(cp).NotTo(BeNil()) + + By("Generating new certificates") + Expect(cp.Generate()).To(Succeed()) + }) + + It("Should generate new CA key", func() { + caKey, err := cp.CAKey() + Expect(err).To(BeNil()) + Expect(caKey).NotTo(BeEmpty()) + }) + + It("Should generate new CA certificate", func() { + caCert, err := cp.CACert() + Expect(err).To(BeNil()) + Expect(caCert).NotTo(BeEmpty()) + }) + + It("Should generate new server key", func() { + serverKey, err := cp.ServerKey() + Expect(err).To(BeNil()) + Expect(serverKey).NotTo(BeEmpty()) + }) + + It("Should generate new server certificate", func() { + serverCert, err := cp.ServerCert() + Expect(err).To(BeNil()) + Expect(serverCert).NotTo(BeEmpty()) + }) + + It("Should generate new TLS config", func() { + cfg, err := cp.ServerCert() + Expect(err).To(BeNil()) + Expect(cfg).NotTo(BeEmpty()) + }) + }) + + Context("The data of webhook secret is empty", func() { + ctx := context.Background() + secretName := "spark-operator-webhook-secret" + secretNamespace := "default" + key := types.NamespacedName{ + Namespace: secretNamespace, + Name: secretName, + } + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: secretNamespace, + }, + } + + BeforeEach(func() { + By("Creating a new webhook secret with empty data") + Expect(k8sClient.Create(ctx, secret)).To(Succeed()) + }) + + AfterEach(func() { + By("Deleting the webhook secret") + Expect(k8sClient.Delete(ctx, secret)).To(Succeed()) + }) + + It("Should generate new certificates and update webhook secret", func() { + By("Creating a new CertProvider") + cp := certificate.NewProvider(k8sClient, secretName, secretNamespace) + Expect(cp.SyncSecret(context.TODO(), secretName, secretNamespace)).To(Succeed()) + + By("Checking out whether the data of webhook secret is populated") + Expect(k8sClient.Get(ctx, key, secret)).To(Succeed()) + Expect(secret.Data[common.CAKeyPem]).NotTo(BeEmpty()) + Expect(secret.Data[common.CACertPem]).NotTo(BeEmpty()) + Expect(secret.Data[common.ServerKeyPem]).NotTo(BeEmpty()) + Expect(secret.Data[common.ServerCertPem]).NotTo(BeEmpty()) + }) + }) + + Context("The data of webhook secret is already populated", func() { + ctx := context.Background() + secretName := "spark-operator-webhook-secret" + secretNamespace := "default" + key := types.NamespacedName{ + Name: secretName, + Namespace: secretNamespace, + } + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: secretNamespace, + }, + } + + BeforeEach(func() { + By("Creating a new webhook secret with data populated") + Expect(k8sClient.Create(ctx, secret)).To(Succeed()) + + By("Creating a new CertProvider and synchronize generated certificates to webhook secret") + cp := certificate.NewProvider(k8sClient, secretName, secretNamespace) + Expect(cp.SyncSecret(context.TODO(), secretName, secretNamespace)).To(Succeed()) + + By("Creating a new webhook secret with data populated") + 
Expect(k8sClient.Get(ctx, key, secret)).To(Succeed())
+			Expect(secret.Data[common.CAKeyPem]).NotTo(BeEmpty())
+			Expect(secret.Data[common.CACertPem]).NotTo(BeEmpty())
+			Expect(secret.Data[common.ServerKeyPem]).NotTo(BeEmpty())
+			Expect(secret.Data[common.ServerCertPem]).NotTo(BeEmpty())
+		})
+
+		AfterEach(func() {
+			By("Deleting the webhook secret")
+			Expect(k8sClient.Delete(ctx, secret)).To(Succeed())
+		})
+
+		It("Should synchronize webhook certificate data", func() {
+			By("Creating a new cert provider and synchronizing generated certificates to the webhook secret")
+			cp := certificate.NewProvider(k8sClient, secretName, secretNamespace)
+			Expect(cp.SyncSecret(context.TODO(), secretName, secretNamespace)).To(Succeed())
+
+			By("Checking that the webhook certificates are synchronized into the cert provider")
+			caKey, err := cp.CAKey()
+			Expect(err).To(BeNil())
+			Expect(caKey).To(Equal(secret.Data[common.CAKeyPem]))
+			caCert, err := cp.CACert()
+			Expect(err).To(BeNil())
+			Expect(caCert).To(Equal(secret.Data[common.CACertPem]))
+			serverKey, err := cp.ServerKey()
+			Expect(err).To(BeNil())
+			Expect(serverKey).To(Equal(secret.Data[common.ServerKeyPem]))
+			serverCert, err := cp.ServerCert()
+			Expect(err).To(BeNil())
+			Expect(serverCert).To(Equal(secret.Data[common.ServerCertPem]))
+		})
+	})
+})
diff --git a/pkg/certificate/doc.go b/pkg/certificate/doc.go
new file mode 100644
index 0000000000..082248576b
--- /dev/null
+++ b/pkg/certificate/doc.go
@@ -0,0 +1,17 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package certificate
diff --git a/pkg/certificate/suite_test.go b/pkg/certificate/suite_test.go
new file mode 100644
index 0000000000..96fad9a2ae
--- /dev/null
+++ b/pkg/certificate/suite_test.go
@@ -0,0 +1,94 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package certificate_test
+
+import (
+	"fmt"
+	"path/filepath"
+	"runtime"
+	"testing"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+
+	"k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/rest"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/envtest"
+	logf "sigs.k8s.io/controller-runtime/pkg/log"
+	"sigs.k8s.io/controller-runtime/pkg/log/zap"
+
+	"github.com/kubeflow/spark-operator/api/v1beta1"
+	"github.com/kubeflow/spark-operator/api/v1beta2"
+	// +kubebuilder:scaffold:imports
+)
+
+// These tests use Ginkgo (BDD-style Go testing framework). Refer to
+// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
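+//
+// These specs assume the envtest control-plane binaries are present (see the
+// BinaryAssetsDirectory comment below). One illustrative way to run only this
+// suite, assuming the binaries have been set up:
+//
+//	go test ./pkg/certificate/ -run TestCertProvider -v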
+
+var cfg *rest.Config
+var k8sClient client.Client
+var testEnv *envtest.Environment
+
+func TestCertProvider(t *testing.T) {
+	RegisterFailHandler(Fail)
+
+	RunSpecs(t, "Certificate Suite")
+}
+
+var _ = BeforeSuite(func() {
+	logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
+
+	By("bootstrapping test environment")
+	testEnv = &envtest.Environment{
+		CRDDirectoryPaths:     []string{filepath.Join("..", "..", "config", "crd", "bases")},
+		ErrorIfCRDPathMissing: true,
+
+		// The BinaryAssetsDirectory is only required if you want to run the tests directly
+		// without calling the makefile target test. If not set, it will look for the
+		// default path defined in controller-runtime which is /usr/local/kubebuilder/.
+		// Note that you must have the required binaries set up under the bin directory to perform
+		// the tests directly. When we run make test, it will be set up and used automatically.
+		BinaryAssetsDirectory: filepath.Join("..", "..", "bin", "k8s",
+			fmt.Sprintf("1.29.3-%s-%s", runtime.GOOS, runtime.GOARCH)),
+	}
+
+	var err error
+	// cfg is defined in this file globally.
+	cfg, err = testEnv.Start()
+	Expect(err).NotTo(HaveOccurred())
+	Expect(cfg).NotTo(BeNil())
+
+	err = v1beta2.AddToScheme(scheme.Scheme)
+	Expect(err).NotTo(HaveOccurred())
+
+	err = v1beta1.AddToScheme(scheme.Scheme)
+	Expect(err).NotTo(HaveOccurred())
+
+	// +kubebuilder:scaffold:scheme
+
+	k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
+	Expect(err).NotTo(HaveOccurred())
+	Expect(k8sClient).NotTo(BeNil())
+
+})
+
+var _ = AfterSuite(func() {
+	By("tearing down the test environment")
+	err := testEnv.Stop()
+	Expect(err).NotTo(HaveOccurred())
+})
diff --git a/pkg/util/cert.go b/pkg/certificate/util.go
similarity index 72%
rename from pkg/util/cert.go
rename to pkg/certificate/util.go
index 37188f3a34..635c89dde6 100644
--- a/pkg/util/cert.go
+++ b/pkg/certificate/util.go
@@ -1,4 +1,20 @@
-package util
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package certificate
 
 import (
 	"crypto/rand"
@@ -11,14 +27,12 @@ import (
 	"time"
 
 	"k8s.io/client-go/util/cert"
-)
 
-const (
-	RSAKeySize = 2048
+	"github.com/kubeflow/spark-operator/pkg/common"
 )
 
 func NewPrivateKey() (*rsa.PrivateKey, error) {
-	key, err := rsa.GenerateKey(rand.Reader, RSAKeySize)
+	key, err := rsa.GenerateKey(rand.Reader, common.RSAKeySize)
 	if err != nil {
 		return nil, fmt.Errorf("failed to generate private key: %v", err)
 	}
diff --git a/pkg/certificate/util_test.go b/pkg/certificate/util_test.go
new file mode 100644
index 0000000000..d7a24e7ddb
--- /dev/null
+++ b/pkg/certificate/util_test.go
@@ -0,0 +1,58 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package certificate_test + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "testing" + "time" + + "k8s.io/client-go/util/cert" + + "github.com/kubeflow/spark-operator/pkg/certificate" + "github.com/kubeflow/spark-operator/pkg/common" +) + +func TestNewPrivateKey(t *testing.T) { + _, err := certificate.NewPrivateKey() + if err != nil { + t.Errorf("failed to generate private key: %v", err) + } +} + +func TestNewSignedServerCert(t *testing.T) { + cfg := cert.Config{ + CommonName: "test-server", + Organization: []string{"test-org"}, + NotBefore: time.Now(), + } + + caKey, _ := rsa.GenerateKey(rand.Reader, common.RSAKeySize) + caCert := &x509.Certificate{} + serverKey, _ := rsa.GenerateKey(rand.Reader, common.RSAKeySize) + + serverCert, err := certificate.NewSignedServerCert(cfg, caKey, caCert, serverKey) + if err != nil { + t.Errorf("failed to generate signed server certificate: %v", err) + } + + if serverCert == nil { + t.Error("server certificate is nil") + } +} diff --git a/pkg/client/clientset/versioned/fake/register.go b/pkg/client/clientset/versioned/fake/register.go index 2ba94243f1..6fd11d33c8 100644 --- a/pkg/client/clientset/versioned/fake/register.go +++ b/pkg/client/clientset/versioned/fake/register.go @@ -21,8 +21,8 @@ limitations under the License. package fake import ( - sparkoperatorv1beta1 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" - sparkoperatorv1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + sparkoperatorv1beta1 "github.com/kubeflow/spark-operator/api/v1beta1" + sparkoperatorv1beta2 "github.com/kubeflow/spark-operator/api/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/pkg/client/clientset/versioned/scheme/register.go b/pkg/client/clientset/versioned/scheme/register.go index d12cb60d48..d765130f14 100644 --- a/pkg/client/clientset/versioned/scheme/register.go +++ b/pkg/client/clientset/versioned/scheme/register.go @@ -21,8 +21,8 @@ limitations under the License. 
package scheme import ( - sparkoperatorv1beta1 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" - sparkoperatorv1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + sparkoperatorv1beta1 "github.com/kubeflow/spark-operator/api/v1beta1" + sparkoperatorv1beta2 "github.com/kubeflow/spark-operator/api/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/fake/fake_scheduledsparkapplication.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/fake/fake_scheduledsparkapplication.go index ac41f935cc..270b95ac8d 100644 --- a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/fake/fake_scheduledsparkapplication.go +++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/fake/fake_scheduledsparkapplication.go @@ -23,7 +23,7 @@ package fake import ( "context" - v1beta1 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + v1beta1 "github.com/kubeflow/spark-operator/api/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/fake/fake_sparkapplication.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/fake/fake_sparkapplication.go index e8772e7ba1..d8b61c686a 100644 --- a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/fake/fake_sparkapplication.go +++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/fake/fake_sparkapplication.go @@ -23,7 +23,7 @@ package fake import ( "context" - v1beta1 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + v1beta1 "github.com/kubeflow/spark-operator/api/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/scheduledsparkapplication.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/scheduledsparkapplication.go index 65336a68eb..9447017aa6 100644 --- a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/scheduledsparkapplication.go +++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/scheduledsparkapplication.go @@ -24,7 +24,7 @@ import ( "context" "time" - v1beta1 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + v1beta1 "github.com/kubeflow/spark-operator/api/v1beta1" scheme "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" diff --git a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/sparkapplication.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/sparkapplication.go index e4308e3093..b638991341 100644 --- a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/sparkapplication.go +++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/sparkapplication.go @@ -24,7 +24,7 @@ import ( "context" "time" - v1beta1 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + v1beta1 "github.com/kubeflow/spark-operator/api/v1beta1" scheme "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/scheme" v1 
"k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" diff --git a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/sparkoperator.k8s.io_client.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/sparkoperator.k8s.io_client.go index c347da5be3..016a05d27c 100644 --- a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/sparkoperator.k8s.io_client.go +++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta1/sparkoperator.k8s.io_client.go @@ -21,7 +21,7 @@ limitations under the License. package v1beta1 import ( - v1beta1 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + v1beta1 "github.com/kubeflow/spark-operator/api/v1beta1" "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/scheme" rest "k8s.io/client-go/rest" ) @@ -74,7 +74,7 @@ func New(c rest.Interface) *SparkoperatorV1beta1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion + gv := v1beta1.GroupVersion config.GroupVersion = &gv config.APIPath = "/apis" config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() diff --git a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_scheduledsparkapplication.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_scheduledsparkapplication.go index 6d2218ba46..be3cdec5e2 100644 --- a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_scheduledsparkapplication.go +++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_scheduledsparkapplication.go @@ -23,7 +23,7 @@ package fake import ( "context" - v1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + v1beta2 "github.com/kubeflow/spark-operator/api/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_sparkapplication.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_sparkapplication.go index aa2a994390..b5bddecae5 100644 --- a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_sparkapplication.go +++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/fake/fake_sparkapplication.go @@ -23,7 +23,7 @@ package fake import ( "context" - v1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + v1beta2 "github.com/kubeflow/spark-operator/api/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go index 38b0063685..2a1b6f8832 100644 --- a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go +++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go @@ -24,7 +24,7 @@ import ( "context" "time" - v1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + v1beta2 "github.com/kubeflow/spark-operator/api/v1beta2" scheme "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" diff --git 
a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/sparkapplication.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/sparkapplication.go index cc541f5dd9..6e6e17a29a 100644 --- a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/sparkapplication.go +++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/sparkapplication.go @@ -24,7 +24,7 @@ import ( "context" "time" - v1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + v1beta2 "github.com/kubeflow/spark-operator/api/v1beta2" scheme "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" diff --git a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/sparkoperator.k8s.io_client.go b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/sparkoperator.k8s.io_client.go index cb8dc20734..6d9b3ae855 100644 --- a/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/sparkoperator.k8s.io_client.go +++ b/pkg/client/clientset/versioned/typed/sparkoperator.k8s.io/v1beta2/sparkoperator.k8s.io_client.go @@ -21,7 +21,7 @@ limitations under the License. package v1beta2 import ( - v1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + v1beta2 "github.com/kubeflow/spark-operator/api/v1beta2" "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/scheme" rest "k8s.io/client-go/rest" ) diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go index 6992de7df7..0e952e11e8 100644 --- a/pkg/client/informers/externalversions/generic.go +++ b/pkg/client/informers/externalversions/generic.go @@ -23,8 +23,8 @@ package externalversions import ( "fmt" - v1beta1 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" - v1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + v1beta1 "github.com/kubeflow/spark-operator/api/v1beta1" + v1beta2 "github.com/kubeflow/spark-operator/api/v1beta2" schema "k8s.io/apimachinery/pkg/runtime/schema" cache "k8s.io/client-go/tools/cache" ) @@ -56,15 +56,15 @@ func (f *genericInformer) Lister() cache.GenericLister { func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { switch resource { // Group=sparkoperator.k8s.io, Version=v1beta1 - case v1beta1.SchemeGroupVersion.WithResource("scheduledsparkapplications"): + case v1beta1.GroupVersion.WithResource("scheduledsparkapplications"): return &genericInformer{resource: resource.GroupResource(), informer: f.Sparkoperator().V1beta1().ScheduledSparkApplications().Informer()}, nil - case v1beta1.SchemeGroupVersion.WithResource("sparkapplications"): + case v1beta1.GroupVersion.WithResource("sparkapplications"): return &genericInformer{resource: resource.GroupResource(), informer: f.Sparkoperator().V1beta1().SparkApplications().Informer()}, nil // Group=sparkoperator.k8s.io, Version=v1beta2 - case v1beta2.SchemeGroupVersion.WithResource("scheduledsparkapplications"): + case v1beta2.GroupVersion.WithResource("scheduledsparkapplications"): return &genericInformer{resource: resource.GroupResource(), informer: f.Sparkoperator().V1beta2().ScheduledSparkApplications().Informer()}, nil - case v1beta2.SchemeGroupVersion.WithResource("sparkapplications"): + case v1beta2.GroupVersion.WithResource("sparkapplications"): return &genericInformer{resource: 
resource.GroupResource(), informer: f.Sparkoperator().V1beta2().SparkApplications().Informer()}, nil } diff --git a/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta1/scheduledsparkapplication.go b/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta1/scheduledsparkapplication.go index 46de9ba529..78564956b1 100644 --- a/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta1/scheduledsparkapplication.go +++ b/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta1/scheduledsparkapplication.go @@ -24,7 +24,7 @@ import ( "context" time "time" - sparkoperatork8siov1beta1 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + sparkoperatork8siov1beta1 "github.com/kubeflow/spark-operator/api/v1beta1" versioned "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned" internalinterfaces "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions/internalinterfaces" v1beta1 "github.com/kubeflow/spark-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta1" diff --git a/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta1/sparkapplication.go b/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta1/sparkapplication.go index f38734232a..b33dd91ea7 100644 --- a/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta1/sparkapplication.go +++ b/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta1/sparkapplication.go @@ -24,7 +24,7 @@ import ( "context" time "time" - sparkoperatork8siov1beta1 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + sparkoperatork8siov1beta1 "github.com/kubeflow/spark-operator/api/v1beta1" versioned "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned" internalinterfaces "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions/internalinterfaces" v1beta1 "github.com/kubeflow/spark-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta1" diff --git a/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go b/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go index ffa1fddd37..6c0a0ac992 100644 --- a/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go +++ b/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go @@ -24,7 +24,7 @@ import ( "context" time "time" - sparkoperatork8siov1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + sparkoperatork8siov1beta2 "github.com/kubeflow/spark-operator/api/v1beta2" versioned "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned" internalinterfaces "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions/internalinterfaces" v1beta2 "github.com/kubeflow/spark-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta2" diff --git a/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2/sparkapplication.go b/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2/sparkapplication.go index da42c12ec0..5ad4788762 100644 --- a/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2/sparkapplication.go +++ b/pkg/client/informers/externalversions/sparkoperator.k8s.io/v1beta2/sparkapplication.go @@ -24,7 +24,7 @@ import ( "context" time "time" - sparkoperatork8siov1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + sparkoperatork8siov1beta2 
"github.com/kubeflow/spark-operator/api/v1beta2" versioned "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned" internalinterfaces "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions/internalinterfaces" v1beta2 "github.com/kubeflow/spark-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta2" diff --git a/pkg/client/listers/sparkoperator.k8s.io/v1beta1/scheduledsparkapplication.go b/pkg/client/listers/sparkoperator.k8s.io/v1beta1/scheduledsparkapplication.go index f3921e8106..b0058373bc 100644 --- a/pkg/client/listers/sparkoperator.k8s.io/v1beta1/scheduledsparkapplication.go +++ b/pkg/client/listers/sparkoperator.k8s.io/v1beta1/scheduledsparkapplication.go @@ -21,7 +21,7 @@ limitations under the License. package v1beta1 import ( - v1beta1 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + v1beta1 "github.com/kubeflow/spark-operator/api/v1beta1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" diff --git a/pkg/client/listers/sparkoperator.k8s.io/v1beta1/sparkapplication.go b/pkg/client/listers/sparkoperator.k8s.io/v1beta1/sparkapplication.go index 51ceafa4dd..9afc432f58 100644 --- a/pkg/client/listers/sparkoperator.k8s.io/v1beta1/sparkapplication.go +++ b/pkg/client/listers/sparkoperator.k8s.io/v1beta1/sparkapplication.go @@ -21,7 +21,7 @@ limitations under the License. package v1beta1 import ( - v1beta1 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta1" + v1beta1 "github.com/kubeflow/spark-operator/api/v1beta1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" diff --git a/pkg/client/listers/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go b/pkg/client/listers/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go index f70331d5b0..c4d9faa0c1 100644 --- a/pkg/client/listers/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go +++ b/pkg/client/listers/sparkoperator.k8s.io/v1beta2/scheduledsparkapplication.go @@ -21,7 +21,7 @@ limitations under the License. package v1beta2 import ( - v1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + v1beta2 "github.com/kubeflow/spark-operator/api/v1beta2" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" diff --git a/pkg/client/listers/sparkoperator.k8s.io/v1beta2/sparkapplication.go b/pkg/client/listers/sparkoperator.k8s.io/v1beta2/sparkapplication.go index 4818a3cf77..95cee753f8 100644 --- a/pkg/client/listers/sparkoperator.k8s.io/v1beta2/sparkapplication.go +++ b/pkg/client/listers/sparkoperator.k8s.io/v1beta2/sparkapplication.go @@ -21,7 +21,7 @@ limitations under the License. package v1beta2 import ( - v1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + v1beta2 "github.com/kubeflow/spark-operator/api/v1beta2" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" diff --git a/pkg/common/constants.go b/pkg/common/constants.go new file mode 100644 index 0000000000..59ffc87081 --- /dev/null +++ b/pkg/common/constants.go @@ -0,0 +1,50 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +const ( + ErrorCodePodAlreadyExists = "code=409" +) + +const ( + SparkApplicationFinalizerName = "sparkoperator.k8s.io/finalizer" + ScheduledSparkApplicationFinalizerName = "sparkoperator.k8s.io/finalizer" +) + +const ( + RSAKeySize = 2048 +) + +const ( + CAKeyPem = "ca-key.pem" + CACertPem = "ca-cert.pem" + ServerKeyPem = "server-key.pem" + ServerCertPem = "server-cert.pem" +) + +// Kubernetes volume types. +const ( + VolumeTypeEmptyDir = "emptyDir" + VolumeTypeHostPath = "hostPath" + VolumeTypeNFS = "nfs" + VolumeTypePersistentVolumeClaim = "persistentVolumeClaim" +) + +const ( + // Epsilon is a small number used to compare 64 bit floating point numbers. + Epsilon = 1e-9 +) diff --git a/pkg/config/doc.go b/pkg/common/doc.go similarity index 89% rename from pkg/config/doc.go rename to pkg/common/doc.go index f5c09720c7..4ff7b522bc 100644 --- a/pkg/config/doc.go +++ b/pkg/common/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package config +package common -// Package config contains code that deals with configuration of Spark driver and executor pods, e.g., mounting +// Package common contains code that deals with configuration of Spark driver and executor pods, e.g., mounting // user-specified ConfigMaps, volumes, secrets, etc. diff --git a/pkg/common/event.go b/pkg/common/event.go new file mode 100644 index 0000000000..0c469ce926 --- /dev/null +++ b/pkg/common/event.go @@ -0,0 +1,58 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package common + +// SparkApplication events +const ( + EventSparkApplicationAdded = "SparkApplicationAdded" + + EventSparkApplicationSubmitted = "SparkApplicationSubmitted" + + EventSparkApplicationSubmissionFailed = "SparkApplicationSubmissionFailed" + + EventSparkApplicationCompleted = "SparkApplicationCompleted" + + EventSparkApplicationFailed = "SparkApplicationFailed" + + EventSparkApplicationPendingRerun = "SparkApplicationPendingRerun" +) + +// Spark driver events +const ( + EventSparkDriverPending = "SparkDriverPending" + + EventSparkDriverRunning = "SparkDriverRunning" + + EventSparkDriverCompleted = "SparkDriverCompleted" + + EventSparkDriverFailed = "SparkDriverFailed" + + EventSparkDriverUnknown = "SparkDriverUnknown" +) + +// Spark executor events +const ( + EventSparkExecutorPending = "SparkExecutorPending" + + EventSparkExecutorRunning = "SparkExecutorRunning" + + EventSparkExecutorCompleted = "SparkExecutorCompleted" + + EventSparkExecutorFailed = "SparkExecutorFailed" + + EventSparkExecutorUnknown = "SparkExecutorUnknown" +) diff --git a/pkg/common/metrics.go b/pkg/common/metrics.go new file mode 100644 index 0000000000..7e38dd7ba3 --- /dev/null +++ b/pkg/common/metrics.go @@ -0,0 +1,49 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +// Spark application metric names. +const ( + MetricSparkApplicationCount = "spark_application_count" + + MetricSparkApplicationSubmitCount = "spark_application_submit_count" + + MetricSparkApplicationFailedSubmissionCount = "spark_application_failed_submission_count" + + MetricSparkApplicationRunningCount = "spark_application_running_count" + + MetricSparkApplicationSuccessCount = "spark_application_success_count" + + MetricSparkApplicationFailureCount = "spark_application_failure_count" + + MetricSparkApplicationSuccessExecutionTimeSeconds = "spark_application_success_execution_time_seconds" + + MetricSparkApplicationFailureExecutionTimeSeconds = "spark_application_failure_execution_time_seconds" + + MetricSparkApplicationStartLatencySeconds = "spark_application_start_latency_seconds" + + MetricSparkApplicationStartLatencySecondsHistogram = "spark_application_start_latency_seconds_histogram" +) + +// Spark executor metric names. +const ( + MetricSparkExecutorRunningCount = "spark_executor_running_count" + + MetricSparkExecutorSuccessCount = "spark_executor_success_count" + + MetricSparkExecutorFailureCount = "spark_executor_failure_count" +) diff --git a/pkg/common/prometheus.go b/pkg/common/prometheus.go new file mode 100644 index 0000000000..2e141f3270 --- /dev/null +++ b/pkg/common/prometheus.go @@ -0,0 +1,139 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+const (
+ // PrometheusConfigMapNameSuffix is the name suffix of the Prometheus ConfigMap.
+ PrometheusConfigMapNameSuffix = "prom-conf"
+
+ // PrometheusConfigMapMountPath is the mount path of the Prometheus ConfigMap.
+ PrometheusConfigMapMountPath = "/etc/metrics/conf"
+)
+
+const (
+ MetricsPropertiesKey = "metrics.properties"
+ PrometheusConfigKey = "prometheus.yaml"
+ PrometheusScrapeAnnotation = "prometheus.io/scrape"
+ PrometheusPortAnnotation = "prometheus.io/port"
+ PrometheusPathAnnotation = "prometheus.io/path"
+)
+
+// DefaultMetricsProperties is the default content of metrics.properties.
+const DefaultMetricsProperties = `
+*.sink.jmx.class=org.apache.spark.metrics.sink.JmxSink
+driver.source.jvm.class=org.apache.spark.metrics.source.JvmSource
+executor.source.jvm.class=org.apache.spark.metrics.source.JvmSource`
+
+// DefaultPrometheusConfiguration is the default content of prometheus.yaml.
+const DefaultPrometheusConfiguration = `
+lowercaseOutputName: true
+attrNameSnakeCase: true
+rules:
+  - pattern: metrics<name=(\S+)\.(\S+)\.driver\.(BlockManager|DAGScheduler|jvm)\.(\S+)><>Value
+    name: spark_driver_$3_$4
+    type: GAUGE
+    labels:
+      app_namespace: "$1"
+      app_id: "$2"
+  - pattern: metrics<name=(\S+)\.(\S+)\.driver\.(\S+)\.StreamingMetrics\.streaming\.(\S+)><>Value
+    name: spark_streaming_driver_$4
+    type: GAUGE
+    labels:
+      app_namespace: "$1"
+      app_id: "$2"
+  - pattern: metrics<name=(\S+)\.(\S+)\.spark\.streaming\.(\S+)\.(\S+)><>Value
+    name: spark_structured_streaming_driver_$4
+    type: GAUGE
+    labels:
+      app_namespace: "$1"
+      app_id: "$2"
+      query_name: "$3"
+  - pattern: metrics<name=(\S+)\.(\S+)\.(\S+)\.executor\.(\S+)><>Value
+    name: spark_executor_$4
+    type: GAUGE
+    labels:
+      app_namespace: "$1"
+      app_id: "$2"
+      executor_id: "$3"
+  - pattern: metrics<name=(\S+)\.(\S+)\.driver\.DAGScheduler\.(.*)><>Count
+    name: spark_driver_DAGScheduler_$3_count
+    type: COUNTER
+    labels:
+      app_namespace: "$1"
+      app_id: "$2"
+  - pattern: metrics<name=(\S+)\.(\S+)\.driver\.HiveExternalCatalog\.(.*)><>Count
+    name: spark_driver_HiveExternalCatalog_$3_count
+    type: COUNTER
+    labels:
+      app_namespace: "$1"
+      app_id: "$2"
+  - pattern: metrics<name=(\S+)\.(\S+)\.driver\.CodeGenerator\.(.*)><>Count
+    name: spark_driver_CodeGenerator_$3_count
+    type: COUNTER
+    labels:
+      app_namespace: "$1"
+      app_id: "$2"
+  - pattern: metrics<name=(\S+)\.(\S+)\.driver\.LiveListenerBus\.(.*)><>Count
+    name: spark_driver_LiveListenerBus_$3_count
+    type: COUNTER
+    labels:
+      app_namespace: "$1"
+      app_id: "$2"
+  - pattern: metrics<name=(\S+)\.(\S+)\.driver\.LiveListenerBus\.(.*)><>Value
+    name: spark_driver_LiveListenerBus_$3
+    type: GAUGE
+    labels:
+      app_namespace: "$1"
+      app_id: "$2"
+  - pattern: metrics<name=(\S+)\.(\S+)\.(.*)\.executor\.(.*)><>Count
+    name: spark_executor_$4_count
+    type: COUNTER
+    labels:
+      app_namespace: "$1"
+      app_id: "$2"
+      executor_id: "$3"
+  - pattern: metrics<name=(\S+)\.(\S+)\.([0-9]+)\.(jvm|NettyBlockTransfer)\.(.*)><>Value
+    name: spark_executor_$4_$5
+    type: GAUGE
+    labels:
+      app_namespace: "$1"
+      app_id: "$2"
+      executor_id: "$3"
+  - pattern: metrics<name=(\S+)\.(\S+)\.([0-9]+)\.HiveExternalCatalog\.(.*)><>Count
+    name: spark_executor_HiveExternalCatalog_$4_count
+    type: COUNTER
+    labels:
+      app_namespace: "$1"
+      app_id: "$2"
+      executor_id: "$3"
+  - pattern: metrics<name=(\S+)\.(\S+)\.([0-9]+)\.CodeGenerator\.(.*)><>Count
+    name: spark_executor_CodeGenerator_$4_count
+    type: COUNTER
+    labels:
+      app_namespace: "$1"
+      app_id: "$2"
+      executor_id: "$3"
+`
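+
+// For example (illustrative metric name, not part of the config above): with these rules,
+// a driver JMX counter such as
+// "metrics:name=default.spark-000.driver.DAGScheduler.messageProcessingTime" would be
+// exported as spark_driver_DAGScheduler_messageProcessingTime_count with labels
+// app_namespace="default" and app_id="spark-000".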
+
+// DefaultPrometheusJavaAgentPort is the default port used by the Prometheus JMX exporter.
+const DefaultPrometheusJavaAgentPort int32 = 8090
+
+// DefaultPrometheusPortProtocol is the default protocol used by the Prometheus JMX exporter.
+const DefaultPrometheusPortProtocol string = "TCP"
+
+// DefaultPrometheusPortName is the default port name used by the Prometheus JMX exporter.
+const DefaultPrometheusPortName string = "jmx-exporter"
diff --git a/pkg/common/spark.go b/pkg/common/spark.go
new file mode 100644
index 0000000000..3c53c5fe61
--- /dev/null
+++ b/pkg/common/spark.go
@@ -0,0 +1,370 @@
+/*
+Copyright 2017 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+// Spark environment variables.
+const (
+ EnvSparkHome = "SPARK_HOME"
+
+ EnvKubernetesServiceHost = "KUBERNETES_SERVICE_HOST"
+
+ EnvKubernetesServicePort = "KUBERNETES_SERVICE_PORT"
+)
+
+// Spark properties.
+const (
+ // SparkAppName is the configuration property for application name.
+ SparkAppName = "spark.app.name"
+
+ SparkDriverCores = "spark.driver.cores"
+
+ SparkDriverMemory = "spark.driver.memory"
+
+ SparkDriverMemoryOverhead = "spark.driver.memoryOverhead"
+
+ SparkExecutorInstances = "spark.executor.instances"
+
+ SparkExecutorEnvTemplate = "spark.executor.env.%s"
+
+ SparkExecutorCores = "spark.executor.cores"
+
+ SparkExecutorMemory = "spark.executor.memory"
+
+ SparkExecutorMemoryOverhead = "spark.executor.memoryOverhead"
+
+ SparkUIProxyBase = "spark.ui.proxyBase"
+
+ SparkUIProxyRedirectURI = "spark.ui.proxyRedirectUri"
+)
+
+// Spark on Kubernetes properties.
+const (
+ // SparkKubernetesDriverMaster is the Spark configuration key for specifying the Kubernetes master the driver uses
+ // to manage executor pods and other Kubernetes resources.
+ SparkKubernetesDriverMaster = "spark.kubernetes.driver.master"
+
+ // SparkKubernetesNamespace is the configuration property for application namespace.
+ SparkKubernetesNamespace = "spark.kubernetes.namespace"
+
+ // SparkKubernetesContainerImage is the configuration property for specifying the unified container image.
+ SparkKubernetesContainerImage = "spark.kubernetes.container.image"
+
+ // SparkKubernetesContainerImagePullPolicy is the configuration property for specifying the container image pull policy.
+ SparkKubernetesContainerImagePullPolicy = "spark.kubernetes.container.image.pullPolicy"
+
+ // SparkKubernetesContainerImagePullSecrets is the configuration property for specifying the comma-separated list of image-pull
+ // secrets.
+ SparkKubernetesContainerImagePullSecrets = "spark.kubernetes.container.image.pullSecrets"
+
+ SparkKubernetesAllocationBatchSize = "spark.kubernetes.allocation.batch.size"
+
+ SparkKubernetesAllocationBatchDelay = "spark.kubernetes.allocation.batch.delay"
+
+ // SparkKubernetesAuthenticateDriverServiceAccountName is the Spark configuration key for specifying the name of the Kubernetes service
+ // account used by the driver pod.
+ SparkKubernetesAuthenticateDriverServiceAccountName = "spark.kubernetes.authenticate.driver.serviceAccountName"
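+
+ // For example (illustrative value): the operator passes these keys to spark-submit as
+ // --conf flags, e.g. "--conf spark.kubernetes.namespace=default" for SparkKubernetesNamespace.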
+
+ // SparkKubernetesAuthenticateExecutorServiceAccountName is the Spark configuration key for specifying the name of the Kubernetes service
+ // account used by the executor pods.
+ SparkKubernetesAuthenticateExecutorServiceAccountName = "spark.kubernetes.authenticate.executor.serviceAccountName"
+
+ // SparkKubernetesDriverLabelTemplate is the Spark configuration key template for labels on the driver pod.
+ SparkKubernetesDriverLabelTemplate = "spark.kubernetes.driver.label.%s"
+
+ // SparkKubernetesDriverAnnotationTemplate is the Spark configuration key template for annotations on the driver pod.
+ SparkKubernetesDriverAnnotationTemplate = "spark.kubernetes.driver.annotation.%s"
+
+ // SparkKubernetesDriverServiceLabelTemplate is the key template of labels to be added to the driver service.
+ SparkKubernetesDriverServiceLabelTemplate = "spark.kubernetes.driver.service.label.%s"
+
+ // SparkKubernetesDriverServiceAnnotationTemplate is the key template of annotations to be added to the driver service.
+ SparkKubernetesDriverServiceAnnotationTemplate = "spark.kubernetes.driver.service.annotation.%s"
+
+ // SparkKubernetesExecutorLabelTemplate is the Spark configuration key template for labels on the executor pods.
+ SparkKubernetesExecutorLabelTemplate = "spark.kubernetes.executor.label.%s"
+
+ // SparkKubernetesExecutorAnnotationTemplate is the Spark configuration key template for annotations on the executor pods.
+ SparkKubernetesExecutorAnnotationTemplate = "spark.kubernetes.executor.annotation.%s"
+
+ // SparkKubernetesDriverPodName is the Spark configuration key for driver pod name.
+ SparkKubernetesDriverPodName = "spark.kubernetes.driver.pod.name"
+
+ SparkKubernetesExecutorPodNamePrefix = "spark.kubernetes.executor.podNamePrefix"
+
+ // SparkKubernetesDriverRequestCores is the configuration property for specifying the physical CPU request for the driver.
+ SparkKubernetesDriverRequestCores = "spark.kubernetes.driver.request.cores"
+
+ // SparkKubernetesDriverLimitCores is the configuration property for specifying the hard CPU limit for the driver pod.
+ SparkKubernetesDriverLimitCores = "spark.kubernetes.driver.limit.cores"
+
+ // SparkKubernetesExecutorRequestCores is the configuration property for specifying the physical CPU request for executors.
+ SparkKubernetesExecutorRequestCores = "spark.kubernetes.executor.request.cores"
+
+ // SparkKubernetesExecutorLimitCores is the configuration property for specifying the hard CPU limit for the executor pods.
+ SparkKubernetesExecutorLimitCores = "spark.kubernetes.executor.limit.cores"
+
+ // SparkKubernetesNodeSelectorTemplate is the configuration property template for specifying a node selector for the pods.
+ SparkKubernetesNodeSelectorTemplate = "spark.kubernetes.node.selector.%s"
+
+ SparkKubernetesDriverNodeSelectorTemplate = "spark.kubernetes.driver.node.selector.%s"
+
+ SparkKubernetesExecutorNodeSelectorTemplate = "spark.kubernetes.executor.node.selector.%s"
+
+ // SparkKubernetesDriverEnvTemplate is the Spark configuration key template for setting environment variables
+ // into the driver.
+ SparkKubernetesDriverEnvTemplate = "spark.kubernetes.driverEnv.%s"
+
+ // SparkKubernetesDriverSecretsTemplate is the configuration property template for specifying secrets to be mounted into the
+ // driver.
+ SparkKubernetesDriverSecretsTemplate = "spark.kubernetes.driver.secrets.%s"
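+
+ // For example (illustrative argument): fmt.Sprintf(SparkKubernetesDriverLabelTemplate, "version")
+ // yields the property key "spark.kubernetes.driver.label.version".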
+
+ // SparkKubernetesExecutorSecretsTemplate is the configuration property template for specifying secrets to be mounted into the
+ // executors.
+ SparkKubernetesExecutorSecretsTemplate = "spark.kubernetes.executor.secrets.%s"
+
+ // SparkKubernetesDriverSecretKeyRefTemplate is the configuration property template for specifying environment variables
+ // from SecretKeyRefs for the driver.
+ SparkKubernetesDriverSecretKeyRefTemplate = "spark.kubernetes.driver.secretKeyRef.%s"
+
+ // SparkKubernetesExecutorSecretKeyRefTemplate is the configuration property template for specifying environment variables
+ // from SecretKeyRefs for the executors.
+ SparkKubernetesExecutorSecretKeyRefTemplate = "spark.kubernetes.executor.secretKeyRef.%s"
+
+ // SparkKubernetesDriverContainerImage is the configuration property for specifying a custom driver container image.
+ SparkKubernetesDriverContainerImage = "spark.kubernetes.driver.container.image"
+
+ // SparkKubernetesExecutorContainerImage is the configuration property for specifying a custom executor container image.
+ SparkKubernetesExecutorContainerImage = "spark.kubernetes.executor.container.image"
+
+ // SparkKubernetesDriverVolumesPrefix is the Spark configuration prefix for mounting a volume into the driver pod.
+ SparkKubernetesDriverVolumesPrefix = "spark.kubernetes.driver.volumes."
+ SparkKubernetesDriverVolumesMountPathTemplate = "spark.kubernetes.driver.volumes.%s.%s.mount.path"
+ SparkKubernetesDriverVolumesMountSubPathTemplate = "spark.kubernetes.driver.volumes.%s.%s.mount.subPath"
+ SparkKubernetesDriverVolumesMountReadOnlyTemplate = "spark.kubernetes.driver.volumes.%s.%s.mount.readOnly"
+ SparkKubernetesDriverVolumesOptionsTemplate = "spark.kubernetes.driver.volumes.%s.%s.options.%s"
+
+ // SparkKubernetesExecutorVolumesPrefix is the Spark configuration prefix for mounting a volume into the executor pods.
+ SparkKubernetesExecutorVolumesPrefix = "spark.kubernetes.executor.volumes."
+ SparkKubernetesExecutorVolumesMountPathTemplate = "spark.kubernetes.executor.volumes.%s.%s.mount.path"
+ SparkKubernetesExecutorVolumesMountSubPathTemplate = "spark.kubernetes.executor.volumes.%s.%s.mount.subPath"
+ SparkKubernetesExecutorVolumesMountReadOnlyTemplate = "spark.kubernetes.executor.volumes.%s.%s.mount.readOnly"
+ SparkKubernetesExecutorVolumesOptionsTemplate = "spark.kubernetes.executor.volumes.%s.%s.options.%s"
+
+ // SparkKubernetesMemoryOverheadFactor is the Spark configuration key for specifying the memory overhead factor used for non-JVM memory.
+ SparkKubernetesMemoryOverheadFactor = "spark.kubernetes.memoryOverheadFactor"
+
+ // SparkKubernetesPysparkPythonVersion is the Spark configuration key for specifying the Python version used.
+ SparkKubernetesPysparkPythonVersion = "spark.kubernetes.pyspark.pythonVersion"
+
+ SparkKubernetesDriverPodTemplateFile = "spark.kubernetes.driver.podTemplateFile"
+
+ SparkKubernetesDriverPodTemplateContainerName = "spark.kubernetes.driver.podTemplateContainerName"
+
+ SparkKubernetesExecutorPodTemplateFile = "spark.kubernetes.executor.podTemplateFile"
+
+ SparkKubernetesExecutorPodTemplateContainerName = "spark.kubernetes.executor.podTemplateContainerName"
+
+ SparkKubernetesDriverSchedulerName = "spark.kubernetes.driver.schedulerName"
+
+ SparkKubernetesExecutorSchedulerName = "spark.kubernetes.executor.schedulerName"
+
+ // SparkExecutorEnvVarConfigKeyPrefix is the Spark configuration prefix for setting environment variables
+ // into the executor.
+ SparkExecutorEnvVarConfigKeyPrefix = "spark.executorEnv."
+
+ // SparkKubernetesInitContainerImage is the Spark configuration key for specifying a custom init-container image.
+ SparkKubernetesInitContainerImage = "spark.kubernetes.initContainer.image" + + // SparkKubernetesMountDependenciesJarsDownloadDir is the Spark configuration key for specifying the download path in the driver and + // executors for remote jars. + SparkKubernetesMountDependenciesJarsDownloadDir = "spark.kubernetes.mountDependencies.jarsDownloadDir" + + // SparkKubernetesMountDependenciesFilesDownloadDir is the Spark configuration key for specifying the download path in the driver and + // executors for remote files. + SparkKubernetesMountDependenciesFilesDownloadDir = "spark.kubernetes.mountDependencies.filesDownloadDir" + + // SparkKubernetesMountDependenciesTimeout is the Spark configuration key for specifying the timeout in seconds of downloading + // remote dependencies. + SparkKubernetesMountDependenciesTimeout = "spark.kubernetes.mountDependencies.timeout" + + // SparkKubernetesMountDependenciesMaxSimultaneousDownloads is the Spark configuration key for specifying the maximum number of remote + // dependencies to download. + SparkKubernetesMountDependenciesMaxSimultaneousDownloads = "spark.kubernetes.mountDependencies.maxSimultaneousDownloads" + + // SparkKubernetesSubmissionWaitAppCompletion is the Spark configuration key for specifying whether to wait for application to complete. + SparkKubernetesSubmissionWaitAppCompletion = "spark.kubernetes.submission.waitAppCompletion" + + // SparkDriverExtraJavaOptions is the Spark configuration key for a string of extra JVM options to pass to driver. + SparkDriverExtraJavaOptions = "spark.driver.extraJavaOptions" + + // SparkExecutorExtraJavaOptions is the Spark configuration key for a string of extra JVM options to pass to executors. + SparkExecutorExtraJavaOptions = "spark.executor.extraJavaOptions" + + // SparkKubernetesExecutorDeleteOnTermination is the Spark configuration for specifying whether executor pods should be deleted in case of failure or normal termination. + SparkKubernetesExecutorDeleteOnTermination = "spark.kubernetes.executor.deleteOnTermination" +) + +// Dynamic allocation properties. +// Ref: https://spark.apache.org/docs/latest/configuration.html#dynamic-allocation +const ( + // SparkDynamicAllocationEnabled is the Spark configuration key for specifying if dynamic + // allocation is enabled or not. + SparkDynamicAllocationEnabled = "spark.dynamicAllocation.enabled" + + SparkDynamicAllocationExecutorIdleTimeout = "spark.dynamicAllocation.executorIdleTimeout" + + SparkDynamicAllocationCachedExecutorIdleTimeout = "spark.dynamicAllocation.cachedExecutorIdleTimeout" + + // SparkDynamicAllocationInitialExecutors is the Spark configuration key for specifying + // the initial number of executors to request if dynamic allocation is enabled. + SparkDynamicAllocationInitialExecutors = "spark.dynamicAllocation.initialExecutors" + + // SparkDynamicAllocationMaxExecutors is the Spark configuration key for specifying the + // upper bound of the number of executors to request if dynamic allocation is enabled. + SparkDynamicAllocationMaxExecutors = "spark.dynamicAllocation.maxExecutors" + + // SparkDynamicAllocationMinExecutors is the Spark configuration key for specifying the + // lower bound of the number of executors to request if dynamic allocation is enabled. 
+ SparkDynamicAllocationMinExecutors = "spark.dynamicAllocation.minExecutors"
+
+ SparkDynamicAllocationExecutorAllocationRatio = "spark.dynamicAllocation.executorAllocationRatio"
+
+ SparkDynamicAllocationSchedulerBacklogTimeout = "spark.dynamicAllocation.schedulerBacklogTimeout"
+
+ SparkDynamicAllocationSustainedSchedulerBacklogTimeout = "spark.dynamicAllocation.sustainedSchedulerBacklogTimeout"
+
+ // SparkDynamicAllocationShuffleTrackingEnabled is the Spark configuration key for
+ // specifying if shuffle data tracking is enabled.
+ SparkDynamicAllocationShuffleTrackingEnabled = "spark.dynamicAllocation.shuffleTracking.enabled"
+
+ // SparkDynamicAllocationShuffleTrackingTimeout is the Spark configuration key for specifying
+ // the shuffle tracking timeout in milliseconds if shuffle tracking is enabled.
+ SparkDynamicAllocationShuffleTrackingTimeout = "spark.dynamicAllocation.shuffleTracking.timeout"
+)
+
+const (
+ // SparkRoleDriver is the value of the spark-role label for the driver.
+ SparkRoleDriver = "driver"
+
+ // SparkRoleExecutor is the value of the spark-role label for the executors.
+ SparkRoleExecutor = "executor"
+)
+
+const (
+ // DefaultSparkConfDir is the default directory for Spark configuration files if not specified.
+ // This directory is where the Spark ConfigMap is mounted in the driver and executor containers.
+ DefaultSparkConfDir = "/etc/spark/conf"
+
+ // SparkConfigMapVolumeName is the name of the ConfigMap volume of Spark configuration files.
+ SparkConfigMapVolumeName = "spark-configmap-volume"
+
+ // DefaultHadoopConfDir is the default directory for Hadoop configuration files if not specified.
+ // This directory is where the Hadoop ConfigMap is mounted in the driver and executor containers.
+ DefaultHadoopConfDir = "/etc/hadoop/conf"
+
+ // HadoopConfigMapVolumeName is the name of the ConfigMap volume of Hadoop configuration files.
+ HadoopConfigMapVolumeName = "hadoop-configmap-volume"
+
+ // EnvSparkConfDir is the environment variable to add to the driver and executor Pods that points
+ // to the directory where the Spark ConfigMap is mounted.
+ EnvSparkConfDir = "SPARK_CONF_DIR"
+
+ // EnvHadoopConfDir is the environment variable to add to the driver and executor Pods that points
+ // to the directory where the Hadoop ConfigMap is mounted.
+ EnvHadoopConfDir = "HADOOP_CONF_DIR"
+)
+
+const (
+ // LabelSparkApplicationSelector is the app ID label set by the Spark distribution on the driver/executor pods.
+ LabelSparkApplicationSelector = "spark-app-selector"
+
+ // LabelSparkRole is the driver/executor label set by the operator/Spark distribution on the driver/executor pods.
+ LabelSparkRole = "spark-role"
+
+ // LabelAnnotationPrefix is the prefix of every label and annotation added by the controller.
+ LabelAnnotationPrefix = "sparkoperator.k8s.io/"
+
+ // LabelSparkAppName is the name of the label for the SparkApplication object name.
+ LabelSparkAppName = LabelAnnotationPrefix + "app-name"
+
+ // LabelScheduledSparkAppName is the name of the label for the ScheduledSparkApplication object name.
+ LabelScheduledSparkAppName = LabelAnnotationPrefix + "scheduled-app-name"
+
+ // LabelLaunchedBySparkOperator is a label on Spark pods launched through the Spark Operator.
+ LabelLaunchedBySparkOperator = LabelAnnotationPrefix + "launched-by-spark-operator"
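+
+ // For example (illustrative): LabelSparkAppName above resolves to the label key
+ // "sparkoperator.k8s.io/app-name".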
+
+ // LabelSubmissionID is the label that records the submission ID of the current run of an application.
+ LabelSubmissionID = LabelAnnotationPrefix + "submission-id"
+)
+
+const (
+ // SparkDriverContainerName is the name of the driver container in the Spark driver pod.
+ SparkDriverContainerName = "spark-kubernetes-driver"
+
+ // SparkExecutorContainerName is the name of the executor container in the Spark executor pod.
+ SparkExecutorContainerName = "executor"
+
+ // Spark3DefaultExecutorContainerName is the default executor container name in
+ // Spark 3.x, which allows the container name to be configured through the pod
+ // template support.
+ Spark3DefaultExecutorContainerName = "spark-kubernetes-executor"
+
+ // SparkLocalDirVolumePrefix is the volume name prefix for the "scratch" space directory.
+ SparkLocalDirVolumePrefix = "spark-local-dir-"
+)
+
+const (
+ SparkUIPortKey = "spark.ui.port"
+
+ DefaultSparkWebUIPort int32 = 4040
+
+ DefaultSparkWebUIPortName = "spark-driver-ui-port"
+)
+
+// https://spark.apache.org/docs/latest/configuration.html
+const (
+ DefaultCPUMilliCores = 1000
+
+ DefaultMemoryBytes = 1 << 30 // 1 Gi
+
+ DefaultJVMMemoryOverheadFactor = 0.1
+
+ DefaultNonJVMMemoryOverheadFactor = 0.4
+
+ MinMemoryOverhead = 384 * (1 << 20) // 384 Mi
+)
+
+const (
+ // EnvGoogleApplicationCredentials is the environment variable used by the
+ // Application Default Credentials mechanism. More details can be found at
+ // https://developers.google.com/identity/protocols/application-default-credentials.
+ EnvGoogleApplicationCredentials = "GOOGLE_APPLICATION_CREDENTIALS"
+
+ // ServiceAccountJSONKeyFileName is the assumed name of the service account
+ // JSON key file. This name is added to the service account secret mount path to
+ // form the path to the JSON key file referred to by GOOGLE_APPLICATION_CREDENTIALS.
+ ServiceAccountJSONKeyFileName = "key.json"
+
+ // EnvHadoopTokenFileLocation is the environment variable for specifying the location
+ // where the file storing the Hadoop delegation token is located.
+ EnvHadoopTokenFileLocation = "HADOOP_TOKEN_FILE_LOCATION"
+
+ // HadoopDelegationTokenFileName is the assumed name of the file storing the Hadoop
+ // delegation token. This name is added to the delegation token secret mount path to
+ // form the path to the file referred to by HADOOP_TOKEN_FILE_LOCATION.
+ HadoopDelegationTokenFileName = "hadoop.token"
+)
diff --git a/pkg/common/volcano.go b/pkg/common/volcano.go
new file mode 100644
index 0000000000..cda710480d
--- /dev/null
+++ b/pkg/common/volcano.go
@@ -0,0 +1,23 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+const (
+ VolcanoSchedulerName = "volcano"
+
+ VolcanoPodGroupName = "podgroups.scheduling.volcano.sh"
+)
diff --git a/pkg/config/config.go b/pkg/config/config.go
deleted file mode 100644
index 18a708c6db..0000000000
--- a/pkg/config/config.go
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
-Copyright 2017 Google LLC
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -import ( - "fmt" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" -) - -// GetDriverAnnotationOption returns a spark-submit option for a driver annotation of the given key and value. -func GetDriverAnnotationOption(key string, value string) string { - return fmt.Sprintf("%s%s=%s", SparkDriverAnnotationKeyPrefix, key, value) -} - -// GetExecutorAnnotationOption returns a spark-submit option for an executor annotation of the given key and value. -func GetExecutorAnnotationOption(key string, value string) string { - return fmt.Sprintf("%s%s=%s", SparkExecutorAnnotationKeyPrefix, key, value) -} - -// GetDriverEnvVarConfOptions returns a list of spark-submit options for setting driver environment variables. -func GetDriverEnvVarConfOptions(app *v1beta2.SparkApplication) []string { - var envVarConfOptions []string - for key, value := range app.Spec.Driver.EnvVars { - envVar := fmt.Sprintf("%s%s=%s", SparkDriverEnvVarConfigKeyPrefix, key, value) - envVarConfOptions = append(envVarConfOptions, envVar) - } - return envVarConfOptions -} - -// GetExecutorEnvVarConfOptions returns a list of spark-submit options for setting executor environment variables. -func GetExecutorEnvVarConfOptions(app *v1beta2.SparkApplication) []string { - var envVarConfOptions []string - for key, value := range app.Spec.Executor.EnvVars { - envVar := fmt.Sprintf("%s%s=%s", SparkExecutorEnvVarConfigKeyPrefix, key, value) - envVarConfOptions = append(envVarConfOptions, envVar) - } - return envVarConfOptions -} - -// GetPrometheusConfigMapName returns the name of the ConfigMap for Prometheus configuration. -func GetPrometheusConfigMapName(app *v1beta2.SparkApplication) string { - return fmt.Sprintf("%s-%s", app.Name, PrometheusConfigMapNameSuffix) -} diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go deleted file mode 100644 index 485c1cb282..0000000000 --- a/pkg/config/config_test.go +++ /dev/null @@ -1,74 +0,0 @@ -/* -Copyright 2017 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package config - -import ( - "strings" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" -) - -func TestGetDriverEnvVarConfOptions(t *testing.T) { - app := &v1beta2.SparkApplication{ - Spec: v1beta2.SparkApplicationSpec{ - Driver: v1beta2.DriverSpec{ - SparkPodSpec: v1beta2.SparkPodSpec{ - EnvVars: map[string]string{ - "ENV1": "VALUE1", - "ENV2": "VALUE2", - }, - }, - }, - }, - } - - options := GetDriverEnvVarConfOptions(app) - optionsMap := map[string]bool{ - strings.TrimPrefix(options[0], SparkDriverEnvVarConfigKeyPrefix): true, - strings.TrimPrefix(options[1], SparkDriverEnvVarConfigKeyPrefix): true, - } - assert.Equal(t, 2, len(optionsMap)) - assert.True(t, optionsMap["ENV1=VALUE1"]) - assert.True(t, optionsMap["ENV2=VALUE2"]) -} - -func TestGetExecutorEnvVarConfOptions(t *testing.T) { - app := &v1beta2.SparkApplication{ - Spec: v1beta2.SparkApplicationSpec{ - Executor: v1beta2.ExecutorSpec{ - SparkPodSpec: v1beta2.SparkPodSpec{ - EnvVars: map[string]string{ - "ENV1": "VALUE1", - "ENV2": "VALUE2", - }, - }, - }, - }, - } - - options := GetExecutorEnvVarConfOptions(app) - optionsMap := map[string]bool{ - strings.TrimPrefix(options[0], SparkExecutorEnvVarConfigKeyPrefix): true, - strings.TrimPrefix(options[1], SparkExecutorEnvVarConfigKeyPrefix): true, - } - assert.Equal(t, 2, len(optionsMap)) - assert.True(t, optionsMap["ENV1=VALUE1"]) - assert.True(t, optionsMap["ENV2=VALUE2"]) -} diff --git a/pkg/config/constants.go b/pkg/config/constants.go deleted file mode 100644 index b65f3a38c0..0000000000 --- a/pkg/config/constants.go +++ /dev/null @@ -1,317 +0,0 @@ -/* -Copyright 2017 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -const ( - // DefaultSparkConfDir is the default directory for Spark configuration files if not specified. - // This directory is where the Spark ConfigMap is mounted in the driver and executor containers. - DefaultSparkConfDir = "/etc/spark/conf" - // SparkConfigMapVolumeName is the name of the ConfigMap volume of Spark configuration files. - SparkConfigMapVolumeName = "spark-configmap-volume" - // DefaultHadoopConfDir is the default directory for Spark configuration files if not specified. - // This directory is where the Hadoop ConfigMap is mounted in the driver and executor containers. - DefaultHadoopConfDir = "/etc/hadoop/conf" - // HadoopConfigMapVolumeName is the name of the ConfigMap volume of Hadoop configuration files. - HadoopConfigMapVolumeName = "hadoop-configmap-volume" - // SparkConfDirEnvVar is the environment variable to add to the driver and executor Pods that point - // to the directory where the Spark ConfigMap is mounted. - SparkConfDirEnvVar = "SPARK_CONF_DIR" - // HadoopConfDirEnvVar is the environment variable to add to the driver and executor Pods that point - // to the directory where the Hadoop ConfigMap is mounted. 
- HadoopConfDirEnvVar = "HADOOP_CONF_DIR" -) - -const ( - // LabelAnnotationPrefix is the prefix of every labels and annotations added by the controller. - LabelAnnotationPrefix = "sparkoperator.k8s.io/" - // SparkAppNameLabel is the name of the label for the SparkApplication object name. - SparkAppNameLabel = LabelAnnotationPrefix + "app-name" - // ScheduledSparkAppNameLabel is the name of the label for the ScheduledSparkApplication object name. - ScheduledSparkAppNameLabel = LabelAnnotationPrefix + "scheduled-app-name" - // LaunchedBySparkOperatorLabel is a label on Spark pods launched through the Spark Operator. - LaunchedBySparkOperatorLabel = LabelAnnotationPrefix + "launched-by-spark-operator" - // SparkApplicationSelectorLabel is the AppID set by the spark-distribution on the driver/executors Pods. - SparkApplicationSelectorLabel = "spark-app-selector" - // SparkRoleLabel is the driver/executor label set by the operator/spark-distribution on the driver/executors Pods. - SparkRoleLabel = "spark-role" - // SparkDriverRole is the value of the spark-role label for the driver. - SparkDriverRole = "driver" - // SparkExecutorRole is the value of the spark-role label for the executors. - SparkExecutorRole = "executor" - // SubmissionIDLabel is the label that records the submission ID of the current run of an application. - SubmissionIDLabel = LabelAnnotationPrefix + "submission-id" -) - -const ( - // SparkAppNameKey is the configuration property for application name. - SparkAppNameKey = "spark.app.name" - // SparkAppNamespaceKey is the configuration property for application namespace. - SparkAppNamespaceKey = "spark.kubernetes.namespace" - // SparkContainerImageKey is the configuration property for specifying the unified container image. - SparkContainerImageKey = "spark.kubernetes.container.image" - // SparkImagePullSecretKey is the configuration property for specifying the comma-separated list of image-pull - // secrets. - SparkImagePullSecretKey = "spark.kubernetes.container.image.pullSecrets" - // SparkContainerImagePullPolicyKey is the configuration property for specifying the container image pull policy. - SparkContainerImagePullPolicyKey = "spark.kubernetes.container.image.pullPolicy" - // SparkNodeSelectorKeyPrefix is the configuration property prefix for specifying node selector for the pods. - SparkNodeSelectorKeyPrefix = "spark.kubernetes.node.selector." - // SparkDriverContainerImageKey is the configuration property for specifying a custom driver container image. - SparkDriverContainerImageKey = "spark.kubernetes.driver.container.image" - // SparkExecutorContainerImageKey is the configuration property for specifying a custom executor container image. - SparkExecutorContainerImageKey = "spark.kubernetes.executor.container.image" - // SparkDriverCoreRequestKey is the configuration property for specifying the physical CPU request for the driver. - SparkDriverCoreRequestKey = "spark.kubernetes.driver.request.cores" - // SparkExecutorCoreRequestKey is the configuration property for specifying the physical CPU request for executors. - SparkExecutorCoreRequestKey = "spark.kubernetes.executor.request.cores" - // SparkDriverCoreLimitKey is the configuration property for specifying the hard CPU limit for the driver pod. - SparkDriverCoreLimitKey = "spark.kubernetes.driver.limit.cores" - // SparkExecutorCoreLimitKey is the configuration property for specifying the hard CPU limit for the executor pods. 
- SparkExecutorCoreLimitKey = "spark.kubernetes.executor.limit.cores" - // SparkDriverSecretKeyPrefix is the configuration property prefix for specifying secrets to be mounted into the - // driver. - SparkDriverSecretKeyPrefix = "spark.kubernetes.driver.secrets." - // SparkExecutorSecretKeyPrefix is the configuration property prefix for specifying secrets to be mounted into the - // executors. - SparkExecutorSecretKeyPrefix = "spark.kubernetes.executor.secrets." - // SparkDriverSecretKeyRefKeyPrefix is the configuration property prefix for specifying environment variables - // from SecretKeyRefs for the driver. - SparkDriverSecretKeyRefKeyPrefix = "spark.kubernetes.driver.secretKeyRef." - // SparkExecutorSecretKeyRefKeyPrefix is the configuration property prefix for specifying environment variables - // from SecretKeyRefs for the executors. - SparkExecutorSecretKeyRefKeyPrefix = "spark.kubernetes.executor.secretKeyRef." - // SparkDriverEnvVarConfigKeyPrefix is the Spark configuration prefix for setting environment variables - // into the driver. - SparkDriverEnvVarConfigKeyPrefix = "spark.kubernetes.driverEnv." - // SparkExecutorEnvVarConfigKeyPrefix is the Spark configuration prefix for setting environment variables - // into the executor. - SparkExecutorEnvVarConfigKeyPrefix = "spark.executorEnv." - // SparkDriverAnnotationKeyPrefix is the Spark configuration key prefix for annotations on the driver Pod. - SparkDriverAnnotationKeyPrefix = "spark.kubernetes.driver.annotation." - // SparkExecutorAnnotationKeyPrefix is the Spark configuration key prefix for annotations on the executor Pods. - SparkExecutorAnnotationKeyPrefix = "spark.kubernetes.executor.annotation." - // SparkDriverLabelKeyPrefix is the Spark configuration key prefix for labels on the driver Pod. - SparkDriverLabelKeyPrefix = "spark.kubernetes.driver.label." - // SparkExecutorLabelKeyPrefix is the Spark configuration key prefix for labels on the executor Pods. - SparkExecutorLabelKeyPrefix = "spark.kubernetes.executor.label." - // SparkDriverVolumesPrefix is the Spark volumes configuration for mounting a volume into the driver pod. - SparkDriverVolumesPrefix = "spark.kubernetes.driver.volumes." - // SparkExecutorVolumesPrefix is the Spark volumes configuration for mounting a volume into the driver pod. - SparkExecutorVolumesPrefix = "spark.kubernetes.executor.volumes." - // SparkDriverPodNameKey is the Spark configuration key for driver pod name. - SparkDriverPodNameKey = "spark.kubernetes.driver.pod.name" - // SparkDriverServiceAccountName is the Spark configuration key for specifying name of the Kubernetes service - // account used by the driver pod. - SparkDriverServiceAccountName = "spark.kubernetes.authenticate.driver.serviceAccountName" - // account used by the executor pod. - SparkExecutorAccountName = "spark.kubernetes.authenticate.executor.serviceAccountName" - // SparkInitContainerImage is the Spark configuration key for specifying a custom init-container image. - SparkInitContainerImage = "spark.kubernetes.initContainer.image" - // SparkJarsDownloadDir is the Spark configuration key for specifying the download path in the driver and - // executors for remote jars. - SparkJarsDownloadDir = "spark.kubernetes.mountDependencies.jarsDownloadDir" - // SparkFilesDownloadDir is the Spark configuration key for specifying the download path in the driver and - // executors for remote files. 
- SparkFilesDownloadDir = "spark.kubernetes.mountDependencies.filesDownloadDir" - // SparkDownloadTimeout is the Spark configuration key for specifying the timeout in seconds of downloading - // remote dependencies. - SparkDownloadTimeout = "spark.kubernetes.mountDependencies.timeout" - // SparkMaxSimultaneousDownloads is the Spark configuration key for specifying the maximum number of remote - // dependencies to download. - SparkMaxSimultaneousDownloads = "spark.kubernetes.mountDependencies.maxSimultaneousDownloads" - // SparkWaitAppCompletion is the Spark configuration key for specifying whether to wait for application to complete. - SparkWaitAppCompletion = "spark.kubernetes.submission.waitAppCompletion" - // SparkPythonVersion is the Spark configuration key for specifying python version used. - SparkPythonVersion = "spark.kubernetes.pyspark.pythonVersion" - // SparkMemoryOverheadFactor is the Spark configuration key for specifying memory overhead factor used for Non-JVM memory. - SparkMemoryOverheadFactor = "spark.kubernetes.memoryOverheadFactor" - // SparkDriverJavaOptions is the Spark configuration key for a string of extra JVM options to pass to driver. - SparkDriverJavaOptions = "spark.driver.extraJavaOptions" - // SparkExecutorJavaOptions is the Spark configuration key for a string of extra JVM options to pass to executors. - SparkExecutorJavaOptions = "spark.executor.extraJavaOptions" - // SparkExecutorDeleteOnTermination is the Spark configuration for specifying whether executor pods should be deleted in case of failure or normal termination - SparkExecutorDeleteOnTermination = "spark.kubernetes.executor.deleteOnTermination" - // SparkDriverKubernetesMaster is the Spark configuration key for specifying the Kubernetes master the driver use - // to manage executor pods and other Kubernetes resources. - SparkDriverKubernetesMaster = "spark.kubernetes.driver.master" - // SparkDriverServiceAnnotationKeyPrefix is the key prefix of annotations to be added to the driver service. - SparkDriverServiceAnnotationKeyPrefix = "spark.kubernetes.driver.service.annotation." - // SparkDriverServiceLabelKeyPrefix is the key prefix of annotations to be added to the driver service. - SparkDriverServiceLabelKeyPrefix = "spark.kubernetes.driver.service.label." - // SparkDynamicAllocationEnabled is the Spark configuration key for specifying if dynamic - // allocation is enabled or not. - SparkDynamicAllocationEnabled = "spark.dynamicAllocation.enabled" - // SparkDynamicAllocationShuffleTrackingEnabled is the Spark configuration key for - // specifying if shuffle data tracking is enabled. - SparkDynamicAllocationShuffleTrackingEnabled = "spark.dynamicAllocation.shuffleTracking.enabled" - // SparkDynamicAllocationShuffleTrackingTimeout is the Spark configuration key for specifying - // the shuffle tracking timeout in milliseconds if shuffle tracking is enabled. - SparkDynamicAllocationShuffleTrackingTimeout = "spark.dynamicAllocation.shuffleTracking.timeout" - // SparkDynamicAllocationInitialExecutors is the Spark configuration key for specifying - // the initial number of executors to request if dynamic allocation is enabled. - SparkDynamicAllocationInitialExecutors = "spark.dynamicAllocation.initialExecutors" - // SparkDynamicAllocationMinExecutors is the Spark configuration key for specifying the - // lower bound of the number of executors to request if dynamic allocation is enabled. 
- SparkDynamicAllocationMinExecutors = "spark.dynamicAllocation.minExecutors" - // SparkDynamicAllocationMaxExecutors is the Spark configuration key for specifying the - // upper bound of the number of executors to request if dynamic allocation is enabled. - SparkDynamicAllocationMaxExecutors = "spark.dynamicAllocation.maxExecutors" -) - -const ( - // GoogleApplicationCredentialsEnvVar is the environment variable used by the - // Application Default Credentials mechanism. More details can be found at - // https://developers.google.com/identity/protocols/application-default-credentials. - GoogleApplicationCredentialsEnvVar = "GOOGLE_APPLICATION_CREDENTIALS" - // ServiceAccountJSONKeyFileName is the assumed name of the service account - // Json key file. This name is added to the service account secret mount path to - // form the path to the Json key file referred to by GOOGLE_APPLICATION_CREDENTIALS. - ServiceAccountJSONKeyFileName = "key.json" - // HadoopTokenFileLocationEnvVar is the environment variable for specifying the location - // where the file storing the Hadoop delegation token is located. - HadoopTokenFileLocationEnvVar = "HADOOP_TOKEN_FILE_LOCATION" - // HadoopDelegationTokenFileName is the assumed name of the file storing the Hadoop - // delegation token. This name is added to the delegation token secret mount path to - // form the path to the file referred to by HADOOP_TOKEN_FILE_LOCATION. - HadoopDelegationTokenFileName = "hadoop.token" -) - -const ( - // PrometheusConfigMapNameSuffix is the name prefix of the Prometheus ConfigMap. - PrometheusConfigMapNameSuffix = "prom-conf" - // PrometheusConfigMapMountPath is the mount path of the Prometheus ConfigMap. - PrometheusConfigMapMountPath = "/etc/metrics/conf" -) - -// DefaultMetricsProperties is the default content of metrics.properties. -const DefaultMetricsProperties = ` -*.sink.jmx.class=org.apache.spark.metrics.sink.JmxSink -driver.source.jvm.class=org.apache.spark.metrics.source.JvmSource -executor.source.jvm.class=org.apache.spark.metrics.source.JvmSource` - -// DefaultPrometheusConfiguration is the default content of prometheus.yaml. 
-const DefaultPrometheusConfiguration = `
-lowercaseOutputName: true
-attrNameSnakeCase: true
-rules:
-  - pattern: metrics<name=(\S+)\.(\S+)\.driver\.(BlockManager|DAGScheduler|jvm)\.(\S+)><>Value
-    name: spark_driver_$3_$4
-    type: GAUGE
-    labels:
-      app_namespace: "$1"
-      app_id: "$2"
-  - pattern: metrics<name=(\S+)\.(\S+)\.driver\.(\S+)\.StreamingMetrics\.streaming\.(\S+)><>Value
-    name: spark_streaming_driver_$4
-    type: GAUGE
-    labels:
-      app_namespace: "$1"
-      app_id: "$2"
-  - pattern: metrics<name=(\S+)\.(\S+)\.spark\.streaming\.(\S+)\.(\S+)><>Value
-    name: spark_structured_streaming_driver_$4
-    type: GAUGE
-    labels:
-      app_namespace: "$1"
-      app_id: "$2"
-      query_name: "$3"
-  - pattern: metrics<name=(\S+)\.(\S+)\.(\S+)\.executor\.(\S+)><>Value
-    name: spark_executor_$4
-    type: GAUGE
-    labels:
-      app_namespace: "$1"
-      app_id: "$2"
-      executor_id: "$3"
-  - pattern: metrics<name=(\S+)\.(\S+)\.driver\.DAGScheduler\.(.*)><>Count
-    name: spark_driver_DAGScheduler_$3_count
-    type: COUNTER
-    labels:
-      app_namespace: "$1"
-      app_id: "$2"
-  - pattern: metrics<name=(\S+)\.(\S+)\.driver\.HiveExternalCatalog\.(.*)><>Count
-    name: spark_driver_HiveExternalCatalog_$3_count
-    type: COUNTER
-    labels:
-      app_namespace: "$1"
-      app_id: "$2"
-  - pattern: metrics<name=(\S+)\.(\S+)\.driver\.CodeGenerator\.(.*)><>Count
-    name: spark_driver_CodeGenerator_$3_count
-    type: COUNTER
-    labels:
-      app_namespace: "$1"
-      app_id: "$2"
-  - pattern: metrics<name=(\S+)\.(\S+)\.driver\.LiveListenerBus\.(.*)><>Count
-    name: spark_driver_LiveListenerBus_$3_count
-    type: COUNTER
-    labels:
-      app_namespace: "$1"
-      app_id: "$2"
-  - pattern: metrics<name=(\S+)\.(\S+)\.driver\.LiveListenerBus\.(.*)><>Value
-    name: spark_driver_LiveListenerBus_$3
-    type: GAUGE
-    labels:
-      app_namespace: "$1"
-      app_id: "$2"
-  - pattern: metrics<name=(\S+)\.(\S+)\.(.*)\.executor\.(.*)><>Count
-    name: spark_executor_$4_count
-    type: COUNTER
-    labels:
-      app_namespace: "$1"
-      app_id: "$2"
-      executor_id: "$3"
-  - pattern: metrics<name=(\S+)\.(\S+)\.([0-9]+)\.(jvm|NettyBlockTransfer)\.(.*)><>Value
-    name: spark_executor_$4_$5
-    type: GAUGE
-    labels:
-      app_namespace: "$1"
-      app_id: "$2"
-      executor_id: "$3"
-  - pattern: metrics<name=(\S+)\.(\S+)\.([0-9]+)\.HiveExternalCatalog\.(.*)><>Count
-    name: spark_executor_HiveExternalCatalog_$4_count
-    type: COUNTER
-    labels:
-      app_namespace: "$1"
-      app_id: "$2"
-      executor_id: "$3"
-  - pattern: metrics<name=(\S+)\.(\S+)\.([0-9]+)\.CodeGenerator\.(.*)><>Count
-    name: spark_executor_CodeGenerator_$4_count
-    type: COUNTER
-    labels:
-      app_namespace: "$1"
-      app_id: "$2"
-      executor_id: "$3"
-`
-
-// DefaultPrometheusJavaAgentPort is the default port used by the Prometheus JMX exporter.
-const DefaultPrometheusJavaAgentPort int32 = 8090
-
-// DefaultPrometheusPortProtocol is the default protocol used by the Prometheus JMX exporter.
-const DefaultPrometheusPortProtocol string = "TCP"
-
-// DefaultPrometheusPortName is the default port name used by the Prometheus JMX exporter.
-const DefaultPrometheusPortName string = "jmx-exporter"
-
-const (
- // SparkDriverContainerName is name of driver container in spark driver pod
- SparkDriverContainerName = "spark-kubernetes-driver"
- // SparkExecutorContainerName is name of executor container in spark executor pod
- SparkExecutorContainerName = "executor"
- // Spark3DefaultExecutorContainerName is the default executor container name in
- // Spark 3.x, which allows the container name to be configured through the pod
- // template support.
- Spark3DefaultExecutorContainerName = "spark-kubernetes-executor"
- // SparkLocalDirVolumePrefix is the volume name prefix for "scratch" space directory
- SparkLocalDirVolumePrefix = "spark-local-dir-"
-)
diff --git a/pkg/config/secret.go b/pkg/config/secret.go
deleted file mode 100644
index 1a2c7fa492..0000000000
--- a/pkg/config/secret.go
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
-Copyright 2017 Google LLC
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -import ( - "fmt" - "path/filepath" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" -) - -// GetDriverSecretConfOptions returns a list of spark-submit options for mounting driver secrets. -func GetDriverSecretConfOptions(app *v1beta2.SparkApplication) []string { - var secretConfOptions []string - for _, s := range app.Spec.Driver.Secrets { - conf := fmt.Sprintf("%s%s=%s", SparkDriverSecretKeyPrefix, s.Name, s.Path) - secretConfOptions = append(secretConfOptions, conf) - if s.Type == v1beta2.GCPServiceAccountSecret { - conf = fmt.Sprintf( - "%s%s=%s", - SparkDriverEnvVarConfigKeyPrefix, - GoogleApplicationCredentialsEnvVar, - filepath.Join(s.Path, ServiceAccountJSONKeyFileName)) - secretConfOptions = append(secretConfOptions, conf) - } else if s.Type == v1beta2.HadoopDelegationTokenSecret { - conf = fmt.Sprintf( - "%s%s=%s", - SparkDriverEnvVarConfigKeyPrefix, - HadoopTokenFileLocationEnvVar, - filepath.Join(s.Path, HadoopDelegationTokenFileName)) - secretConfOptions = append(secretConfOptions, conf) - } - } - return secretConfOptions -} - -// GetExecutorSecretConfOptions returns a list of spark-submit options for mounting executor secrets. -func GetExecutorSecretConfOptions(app *v1beta2.SparkApplication) []string { - var secretConfOptions []string - for _, s := range app.Spec.Executor.Secrets { - conf := fmt.Sprintf("%s%s=%s", SparkExecutorSecretKeyPrefix, s.Name, s.Path) - secretConfOptions = append(secretConfOptions, conf) - if s.Type == v1beta2.GCPServiceAccountSecret { - conf = fmt.Sprintf( - "%s%s=%s", - SparkExecutorEnvVarConfigKeyPrefix, - GoogleApplicationCredentialsEnvVar, - filepath.Join(s.Path, ServiceAccountJSONKeyFileName)) - secretConfOptions = append(secretConfOptions, conf) - } else if s.Type == v1beta2.HadoopDelegationTokenSecret { - conf = fmt.Sprintf( - "%s%s=%s", - SparkExecutorEnvVarConfigKeyPrefix, - HadoopTokenFileLocationEnvVar, - filepath.Join(s.Path, HadoopDelegationTokenFileName)) - secretConfOptions = append(secretConfOptions, conf) - } - } - return secretConfOptions -} diff --git a/pkg/config/secret_test.go b/pkg/config/secret_test.go deleted file mode 100644 index fcd0ea9922..0000000000 --- a/pkg/config/secret_test.go +++ /dev/null @@ -1,107 +0,0 @@ -/* -Copyright 2017 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package config - -import ( - "fmt" - "strings" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" -) - -func TestGetDriverSecretConfOptions(t *testing.T) { - app := &v1beta2.SparkApplication{ - Spec: v1beta2.SparkApplicationSpec{ - Driver: v1beta2.DriverSpec{ - SparkPodSpec: v1beta2.SparkPodSpec{ - Secrets: []v1beta2.SecretInfo{ - { - Name: "db-credentials", - Path: "/etc/secrets", - }, - { - Name: "gcp-service-account", - Path: "/etc/secrets", - Type: v1beta2.GCPServiceAccountSecret, - }, - { - Name: "hadoop-token", - Path: "/etc/secrets", - Type: v1beta2.HadoopDelegationTokenSecret, - }, - }, - }, - }, - }, - } - - options := GetDriverSecretConfOptions(app) - assert.Equal(t, 5, len(options)) - assert.Equal(t, fmt.Sprintf("%s=%s", "db-credentials", "/etc/secrets"), strings.TrimPrefix(options[0], - SparkDriverSecretKeyPrefix)) - assert.Equal(t, fmt.Sprintf("%s=%s", "gcp-service-account", "/etc/secrets"), - strings.TrimPrefix(options[1], SparkDriverSecretKeyPrefix)) - assert.Equal(t, fmt.Sprintf("%s=%s%s", GoogleApplicationCredentialsEnvVar, "/etc/secrets/", - ServiceAccountJSONKeyFileName), strings.TrimPrefix(options[2], SparkDriverEnvVarConfigKeyPrefix)) - assert.Equal(t, fmt.Sprintf("%s=%s", "hadoop-token", "/etc/secrets"), strings.TrimPrefix(options[3], - SparkDriverSecretKeyPrefix)) - assert.Equal(t, fmt.Sprintf("%s=%s%s", HadoopTokenFileLocationEnvVar, "/etc/secrets/", - HadoopDelegationTokenFileName), strings.TrimPrefix(options[4], SparkDriverEnvVarConfigKeyPrefix)) -} - -func TestGetExecutorSecretConfOptions(t *testing.T) { - app := &v1beta2.SparkApplication{ - Spec: v1beta2.SparkApplicationSpec{ - Executor: v1beta2.ExecutorSpec{ - SparkPodSpec: v1beta2.SparkPodSpec{ - Secrets: []v1beta2.SecretInfo{ - { - Name: "db-credentials", - Path: "/etc/secrets", - }, - { - Name: "gcp-service-account", - Path: "/etc/secrets", - Type: v1beta2.GCPServiceAccountSecret, - }, - { - Name: "hadoop-token", - Path: "/etc/secrets", - Type: v1beta2.HadoopDelegationTokenSecret, - }, - }, - }, - }, - }, - } - - options := GetExecutorSecretConfOptions(app) - assert.Equal(t, 5, len(options)) - assert.Equal(t, fmt.Sprintf("%s=%s", "db-credentials", "/etc/secrets"), strings.TrimPrefix(options[0], - SparkExecutorSecretKeyPrefix)) - assert.Equal(t, fmt.Sprintf("%s=%s", "gcp-service-account", "/etc/secrets"), - strings.TrimPrefix(options[1], SparkExecutorSecretKeyPrefix)) - assert.Equal(t, fmt.Sprintf("%s=%s%s", GoogleApplicationCredentialsEnvVar, "/etc/secrets/", - ServiceAccountJSONKeyFileName), strings.TrimPrefix(options[2], SparkExecutorEnvVarConfigKeyPrefix)) - assert.Equal(t, fmt.Sprintf("%s=%s", "hadoop-token", "/etc/secrets"), strings.TrimPrefix(options[3], - SparkExecutorSecretKeyPrefix)) - assert.Equal(t, fmt.Sprintf("%s=%s%s", HadoopTokenFileLocationEnvVar, "/etc/secrets/", - HadoopDelegationTokenFileName), strings.TrimPrefix(options[4], SparkExecutorEnvVarConfigKeyPrefix)) -} diff --git a/pkg/controller/scheduledsparkapplication/controller.go b/pkg/controller/scheduledsparkapplication/controller.go deleted file mode 100644 index 056ba9d562..0000000000 --- a/pkg/controller/scheduledsparkapplication/controller.go +++ /dev/null @@ -1,425 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package scheduledsparkapplication - -import ( - "context" - "fmt" - "reflect" - "sort" - "time" - - "github.com/golang/glog" - "github.com/robfig/cron/v3" - - apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/util/retry" - "k8s.io/client-go/util/workqueue" - "k8s.io/utils/clock" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - crdclientset "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned" - crdscheme "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/scheme" - crdinformers "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions" - crdlisters "github.com/kubeflow/spark-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta2" - "github.com/kubeflow/spark-operator/pkg/config" -) - -var ( - keyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc -) - -type Controller struct { - crdClient crdclientset.Interface - kubeClient kubernetes.Interface - extensionsClient apiextensionsclient.Interface - queue workqueue.RateLimitingInterface - cacheSynced cache.InformerSynced - ssaLister crdlisters.ScheduledSparkApplicationLister - saLister crdlisters.SparkApplicationLister - clock clock.Clock -} - -func NewController( - crdClient crdclientset.Interface, - kubeClient kubernetes.Interface, - extensionsClient apiextensionsclient.Interface, - informerFactory crdinformers.SharedInformerFactory, - clock clock.Clock) *Controller { - crdscheme.AddToScheme(scheme.Scheme) - - queue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), - "scheduled-spark-application-controller") - - controller := &Controller{ - crdClient: crdClient, - kubeClient: kubeClient, - extensionsClient: extensionsClient, - queue: queue, - clock: clock, - } - - informer := informerFactory.Sparkoperator().V1beta2().ScheduledSparkApplications() - informer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: controller.onAdd, - UpdateFunc: controller.onUpdate, - DeleteFunc: controller.onDelete, - }) - controller.cacheSynced = informer.Informer().HasSynced - controller.ssaLister = informer.Lister() - controller.saLister = informerFactory.Sparkoperator().V1beta2().SparkApplications().Lister() - - return controller -} - -func (c *Controller) Start(workers int, stopCh <-chan struct{}) error { - glog.Info("Starting the ScheduledSparkApplication controller") - - if !cache.WaitForCacheSync(stopCh, c.cacheSynced) { - return fmt.Errorf("timed out waiting for cache to sync") - } - - glog.Info("Starting the workers of the ScheduledSparkApplication controller") - for i := 0; i < workers; i++ { - // runWorker will loop until "something bad" happens. Until will then rekick - // the worker after one second. 
- go wait.Until(c.runWorker, time.Second, stopCh) - } - - return nil -} - -func (c *Controller) Stop() { - glog.Info("Stopping the ScheduledSparkApplication controller") - c.queue.ShutDown() -} - -func (c *Controller) runWorker() { - defer utilruntime.HandleCrash() - for c.processNextItem() { - } -} - -func (c *Controller) processNextItem() bool { - key, quit := c.queue.Get() - if quit { - return false - } - defer c.queue.Done(key) - - err := c.syncScheduledSparkApplication(key.(string)) - if err == nil { - // Successfully processed the key or the key was not found so tell the queue to stop tracking - // history for your key. This will reset things like failure counts for per-item rate limiting. - c.queue.Forget(key) - return true - } - - // There was a failure so be sure to report it. This method allows for pluggable error handling - // which can be used for things like cluster-monitoring - utilruntime.HandleError(fmt.Errorf("failed to sync ScheduledSparkApplication %q: %v", key, err)) - // Since we failed, we should requeue the item to work on later. This method will add a backoff - // to avoid hot-looping on particular items (they're probably still not going to work right away) - // and overall controller protection (everything I've done is broken, this controller needs to - // calm down or it can starve other useful work) cases. - c.queue.AddRateLimited(key) - - return true -} - -func (c *Controller) syncScheduledSparkApplication(key string) error { - namespace, name, err := cache.SplitMetaNamespaceKey(key) - if err != nil { - return err - } - app, err := c.ssaLister.ScheduledSparkApplications(namespace).Get(name) - if err != nil { - return err - } - - if app.Spec.Suspend != nil && *app.Spec.Suspend { - return nil - } - - glog.V(2).Infof("Syncing ScheduledSparkApplication %s/%s", app.Namespace, app.Name) - status := app.Status.DeepCopy() - schedule, err := cron.ParseStandard(app.Spec.Schedule) - if err != nil { - glog.Errorf("failed to parse schedule %s of ScheduledSparkApplication %s/%s: %v", app.Spec.Schedule, app.Namespace, app.Name, err) - status.ScheduleState = v1beta2.FailedValidationState - status.Reason = err.Error() - } else { - status.ScheduleState = v1beta2.ScheduledState - now := c.clock.Now() - nextRunTime := status.NextRun.Time - // if we updated the schedule for an earlier execution - those changes need to be reflected - updatedNextRunTime := schedule.Next(now) - if nextRunTime.IsZero() || updatedNextRunTime.Before(nextRunTime) { - // The first run of the application. - nextRunTime = updatedNextRunTime - status.NextRun = metav1.NewTime(nextRunTime) - } - if nextRunTime.Before(now) { - // Check if the condition for starting the next run is satisfied. 
- ok, err := c.shouldStartNextRun(app) - if err != nil { - return err - } - if ok { - glog.Infof("Next run of ScheduledSparkApplication %s/%s is due, creating a new SparkApplication instance", app.Namespace, app.Name) - name, err := c.startNextRun(app, now) - if err != nil { - return err - } - status.LastRun = metav1.NewTime(now) - status.NextRun = metav1.NewTime(schedule.Next(status.LastRun.Time)) - status.LastRunName = name - } - } - - if err = c.checkAndUpdatePastRuns(app, status); err != nil { - return err - } - } - - return c.updateScheduledSparkApplicationStatus(app, status) -} - -func (c *Controller) onAdd(obj interface{}) { - c.enqueue(obj) -} - -func (c *Controller) onUpdate(oldObj, newObj interface{}) { - c.enqueue(newObj) -} - -func (c *Controller) onDelete(obj interface{}) { - c.dequeue(obj) -} - -func (c *Controller) enqueue(obj interface{}) { - key, err := keyFunc(obj) - if err != nil { - glog.Errorf("failed to get key for %v: %v", obj, err) - return - } - - c.queue.AddRateLimited(key) -} - -func (c *Controller) dequeue(obj interface{}) { - key, err := keyFunc(obj) - if err != nil { - glog.Errorf("failed to get key for %v: %v", obj, err) - return - } - - c.queue.Forget(key) - c.queue.Done(key) -} - -func (c *Controller) createSparkApplication( - scheduledApp *v1beta2.ScheduledSparkApplication, t time.Time) (string, error) { - app := &v1beta2.SparkApplication{} - app.Spec = scheduledApp.Spec.Template - app.Name = fmt.Sprintf("%s-%d", scheduledApp.Name, t.UnixNano()) - app.OwnerReferences = append(app.OwnerReferences, metav1.OwnerReference{ - APIVersion: v1beta2.SchemeGroupVersion.String(), - Kind: reflect.TypeOf(v1beta2.ScheduledSparkApplication{}).Name(), - Name: scheduledApp.Name, - UID: scheduledApp.UID, - }) - app.ObjectMeta.Namespace = scheduledApp.Namespace - app.ObjectMeta.Labels = make(map[string]string) - for key, value := range scheduledApp.Labels { - app.ObjectMeta.Labels[key] = value - } - app.ObjectMeta.Labels[config.ScheduledSparkAppNameLabel] = scheduledApp.Name - _, err := c.crdClient.SparkoperatorV1beta2().SparkApplications(scheduledApp.Namespace).Create(context.TODO(), app, metav1.CreateOptions{}) - if err != nil { - return "", err - } - return app.Name, nil -} - -func (c *Controller) shouldStartNextRun(app *v1beta2.ScheduledSparkApplication) (bool, error) { - sortedApps, err := c.listSparkApplications(app) - if err != nil { - return false, err - } - if len(sortedApps) == 0 { - return true, nil - } - - // The last run (most recently started) is the first one in the sorted slice. 
- lastRun := sortedApps[0] - switch app.Spec.ConcurrencyPolicy { - case v1beta2.ConcurrencyAllow: - return true, nil - case v1beta2.ConcurrencyForbid: - return c.hasLastRunFinished(lastRun), nil - case v1beta2.ConcurrencyReplace: - if err := c.killLastRunIfNotFinished(lastRun); err != nil { - return false, err - } - return true, nil - } - return true, nil -} - -func (c *Controller) startNextRun(app *v1beta2.ScheduledSparkApplication, now time.Time) (string, error) { - name, err := c.createSparkApplication(app, now) - if err != nil { - glog.Errorf("failed to create a SparkApplication instance for ScheduledSparkApplication %s/%s: %v", app.Namespace, app.Name, err) - return "", err - } - return name, nil -} - -func (c *Controller) hasLastRunFinished(app *v1beta2.SparkApplication) bool { - return app.Status.AppState.State == v1beta2.CompletedState || - app.Status.AppState.State == v1beta2.FailedState -} - -func (c *Controller) killLastRunIfNotFinished(app *v1beta2.SparkApplication) error { - finished := c.hasLastRunFinished(app) - if finished { - return nil - } - - // Delete the SparkApplication object of the last run. - if err := c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Delete( - context.TODO(), - app.Name, - metav1.DeleteOptions{GracePeriodSeconds: int64ptr(0)}, - ); err != nil { - return err - } - - return nil -} - -func (c *Controller) checkAndUpdatePastRuns( - app *v1beta2.ScheduledSparkApplication, - status *v1beta2.ScheduledSparkApplicationStatus) error { - sortedApps, err := c.listSparkApplications(app) - if err != nil { - return err - } - - var completedRuns []string - var failedRuns []string - for _, a := range sortedApps { - if a.Status.AppState.State == v1beta2.CompletedState { - completedRuns = append(completedRuns, a.Name) - } else if a.Status.AppState.State == v1beta2.FailedState { - failedRuns = append(failedRuns, a.Name) - } - } - - var toDelete []string - status.PastSuccessfulRunNames, toDelete = bookkeepPastRuns(completedRuns, app.Spec.SuccessfulRunHistoryLimit) - for _, name := range toDelete { - c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Delete(context.TODO(), name, metav1.DeleteOptions{GracePeriodSeconds: int64ptr(0)}) - } - status.PastFailedRunNames, toDelete = bookkeepPastRuns(failedRuns, app.Spec.FailedRunHistoryLimit) - for _, name := range toDelete { - c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Delete(context.TODO(), name, metav1.DeleteOptions{GracePeriodSeconds: int64ptr(0)}) - } - - return nil -} - -func (c *Controller) updateScheduledSparkApplicationStatus( - app *v1beta2.ScheduledSparkApplication, - newStatus *v1beta2.ScheduledSparkApplicationStatus) error { - // If the status has not changed, do not perform an update. 
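That no-op check, `isStatusEqual`, opens the body just below; the write itself then goes through client-go's `retry.RetryOnConflict`. A minimal sketch of that pattern, with hypothetical `getLatest`/`update` callbacks standing in for the generated clientset:

```go
package main

import (
	"k8s.io/client-go/util/retry"

	"github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
)

// updateStatus re-reads the object on every attempt so a fresh
// ResourceVersion is used; a Conflict error from update triggers a retry.
func updateStatus(
	getLatest func() (*v1beta2.ScheduledSparkApplication, error),
	update func(*v1beta2.ScheduledSparkApplication) error,
	newStatus v1beta2.ScheduledSparkApplicationStatus,
) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		latest, err := getLatest()
		if err != nil {
			return err
		}
		latest.Status = newStatus
		return update(latest)
	})
}
```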
- if isStatusEqual(newStatus, &app.Status) { - return nil - } - - toUpdate := app.DeepCopy() - return retry.RetryOnConflict(retry.DefaultRetry, func() error { - toUpdate.Status = *newStatus - _, updateErr := c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(toUpdate.Namespace).UpdateStatus( - context.TODO(), - toUpdate, - metav1.UpdateOptions{}, - ) - if updateErr == nil { - return nil - } - - result, err := c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(toUpdate.Namespace).Get( - context.TODO(), - toUpdate.Name, - metav1.GetOptions{}, - ) - if err != nil { - return err - } - toUpdate = result - - return updateErr - }) -} - -func (c *Controller) listSparkApplications(app *v1beta2.ScheduledSparkApplication) (sparkApps, error) { - set := labels.Set{config.ScheduledSparkAppNameLabel: app.Name} - apps, err := c.saLister.SparkApplications(app.Namespace).List(set.AsSelector()) - if err != nil { - return nil, fmt.Errorf("failed to list SparkApplications: %v", err) - } - sortedApps := sparkApps(apps) - sort.Sort(sortedApps) - return sortedApps, nil -} - -func bookkeepPastRuns(names []string, runLimit *int32) (toKeep []string, toDelete []string) { - limit := 1 - if runLimit != nil { - limit = int(*runLimit) - } - - if len(names) <= limit { - return names, nil - } - toKeep = names[:limit] - toDelete = names[limit:] - return -} - -func isStatusEqual(newStatus, currentStatus *v1beta2.ScheduledSparkApplicationStatus) bool { - return newStatus.ScheduleState == currentStatus.ScheduleState && - newStatus.LastRun == currentStatus.LastRun && - newStatus.NextRun == currentStatus.NextRun && - newStatus.LastRunName == currentStatus.LastRunName && - reflect.DeepEqual(newStatus.PastSuccessfulRunNames, currentStatus.PastSuccessfulRunNames) && - reflect.DeepEqual(newStatus.PastFailedRunNames, currentStatus.PastFailedRunNames) && - newStatus.Reason == currentStatus.Reason -} - -func int64ptr(n int64) *int64 { - return &n -} diff --git a/pkg/controller/scheduledsparkapplication/controller_test.go b/pkg/controller/scheduledsparkapplication/controller_test.go deleted file mode 100644 index 9ef610113d..0000000000 --- a/pkg/controller/scheduledsparkapplication/controller_test.go +++ /dev/null @@ -1,552 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package scheduledsparkapplication - -import ( - "context" - "testing" - "time" - - "github.com/stretchr/testify/assert" - - apiextensionsfake "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - kubeclientfake "k8s.io/client-go/kubernetes/fake" - kubetesting "k8s.io/client-go/testing" - "k8s.io/client-go/tools/cache" - clocktesting "k8s.io/utils/clock/testing" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - crdclientfake "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/fake" - crdinformers "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions" - "github.com/kubeflow/spark-operator/pkg/config" -) - -func TestSyncScheduledSparkApplication_Allow(t *testing.T) { - app := &v1beta2.ScheduledSparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - Name: "test-app-allow", - }, - Spec: v1beta2.ScheduledSparkApplicationSpec{ - Schedule: "@every 10m", - ConcurrencyPolicy: v1beta2.ConcurrencyAllow, - }, - } - c, clk := newFakeController() - c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Create(context.TODO(), app, metav1.CreateOptions{}) - - key, _ := cache.MetaNamespaceKeyFunc(app) - options := metav1.GetOptions{} - - if err := c.syncScheduledSparkApplication(key); err != nil { - t.Fatal(err) - } - app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(context.TODO(), app.Name, options) - assert.Equal(t, v1beta2.ScheduledState, app.Status.ScheduleState) - // The first run should not have been triggered. - assert.True(t, app.Status.LastRunName == "") - - // Advance the clock by 10 minutes. - clk.Step(10 * time.Minute) - if err := c.syncScheduledSparkApplication(key); err != nil { - t.Fatal(err) - } - app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(context.TODO(), app.Name, options) - firstRunName := app.Status.LastRunName - // The first run should have been triggered. - assert.True(t, firstRunName != "") - assert.False(t, app.Status.LastRun.IsZero()) - assert.True(t, app.Status.NextRun.Time.After(app.Status.LastRun.Time)) - // The first run exists. - run, _ := c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), firstRunName, options) - assert.NotNil(t, run) - - clk.Step(5 * time.Second) - // The second sync should not start any new run. - if err := c.syncScheduledSparkApplication(key); err != nil { - t.Fatal(err) - } - app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(context.TODO(), app.Name, options) - // Next run is not due, so LastRunName should stay the same. - assert.Equal(t, firstRunName, app.Status.LastRunName) - - // Simulate completion of the first run. - run.Status.AppState.State = v1beta2.CompletedState - c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Update(context.TODO(), run, metav1.UpdateOptions{}) - // This sync should not start any new run, but update Status.PastSuccessfulRunNames. 
- if err := c.syncScheduledSparkApplication(key); err != nil { - t.Fatal(err) - } - app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(context.TODO(), app.Name, options) - assert.Equal(t, 1, len(app.Status.PastSuccessfulRunNames)) - assert.Equal(t, firstRunName, app.Status.PastSuccessfulRunNames[0]) - run, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), firstRunName, options) - assert.NotNil(t, run) - - // This sync should not start any new run, nor update Status.PastSuccessfulRunNames. - if err := c.syncScheduledSparkApplication(key); err != nil { - t.Fatal(err) - } - app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(context.TODO(), app.Name, options) - assert.Equal(t, 1, len(app.Status.PastSuccessfulRunNames)) - assert.Equal(t, firstRunName, app.Status.PastSuccessfulRunNames[0]) - run, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), firstRunName, options) - assert.NotNil(t, run) - - // Advance the clock to trigger the second run. - clk.SetTime(app.Status.NextRun.Time.Add(5 * time.Second)) - // This sync should start the second run. - if err := c.syncScheduledSparkApplication(key); err != nil { - t.Fatal(err) - } - app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(context.TODO(), app.Name, options) - assert.Equal(t, v1beta2.ScheduledState, app.Status.ScheduleState) - // The second run should have a different name. - secondRunName := app.Status.LastRunName - assert.NotEqual(t, firstRunName, secondRunName) - assert.True(t, app.Status.NextRun.Time.After(app.Status.LastRun.Time)) - // The second run exists. - run, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), secondRunName, options) - assert.NotNil(t, run) - - // Simulate completion of the second run. - run.Status.AppState.State = v1beta2.CompletedState - c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Update(context.TODO(), run, metav1.UpdateOptions{}) - // This sync should not start any new run, but update Status.PastSuccessfulRunNames. - if err := c.syncScheduledSparkApplication(key); err != nil { - t.Fatal(err) - } - app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(context.TODO(), app.Name, options) - assert.Equal(t, 1, len(app.Status.PastSuccessfulRunNames)) - // The first run should have been deleted due to the completion of the second run. - firstRun, _ := c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), firstRunName, options) - assert.Nil(t, firstRun) - - // This sync should not start any new run, nor update Status.PastSuccessfulRunNames. 
- if err := c.syncScheduledSparkApplication(key); err != nil { - t.Fatal(err) - } - app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(context.TODO(), app.Name, options) - assert.Equal(t, 1, len(app.Status.PastSuccessfulRunNames)) - run, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), secondRunName, options) - assert.NotNil(t, run) - - // Test the case where we update the schedule to be more frequent - app.Spec.Schedule = "@every 2m" - recentRunName := app.Status.LastRunName - recentRunTime := app.Status.LastRun.Time - app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Update(context.TODO(), app, metav1.UpdateOptions{}) - // sync our update - if err := c.syncScheduledSparkApplication(key); err != nil { - t.Fatal(err) - } - // Advance the clock by 3 minutes. - clk.Step(3 * time.Minute) - if err := c.syncScheduledSparkApplication(key); err != nil { - t.Fatal(err) - } - app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(context.TODO(), app.Name, options) - // A run should have been triggered - assert.NotEqual(t, recentRunName, app.Status.LastRunName) - assert.True(t, recentRunTime.Before(app.Status.LastRun.Time)) - run, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), app.Status.LastRunName, options) - assert.NotNil(t, run) - // Simulate completion of the last run - run.Status.AppState.State = v1beta2.CompletedState - c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Update(context.TODO(), run, metav1.UpdateOptions{}) - // This sync should not start any new run, but update Status.PastSuccessfulRunNames. - if err := c.syncScheduledSparkApplication(key); err != nil { - t.Fatal(err) - } -} - -func TestSyncScheduledSparkApplication_Forbid(t *testing.T) { - app := &v1beta2.ScheduledSparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - Name: "test-app-forbid", - }, - Spec: v1beta2.ScheduledSparkApplicationSpec{ - Schedule: "@every 1m", - ConcurrencyPolicy: v1beta2.ConcurrencyForbid, - }, - } - c, clk := newFakeController() - c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Create(context.TODO(), app, metav1.CreateOptions{}) - - key, _ := cache.MetaNamespaceKeyFunc(app) - options := metav1.GetOptions{} - - if err := c.syncScheduledSparkApplication(key); err != nil { - t.Fatal(err) - } - app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(context.TODO(), app.Name, options) - assert.Equal(t, v1beta2.ScheduledState, app.Status.ScheduleState) - // The first run should not have been triggered. - assert.True(t, app.Status.LastRunName == "") - - // Advance the clock by 1 minute. - clk.Step(1 * time.Minute) - if err := c.syncScheduledSparkApplication(key); err != nil { - t.Fatal(err) - } - app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(context.TODO(), app.Name, options) - assert.Equal(t, v1beta2.ScheduledState, app.Status.ScheduleState) - firstRunName := app.Status.LastRunName - // The first run should have been triggered. - assert.True(t, firstRunName != "") - assert.False(t, app.Status.LastRun.IsZero()) - assert.True(t, app.Status.NextRun.Time.After(app.Status.LastRun.Time)) - // The first run exists. 
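The existence assertion continues right below. All of the time travel in these tests rides on the fake clock from `k8s.io/utils/clock/testing`, which the controller consumes through the `clock.Clock` interface; in isolation:

```go
package main

import (
	"fmt"
	"time"

	clocktesting "k8s.io/utils/clock/testing"
)

func main() {
	clk := clocktesting.NewFakeClock(time.Now())
	start := clk.Now()

	clk.Step(10 * time.Minute)        // advance relative to the fake "now"
	clk.SetTime(start.Add(time.Hour)) // or jump to an absolute instant

	fmt.Println(clk.Now().Sub(start)) // 1h0m0s; no wall-clock time elapses
}
```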
- run, _ := c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), firstRunName, options) - assert.NotNil(t, run) - - clk.SetTime(app.Status.NextRun.Time.Add(5 * time.Second)) - // This sync should not start the next run because the first run has not completed yet. - if err := c.syncScheduledSparkApplication(key); err != nil { - t.Fatal(err) - } - app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(context.TODO(), app.Name, options) - assert.Equal(t, firstRunName, app.Status.LastRunName) - - // Simulate completion of the first run. - run.Status.AppState.State = v1beta2.CompletedState - c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Update(context.TODO(), run, metav1.UpdateOptions{}) - // This sync should start the next run because the first run has completed. - if err := c.syncScheduledSparkApplication(key); err != nil { - t.Fatal(err) - } - app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(context.TODO(), app.Name, options) - secondRunName := app.Status.LastRunName - assert.NotEqual(t, firstRunName, secondRunName) - assert.Equal(t, 1, len(app.Status.PastSuccessfulRunNames)) - assert.Equal(t, firstRunName, app.Status.PastSuccessfulRunNames[0]) - // The second run exists. - run, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), secondRunName, options) - assert.NotNil(t, run) -} - -func TestSyncScheduledSparkApplication_Replace(t *testing.T) { - // TODO: figure out why the test fails and remove this. - t.Skip() - - app := &v1beta2.ScheduledSparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - Name: "test-app-replace", - }, - Spec: v1beta2.ScheduledSparkApplicationSpec{ - Schedule: "@every 1m", - ConcurrencyPolicy: v1beta2.ConcurrencyReplace, - }, - } - c, clk := newFakeController() - c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Create(context.TODO(), app, metav1.CreateOptions{}) - key, _ := cache.MetaNamespaceKeyFunc(app) - - options := metav1.GetOptions{} - - if err := c.syncScheduledSparkApplication(key); err != nil { - t.Fatal(err) - } - app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(context.TODO(), app.Name, options) - assert.Equal(t, v1beta2.ScheduledState, app.Status.ScheduleState) - // The first run should not have been triggered. - assert.True(t, app.Status.LastRunName == "") - - // Advance the clock by 1 minute. - clk.Step(1 * time.Minute) - if err := c.syncScheduledSparkApplication(key); err != nil { - t.Fatal(err) - } - app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(context.TODO(), app.Name, options) - assert.Equal(t, v1beta2.ScheduledState, app.Status.ScheduleState) - firstRunName := app.Status.LastRunName - // The first run should have been triggered. - assert.True(t, firstRunName != "") - assert.False(t, app.Status.LastRun.IsZero()) - assert.True(t, app.Status.NextRun.Time.After(app.Status.LastRun.Time)) - // The first run exists. - run, _ := c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), firstRunName, options) - assert.NotNil(t, run) - - clk.SetTime(app.Status.NextRun.Time.Add(5 * time.Second)) - // This sync should replace the first run with a new run. 
- if err := c.syncScheduledSparkApplication(key); err != nil { - t.Fatal(err) - } - app, _ = c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Get(context.TODO(), app.Name, options) - secondRunName := app.Status.LastRunName - assert.NotEqual(t, firstRunName, secondRunName) - // The first run should have been deleted. - run, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), firstRunName, options) - assert.Nil(t, run) - // The second run exists. - run, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), secondRunName, options) - assert.NotNil(t, run) -} - -func TestShouldStartNextRun(t *testing.T) { - app := &v1beta2.ScheduledSparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - Name: "test-app", - }, - Spec: v1beta2.ScheduledSparkApplicationSpec{ - Schedule: "@every 1m", - }, - Status: v1beta2.ScheduledSparkApplicationStatus{ - LastRunName: "run1", - }, - } - c, _ := newFakeController() - c.crdClient.SparkoperatorV1beta2().ScheduledSparkApplications(app.Namespace).Create(context.TODO(), app, metav1.CreateOptions{}) - - run1 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: app.Namespace, - Name: "run1", - Labels: map[string]string{config.ScheduledSparkAppNameLabel: app.Name}, - }, - } - c.crdClient.SparkoperatorV1beta2().SparkApplications(run1.Namespace).Create(context.TODO(), run1, metav1.CreateOptions{}) - - // ConcurrencyAllow with a running run. - run1.Status.AppState.State = v1beta2.RunningState - c.crdClient.SparkoperatorV1beta2().SparkApplications(run1.Namespace).Update(context.TODO(), run1, metav1.UpdateOptions{}) - app.Spec.ConcurrencyPolicy = v1beta2.ConcurrencyAllow - ok, _ := c.shouldStartNextRun(app) - assert.True(t, ok) - - // ConcurrencyForbid with a running run. - app.Spec.ConcurrencyPolicy = v1beta2.ConcurrencyForbid - ok, _ = c.shouldStartNextRun(app) - assert.False(t, ok) - // ConcurrencyForbid with a completed run. - run1.Status.AppState.State = v1beta2.CompletedState - c.crdClient.SparkoperatorV1beta2().SparkApplications(run1.Namespace).Update(context.TODO(), run1, metav1.UpdateOptions{}) - ok, _ = c.shouldStartNextRun(app) - assert.True(t, ok) - - // ConcurrencyReplace with a completed run. - app.Spec.ConcurrencyPolicy = v1beta2.ConcurrencyReplace - ok, _ = c.shouldStartNextRun(app) - assert.True(t, ok) - // ConcurrencyReplace with a running run. - run1.Status.AppState.State = v1beta2.RunningState - c.crdClient.SparkoperatorV1beta2().SparkApplications(run1.Namespace).Update(context.TODO(), run1, metav1.UpdateOptions{}) - ok, _ = c.shouldStartNextRun(app) - assert.True(t, ok) - // The previous running run should have been deleted. 
- existing, _ := c.crdClient.SparkoperatorV1beta2().SparkApplications(run1.Namespace).Get( - context.TODO(), - run1.Name, - metav1.GetOptions{}, - ) - assert.Nil(t, existing) -} - -func TestCheckAndUpdatePastRuns(t *testing.T) { - var two int32 = 2 - app := &v1beta2.ScheduledSparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - Name: "test-app", - }, - Spec: v1beta2.ScheduledSparkApplicationSpec{ - Schedule: "@every 1m", - SuccessfulRunHistoryLimit: &two, - FailedRunHistoryLimit: &two, - }, - } - c, _ := newFakeController() - - run1 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: app.Namespace, - Name: "run1", - Labels: map[string]string{config.ScheduledSparkAppNameLabel: app.Name}, - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.CompletedState, - }, - }, - } - c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(context.TODO(), run1, metav1.CreateOptions{}) - - // The first completed run should have been recorded. - status := app.Status.DeepCopy() - c.checkAndUpdatePastRuns(app, status) - assert.Equal(t, 1, len(status.PastSuccessfulRunNames)) - assert.Equal(t, run1.Name, status.PastSuccessfulRunNames[0]) - - // The second run that is running should not be recorded. - run2 := run1.DeepCopy() - run2.CreationTimestamp.Time = run1.CreationTimestamp.Add(10 * time.Second) - run2.Name = "run2" - run2.Status.AppState.State = v1beta2.RunningState - c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(context.TODO(), run2, metav1.CreateOptions{}) - c.checkAndUpdatePastRuns(app, status) - assert.Equal(t, 1, len(status.PastSuccessfulRunNames)) - assert.Equal(t, run1.Name, status.PastSuccessfulRunNames[0]) - // The second completed run should have been recorded. - run2.Status.AppState.State = v1beta2.CompletedState - c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Update(context.TODO(), run2, metav1.UpdateOptions{}) - c.checkAndUpdatePastRuns(app, status) - assert.Equal(t, 2, len(status.PastSuccessfulRunNames)) - assert.Equal(t, run2.Name, status.PastSuccessfulRunNames[0]) - assert.Equal(t, run1.Name, status.PastSuccessfulRunNames[1]) - // The second completed run has already been recorded, so should not be recorded again. - c.checkAndUpdatePastRuns(app, status) - assert.Equal(t, 2, len(status.PastSuccessfulRunNames)) - assert.Equal(t, run2.Name, status.PastSuccessfulRunNames[0]) - assert.Equal(t, run1.Name, status.PastSuccessfulRunNames[1]) - // SparkApplications of both of the first two completed runs should exist. - existing, _ := c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get( - context.TODO(), - run2.Name, - metav1.GetOptions{}, - ) - assert.NotNil(t, existing) - existing, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get( - context.TODO(), - run1.Name, - metav1.GetOptions{}, - ) - assert.NotNil(t, existing) - - // The third completed run should have been recorded. 
- run3 := run1.DeepCopy() - run3.CreationTimestamp.Time = run2.CreationTimestamp.Add(10 * time.Second) - run3.Name = "run3" - c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(context.TODO(), run3, metav1.CreateOptions{}) - c.checkAndUpdatePastRuns(app, status) - assert.Equal(t, 2, len(status.PastSuccessfulRunNames)) - assert.Equal(t, run3.Name, status.PastSuccessfulRunNames[0]) - assert.Equal(t, run2.Name, status.PastSuccessfulRunNames[1]) - // SparkApplications of the last two completed runs should still exist, - // but the one of the first completed run should have been deleted. - existing, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get( - context.TODO(), - run3.Name, - metav1.GetOptions{}, - ) - assert.NotNil(t, existing) - existing, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get( - context.TODO(), - run2.Name, - metav1.GetOptions{}, - ) - assert.NotNil(t, existing) - existing, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get( - context.TODO(), - run1.Name, - metav1.GetOptions{}, - ) - assert.Nil(t, existing) - - // The first failed run should have been recorded. - run4 := run1.DeepCopy() - run4.CreationTimestamp.Time = run3.CreationTimestamp.Add(10 * time.Second) - run4.Name = "run4" - run4.Status.AppState.State = v1beta2.FailedState - c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(context.TODO(), run4, metav1.CreateOptions{}) - c.checkAndUpdatePastRuns(app, status) - assert.Equal(t, 1, len(status.PastFailedRunNames)) - assert.Equal(t, run4.Name, status.PastFailedRunNames[0]) - - // The second failed run should have been recorded. - run5 := run1.DeepCopy() - run5.CreationTimestamp.Time = run4.CreationTimestamp.Add(10 * time.Second) - run5.Name = "run5" - run5.Status.AppState.State = v1beta2.FailedState - c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(context.TODO(), run5, metav1.CreateOptions{}) - c.checkAndUpdatePastRuns(app, status) - assert.Equal(t, 2, len(status.PastFailedRunNames)) - assert.Equal(t, run5.Name, status.PastFailedRunNames[0]) - assert.Equal(t, run4.Name, status.PastFailedRunNames[1]) - - // The third failed run should have been recorded. - run6 := run1.DeepCopy() - run6.CreationTimestamp.Time = run5.CreationTimestamp.Add(10 * time.Second) - run6.Name = "run6" - run6.Status.AppState.State = v1beta2.FailedState - c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(context.TODO(), run6, metav1.CreateOptions{}) - c.checkAndUpdatePastRuns(app, status) - assert.Equal(t, 2, len(status.PastFailedRunNames)) - assert.Equal(t, run6.Name, status.PastFailedRunNames[0]) - assert.Equal(t, run5.Name, status.PastFailedRunNames[1]) - // SparkApplications of the last two failed runs should still exist, - // but the one of the first failed run should have been deleted. 
- existing, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get( - context.TODO(), - run6.Name, - metav1.GetOptions{}, - ) - assert.NotNil(t, existing) - existing, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get( - context.TODO(), - run5.Name, - metav1.GetOptions{}, - ) - assert.NotNil(t, existing) - existing, _ = c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get( - context.TODO(), - run4.Name, - metav1.GetOptions{}, - ) - assert.Nil(t, existing) -} - -func newFakeController() (*Controller, *clocktesting.FakeClock) { - crdClient := crdclientfake.NewSimpleClientset() - kubeClient := kubeclientfake.NewSimpleClientset() - apiExtensionsClient := apiextensionsfake.NewSimpleClientset() - informerFactory := crdinformers.NewSharedInformerFactory(crdClient, 1*time.Second) - clk := clocktesting.NewFakeClock(time.Now()) - controller := NewController(crdClient, kubeClient, apiExtensionsClient, informerFactory, clk) - ssaInformer := informerFactory.Sparkoperator().V1beta2().ScheduledSparkApplications().Informer() - saInformer := informerFactory.Sparkoperator().V1beta2().SparkApplications().Informer() - crdClient.PrependReactor("create", "scheduledsparkapplications", - func(action kubetesting.Action) (bool, runtime.Object, error) { - obj := action.(kubetesting.CreateAction).GetObject() - ssaInformer.GetStore().Add(obj) - return false, obj, nil - }) - crdClient.PrependReactor("update", "scheduledsparkapplications", - func(action kubetesting.Action) (bool, runtime.Object, error) { - obj := action.(kubetesting.UpdateAction).GetObject() - ssaInformer.GetStore().Update(obj) - return false, obj, nil - }) - crdClient.PrependReactor("create", "sparkapplications", - func(action kubetesting.Action) (bool, runtime.Object, error) { - obj := action.(kubetesting.CreateAction).GetObject() - saInformer.GetStore().Add(obj) - return false, obj, nil - }) - crdClient.PrependReactor("update", "sparkapplications", - func(action kubetesting.Action) (bool, runtime.Object, error) { - obj := action.(kubetesting.UpdateAction).GetObject() - saInformer.GetStore().Update(obj) - return false, obj, nil - }) - return controller, clk -} diff --git a/pkg/controller/sparkapplication/controller.go b/pkg/controller/sparkapplication/controller.go deleted file mode 100644 index 3e9b373b89..0000000000 --- a/pkg/controller/sparkapplication/controller.go +++ /dev/null @@ -1,1130 +0,0 @@ -/* -Copyright 2017 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package sparkapplication - -import ( - "context" - "fmt" - "os/exec" - "time" - - "github.com/golang/glog" - "github.com/google/uuid" - "golang.org/x/time/rate" - apiv1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/informers" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/scheme" - typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" - v1 "k8s.io/client-go/listers/core/v1" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/retry" - "k8s.io/client-go/util/workqueue" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - "github.com/kubeflow/spark-operator/pkg/batchscheduler" - schedulerinterface "github.com/kubeflow/spark-operator/pkg/batchscheduler/interface" - crdclientset "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned" - crdscheme "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/scheme" - crdinformers "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions" - crdlisters "github.com/kubeflow/spark-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta2" - "github.com/kubeflow/spark-operator/pkg/config" - "github.com/kubeflow/spark-operator/pkg/util" -) - -const ( - sparkExecutorIDLabel = "spark-exec-id" - podAlreadyExistsErrorCode = "code=409" - queueTokenRefillRate = 50 - queueTokenBucketSize = 500 -) - -var ( - keyFunc = cache.DeletionHandlingMetaNamespaceKeyFunc - execCommand = exec.Command -) - -// Controller manages instances of SparkApplication. -type Controller struct { - crdClient crdclientset.Interface - kubeClient clientset.Interface - queue workqueue.RateLimitingInterface - cacheSynced cache.InformerSynced - recorder record.EventRecorder - metrics *sparkAppMetrics - applicationLister crdlisters.SparkApplicationLister - podLister v1.PodLister - ingressURLFormat string - ingressClassName string - batchSchedulerMgr *batchscheduler.SchedulerManager - enableUIService bool -} - -// NewController creates a new Controller. 
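An aside on the queue wired up in `NewController` below: `queueTokenRefillRate` and `queueTokenBucketSize` configure a single token bucket (50 tokens/s, burst of 500) shared across all keys, rather than the workqueue default of per-item exponential back-off. A sketch mirroring that construction:

```go
package main

import (
	"golang.org/x/time/rate"
	"k8s.io/client-go/util/workqueue"
)

func main() {
	// One shared token bucket: bursts of pod/app events are absorbed
	// without backing off individual keys.
	limiter := &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(50), 500)}
	queue := workqueue.NewNamedRateLimitingQueue(limiter, "spark-application-controller")
	defer queue.ShutDown()

	queue.AddRateLimited("default/spark-pi") // enqueued once a token is available
}
```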
-func NewController( - crdClient crdclientset.Interface, - kubeClient clientset.Interface, - crdInformerFactory crdinformers.SharedInformerFactory, - podInformerFactory informers.SharedInformerFactory, - metricsConfig *util.MetricConfig, - namespace string, - ingressURLFormat string, - ingressClassName string, - batchSchedulerMgr *batchscheduler.SchedulerManager, - enableUIService bool) *Controller { - crdscheme.AddToScheme(scheme.Scheme) - - eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(glog.V(2).Infof) - eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{ - Interface: kubeClient.CoreV1().Events(namespace), - }) - recorder := eventBroadcaster.NewRecorder(scheme.Scheme, apiv1.EventSource{Component: "spark-operator"}) - - return newSparkApplicationController(crdClient, kubeClient, crdInformerFactory, podInformerFactory, recorder, metricsConfig, ingressURLFormat, ingressClassName, batchSchedulerMgr, enableUIService) -} - -func newSparkApplicationController( - crdClient crdclientset.Interface, - kubeClient clientset.Interface, - crdInformerFactory crdinformers.SharedInformerFactory, - podInformerFactory informers.SharedInformerFactory, - eventRecorder record.EventRecorder, - metricsConfig *util.MetricConfig, - ingressURLFormat string, - ingressClassName string, - batchSchedulerMgr *batchscheduler.SchedulerManager, - enableUIService bool) *Controller { - queue := workqueue.NewNamedRateLimitingQueue(&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(queueTokenRefillRate), queueTokenBucketSize)}, - "spark-application-controller") - - controller := &Controller{ - crdClient: crdClient, - kubeClient: kubeClient, - recorder: eventRecorder, - queue: queue, - ingressURLFormat: ingressURLFormat, - ingressClassName: ingressClassName, - batchSchedulerMgr: batchSchedulerMgr, - enableUIService: enableUIService, - } - - if metricsConfig != nil { - controller.metrics = newSparkAppMetrics(metricsConfig) - controller.metrics.registerMetrics() - } - - crdInformer := crdInformerFactory.Sparkoperator().V1beta2().SparkApplications() - crdInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: controller.onAdd, - UpdateFunc: controller.onUpdate, - DeleteFunc: controller.onDelete, - }) - controller.applicationLister = crdInformer.Lister() - - podsInformer := podInformerFactory.Core().V1().Pods() - sparkPodEventHandler := newSparkPodEventHandler(controller.queue.AddRateLimited, controller.applicationLister) - podsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: sparkPodEventHandler.onPodAdded, - UpdateFunc: sparkPodEventHandler.onPodUpdated, - DeleteFunc: sparkPodEventHandler.onPodDeleted, - }) - controller.podLister = podsInformer.Lister() - - controller.cacheSynced = func() bool { - return crdInformer.Informer().HasSynced() && podsInformer.Informer().HasSynced() - } - - return controller -} - -// Start starts the Controller by registering a watcher for SparkApplication objects. -func (c *Controller) Start(workers int, stopCh <-chan struct{}) error { - // Wait for all involved caches to be synced, before processing items from the queue is started. - if !cache.WaitForCacheSync(stopCh, c.cacheSynced) { - return fmt.Errorf("timed out waiting for cache to sync") - } - - glog.Info("Starting the workers of the SparkApplication controller") - for i := 0; i < workers; i++ { - // runWorker will loop until "something bad" happens. Until will then rekick - // the worker after one second. 
- go wait.Until(c.runWorker, time.Second, stopCh) - } - - return nil -} - -// Stop stops the controller. -func (c *Controller) Stop() { - glog.Info("Stopping the SparkApplication controller") - c.queue.ShutDown() -} - -// Callback function called when a new SparkApplication object gets created. -func (c *Controller) onAdd(obj interface{}) { - app := obj.(*v1beta2.SparkApplication) - glog.Infof("SparkApplication %s/%s was added, enqueuing it for submission", app.Namespace, app.Name) - c.enqueue(app) -} - -func (c *Controller) onUpdate(oldObj, newObj interface{}) { - oldApp := oldObj.(*v1beta2.SparkApplication) - newApp := newObj.(*v1beta2.SparkApplication) - - // The informer will call this function on non-updated resources during resync, avoid - // enqueuing unchanged applications, unless it has expired or is subject to retry. - if oldApp.ResourceVersion == newApp.ResourceVersion && !c.hasApplicationExpired(newApp) && !shouldRetry(newApp) { - return - } - - // The spec has changed. This is currently best effort as we can potentially miss updates - // and end up in an inconsistent state. - if !equality.Semantic.DeepEqual(oldApp.Spec, newApp.Spec) { - // Force-set the application status to Invalidating which handles clean-up and application re-run. - if _, err := c.updateApplicationStatusWithRetries(newApp, func(status *v1beta2.SparkApplicationStatus) { - status.AppState.State = v1beta2.InvalidatingState - }); err != nil { - c.recorder.Eventf( - newApp, - apiv1.EventTypeWarning, - "SparkApplicationSpecUpdateFailed", - "failed to process spec update for SparkApplication %s: %v", - newApp.Name, - err) - return - } - - c.recorder.Eventf( - newApp, - apiv1.EventTypeNormal, - "SparkApplicationSpecUpdateProcessed", - "Successfully processed spec update for SparkApplication %s", - newApp.Name) - } - - glog.V(2).Infof("SparkApplication %s/%s was updated, enqueuing it", newApp.Namespace, newApp.Name) - c.enqueue(newApp) -} - -func (c *Controller) onDelete(obj interface{}) { - var app *v1beta2.SparkApplication - switch obj.(type) { - case *v1beta2.SparkApplication: - app = obj.(*v1beta2.SparkApplication) - case cache.DeletedFinalStateUnknown: - deletedObj := obj.(cache.DeletedFinalStateUnknown).Obj - app = deletedObj.(*v1beta2.SparkApplication) - } - - if app != nil { - c.handleSparkApplicationDeletion(app) - c.recorder.Eventf( - app, - apiv1.EventTypeNormal, - "SparkApplicationDeleted", - "SparkApplication %s was deleted", - app.Name) - } -} - -// runWorker runs a single controller worker. -func (c *Controller) runWorker() { - defer utilruntime.HandleCrash() - for c.processNextItem() { - } -} - -func (c *Controller) processNextItem() bool { - key, quit := c.queue.Get() - - if quit { - return false - } - defer c.queue.Done(key) - - glog.V(2).Infof("Starting processing key: %q", key) - defer glog.V(2).Infof("Ending processing key: %q", key) - err := c.syncSparkApplication(key.(string)) - if err == nil { - // Successfully processed the key or the key was not found so tell the queue to stop tracking - // history for your key. This will reset things like failure counts for per-item rate limiting. - c.queue.Forget(key) - return true - } - - // There was a failure so be sure to report it. 
This method allows for pluggable error handling - // which can be used for things like cluster-monitoring - utilruntime.HandleError(fmt.Errorf("failed to sync SparkApplication %q: %v", key, err)) - return true -} - -func (c *Controller) getExecutorPods(app *v1beta2.SparkApplication) ([]*apiv1.Pod, error) { - matchLabels := getResourceLabels(app) - matchLabels[config.SparkRoleLabel] = config.SparkExecutorRole - // Fetch all the executor pods for the current run of the application. - selector := labels.SelectorFromSet(labels.Set(matchLabels)) - pods, err := c.podLister.Pods(app.Namespace).List(selector) - if err != nil { - return nil, fmt.Errorf("failed to get pods for SparkApplication %s/%s: %v", app.Namespace, app.Name, err) - } - return pods, nil -} - -func (c *Controller) getDriverPod(app *v1beta2.SparkApplication) (*apiv1.Pod, error) { - pod, err := c.podLister.Pods(app.Namespace).Get(app.Status.DriverInfo.PodName) - if err == nil { - return pod, nil - } - if !errors.IsNotFound(err) { - return nil, fmt.Errorf("failed to get driver pod %s: %v", app.Status.DriverInfo.PodName, err) - } - - // The driver pod was not found in the informer cache, try getting it directly from the API server. - pod, err = c.kubeClient.CoreV1().Pods(app.Namespace).Get(context.TODO(), app.Status.DriverInfo.PodName, metav1.GetOptions{}) - if err == nil { - return pod, nil - } - if !errors.IsNotFound(err) { - return nil, fmt.Errorf("failed to get driver pod %s: %v", app.Status.DriverInfo.PodName, err) - } - // Driver pod was not found on the API server either. - return nil, nil -} - -// getAndUpdateDriverState finds the driver pod of the application -// and updates the driver state based on the current phase of the pod. -func (c *Controller) getAndUpdateDriverState(app *v1beta2.SparkApplication) error { - // Either the driver pod doesn't exist yet or its name has not been updated. - if app.Status.DriverInfo.PodName == "" { - return fmt.Errorf("empty driver pod name with application state %s", app.Status.AppState.State) - } - - driverPod, err := c.getDriverPod(app) - if err != nil { - return err - } - - if driverPod == nil { - app.Status.AppState.ErrorMessage = "driver pod not found" - app.Status.AppState.State = v1beta2.FailingState - app.Status.TerminationTime = metav1.Now() - return nil - } - - app.Status.SparkApplicationID = getSparkApplicationID(driverPod) - driverState := podStatusToDriverState(driverPod.Status) - - if hasDriverTerminated(driverState) { - if app.Status.TerminationTime.IsZero() { - app.Status.TerminationTime = metav1.Now() - } - if driverState == v1beta2.DriverFailedState { - state := getDriverContainerTerminatedState(driverPod.Status) - if state != nil { - if state.ExitCode != 0 { - app.Status.AppState.ErrorMessage = fmt.Sprintf("driver container failed with ExitCode: %d, Reason: %s", state.ExitCode, state.Reason) - } - } else { - app.Status.AppState.ErrorMessage = "driver container status missing" - } - } - } - - newState := driverStateToApplicationState(driverState) - // Only record a driver event if the application state (derived from the driver pod phase) has changed. - if newState != app.Status.AppState.State { - c.recordDriverEvent(app, driverState, driverPod.Name) - app.Status.AppState.State = newState - } - - return nil -} - -// getAndUpdateExecutorState lists the executor pods of the application -// and updates the executor state based on the current phase of the pods. 
-func (c *Controller) getAndUpdateExecutorState(app *v1beta2.SparkApplication) error {
- pods, err := c.getExecutorPods(app)
- if err != nil {
- return err
- }
-
- executorStateMap := make(map[string]v1beta2.ExecutorState)
- var executorApplicationID string
- for _, pod := range pods {
- if util.IsExecutorPod(pod) {
- newState := podPhaseToExecutorState(pod.Status.Phase)
- oldState, exists := app.Status.ExecutorState[pod.Name]
- // Only record an executor event if the executor state is new or it has changed.
- if !exists || newState != oldState {
- if newState == v1beta2.ExecutorFailedState {
- execContainerState := getExecutorContainerTerminatedState(pod.Status)
- if execContainerState != nil {
- c.recordExecutorEvent(app, newState, pod.Name, execContainerState.ExitCode, execContainerState.Reason)
- } else {
- // If we can't find the container state,
- // we need to set the exitCode and the Reason to unambiguous values.
- c.recordExecutorEvent(app, newState, pod.Name, -1, "Unknown (Container not Found)")
- }
- } else {
- c.recordExecutorEvent(app, newState, pod.Name)
- }
- }
- executorStateMap[pod.Name] = newState
-
- if executorApplicationID == "" {
- executorApplicationID = getSparkApplicationID(pod)
- }
- }
- }
-
- // The ApplicationID label can differ between the driver and executors. Prefer the executor ApplicationID if set.
- // Refer to https://issues.apache.org/jira/projects/SPARK/issues/SPARK-25922 for details.
- if executorApplicationID != "" {
- app.Status.SparkApplicationID = executorApplicationID
- }
-
- if app.Status.ExecutorState == nil {
- app.Status.ExecutorState = make(map[string]v1beta2.ExecutorState)
- }
- for name, execStatus := range executorStateMap {
- app.Status.ExecutorState[name] = execStatus
- }
-
- // Handle missing/deleted executors.
- for name, oldStatus := range app.Status.ExecutorState {
- _, exists := executorStateMap[name]
- if !isExecutorTerminated(oldStatus) && !exists {
- if !isDriverRunning(app) {
- // If the application state is COMPLETED, the driver pod completed successfully and the
- // executor pods have terminated and been cleaned up, so the executor pod can no longer
- // be found; under these circumstances, we assume the executors completed.
- if app.Status.AppState.State == v1beta2.CompletedState {
- app.Status.ExecutorState[name] = v1beta2.ExecutorCompletedState
- } else {
- glog.Infof("Executor pod %s not found, assuming it was deleted.", name)
- app.Status.ExecutorState[name] = v1beta2.ExecutorFailedState
- }
- } else {
- app.Status.ExecutorState[name] = v1beta2.ExecutorUnknownState
- }
- }
- }
-
- return nil
-}
-
-func (c *Controller) getAndUpdateAppState(app *v1beta2.SparkApplication) error {
- if err := c.getAndUpdateDriverState(app); err != nil {
- return err
- }
- if err := c.getAndUpdateExecutorState(app); err != nil {
- return err
- }
- return nil
-}
-
-func (c *Controller) handleSparkApplicationDeletion(app *v1beta2.SparkApplication) {
- if c.metrics != nil {
- c.metrics.exportMetricsOnDelete(app)
- }
- // SparkApplication deletion was requested, so delete the driver pod and associated resources.
- if err := c.deleteSparkResources(app); err != nil {
- glog.Errorf("failed to delete resources associated with deleted SparkApplication %s/%s: %v", app.Namespace, app.Name, err)
- }
-}
-
-// shouldRetry determines whether a SparkApplication in a given state should be retried.
-func shouldRetry(app *v1beta2.SparkApplication) bool { - switch app.Status.AppState.State { - case v1beta2.SucceedingState: - return app.Spec.RestartPolicy.Type == v1beta2.Always - case v1beta2.FailingState: - if app.Spec.RestartPolicy.Type == v1beta2.Always { - return true - } else if app.Spec.RestartPolicy.Type == v1beta2.OnFailure { - // We retry if we haven't hit the retry limit. - if app.Spec.RestartPolicy.OnFailureRetries != nil && app.Status.ExecutionAttempts <= *app.Spec.RestartPolicy.OnFailureRetries { - return true - } - } - case v1beta2.FailedSubmissionState: - if app.Spec.RestartPolicy.Type == v1beta2.Always { - return true - } else if app.Spec.RestartPolicy.Type == v1beta2.OnFailure { - // We retry if we haven't hit the retry limit. - if app.Spec.RestartPolicy.OnSubmissionFailureRetries != nil && app.Status.SubmissionAttempts <= *app.Spec.RestartPolicy.OnSubmissionFailureRetries { - return true - } - } - } - return false -} - -// State Machine for SparkApplication: -// +--------------------------------------------------------------------------------------------------------------------+ -// | +---------------------------------------------------------------------------------------------+ | -// | | +----------+ | | -// | | | | | | -// | | | | | | -// | | |Submission| | | -// | | +----> Failed +----+------------------------------------------------------------------+ | | -// | | | | | | | | | -// | | | | | | | | | -// | | | +----^-----+ | +-----------------------------------------+ | | | -// | | | | | | | | | | -// | | | | | | | | | | -// | +-+--+----+ | +-----v--+-+ +----------+ +-----v-----+ +----v--v--+ | -// | | | | | | | | | | | | | -// | | | | | | | | | | | | | -// | | New +---------> Submitted+----------> Running +-----------> Failing +----------> Failed | | -// | | | | | | | | | | | | | -// | | | | | | | | | | | | | -// | | | | | | | | | | | | | -// | +---------+ | +----^-----+ +-----+----+ +-----+-----+ +----------+ | -// | | | | | | -// | | | | | | -// | +------------+ | | +-------------------------------+ | -// | | | | +-----+-----+ | | +-----------+ +----------+ | -// | | | | | Pending | | | | | | | | -// | | | +---+ Rerun <-------+ +---------------->Succeeding +---------->Completed | | -// | |Invalidating| | <-------+ | | | | | -// | | +-------> | | | | | | | -// | | | | | | | | | | | -// | | | +-----------+ | +-----+-----+ +----------+ | -// | +------------+ | | | -// | | | | -// | +-------------------------------+ | -// | | -// +--------------------------------------------------------------------------------------------------------------------+ -func (c *Controller) syncSparkApplication(key string) error { - namespace, name, err := cache.SplitMetaNamespaceKey(key) - if err != nil { - return fmt.Errorf("failed to get the namespace and name from key %s: %v", key, err) - } - app, err := c.getSparkApplication(namespace, name) - if err != nil { - return err - } - if app == nil { - // SparkApplication not found. - return nil - } - if !app.DeletionTimestamp.IsZero() { - c.handleSparkApplicationDeletion(app) - return nil - } - - appCopy := app.DeepCopy() - // Apply the default values to the copy. Note that the default values applied - // won't be sent to the API server as we only update the /status subresource. - v1beta2.SetSparkApplicationDefaults(appCopy) - - // Take action based on application state. 
- switch appCopy.Status.AppState.State { - case v1beta2.NewState: - c.recordSparkApplicationEvent(appCopy) - if err := c.validateSparkApplication(appCopy); err != nil { - appCopy.Status.AppState.State = v1beta2.FailedState - appCopy.Status.AppState.ErrorMessage = err.Error() - } else { - appCopy = c.submitSparkApplication(appCopy) - } - case v1beta2.SucceedingState: - if !shouldRetry(appCopy) { - appCopy.Status.AppState.State = v1beta2.CompletedState - c.recordSparkApplicationEvent(appCopy) - } else { - if err := c.deleteSparkResources(appCopy); err != nil { - glog.Errorf("failed to delete resources associated with SparkApplication %s/%s: %v", - appCopy.Namespace, appCopy.Name, err) - return err - } - appCopy.Status.AppState.State = v1beta2.PendingRerunState - } - case v1beta2.FailingState: - if !shouldRetry(appCopy) { - appCopy.Status.AppState.State = v1beta2.FailedState - c.recordSparkApplicationEvent(appCopy) - } else if isNextRetryDue(appCopy.Spec.RestartPolicy.OnFailureRetryInterval, appCopy.Status.ExecutionAttempts, appCopy.Status.TerminationTime) { - if err := c.deleteSparkResources(appCopy); err != nil { - glog.Errorf("failed to delete resources associated with SparkApplication %s/%s: %v", - appCopy.Namespace, appCopy.Name, err) - return err - } - appCopy.Status.AppState.State = v1beta2.PendingRerunState - } - case v1beta2.FailedSubmissionState: - if !shouldRetry(appCopy) { - // App will never be retried. Move to terminal FailedState. - appCopy.Status.AppState.State = v1beta2.FailedState - c.recordSparkApplicationEvent(appCopy) - } else if isNextRetryDue(appCopy.Spec.RestartPolicy.OnSubmissionFailureRetryInterval, appCopy.Status.SubmissionAttempts, appCopy.Status.LastSubmissionAttemptTime) { - if c.validateSparkResourceDeletion(appCopy) { - c.submitSparkApplication(appCopy) - } else { - if err := c.deleteSparkResources(appCopy); err != nil { - glog.Errorf("failed to delete resources associated with SparkApplication %s/%s: %v", - appCopy.Namespace, appCopy.Name, err) - return err - } - } - } - case v1beta2.InvalidatingState: - // Invalidate the current run and enqueue the SparkApplication for re-execution. 
- if err := c.deleteSparkResources(appCopy); err != nil {
- glog.Errorf("failed to delete resources associated with SparkApplication %s/%s: %v",
- appCopy.Namespace, appCopy.Name, err)
- return err
- }
- c.clearStatus(&appCopy.Status)
- appCopy.Status.AppState.State = v1beta2.PendingRerunState
- case v1beta2.PendingRerunState:
- glog.V(2).Infof("SparkApplication %s/%s is pending rerun", appCopy.Namespace, appCopy.Name)
- if c.validateSparkResourceDeletion(appCopy) {
- glog.V(2).Infof("Resources for SparkApplication %s/%s successfully deleted", appCopy.Namespace, appCopy.Name)
- c.recordSparkApplicationEvent(appCopy)
- c.clearStatus(&appCopy.Status)
- appCopy = c.submitSparkApplication(appCopy)
- }
- case v1beta2.SubmittedState, v1beta2.RunningState, v1beta2.UnknownState:
- if err := c.getAndUpdateAppState(appCopy); err != nil {
- return err
- }
- case v1beta2.CompletedState, v1beta2.FailedState:
- if c.hasApplicationExpired(app) {
- glog.Infof("Garbage collecting expired SparkApplication %s/%s", app.Namespace, app.Name)
- err := c.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Delete(context.TODO(), app.Name, metav1.DeleteOptions{GracePeriodSeconds: int64ptr(0)})
- if err != nil && !errors.IsNotFound(err) {
- return err
- }
- return nil
- }
- if err := c.getAndUpdateExecutorState(appCopy); err != nil {
- return err
- }
- }
-
- if appCopy != nil {
- err = c.updateStatusAndExportMetrics(app, appCopy)
- if err != nil {
- glog.Errorf("failed to update SparkApplication %s/%s: %v", app.Namespace, app.Name, err)
- return err
- }
-
- if state := appCopy.Status.AppState.State; state == v1beta2.CompletedState ||
- state == v1beta2.FailedState {
- if err := c.cleanUpOnTermination(app, appCopy); err != nil {
- glog.Errorf("failed to clean up resources for SparkApplication %s/%s: %v", app.Namespace, app.Name, err)
- return err
- }
- }
- }
-
- return nil
-}
-
-// isNextRetryDue determines whether the next retry of the SparkApplication is due now.
-func isNextRetryDue(retryInterval *int64, attemptsDone int32, lastEventTime metav1.Time) bool {
- if retryInterval == nil || lastEventTime.IsZero() || attemptsDone <= 0 {
- return false
- }
-
- // Retry once we have waited at least attempts*RetryInterval, since we do a linear back-off.
- interval := time.Duration(*retryInterval) * time.Second * time.Duration(attemptsDone)
- currentTime := time.Now()
- glog.V(3).Infof("currentTime is %v, interval is %v", currentTime, interval)
- if currentTime.After(lastEventTime.Add(interval)) {
- return true
- }
- return false
-}
-
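To make the linear back-off in `isNextRetryDue` concrete: with `OnFailureRetryInterval: 10` and three failed attempts already recorded, the next retry becomes due 30 seconds after the last termination time. A standalone sketch of the same arithmetic, with illustrative values:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	retryInterval := int64(10)                         // seconds, from the RestartPolicy
	attemptsDone := int32(3)                           // Status.ExecutionAttempts
	lastEventTime := time.Now().Add(-45 * time.Second) // Status.TerminationTime

	// Linear back-off: wait attempts * interval, not interval * 2^attempts.
	wait := time.Duration(retryInterval) * time.Second * time.Duration(attemptsDone)
	fmt.Println("retry due:", time.Now().After(lastEventTime.Add(wait))) // true: 45s > 30s
}
```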
-	if needScheduling, scheduler := c.shouldDoBatchScheduling(app); needScheduling {
-		err := scheduler.DoBatchSchedulingOnSubmission(app)
-		if err != nil {
-			glog.Errorf("failed to process batch scheduler BeforeSubmitSparkApplication with error %v", err)
-			return app
-		}
-	}
-
-	driverInfo := v1beta2.DriverInfo{}
-
-	if c.enableUIService {
-		service, err := createSparkUIService(app, c.kubeClient)
-		if err != nil {
-			glog.Errorf("failed to create UI service for SparkApplication %s/%s: %v", app.Namespace, app.Name, err)
-		} else {
-			driverInfo.WebUIServiceName = service.serviceName
-			driverInfo.WebUIPort = service.servicePort
-			// Use the port of the service just created; the port recorded in the
-			// old status may be unset or stale at this point.
-			driverInfo.WebUIAddress = fmt.Sprintf("%s:%d", service.serviceIP, driverInfo.WebUIPort)
-			// Create UI Ingress if ingress-format is set.
-			if c.ingressURLFormat != "" {
-				// We are going to want to use an ingress url.
-				ingressURL, err := getDriverIngressURL(c.ingressURLFormat, app.GetName(), app.GetNamespace())
-				if err != nil {
-					glog.Errorf("failed to get the spark ingress url %s/%s: %v", app.Namespace, app.Name, err)
-				} else {
-					// Need to ensure the spark.ui variables are configured correctly if a subPath is used.
-					if ingressURL.Path != "" {
-						if app.Spec.SparkConf == nil {
-							app.Spec.SparkConf = make(map[string]string)
-						}
-						app.Spec.SparkConf["spark.ui.proxyBase"] = ingressURL.Path
-						app.Spec.SparkConf["spark.ui.proxyRedirectUri"] = "/"
-					}
-					ingress, err := createSparkUIIngress(app, *service, ingressURL, c.ingressClassName, c.kubeClient)
-					if err != nil {
-						glog.Errorf("failed to create UI Ingress for SparkApplication %s/%s: %v", app.Namespace, app.Name, err)
-					} else {
-						driverInfo.WebUIIngressAddress = ingress.ingressURL.String()
-						driverInfo.WebUIIngressName = ingress.ingressName
-					}
-				}
-			}
-		}
-	}
-
-	for _, driverIngressConfiguration := range app.Spec.DriverIngressOptions {
-		service, err := createDriverIngressServiceFromConfiguration(app, &driverIngressConfiguration, c.kubeClient)
-		if err != nil {
-			glog.Errorf("failed to create driver ingress service for SparkApplication %s/%s: %v", app.Namespace, app.Name, err)
-			continue
-		}
-		glog.Infof("Created driver ingress service %s (port: %d) for SparkApplication %s/%s", service.serviceName, service.servicePort, app.Namespace, app.Name)
-		// Create ingress if ingress-format is set.
-		if driverIngressConfiguration.IngressURLFormat != "" {
-			// We are going to want to use an ingress url.
-			ingressURL, err := getDriverIngressURL(driverIngressConfiguration.IngressURLFormat, app.GetName(), app.GetNamespace())
-			if err != nil {
-				glog.Errorf("failed to get the driver ingress url %s/%s: %v", app.Namespace, app.Name, err)
-			} else {
-				ingress, err := createDriverIngress(app, &driverIngressConfiguration, *service, ingressURL, c.ingressClassName, c.kubeClient)
-				if err != nil {
-					glog.Errorf("failed to create driver ingress for SparkApplication %s/%s: %v", app.Namespace, app.Name, err)
-				} else {
-					// Only log success when the ingress was actually created; logging
-					// unconditionally here would dereference a nil ingress on failure.
-					glog.Infof("Created driver ingress %s (url: %s) for SparkApplication %s/%s", ingress.ingressName, ingress.ingressURL, app.Namespace, app.Name)
-				}
-			}
-		}
-	}
-
-	driverPodName := getDriverPodName(app)
-	driverInfo.PodName = driverPodName
-	submissionID := uuid.New().String()
-	submissionCmdArgs, err := buildSubmissionCommandArgs(app, driverPodName, submissionID)
-	if err != nil {
-		app.Status = v1beta2.SparkApplicationStatus{
-			AppState: v1beta2.ApplicationState{
-				State:        v1beta2.FailedSubmissionState,
-				ErrorMessage: err.Error(),
-			},
-			SubmissionAttempts:        app.Status.SubmissionAttempts + 1,
-			LastSubmissionAttemptTime: metav1.Now(),
-		}
-		return app
-	}
-	// Try submitting the application by running spark-submit.
-	submitted, err := runSparkSubmit(newSubmission(submissionCmdArgs, app))
-	if err != nil {
-		app.Status = v1beta2.SparkApplicationStatus{
-			AppState: v1beta2.ApplicationState{
-				State:        v1beta2.FailedSubmissionState,
-				ErrorMessage: err.Error(),
-			},
-			SubmissionAttempts:        app.Status.SubmissionAttempts + 1,
-			LastSubmissionAttemptTime: metav1.Now(),
-		}
-		c.recordSparkApplicationEvent(app)
-		glog.Errorf("failed to run spark-submit for SparkApplication %s/%s: %v", app.Namespace, app.Name, err)
-		return app
-	}
-	if !submitted {
-		// The application may not have been submitted even if err == nil, e.g., when some
-		// state update caused an attempt to re-submit the application, in which case no
-		// error gets returned from runSparkSubmit. If this is the case, we simply return.
- return app - } - - glog.Infof("SparkApplication %s/%s has been submitted", app.Namespace, app.Name) - app.Status = v1beta2.SparkApplicationStatus{ - SubmissionID: submissionID, - AppState: v1beta2.ApplicationState{ - State: v1beta2.SubmittedState, - }, - DriverInfo: driverInfo, - SubmissionAttempts: app.Status.SubmissionAttempts + 1, - ExecutionAttempts: app.Status.ExecutionAttempts + 1, - LastSubmissionAttemptTime: metav1.Now(), - } - c.recordSparkApplicationEvent(app) - - return app -} - -func (c *Controller) shouldDoBatchScheduling(app *v1beta2.SparkApplication) (bool, schedulerinterface.BatchScheduler) { - if c.batchSchedulerMgr == nil || app.Spec.BatchScheduler == nil || *app.Spec.BatchScheduler == "" { - return false, nil - } - - scheduler, err := c.batchSchedulerMgr.GetScheduler(*app.Spec.BatchScheduler) - if err != nil { - glog.Errorf("failed to get batch scheduler for name %s, %v", *app.Spec.BatchScheduler, err) - return false, nil - } - return scheduler.ShouldSchedule(app), scheduler -} - -func (c *Controller) updateApplicationStatusWithRetries( - original *v1beta2.SparkApplication, - updateFunc func(status *v1beta2.SparkApplicationStatus)) (*v1beta2.SparkApplication, error) { - toUpdate := original.DeepCopy() - updateErr := wait.ExponentialBackoff(retry.DefaultBackoff, func() (ok bool, err error) { - updateFunc(&toUpdate.Status) - if equality.Semantic.DeepEqual(original.Status, toUpdate.Status) { - return true, nil - } - - toUpdate, err = c.crdClient.SparkoperatorV1beta2().SparkApplications(original.Namespace).UpdateStatus(context.TODO(), toUpdate, metav1.UpdateOptions{}) - if err == nil { - return true, nil - } - if !errors.IsConflict(err) { - return false, err - } - - // There was a conflict updating the SparkApplication, fetch the latest version from the API server. - toUpdate, err = c.crdClient.SparkoperatorV1beta2().SparkApplications(original.Namespace).Get(context.TODO(), original.Name, metav1.GetOptions{}) - if err != nil { - glog.Errorf("failed to get SparkApplication %s/%s: %v", original.Namespace, original.Name, err) - return false, err - } - - // Retry with the latest version. - return false, nil - }) - - if updateErr != nil { - glog.Errorf("failed to update SparkApplication %s/%s: %v", original.Namespace, original.Name, updateErr) - return nil, updateErr - } - - return toUpdate, nil -} - -// updateStatusAndExportMetrics updates the status of the SparkApplication and export the metrics. -func (c *Controller) updateStatusAndExportMetrics(oldApp, newApp *v1beta2.SparkApplication) error { - // Skip update if nothing changed. - if equality.Semantic.DeepEqual(oldApp.Status, newApp.Status) { - return nil - } - - oldStatusJSON, err := printStatus(&oldApp.Status) - if err != nil { - return err - } - newStatusJSON, err := printStatus(&newApp.Status) - if err != nil { - return err - } - - glog.V(2).Infof("Update the status of SparkApplication %s/%s from:\n%s\nto:\n%s", newApp.Namespace, newApp.Name, oldStatusJSON, newStatusJSON) - updatedApp, err := c.updateApplicationStatusWithRetries(oldApp, func(status *v1beta2.SparkApplicationStatus) { - *status = newApp.Status - }) - if err != nil { - return err - } - - // Export metrics if the update was successful. 
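`updateApplicationStatusWithRetries` above hand-rolls a Get/mutate/UpdateStatus loop around 409 Conflicts. As a rough sketch of the same optimistic-concurrency pattern expressed with client-go's retry helper (the `setAppState` wrapper is hypothetical; the clientset interface is the generated one this repo already ships):

```go
package sketch

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/util/retry"

	"github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
	crdclientset "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned"
)

// setAppState re-fetches the object on every attempt so that a 409 Conflict
// from a concurrent writer is retried against the fresh resourceVersion.
func setAppState(crdClient crdclientset.Interface, ns, name string, state v1beta2.ApplicationStateType) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		latest, err := crdClient.SparkoperatorV1beta2().SparkApplications(ns).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		latest.Status.AppState.State = state
		_, err = crdClient.SparkoperatorV1beta2().SparkApplications(ns).UpdateStatus(context.TODO(), latest, metav1.UpdateOptions{})
		return err
	})
}
```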
-	if c.metrics != nil {
-		c.metrics.exportMetrics(oldApp, updatedApp)
-	}
-
-	return nil
-}
-
-func (c *Controller) getSparkApplication(namespace string, name string) (*v1beta2.SparkApplication, error) {
-	app, err := c.applicationLister.SparkApplications(namespace).Get(name)
-	if err != nil {
-		if errors.IsNotFound(err) {
-			return nil, nil
-		}
-		return nil, err
-	}
-	return app, nil
-}
-
-// Delete the driver pod and optional UI resources (Service/Ingress) created for the application.
-func (c *Controller) deleteSparkResources(app *v1beta2.SparkApplication) error {
-	driverPodName := app.Status.DriverInfo.PodName
-	// Derive the driver pod name in case the driver pod name was not recorded in the status,
-	// which could happen if the status update right after submission failed.
-	if driverPodName == "" {
-		driverPodName = getDriverPodName(app)
-	}
-
-	glog.V(2).Infof("Deleting pod %s in namespace %s", driverPodName, app.Namespace)
-	err := c.kubeClient.CoreV1().Pods(app.Namespace).Delete(context.TODO(), driverPodName, metav1.DeleteOptions{})
-	if err != nil && !errors.IsNotFound(err) {
-		return err
-	}
-
-	sparkUIServiceName := app.Status.DriverInfo.WebUIServiceName
-	if sparkUIServiceName != "" {
-		glog.V(2).Infof("Deleting Spark UI Service %s in namespace %s", sparkUIServiceName, app.Namespace)
-		err := c.kubeClient.CoreV1().Services(app.Namespace).Delete(context.TODO(), sparkUIServiceName, metav1.DeleteOptions{GracePeriodSeconds: int64ptr(0)})
-		if err != nil && !errors.IsNotFound(err) {
-			return err
-		}
-	}
-
-	sparkUIIngressName := app.Status.DriverInfo.WebUIIngressName
-	if sparkUIIngressName != "" {
-		if util.IngressCapabilities.Has("networking.k8s.io/v1") {
-			glog.V(2).Infof("Deleting Spark UI Ingress %s in namespace %s", sparkUIIngressName, app.Namespace)
-			err := c.kubeClient.NetworkingV1().Ingresses(app.Namespace).Delete(context.TODO(), sparkUIIngressName, metav1.DeleteOptions{GracePeriodSeconds: int64ptr(0)})
-			if err != nil && !errors.IsNotFound(err) {
-				return err
-			}
-		}
-		if util.IngressCapabilities.Has("extensions/v1beta1") {
-			glog.V(2).Infof("Deleting extensions/v1beta1 Spark UI Ingress %s in namespace %s", sparkUIIngressName, app.Namespace)
-			err := c.kubeClient.ExtensionsV1beta1().Ingresses(app.Namespace).Delete(context.TODO(), sparkUIIngressName, metav1.DeleteOptions{GracePeriodSeconds: int64ptr(0)})
-			if err != nil && !errors.IsNotFound(err) {
-				return err
-			}
-		}
-	}
-
-	return nil
-}
-
-func (c *Controller) validateSparkApplication(app *v1beta2.SparkApplication) error {
-	appSpec := app.Spec
-	driverSpec := appSpec.Driver
-	executorSpec := appSpec.Executor
-	if appSpec.NodeSelector != nil && (driverSpec.NodeSelector != nil || executorSpec.NodeSelector != nil) {
-		return fmt.Errorf("NodeSelector property can be defined at the SparkApplication level or at the Driver/Executor level, but not both")
-	}
-
-	// Initialize the maps before the loop; writing to a nil map would panic.
-	servicePorts := make(map[int32]bool)
-	ingressURLFormats := make(map[string]bool)
-	for _, item := range appSpec.DriverIngressOptions {
-		if item.ServicePort == nil {
-			return fmt.Errorf("DriverIngressOptions has nil ServicePort")
-		}
-		if servicePorts[*item.ServicePort] {
-			return fmt.Errorf("DriverIngressOptions has duplicate ServicePort: %d", *item.ServicePort)
-		}
-		servicePorts[*item.ServicePort] = true
-
-		if item.IngressURLFormat == "" {
-			return fmt.Errorf("DriverIngressOptions has empty IngressURLFormat")
-		}
-		if ingressURLFormats[item.IngressURLFormat] {
-			return fmt.Errorf("DriverIngressOptions has duplicate IngressURLFormat: %s", item.IngressURLFormat)
-		}
-
-
ingressURLFormats[item.IngressURLFormat] = true - } - - return nil -} - -// Validate that any Spark resources (driver/Service/Ingress) created for the application have been deleted. -func (c *Controller) validateSparkResourceDeletion(app *v1beta2.SparkApplication) bool { - driverPodName := app.Status.DriverInfo.PodName - // Derive the driver pod name in case the driver pod name was not recorded in the status, - // which could happen if the status update right after submission failed. - if driverPodName == "" { - driverPodName = getDriverPodName(app) - } - _, err := c.kubeClient.CoreV1().Pods(app.Namespace).Get(context.TODO(), driverPodName, metav1.GetOptions{}) - if err == nil || !errors.IsNotFound(err) { - return false - } - - sparkUIServiceName := app.Status.DriverInfo.WebUIServiceName - if sparkUIServiceName != "" { - _, err := c.kubeClient.CoreV1().Services(app.Namespace).Get(context.TODO(), sparkUIServiceName, metav1.GetOptions{}) - if err == nil || !errors.IsNotFound(err) { - return false - } - } - - sparkUIIngressName := app.Status.DriverInfo.WebUIIngressName - if sparkUIIngressName != "" { - _, err := c.kubeClient.NetworkingV1().Ingresses(app.Namespace).Get(context.TODO(), sparkUIIngressName, metav1.GetOptions{}) - if err == nil || !errors.IsNotFound(err) { - return false - } - } - - return true -} - -func (c *Controller) enqueue(obj interface{}) { - key, err := keyFunc(obj) - if err != nil { - glog.Errorf("failed to get key for %v: %v", obj, err) - return - } - - c.queue.AddRateLimited(key) -} - -func (c *Controller) recordSparkApplicationEvent(app *v1beta2.SparkApplication) { - switch app.Status.AppState.State { - case v1beta2.NewState: - c.recorder.Eventf( - app, - apiv1.EventTypeNormal, - "SparkApplicationAdded", - "SparkApplication %s was added, enqueuing it for submission", - app.Name) - case v1beta2.SubmittedState: - c.recorder.Eventf( - app, - apiv1.EventTypeNormal, - "SparkApplicationSubmitted", - "SparkApplication %s was submitted successfully", - app.Name) - case v1beta2.FailedSubmissionState: - c.recorder.Eventf( - app, - apiv1.EventTypeWarning, - "SparkApplicationSubmissionFailed", - "failed to submit SparkApplication %s: %s", - app.Name, - app.Status.AppState.ErrorMessage) - case v1beta2.CompletedState: - c.recorder.Eventf( - app, - apiv1.EventTypeNormal, - "SparkApplicationCompleted", - "SparkApplication %s completed", - app.Name) - case v1beta2.FailedState: - c.recorder.Eventf( - app, - apiv1.EventTypeWarning, - "SparkApplicationFailed", - "SparkApplication %s failed: %s", - app.Name, - app.Status.AppState.ErrorMessage) - case v1beta2.PendingRerunState: - c.recorder.Eventf( - app, - apiv1.EventTypeWarning, - "SparkApplicationPendingRerun", - "SparkApplication %s is pending rerun", - app.Name) - } -} - -func (c *Controller) recordDriverEvent(app *v1beta2.SparkApplication, phase v1beta2.DriverState, name string) { - switch phase { - case v1beta2.DriverCompletedState: - c.recorder.Eventf(app, apiv1.EventTypeNormal, "SparkDriverCompleted", "Driver %s completed", name) - case v1beta2.DriverPendingState: - c.recorder.Eventf(app, apiv1.EventTypeNormal, "SparkDriverPending", "Driver %s is pending", name) - case v1beta2.DriverRunningState: - c.recorder.Eventf(app, apiv1.EventTypeNormal, "SparkDriverRunning", "Driver %s is running", name) - case v1beta2.DriverFailedState: - c.recorder.Eventf(app, apiv1.EventTypeWarning, "SparkDriverFailed", "Driver %s failed", name) - case v1beta2.DriverUnknownState: - c.recorder.Eventf(app, apiv1.EventTypeWarning, "SparkDriverUnknownState", 
"Driver %s in unknown state", name) - } -} - -func (c *Controller) recordExecutorEvent(app *v1beta2.SparkApplication, state v1beta2.ExecutorState, args ...interface{}) { - switch state { - case v1beta2.ExecutorCompletedState: - c.recorder.Eventf(app, apiv1.EventTypeNormal, "SparkExecutorCompleted", "Executor %s completed", args) - case v1beta2.ExecutorPendingState: - c.recorder.Eventf(app, apiv1.EventTypeNormal, "SparkExecutorPending", "Executor %s is pending", args) - case v1beta2.ExecutorRunningState: - c.recorder.Eventf(app, apiv1.EventTypeNormal, "SparkExecutorRunning", "Executor %s is running", args) - case v1beta2.ExecutorFailedState: - c.recorder.Eventf(app, apiv1.EventTypeWarning, "SparkExecutorFailed", "Executor %s failed with ExitCode: %d, Reason: %s", args) - case v1beta2.ExecutorUnknownState: - c.recorder.Eventf(app, apiv1.EventTypeWarning, "SparkExecutorUnknownState", "Executor %s in unknown state", args) - } -} - -func (c *Controller) clearStatus(status *v1beta2.SparkApplicationStatus) { - if status.AppState.State == v1beta2.InvalidatingState { - status.SparkApplicationID = "" - status.SubmissionAttempts = 0 - status.ExecutionAttempts = 0 - status.LastSubmissionAttemptTime = metav1.Time{} - status.TerminationTime = metav1.Time{} - status.AppState.ErrorMessage = "" - status.ExecutorState = nil - } else if status.AppState.State == v1beta2.PendingRerunState { - status.SparkApplicationID = "" - status.SubmissionAttempts = 0 - status.LastSubmissionAttemptTime = metav1.Time{} - status.DriverInfo = v1beta2.DriverInfo{} - status.AppState.ErrorMessage = "" - status.ExecutorState = nil - } -} - -func (c *Controller) hasApplicationExpired(app *v1beta2.SparkApplication) bool { - // The application has no TTL defined and will never expire. - if app.Spec.TimeToLiveSeconds == nil { - return false - } - - ttl := time.Duration(*app.Spec.TimeToLiveSeconds) * time.Second - now := time.Now() - if !app.Status.TerminationTime.IsZero() && now.Sub(app.Status.TerminationTime.Time) > ttl { - return true - } - - return false -} - -// Clean up when the spark application is terminated. -func (c *Controller) cleanUpOnTermination(oldApp, newApp *v1beta2.SparkApplication) error { - if needScheduling, scheduler := c.shouldDoBatchScheduling(newApp); needScheduling { - if err := scheduler.CleanupOnCompletion(newApp); err != nil { - return err - } - } - return nil -} - -func int64ptr(n int64) *int64 { - return &n -} diff --git a/pkg/controller/sparkapplication/controller_test.go b/pkg/controller/sparkapplication/controller_test.go deleted file mode 100644 index 44f9003dbf..0000000000 --- a/pkg/controller/sparkapplication/controller_test.go +++ /dev/null @@ -1,1674 +0,0 @@ -/* -Copyright 2017 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package sparkapplication - -import ( - "context" - "fmt" - "os" - "os/exec" - "strings" - "testing" - "time" - - "github.com/prometheus/client_golang/prometheus" - prometheus_model "github.com/prometheus/client_model/go" - "github.com/stretchr/testify/assert" - apiv1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/informers" - kubeclientfake "k8s.io/client-go/kubernetes/fake" - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/record" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - crdclientfake "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/fake" - crdinformers "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions" - "github.com/kubeflow/spark-operator/pkg/config" - "github.com/kubeflow/spark-operator/pkg/util" -) - -func newFakeController(app *v1beta2.SparkApplication, pods ...*apiv1.Pod) (*Controller, *record.FakeRecorder) { - crdclientfake.AddToScheme(scheme.Scheme) - crdClient := crdclientfake.NewSimpleClientset() - kubeClient := kubeclientfake.NewSimpleClientset() - util.IngressCapabilities = map[string]bool{"networking.k8s.io/v1": true} - informerFactory := crdinformers.NewSharedInformerFactory(crdClient, 0*time.Second) - recorder := record.NewFakeRecorder(3) - - kubeClient.CoreV1().Nodes().Create(context.TODO(), &apiv1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: "node1", - }, - Status: apiv1.NodeStatus{ - Addresses: []apiv1.NodeAddress{ - { - Type: apiv1.NodeExternalIP, - Address: "12.34.56.78", - }, - }, - }, - }, metav1.CreateOptions{}) - - podInformerFactory := informers.NewSharedInformerFactory(kubeClient, 0*time.Second) - controller := newSparkApplicationController(crdClient, kubeClient, informerFactory, podInformerFactory, recorder, - &util.MetricConfig{}, "", "", nil, true) - - informer := informerFactory.Sparkoperator().V1beta2().SparkApplications().Informer() - if app != nil { - informer.GetIndexer().Add(app) - } - - podInformer := podInformerFactory.Core().V1().Pods().Informer() - for _, pod := range pods { - if pod != nil { - podInformer.GetIndexer().Add(pod) - } - } - return controller, recorder -} - -func TestOnAdd(t *testing.T) { - ctrl, _ := newFakeController(nil) - - app := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Status: v1beta2.SparkApplicationStatus{}, - } - ctrl.onAdd(app) - - item, _ := ctrl.queue.Get() - defer ctrl.queue.Done(item) - key, ok := item.(string) - assert.True(t, ok) - expectedKey, _ := cache.MetaNamespaceKeyFunc(app) - assert.Equal(t, expectedKey, key) - ctrl.queue.Forget(item) -} - -func TestOnUpdate(t *testing.T) { - ctrl, recorder := newFakeController(nil) - - appTemplate := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - ResourceVersion: "1", - }, - Spec: v1beta2.SparkApplicationSpec{ - Mode: v1beta2.ClusterMode, - Image: stringptr("foo-image:v1"), - Executor: v1beta2.ExecutorSpec{ - Instances: int32ptr(1), - }, - }, - } - - // Case1: Same Spec. - copyWithSameSpec := appTemplate.DeepCopy() - copyWithSameSpec.Status.ExecutionAttempts = 3 - copyWithSameSpec.ResourceVersion = "2" - - ctrl.onUpdate(appTemplate, copyWithSameSpec) - - // Verify that the SparkApplication was enqueued but no spec update events fired. 
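The "key" these queue assertions compare against is simply the `namespace/name` string produced by client-go's `cache.MetaNamespaceKeyFunc`; that string is all a workqueue item carries. A tiny standalone illustration:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"

	"github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
)

func main() {
	app := &v1beta2.SparkApplication{
		ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"},
	}
	// The controller enqueues objects under their "namespace/name" key.
	key, _ := cache.MetaNamespaceKeyFunc(app)
	fmt.Println(key) // default/foo
}
```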
- item, _ := ctrl.queue.Get() - key, ok := item.(string) - assert.True(t, ok) - expectedKey, _ := cache.MetaNamespaceKeyFunc(appTemplate) - assert.Equal(t, expectedKey, key) - ctrl.queue.Forget(item) - ctrl.queue.Done(item) - assert.Equal(t, 0, len(recorder.Events)) - - // Case2: Spec update failed. - copyWithSpecUpdate := appTemplate.DeepCopy() - copyWithSpecUpdate.Spec.Image = stringptr("foo-image:v2") - copyWithSpecUpdate.ResourceVersion = "2" - - ctrl.onUpdate(appTemplate, copyWithSpecUpdate) - - // Verify that update failed due to non-existence of SparkApplication. - assert.Equal(t, 1, len(recorder.Events)) - event := <-recorder.Events - assert.True(t, strings.Contains(event, "SparkApplicationSpecUpdateFailed")) - - // Case3: Spec update successful. - ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(appTemplate.Namespace).Create(context.TODO(), appTemplate, metav1.CreateOptions{}) - ctrl.onUpdate(appTemplate, copyWithSpecUpdate) - - // Verify App was enqueued. - item, _ = ctrl.queue.Get() - key, ok = item.(string) - assert.True(t, ok) - expectedKey, _ = cache.MetaNamespaceKeyFunc(appTemplate) - assert.Equal(t, expectedKey, key) - ctrl.queue.Forget(item) - ctrl.queue.Done(item) - // Verify that update was succeeded. - assert.Equal(t, 1, len(recorder.Events)) - event = <-recorder.Events - assert.True(t, strings.Contains(event, "SparkApplicationSpecUpdateProcessed")) - - // Verify the SparkApplication state was updated to InvalidatingState. - app, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(appTemplate.Namespace).Get(context.TODO(), appTemplate.Name, metav1.GetOptions{}) - assert.Nil(t, err) - assert.Equal(t, v1beta2.InvalidatingState, app.Status.AppState.State) -} - -func TestOnDelete(t *testing.T) { - ctrl, recorder := newFakeController(nil) - - app := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Status: v1beta2.SparkApplicationStatus{}, - } - ctrl.onAdd(app) - ctrl.queue.Get() - - ctrl.onDelete(app) - ctrl.queue.ShutDown() - item, _ := ctrl.queue.Get() - defer ctrl.queue.Done(item) - assert.True(t, item == nil) - event := <-recorder.Events - assert.True(t, strings.Contains(event, "SparkApplicationDeleted")) - ctrl.queue.Forget(item) -} - -func TestHelperProcessFailure(t *testing.T) { - if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { - return - } - os.Exit(2) -} - -func TestHelperProcessSuccess(t *testing.T) { - if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { - return - } - os.Exit(0) -} - -func fetchCounterValue(m *prometheus.CounterVec, labels map[string]string) float64 { - pb := &prometheus_model.Metric{} - m.With(labels).Write(pb) - - return pb.GetCounter().GetValue() -} - -type metrics struct { - submitMetricCount float64 - runningMetricCount float64 - successMetricCount float64 - failedMetricCount float64 -} - -type executorMetrics struct { - runningMetricCount float64 - successMetricCount float64 - failedMetricCount float64 -} - -func TestSyncSparkApplication_SubmissionFailed(t *testing.T) { - os.Setenv(sparkHomeEnvVar, "/spark") - os.Setenv(kubernetesServiceHostEnvVar, "localhost") - os.Setenv(kubernetesServicePortEnvVar, "443") - - restartPolicyOnFailure := v1beta2.RestartPolicy{ - Type: v1beta2.OnFailure, - OnFailureRetries: int32ptr(1), - OnFailureRetryInterval: int64ptr(100), - OnSubmissionFailureRetryInterval: int64ptr(100), - OnSubmissionFailureRetries: int32ptr(1), - } - app := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - 
Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyOnFailure, - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.NewState, - ErrorMessage: "", - }, - }, - } - - ctrl, recorder := newFakeController(app) - _, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(context.TODO(), app, metav1.CreateOptions{}) - if err != nil { - t.Fatal(err) - } - - execCommand = func(command string, args ...string) *exec.Cmd { - cs := []string{"-test.run=TestHelperProcessFailure", "--", command} - cs = append(cs, args...) - cmd := exec.Command(os.Args[0], cs...) - cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"} - return cmd - } - - // Attempt 1 - err = ctrl.syncSparkApplication("default/foo") - updatedApp, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), app.Name, metav1.GetOptions{}) - - assert.Equal(t, v1beta2.FailedSubmissionState, updatedApp.Status.AppState.State) - assert.Equal(t, int32(1), updatedApp.Status.SubmissionAttempts) - assert.Equal(t, float64(1), fetchCounterValue(ctrl.metrics.sparkAppCount, map[string]string{})) - assert.Equal(t, float64(0), fetchCounterValue(ctrl.metrics.sparkAppSubmitCount, map[string]string{})) - assert.Equal(t, float64(1), fetchCounterValue(ctrl.metrics.sparkAppFailedSubmissionCount, map[string]string{})) - - event := <-recorder.Events - assert.True(t, strings.Contains(event, "SparkApplicationAdded")) - event = <-recorder.Events - assert.True(t, strings.Contains(event, "SparkApplicationSubmissionFailed")) - - // Attempt 2: Retry again. - updatedApp.Status.LastSubmissionAttemptTime = metav1.Time{Time: metav1.Now().Add(-100 * time.Second)} - ctrl, recorder = newFakeController(updatedApp) - _, err = ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(context.TODO(), updatedApp, metav1.CreateOptions{}) - if err != nil { - t.Fatal(err) - } - err = ctrl.syncSparkApplication("default/foo") - - // Verify that the application failed again. - updatedApp, err = ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), app.Name, metav1.GetOptions{}) - assert.Nil(t, err) - assert.Equal(t, v1beta2.FailedSubmissionState, updatedApp.Status.AppState.State) - assert.Equal(t, int32(2), updatedApp.Status.SubmissionAttempts) - assert.Equal(t, float64(0), fetchCounterValue(ctrl.metrics.sparkAppSubmitCount, map[string]string{})) - - event = <-recorder.Events - assert.True(t, strings.Contains(event, "SparkApplicationSubmissionFailed")) - - // Attempt 3: No more retries. - updatedApp.Status.LastSubmissionAttemptTime = metav1.Time{Time: metav1.Now().Add(-100 * time.Second)} - ctrl, recorder = newFakeController(updatedApp) - _, err = ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(context.TODO(), updatedApp, metav1.CreateOptions{}) - if err != nil { - t.Fatal(err) - } - err = ctrl.syncSparkApplication("default/foo") - - // Verify that the application failed again. - updatedApp, err = ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), app.Name, metav1.GetOptions{}) - assert.Nil(t, err) - assert.Equal(t, v1beta2.FailedState, updatedApp.Status.AppState.State) - // No more submission attempts made. 
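The `execCommand` override used throughout this test follows the Go standard library's helper-process trick: re-run the test binary itself so that a designated test function impersonates spark-submit and exits with a chosen code, which the controller then observes as the submission result. A compact restatement of the pattern (`fakeExecCommand` is an illustrative name, not a helper from this repo):

```go
package sketch

import (
	"os"
	"os/exec"
)

// fakeExecCommand re-invokes the running test binary, telling `go test` to
// execute only the named helper function (e.g. TestHelperProcessFailure);
// the helper's os.Exit code stands in for the spark-submit exit code.
func fakeExecCommand(helper string) func(command string, args ...string) *exec.Cmd {
	return func(command string, args ...string) *exec.Cmd {
		cs := append([]string{"-test.run=" + helper, "--", command}, args...)
		cmd := exec.Command(os.Args[0], cs...)
		cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
		return cmd
	}
}
```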
- assert.Equal(t, int32(2), updatedApp.Status.SubmissionAttempts) -} - -func TestValidateDetectsNodeSelectorSuccessNoSelector(t *testing.T) { - ctrl, _ := newFakeController(nil) - - app := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - } - - err := ctrl.validateSparkApplication(app) - assert.Nil(t, err) -} - -func TestValidateDetectsNodeSelectorSuccessNodeSelectorAtAppLevel(t *testing.T) { - ctrl, _ := newFakeController(nil) - - app := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - NodeSelector: map[string]string{"mynode": "mygift"}, - }, - } - - err := ctrl.validateSparkApplication(app) - assert.Nil(t, err) -} - -func TestValidateDetectsNodeSelectorSuccessNodeSelectorAtPodLevel(t *testing.T) { - ctrl, _ := newFakeController(nil) - - app := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - Driver: v1beta2.DriverSpec{ - SparkPodSpec: v1beta2.SparkPodSpec{ - NodeSelector: map[string]string{"mynode": "mygift"}, - }, - }, - }, - } - - err := ctrl.validateSparkApplication(app) - assert.Nil(t, err) - - app.Spec.Executor = v1beta2.ExecutorSpec{ - SparkPodSpec: v1beta2.SparkPodSpec{ - NodeSelector: map[string]string{"mynode": "mygift"}, - }, - } - - err = ctrl.validateSparkApplication(app) - assert.Nil(t, err) -} - -func TestValidateDetectsNodeSelectorFailsAppAndPodLevel(t *testing.T) { - ctrl, _ := newFakeController(nil) - - app := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - NodeSelector: map[string]string{"mynode": "mygift"}, - Driver: v1beta2.DriverSpec{ - SparkPodSpec: v1beta2.SparkPodSpec{ - NodeSelector: map[string]string{"mynode": "mygift"}, - }, - }, - }, - } - - err := ctrl.validateSparkApplication(app) - assert.NotNil(t, err) - - app.Spec.Executor = v1beta2.ExecutorSpec{ - SparkPodSpec: v1beta2.SparkPodSpec{ - NodeSelector: map[string]string{"mynode": "mygift"}, - }, - } - - err = ctrl.validateSparkApplication(app) - assert.NotNil(t, err) -} - -func TestShouldRetry(t *testing.T) { - type testcase struct { - app *v1beta2.SparkApplication - shouldRetry bool - } - - testFn := func(test testcase, t *testing.T) { - shouldRetry := shouldRetry(test.app) - assert.Equal(t, test.shouldRetry, shouldRetry) - } - - restartPolicyAlways := v1beta2.RestartPolicy{ - Type: v1beta2.Always, - OnSubmissionFailureRetryInterval: int64ptr(100), - OnFailureRetryInterval: int64ptr(100), - } - - restartPolicyNever := v1beta2.RestartPolicy{ - Type: v1beta2.Never, - } - - restartPolicyOnFailure := v1beta2.RestartPolicy{ - Type: v1beta2.OnFailure, - OnFailureRetries: int32ptr(1), - OnFailureRetryInterval: int64ptr(100), - OnSubmissionFailureRetryInterval: int64ptr(100), - OnSubmissionFailureRetries: int32ptr(2), - } - - testcases := []testcase{ - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }}, - shouldRetry: false, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyAlways, - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.SucceedingState, - }, - }, - }, - shouldRetry: true, - }, - { - app: &v1beta2.SparkApplication{ - 
ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyOnFailure, - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.SucceedingState, - }, - }, - }, - shouldRetry: false, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyOnFailure, - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.FailingState, - }, - }, - }, - shouldRetry: true, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyNever, - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.FailingState, - }, - }, - }, - shouldRetry: false, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyNever, - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.FailedSubmissionState, - }, - }, - }, - shouldRetry: false, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyOnFailure, - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.FailedSubmissionState, - }, - }, - }, - shouldRetry: true, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyAlways, - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.PendingRerunState, - }, - }, - }, - shouldRetry: false, - }, - } - - for _, test := range testcases { - testFn(test, t) - } -} - -func TestSyncSparkApplication_SubmissionSuccess(t *testing.T) { - type testcase struct { - app *v1beta2.SparkApplication - expectedState v1beta2.ApplicationStateType - } - os.Setenv(sparkHomeEnvVar, "/spark") - os.Setenv(kubernetesServiceHostEnvVar, "localhost") - os.Setenv(kubernetesServicePortEnvVar, "443") - - testFn := func(test testcase, t *testing.T) { - ctrl, _ := newFakeController(test.app) - _, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(test.app.Namespace).Create(context.TODO(), test.app, metav1.CreateOptions{}) - if err != nil { - t.Fatal(err) - } - - execCommand = func(command string, args ...string) *exec.Cmd { - cs := []string{"-test.run=TestHelperProcessSuccess", "--", command} - cs = append(cs, args...) - cmd := exec.Command(os.Args[0], cs...) 
- cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"} - return cmd - } - - err = ctrl.syncSparkApplication(fmt.Sprintf("%s/%s", test.app.Namespace, test.app.Name)) - assert.Nil(t, err) - updatedApp, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(test.app.Namespace).Get(context.TODO(), test.app.Name, metav1.GetOptions{}) - assert.Nil(t, err) - assert.Equal(t, test.expectedState, updatedApp.Status.AppState.State) - if test.app.Status.AppState.State == v1beta2.NewState { - assert.Equal(t, float64(1), fetchCounterValue(ctrl.metrics.sparkAppCount, map[string]string{})) - } - if test.expectedState == v1beta2.SubmittedState { - assert.Equal(t, float64(1), fetchCounterValue(ctrl.metrics.sparkAppSubmitCount, map[string]string{})) - } - } - - restartPolicyAlways := v1beta2.RestartPolicy{ - Type: v1beta2.Always, - OnSubmissionFailureRetryInterval: int64ptr(100), - OnFailureRetryInterval: int64ptr(100), - } - - restartPolicyNever := v1beta2.RestartPolicy{ - Type: v1beta2.Never, - } - - restartPolicyOnFailure := v1beta2.RestartPolicy{ - Type: v1beta2.OnFailure, - OnFailureRetries: int32ptr(1), - OnFailureRetryInterval: int64ptr(100), - OnSubmissionFailureRetryInterval: int64ptr(100), - OnSubmissionFailureRetries: int32ptr(2), - } - - testcases := []testcase{ - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }}, - expectedState: v1beta2.SubmittedState, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyAlways, - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.SucceedingState, - }, - }, - }, - expectedState: v1beta2.PendingRerunState, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyAlways, - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.PendingRerunState, - }, - }, - }, - expectedState: v1beta2.SubmittedState, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyAlways, - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.FailedSubmissionState, - }, - LastSubmissionAttemptTime: metav1.Time{Time: metav1.Now().Add(-2000 * time.Second)}, - }, - }, - expectedState: v1beta2.FailedSubmissionState, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyAlways, - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.FailedSubmissionState, - }, - SubmissionAttempts: 1, - LastSubmissionAttemptTime: metav1.Time{Time: metav1.Now().Add(-2000 * time.Second)}, - }, - }, - expectedState: v1beta2.SubmittedState, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyAlways, - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.FailingState, - }, - ExecutionAttempts: 1, - TerminationTime: metav1.Time{Time: metav1.Now().Add(-2000 * 
time.Second)}, - }, - }, - expectedState: v1beta2.PendingRerunState, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyAlways, - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.FailingState, - }, - TerminationTime: metav1.Time{Time: metav1.Now().Add(-2000 * time.Second)}, - }, - }, - expectedState: v1beta2.FailingState, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyNever, - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.InvalidatingState, - }, - TerminationTime: metav1.Time{Time: metav1.Now().Add(-2000 * time.Second)}, - }, - }, - expectedState: v1beta2.PendingRerunState, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyNever, - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.SucceedingState, - }, - }, - }, - expectedState: v1beta2.CompletedState, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyNever, - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.NewState, - }, - }, - }, - expectedState: v1beta2.SubmittedState, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.FailingState, - }, - ExecutionAttempts: 2, - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyOnFailure, - }, - }, - expectedState: v1beta2.FailedState, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.FailingState, - }, - ExecutionAttempts: 1, - TerminationTime: metav1.Now(), - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyOnFailure, - }, - }, - expectedState: v1beta2.FailingState, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.FailingState, - }, - ExecutionAttempts: 1, - TerminationTime: metav1.Time{Time: metav1.Now().Add(-2000 * time.Second)}, - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyOnFailure, - }, - }, - expectedState: v1beta2.PendingRerunState, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.FailedSubmissionState, - }, - SubmissionAttempts: 3, - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyOnFailure, - }, - }, - expectedState: v1beta2.FailedState, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Status: v1beta2.SparkApplicationStatus{ 
- AppState: v1beta2.ApplicationState{ - State: v1beta2.FailedSubmissionState, - }, - SubmissionAttempts: 1, - LastSubmissionAttemptTime: metav1.Now(), - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyOnFailure, - }, - }, - expectedState: v1beta2.FailedSubmissionState, - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.FailedSubmissionState, - }, - SubmissionAttempts: 1, - LastSubmissionAttemptTime: metav1.Time{Time: metav1.Now().Add(-2000 * time.Second)}, - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: restartPolicyOnFailure, - }, - }, - expectedState: v1beta2.SubmittedState, - }, - } - - for _, test := range testcases { - testFn(test, t) - } -} - -func TestSyncSparkApplication_ExecutingState(t *testing.T) { - type testcase struct { - appName string - oldAppStatus v1beta2.ApplicationStateType - oldExecutorStatus map[string]v1beta2.ExecutorState - driverPod *apiv1.Pod - executorPod *apiv1.Pod - expectedAppState v1beta2.ApplicationStateType - expectedExecutorState map[string]v1beta2.ExecutorState - expectedAppMetrics metrics - expectedExecutorMetrics executorMetrics - } - - os.Setenv(kubernetesServiceHostEnvVar, "localhost") - os.Setenv(kubernetesServicePortEnvVar, "443") - - appName := "foo" - driverPodName := appName + "-driver" - - app := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: appName, - Namespace: "test", - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: v1beta2.RestartPolicy{ - Type: v1beta2.Never, - }, - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.SubmittedState, - ErrorMessage: "", - }, - DriverInfo: v1beta2.DriverInfo{ - PodName: driverPodName, - }, - ExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorRunningState}, - }, - } - - testcases := []testcase{ - { - appName: appName, - oldAppStatus: v1beta2.SubmittedState, - oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorRunningState}, - expectedAppState: v1beta2.FailingState, - expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorFailedState}, - expectedAppMetrics: metrics{ - failedMetricCount: 1, - }, - expectedExecutorMetrics: executorMetrics{ - failedMetricCount: 1, - }, - }, - { - appName: appName, - oldAppStatus: v1beta2.SubmittedState, - oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorRunningState}, - driverPod: &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: driverPodName, - Namespace: "test", - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.SparkAppNameLabel: appName, - }, - ResourceVersion: "1", - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodRunning, - }, - }, - executorPod: &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "exec-1", - Namespace: "test", - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.SparkAppNameLabel: appName, - }, - ResourceVersion: "1", - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodSucceeded, - }, - }, - expectedAppState: v1beta2.RunningState, - expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorCompletedState}, - expectedAppMetrics: metrics{ - runningMetricCount: 1, - }, - expectedExecutorMetrics: executorMetrics{ - successMetricCount: 1, - }, - }, - { - appName: appName, - 
oldAppStatus: v1beta2.RunningState, - oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorRunningState}, - driverPod: &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: driverPodName, - Namespace: "test", - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.SparkAppNameLabel: appName, - }, - ResourceVersion: "1", - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodRunning, - ContainerStatuses: []apiv1.ContainerStatus{ - { - Name: config.SparkDriverContainerName, - State: apiv1.ContainerState{ - Running: &apiv1.ContainerStateRunning{}, - }, - }, - { - Name: "sidecar", - State: apiv1.ContainerState{ - Terminated: &apiv1.ContainerStateTerminated{ - ExitCode: 0, - }, - }, - }, - }, - }, - }, - executorPod: &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "exec-1", - Namespace: "test", - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.SparkAppNameLabel: appName, - }, - ResourceVersion: "1", - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodSucceeded, - }, - }, - expectedAppState: v1beta2.RunningState, - expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorCompletedState}, - expectedAppMetrics: metrics{}, - expectedExecutorMetrics: executorMetrics{ - successMetricCount: 1, - }, - }, - { - appName: appName, - oldAppStatus: v1beta2.RunningState, - oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorRunningState}, - driverPod: &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: driverPodName, - Namespace: "test", - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.SparkAppNameLabel: appName, - }, - ResourceVersion: "1", - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodRunning, - ContainerStatuses: []apiv1.ContainerStatus{ - { - Name: config.SparkDriverContainerName, - State: apiv1.ContainerState{ - Terminated: &apiv1.ContainerStateTerminated{ - ExitCode: 0, - }, - }, - }, - { - Name: "sidecar", - State: apiv1.ContainerState{ - Running: &apiv1.ContainerStateRunning{}, - }, - }, - }, - }, - }, - executorPod: &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "exec-1", - Namespace: "test", - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.SparkAppNameLabel: appName, - }, - ResourceVersion: "1", - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodSucceeded, - }, - }, - expectedAppState: v1beta2.SucceedingState, - expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorCompletedState}, - expectedAppMetrics: metrics{ - successMetricCount: 1, - }, - expectedExecutorMetrics: executorMetrics{ - successMetricCount: 1, - }, - }, - { - appName: appName, - oldAppStatus: v1beta2.RunningState, - oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorRunningState}, - driverPod: &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: driverPodName, - Namespace: "test", - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.SparkAppNameLabel: appName, - }, - ResourceVersion: "1", - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodRunning, - ContainerStatuses: []apiv1.ContainerStatus{ - { - Name: config.SparkDriverContainerName, - State: apiv1.ContainerState{ - Terminated: &apiv1.ContainerStateTerminated{ - ExitCode: 137, - Reason: "OOMKilled", - }, - }, - }, - { - Name: "sidecar", - State: apiv1.ContainerState{ - Running: &apiv1.ContainerStateRunning{}, - }, - }, - }, - }, - }, - executorPod: &apiv1.Pod{ - 
ObjectMeta: metav1.ObjectMeta{ - Name: "exec-1", - Namespace: "test", - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.SparkAppNameLabel: appName, - }, - ResourceVersion: "1", - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodSucceeded, - }, - }, - expectedAppState: v1beta2.FailingState, - expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorCompletedState}, - expectedAppMetrics: metrics{ - failedMetricCount: 1, - }, - expectedExecutorMetrics: executorMetrics{ - successMetricCount: 1, - }, - }, - { - appName: appName, - oldAppStatus: v1beta2.RunningState, - oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorRunningState}, - driverPod: &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: driverPodName, - Namespace: "test", - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.SparkAppNameLabel: appName, - }, - ResourceVersion: "1", - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodFailed, - ContainerStatuses: []apiv1.ContainerStatus{ - { - Name: config.SparkDriverContainerName, - State: apiv1.ContainerState{ - Terminated: &apiv1.ContainerStateTerminated{ - ExitCode: 137, - Reason: "OOMKilled", - }, - }, - }, - }, - }, - }, - executorPod: &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "exec-1", - Namespace: "test", - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.SparkAppNameLabel: appName, - }, - ResourceVersion: "1", - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodFailed, - ContainerStatuses: []apiv1.ContainerStatus{ - { - Name: config.SparkExecutorContainerName, - State: apiv1.ContainerState{ - Terminated: &apiv1.ContainerStateTerminated{ - ExitCode: 137, - Reason: "OOMKilled", - }, - }, - }, - }, - }, - }, - expectedAppState: v1beta2.FailingState, - expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorFailedState}, - expectedAppMetrics: metrics{ - failedMetricCount: 1, - }, - expectedExecutorMetrics: executorMetrics{ - failedMetricCount: 1, - }, - }, - { - appName: appName, - oldAppStatus: v1beta2.RunningState, - oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorRunningState}, - driverPod: &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: driverPodName, - Namespace: "test", - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.SparkAppNameLabel: appName, - }, - ResourceVersion: "1", - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodFailed, - ContainerStatuses: []apiv1.ContainerStatus{ - { - Name: config.SparkDriverContainerName, - State: apiv1.ContainerState{ - Terminated: &apiv1.ContainerStateTerminated{ - ExitCode: 0, - }, - }, - }, - { - Name: "sidecar", - State: apiv1.ContainerState{ - Terminated: &apiv1.ContainerStateTerminated{ - ExitCode: 137, - Reason: "OOMKilled", - }, - }, - }, - }, - }, - }, - executorPod: &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "exec-1", - Namespace: "test", - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.SparkAppNameLabel: appName, - }, - ResourceVersion: "1", - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodSucceeded, - }, - }, - expectedAppState: v1beta2.SucceedingState, - expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorCompletedState}, - expectedAppMetrics: metrics{ - successMetricCount: 1, - }, - expectedExecutorMetrics: executorMetrics{ - successMetricCount: 1, - }, - }, - { - appName: appName, - 
oldAppStatus: v1beta2.FailingState, - oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorFailedState}, - expectedAppState: v1beta2.FailedState, - expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorFailedState}, - expectedAppMetrics: metrics{}, - expectedExecutorMetrics: executorMetrics{}, - }, - { - appName: appName, - oldAppStatus: v1beta2.RunningState, - oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorRunningState}, - driverPod: &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: driverPodName, - Namespace: "test", - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.SparkAppNameLabel: appName, - }, - ResourceVersion: "1", - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodSucceeded, - }, - }, - executorPod: &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "exec-1", - Namespace: "test", - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.SparkAppNameLabel: appName, - }, - ResourceVersion: "1", - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodSucceeded, - }, - }, - expectedAppState: v1beta2.SucceedingState, - expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorCompletedState}, - expectedAppMetrics: metrics{ - successMetricCount: 1, - }, - expectedExecutorMetrics: executorMetrics{ - successMetricCount: 1, - }, - }, - { - appName: appName, - oldAppStatus: v1beta2.SucceedingState, - oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorCompletedState}, - expectedAppState: v1beta2.CompletedState, - expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorCompletedState}, - expectedAppMetrics: metrics{}, - expectedExecutorMetrics: executorMetrics{}, - }, - { - appName: appName, - oldAppStatus: v1beta2.SubmittedState, - oldExecutorStatus: map[string]v1beta2.ExecutorState{}, - driverPod: &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: driverPodName, - Namespace: "test", - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.SparkAppNameLabel: appName, - }, - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodUnknown, - }, - }, - executorPod: &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "exec-1", - Namespace: "test", - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.SparkAppNameLabel: appName, - }, - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodPending, - }, - }, - expectedAppState: v1beta2.UnknownState, - expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorPendingState}, - expectedAppMetrics: metrics{}, - expectedExecutorMetrics: executorMetrics{}, - }, - { - appName: appName, - oldAppStatus: v1beta2.CompletedState, - oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorPendingState}, - driverPod: &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: driverPodName, - Namespace: "test", - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.SparkAppNameLabel: appName, - }, - ResourceVersion: "1", - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodSucceeded, - }, - }, - expectedAppState: v1beta2.CompletedState, - expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorCompletedState}, - expectedAppMetrics: metrics{}, - expectedExecutorMetrics: executorMetrics{ - successMetricCount: 1, - }, - }, - { - appName: appName, - oldAppStatus: v1beta2.RunningState, - 
oldExecutorStatus: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorRunningState}, - driverPod: &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: driverPodName, - Namespace: "test", - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.SparkAppNameLabel: appName, - }, - ResourceVersion: "1", - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodRunning, - }, - }, - expectedAppState: v1beta2.RunningState, - expectedExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorUnknownState}, - expectedAppMetrics: metrics{}, - expectedExecutorMetrics: executorMetrics{}, - }, - } - - testFn := func(test testcase, t *testing.T) { - app.Status.AppState.State = test.oldAppStatus - app.Status.ExecutorState = test.oldExecutorStatus - app.Name = test.appName - app.Status.ExecutionAttempts = 1 - ctrl, _ := newFakeController(app, test.driverPod, test.executorPod) - _, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(context.TODO(), app, metav1.CreateOptions{}) - if err != nil { - t.Fatal(err) - } - if test.driverPod != nil { - ctrl.kubeClient.CoreV1().Pods(app.Namespace).Create(context.TODO(), test.driverPod, metav1.CreateOptions{}) - } - if test.executorPod != nil { - ctrl.kubeClient.CoreV1().Pods(app.Namespace).Create(context.TODO(), test.executorPod, metav1.CreateOptions{}) - } - - err = ctrl.syncSparkApplication(fmt.Sprintf("%s/%s", app.Namespace, app.Name)) - assert.Nil(t, err) - // Verify application and executor states. - updatedApp, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), app.Name, metav1.GetOptions{}) - assert.Equal(t, test.expectedAppState, updatedApp.Status.AppState.State) - assert.Equal(t, test.expectedExecutorState, updatedApp.Status.ExecutorState) - - // Validate error message if the driver pod failed. - if test.driverPod != nil && test.driverPod.Status.Phase == apiv1.PodFailed { - if len(test.driverPod.Status.ContainerStatuses) > 0 && test.driverPod.Status.ContainerStatuses[0].State.Terminated != nil { - if test.driverPod.Status.ContainerStatuses[0].State.Terminated.ExitCode != 0 { - assert.Equal(t, updatedApp.Status.AppState.ErrorMessage, - fmt.Sprintf("driver container failed with ExitCode: %d, Reason: %s", test.driverPod.Status.ContainerStatuses[0].State.Terminated.ExitCode, test.driverPod.Status.ContainerStatuses[0].State.Terminated.Reason)) - } - } else { - assert.Equal(t, updatedApp.Status.AppState.ErrorMessage, "driver container status missing") - } - } - - // Verify application metrics. - assert.Equal(t, test.expectedAppMetrics.runningMetricCount, ctrl.metrics.sparkAppRunningCount.Value(map[string]string{})) - assert.Equal(t, test.expectedAppMetrics.successMetricCount, fetchCounterValue(ctrl.metrics.sparkAppSuccessCount, map[string]string{})) - assert.Equal(t, test.expectedAppMetrics.submitMetricCount, fetchCounterValue(ctrl.metrics.sparkAppSubmitCount, map[string]string{})) - assert.Equal(t, test.expectedAppMetrics.failedMetricCount, fetchCounterValue(ctrl.metrics.sparkAppFailureCount, map[string]string{})) - - // Verify executor metrics. 
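Taken together, the table above pins down a pod-phase to executor-state translation, plus special cases (a missing executor pod becomes Unknown, and a terminal application marks executors Completed) that the sketch below omits. A hypothetical restatement of just the phase mapping; the real translation lives in the controller, not in a helper with this name:

```go
package sketch

import (
	apiv1 "k8s.io/api/core/v1"

	"github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
)

// executorStateFromPodPhase restates the mapping the testcases exercise:
// Pending->Pending, Running->Running, Succeeded->Completed, Failed->Failed,
// anything else (e.g. PodUnknown) -> Unknown.
func executorStateFromPodPhase(phase apiv1.PodPhase) v1beta2.ExecutorState {
	switch phase {
	case apiv1.PodPending:
		return v1beta2.ExecutorPendingState
	case apiv1.PodRunning:
		return v1beta2.ExecutorRunningState
	case apiv1.PodSucceeded:
		return v1beta2.ExecutorCompletedState
	case apiv1.PodFailed:
		return v1beta2.ExecutorFailedState
	default:
		return v1beta2.ExecutorUnknownState
	}
}
```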
- assert.Equal(t, test.expectedExecutorMetrics.runningMetricCount, ctrl.metrics.sparkAppExecutorRunningCount.Value(map[string]string{})) - assert.Equal(t, test.expectedExecutorMetrics.successMetricCount, fetchCounterValue(ctrl.metrics.sparkAppExecutorSuccessCount, map[string]string{})) - assert.Equal(t, test.expectedExecutorMetrics.failedMetricCount, fetchCounterValue(ctrl.metrics.sparkAppExecutorFailureCount, map[string]string{})) - } - - for _, test := range testcases { - testFn(test, t) - } -} - -func TestSyncSparkApplication_ApplicationExpired(t *testing.T) { - os.Setenv(kubernetesServiceHostEnvVar, "localhost") - os.Setenv(kubernetesServicePortEnvVar, "443") - - appName := "foo" - driverPodName := appName + "-driver" - - now := time.Now() - terminationTime := now.Add(-2 * time.Second) - app := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: appName, - Namespace: "test", - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: v1beta2.RestartPolicy{ - Type: v1beta2.Never, - }, - TimeToLiveSeconds: int64ptr(1), - }, - Status: v1beta2.SparkApplicationStatus{ - AppState: v1beta2.ApplicationState{ - State: v1beta2.CompletedState, - ErrorMessage: "", - }, - DriverInfo: v1beta2.DriverInfo{ - PodName: driverPodName, - }, - TerminationTime: metav1.Time{ - Time: terminationTime, - }, - ExecutorState: map[string]v1beta2.ExecutorState{"exec-1": v1beta2.ExecutorCompletedState}, - }, - } - - ctrl, _ := newFakeController(app) - _, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(context.TODO(), app, metav1.CreateOptions{}) - if err != nil { - t.Fatal(err) - } - err = ctrl.syncSparkApplication(fmt.Sprintf("%s/%s", app.Namespace, app.Name)) - assert.Nil(t, err) - - _, err = ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), app.Name, metav1.GetOptions{}) - assert.True(t, errors.IsNotFound(err)) -} - -func TestIsNextRetryDue(t *testing.T) { - // Failure cases. - assert.False(t, isNextRetryDue(nil, 3, metav1.Time{Time: metav1.Now().Add(-100 * time.Second)})) - assert.False(t, isNextRetryDue(int64ptr(5), 0, metav1.Time{Time: metav1.Now().Add(-100 * time.Second)})) - assert.False(t, isNextRetryDue(int64ptr(5), 3, metav1.Time{})) - // Not enough time passed. 
- assert.False(t, isNextRetryDue(int64ptr(50), 3, metav1.Time{Time: metav1.Now().Add(-100 * time.Second)})) - assert.True(t, isNextRetryDue(int64ptr(50), 3, metav1.Time{Time: metav1.Now().Add(-151 * time.Second)})) -} - -func TestIngressWithSubpathAffectsSparkConfiguration(t *testing.T) { - os.Setenv(kubernetesServiceHostEnvVar, "localhost") - os.Setenv(kubernetesServicePortEnvVar, "443") - - appName := "ingressaffectssparkconfig" - - app := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: appName, - Namespace: "test", - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: v1beta2.RestartPolicy{ - Type: v1beta2.Never, - }, - TimeToLiveSeconds: int64ptr(1), - }, - Status: v1beta2.SparkApplicationStatus{}, - } - - ctrl, _ := newFakeController(app) - ctrl.ingressURLFormat = "example.com/{{$appNamespace}}/{{$appName}}" - ctrl.enableUIService = true - _, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(context.TODO(), app, metav1.CreateOptions{}) - if err != nil { - t.Fatal(err) - } - err = ctrl.syncSparkApplication(fmt.Sprintf("%s/%s", app.Namespace, app.Name)) - assert.Nil(t, err) - deployedApp, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), app.Name, metav1.GetOptions{}) - if err != nil { - t.Fatal(err) - } - ingresses, err := ctrl.kubeClient.NetworkingV1().Ingresses(app.Namespace).List(context.TODO(), metav1.ListOptions{}) - if err != nil { - t.Fatal(err) - } - if ingresses == nil || ingresses.Items == nil || len(ingresses.Items) != 1 { - t.Fatal("The ingress does not exist, has no items, or has the wrong number of items") - } - if ingresses.Items[0].Spec.Rules[0].IngressRuleValue.HTTP.Paths[0].Path != "/"+app.Namespace+"/"+app.Name+"(/|$)(.*)" { - t.Fatal("The ingress subpath was not created successfully.") - } - // The controller doesn't sync changes to the sparkConf performed by submitSparkApplication back to the Kubernetes API server. 
- if deployedApp.Spec.SparkConf["spark.ui.proxyBase"] != "/"+app.Namespace+"/"+app.Name { - t.Log("The spark configuration does not reflect the subpath expected by the ingress") - } - if deployedApp.Spec.SparkConf["spark.ui.proxyRedirectUri"] != "/" { - t.Log("The spark configuration does not reflect the proxyRedirectUri expected by the ingress") - } -} - -func TestIngressWithClassName(t *testing.T) { - os.Setenv(kubernetesServiceHostEnvVar, "localhost") - os.Setenv(kubernetesServicePortEnvVar, "443") - - appName := "ingressaffectssparkconfig" - - app := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: appName, - Namespace: "test", - }, - Spec: v1beta2.SparkApplicationSpec{ - RestartPolicy: v1beta2.RestartPolicy{ - Type: v1beta2.Never, - }, - TimeToLiveSeconds: int64ptr(1), - }, - Status: v1beta2.SparkApplicationStatus{}, - } - - ctrl, _ := newFakeController(app) - ctrl.ingressURLFormat = "{{$appNamespace}}.{{$appName}}.example.com" - ctrl.ingressClassName = "nginx" - ctrl.enableUIService = true - _, err := ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Create(context.TODO(), app, metav1.CreateOptions{}) - if err != nil { - t.Fatal(err) - } - err = ctrl.syncSparkApplication(fmt.Sprintf("%s/%s", app.Namespace, app.Name)) - assert.Nil(t, err) - _, err = ctrl.crdClient.SparkoperatorV1beta2().SparkApplications(app.Namespace).Get(context.TODO(), app.Name, metav1.GetOptions{}) - if err != nil { - t.Fatal(err) - } - ingresses, err := ctrl.kubeClient.NetworkingV1().Ingresses(app.Namespace).List(context.TODO(), metav1.ListOptions{}) - if err != nil { - t.Fatal(err) - } - if ingresses == nil || ingresses.Items == nil || len(ingresses.Items) != 1 { - t.Fatal("The ingress does not exist, has no items, or has the wrong number of items") - } - if ingresses.Items[0].Spec.IngressClassName == nil || *ingresses.Items[0].Spec.IngressClassName != "nginx" { - t.Fatal("The ingressClassName does not exist, or the wrong value is set") - } -} - -func stringptr(s string) *string { - return &s -} - -func int32ptr(n int32) *int32 { - return &n -} diff --git a/pkg/controller/sparkapplication/driveringress_test.go b/pkg/controller/sparkapplication/driveringress_test.go deleted file mode 100644 index ef1dedc672..0000000000 --- a/pkg/controller/sparkapplication/driveringress_test.go +++ /dev/null @@ -1,730 +0,0 @@ -/* -Copyright 2024 spark-operator contributors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package sparkapplication - -import ( - "context" - "fmt" - "reflect" - "testing" - - apiv1 "k8s.io/api/core/v1" - networkingv1 "k8s.io/api/networking/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/client-go/kubernetes/fake" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - "github.com/kubeflow/spark-operator/pkg/config" - "github.com/kubeflow/spark-operator/pkg/util" -) - -func TestCreateDriverIngressService(t *testing.T) { - type testcase struct { - name string - app *v1beta2.SparkApplication - expectedServices []SparkService - expectedSelector map[string]string - expectError bool - } - testFn := func(test testcase, t *testing.T) { - fakeClient := fake.NewSimpleClientset() - util.IngressCapabilities = map[string]bool{"networking.k8s.io/v1": true} - if len(test.expectedServices) != len(test.app.Spec.DriverIngressOptions) { - t.Errorf("%s: size of test.expectedServices (%d) and test.app.Spec.DriverIngressOptions (%d) is different for %s", - test.name, len(test.expectedServices), len(test.app.Spec.DriverIngressOptions), test.app.Name) - } - for i, driverIngressConfiguration := range test.app.Spec.DriverIngressOptions { - sparkService, err := createDriverIngressServiceFromConfiguration(test.app, &driverIngressConfiguration, fakeClient) - if err != nil { - if test.expectError { - return - } - t.Fatal(err) - } - expectedService := test.expectedServices[i] - if sparkService.serviceName != expectedService.serviceName { - t.Errorf("%s: for service name wanted %s got %s", test.name, expectedService.serviceName, sparkService.serviceName) - } - service, err := fakeClient.CoreV1(). - Services(test.app.Namespace). - Get(context.TODO(), sparkService.serviceName, metav1.GetOptions{}) - if err != nil { - if test.expectError { - return - } - t.Fatal(err) - } - if service.Labels[config.SparkAppNameLabel] != test.app.Name { - t.Errorf("%s: service of app %s has the wrong labels", test.name, test.app.Name) - } - if !reflect.DeepEqual(test.expectedSelector, service.Spec.Selector) { - t.Errorf("%s: for label selector wanted %s got %s", test.name, test.expectedSelector, service.Spec.Selector) - } - if service.Spec.Type != expectedService.serviceType { - t.Errorf("%s: for service type wanted %s got %s", test.name, expectedService.serviceType, service.Spec.Type) - } - if len(service.Spec.Ports) != 1 { - t.Errorf("%s: wanted a single port got %d ports", test.name, len(service.Spec.Ports)) - } - port := service.Spec.Ports[0] - if port.Port != expectedService.servicePort { - t.Errorf("%s: unexpected port wanted %d got %d", test.name, expectedService.servicePort, port.Port) - } - if port.Name != expectedService.servicePortName { - t.Errorf("%s: unexpected port name wanted %s got %s", test.name, expectedService.servicePortName, port.Name) - } - serviceAnnotations := service.ObjectMeta.Annotations - if !reflect.DeepEqual(serviceAnnotations, expectedService.serviceAnnotations) { - t.Errorf("%s: unexpected annotations wanted %s got %s", test.name, expectedService.serviceAnnotations, serviceAnnotations) - } - serviceLabels := service.ObjectMeta.Labels - if !reflect.DeepEqual(serviceLabels, expectedService.serviceLabels) { - t.Errorf("%s: unexpected labels wanted %s got %s", test.name, expectedService.serviceLabels, serviceLabels) - } - } - } - serviceNameFormat := "%s-driver-%d" - portNameFormat := "driver-ing-%d" - app1 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo1", - Namespace: "default", - UID: 
"foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ - { - ServicePort: int32ptr(8888), - }, - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-1", - ExecutionAttempts: 1, - }, - } - app2 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo2", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ - { - ServicePort: int32ptr(8888), - }, - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-2", - ExecutionAttempts: 2, - }, - } - app3 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo3", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ - { - ServicePort: nil, - }, - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-3", - }, - } - var appPort int32 = 80 - app4 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo4", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ - { - ServicePort: &appPort, - }, - }, - SparkConf: map[string]string{ - sparkUIPortConfigurationKey: "4041", - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-3", - }, - } - var serviceTypeNodePort apiv1.ServiceType = apiv1.ServiceTypeNodePort - app5 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo5", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ - { - ServicePort: int32ptr(8888), - ServiceType: &serviceTypeNodePort, - }, - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-2", - ExecutionAttempts: 2, - }, - } - appPortName := "http-spark-test" - app6 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo6", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ - { - ServicePort: &appPort, - ServicePortName: &appPortName, - }, - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-6", - }, - } - app7 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo7", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ - { - ServicePort: int32ptr(8888), - ServiceAnnotations: map[string]string{ - "key": "value", - }, - }, - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-7", - ExecutionAttempts: 1, - }, - } - app8 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo8", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ - { - ServicePort: int32ptr(8888), - ServiceLabels: map[string]string{ - "sparkoperator.k8s.io/app-name": "foo8", - "key": "value", - }, - }, - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-8", - ExecutionAttempts: 1, - }, - } - testcases := []testcase{ - { - name: "service with custom serviceport and serviceport and target port are same", - app: app1, - expectedServices: []SparkService{ - { - serviceName: 
fmt.Sprintf(serviceNameFormat, app1.GetName(), *app1.Spec.DriverIngressOptions[0].ServicePort), - serviceType: apiv1.ServiceTypeClusterIP, - servicePortName: fmt.Sprintf(portNameFormat, *app1.Spec.DriverIngressOptions[0].ServicePort), - servicePort: *app1.Spec.DriverIngressOptions[0].ServicePort, - serviceLabels: map[string]string{ - "sparkoperator.k8s.io/app-name": "foo1", - }, - targetPort: intstr.IntOrString{ - Type: intstr.Int, - IntVal: int32(*app1.Spec.DriverIngressOptions[0].ServicePort), - }, - }, - }, - expectedSelector: map[string]string{ - config.SparkAppNameLabel: "foo1", - config.SparkRoleLabel: config.SparkDriverRole, - }, - expectError: false, - }, - { - name: "service with default port", - app: app2, - expectedServices: []SparkService{ - { - serviceName: fmt.Sprintf(serviceNameFormat, app2.GetName(), *app2.Spec.DriverIngressOptions[0].ServicePort), - serviceType: apiv1.ServiceTypeClusterIP, - servicePortName: fmt.Sprintf(portNameFormat, *app2.Spec.DriverIngressOptions[0].ServicePort), - servicePort: int32(*app2.Spec.DriverIngressOptions[0].ServicePort), - serviceLabels: map[string]string{ - "sparkoperator.k8s.io/app-name": "foo2", - }, - }, - }, - expectedSelector: map[string]string{ - config.SparkAppNameLabel: "foo2", - config.SparkRoleLabel: config.SparkDriverRole, - }, - expectError: false, - }, - { - name: "service with custom serviceport and serviceport and target port are different", - app: app4, - expectedServices: []SparkService{ - { - serviceName: fmt.Sprintf(serviceNameFormat, app4.GetName(), *app4.Spec.DriverIngressOptions[0].ServicePort), - serviceType: apiv1.ServiceTypeClusterIP, - servicePortName: fmt.Sprintf(portNameFormat, *app4.Spec.DriverIngressOptions[0].ServicePort), - servicePort: 80, - serviceLabels: map[string]string{ - "sparkoperator.k8s.io/app-name": "foo4", - }, - targetPort: intstr.IntOrString{ - Type: intstr.Int, - IntVal: int32(4041), - }, - }, - }, - expectedSelector: map[string]string{ - config.SparkAppNameLabel: "foo4", - config.SparkRoleLabel: config.SparkDriverRole, - }, - expectError: false, - }, - { - name: "service with custom servicetype", - app: app5, - expectedServices: []SparkService{ - { - serviceName: fmt.Sprintf(serviceNameFormat, app5.GetName(), *app5.Spec.DriverIngressOptions[0].ServicePort), - serviceType: apiv1.ServiceTypeNodePort, - servicePortName: fmt.Sprintf(portNameFormat, *app5.Spec.DriverIngressOptions[0].ServicePort), - servicePort: *app5.Spec.DriverIngressOptions[0].ServicePort, - serviceLabels: map[string]string{ - "sparkoperator.k8s.io/app-name": "foo5", - }, - }, - }, - expectedSelector: map[string]string{ - config.SparkAppNameLabel: "foo5", - config.SparkRoleLabel: config.SparkDriverRole, - }, - expectError: false, - }, - { - name: "service with custom serviceportname", - app: app6, - expectedServices: []SparkService{ - { - serviceName: fmt.Sprintf(serviceNameFormat, app6.GetName(), *app6.Spec.DriverIngressOptions[0].ServicePort), - serviceType: apiv1.ServiceTypeClusterIP, - servicePortName: "http-spark-test", - servicePort: int32(80), - serviceLabels: map[string]string{ - "sparkoperator.k8s.io/app-name": "foo6", - }, - }, - }, - expectedSelector: map[string]string{ - config.SparkAppNameLabel: "foo6", - config.SparkRoleLabel: config.SparkDriverRole, - }, - expectError: false, - }, - { - name: "service with annotation", - app: app7, - expectedServices: []SparkService{ - { - serviceName: fmt.Sprintf(serviceNameFormat, app7.GetName(), *app7.Spec.DriverIngressOptions[0].ServicePort), - serviceType: 
apiv1.ServiceTypeClusterIP, - servicePortName: fmt.Sprintf(portNameFormat, *app7.Spec.DriverIngressOptions[0].ServicePort), - servicePort: *app7.Spec.DriverIngressOptions[0].ServicePort, - serviceAnnotations: map[string]string{ - "key": "value", - }, - serviceLabels: map[string]string{ - "sparkoperator.k8s.io/app-name": "foo7", - }, - targetPort: intstr.IntOrString{ - Type: intstr.Int, - IntVal: int32(4041), - }, - }, - }, - expectedSelector: map[string]string{ - config.SparkAppNameLabel: "foo7", - config.SparkRoleLabel: config.SparkDriverRole, - }, - expectError: false, - }, - { - name: "service with custom labels", - app: app8, - expectedServices: []SparkService{ - { - serviceName: fmt.Sprintf(serviceNameFormat, app8.GetName(), *app8.Spec.DriverIngressOptions[0].ServicePort), - serviceType: apiv1.ServiceTypeClusterIP, - servicePortName: fmt.Sprintf(portNameFormat, *app8.Spec.DriverIngressOptions[0].ServicePort), - servicePort: *app8.Spec.DriverIngressOptions[0].ServicePort, - serviceLabels: map[string]string{ - "sparkoperator.k8s.io/app-name": "foo8", - "key": "value", - }, - targetPort: intstr.IntOrString{ - Type: intstr.Int, - IntVal: int32(4041), - }, - }, - }, - expectedSelector: map[string]string{ - config.SparkAppNameLabel: "foo8", - config.SparkRoleLabel: config.SparkDriverRole, - }, - expectError: false, - }, - { - name: "service with bad port configurations", - app: app3, - expectError: true, - expectedServices: []SparkService{{}}, - }, - } - for _, test := range testcases { - testFn(test, t) - } -} - -func TestCreateDriverIngress(t *testing.T) { - type testcase struct { - name string - app *v1beta2.SparkApplication - expectedIngresses []SparkIngress - expectError bool - } - - testFn := func(test testcase, t *testing.T, ingressURLFormat string, ingressClassName string) { - fakeClient := fake.NewSimpleClientset() - if len(test.expectedIngresses) != len(test.app.Spec.DriverIngressOptions) { - t.Errorf("%s: size of test.expectedIngresses (%d) and test.app.Spec.DriverIngressOptions (%d) is different for %s", - test.name, len(test.expectedIngresses), len(test.app.Spec.DriverIngressOptions), test.app.Name) - } - for i, driverIngressConfiguration := range test.app.Spec.DriverIngressOptions { - sparkService, err := createDriverIngressServiceFromConfiguration(test.app, &driverIngressConfiguration, fakeClient) - if err != nil { - t.Fatal(err) - } - ingressURL, err := getDriverIngressURL(ingressURLFormat, test.app.Name, test.app.Namespace) - if err != nil { - t.Fatal(err) - } - sparkIngress, err := createDriverIngress(test.app, &driverIngressConfiguration, *sparkService, ingressURL, ingressClassName, fakeClient) - if err != nil { - if test.expectError { - return - } - t.Fatal(err) - } - expectedIngress := test.expectedIngresses[i] - if sparkIngress.ingressName != expectedIngress.ingressName { - t.Errorf("Ingress name wanted %s got %s", expectedIngress.ingressName, sparkIngress.ingressName) - } - if sparkIngress.ingressURL.String() != expectedIngress.ingressURL.String() { - t.Errorf("Ingress URL wanted %s got %s", expectedIngress.ingressURL, sparkIngress.ingressURL) - } - ingress, err := fakeClient.NetworkingV1().Ingresses(test.app.Namespace). 
- Get(context.TODO(), sparkIngress.ingressName, metav1.GetOptions{}) - if err != nil { - t.Fatal(err) - } - if len(ingress.Annotations) != 0 { - for key, value := range ingress.Annotations { - if expectedIngress.annotations[key] != ingress.Annotations[key] { - t.Errorf("Expected annotation: %s=%s but found : %s=%s", key, value, key, ingress.Annotations[key]) - } - } - } - if len(ingress.Spec.TLS) != 0 { - for _, ingressTls := range ingress.Spec.TLS { - if ingressTls.Hosts[0] != expectedIngress.ingressTLS[0].Hosts[0] { - t.Errorf("Expected ingressTls host: %s but found : %s", expectedIngress.ingressTLS[0].Hosts[0], ingressTls.Hosts[0]) - } - if ingressTls.SecretName != expectedIngress.ingressTLS[0].SecretName { - t.Errorf("Expected ingressTls secretName: %s but found : %s", expectedIngress.ingressTLS[0].SecretName, ingressTls.SecretName) - } - } - } - if ingress.Labels[config.SparkAppNameLabel] != test.app.Name { - t.Errorf("Ingress of app %s has the wrong labels", test.app.Name) - } - - if len(ingress.Spec.Rules) != 1 { - t.Errorf("No Ingress rules found.") - } - ingressRule := ingress.Spec.Rules[0] - // If we have a path, then the ingress adds capture groups - if ingressRule.IngressRuleValue.HTTP.Paths[0].Path != "" && ingressRule.IngressRuleValue.HTTP.Paths[0].Path != "/" { - expectedIngress.ingressURL.Path = expectedIngress.ingressURL.Path + "(/|$)(.*)" - } - if ingressRule.Host+ingressRule.IngressRuleValue.HTTP.Paths[0].Path != expectedIngress.ingressURL.Host+expectedIngress.ingressURL.Path { - t.Errorf("Ingress of app %s has the wrong host %s", ingressRule.Host+ingressRule.IngressRuleValue.HTTP.Paths[0].Path, expectedIngress.ingressURL.Host+expectedIngress.ingressURL.Path) - } - - if len(ingressRule.IngressRuleValue.HTTP.Paths) != 1 { - t.Errorf("No Ingress paths found.") - } - ingressPath := ingressRule.IngressRuleValue.HTTP.Paths[0] - if ingressPath.Backend.Service.Name != sparkService.serviceName { - t.Errorf("Service name wanted %s got %s", sparkService.serviceName, ingressPath.Backend.Service.Name) - } - if *ingressPath.PathType != networkingv1.PathTypeImplementationSpecific { - t.Errorf("PathType wanted %s got %s", networkingv1.PathTypeImplementationSpecific, *ingressPath.PathType) - } - if ingressPath.Backend.Service.Port.Number != sparkService.servicePort { - t.Errorf("Service port wanted %v got %v", sparkService.servicePort, ingressPath.Backend.Service.Port.Number) - } - } - } - - ingressNameFormat := "%s-ing-%d" - var appPort int32 = 80 - app1 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ - { - ServicePort: &appPort, - }, - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-1", - DriverInfo: v1beta2.DriverInfo{ - WebUIServiceName: "blah-service", - }, - }, - } - app2 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ - { - ServicePort: &appPort, - IngressAnnotations: map[string]string{ - "kubernetes.io/ingress.class": "nginx", - "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", - }, - }, - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-1", - DriverInfo: v1beta2.DriverInfo{ - WebUIServiceName: "blah-service", - }, - }, - } - app3 := &v1beta2.SparkApplication{ - 
ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ - { - ServicePort: &appPort, - IngressAnnotations: map[string]string{ - "kubernetes.io/ingress.class": "nginx", - "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", - }, - IngressTLS: []networkingv1.IngressTLS{ - {Hosts: []string{"host1", "host2"}, SecretName: "secret"}, - }, - }, - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-1", - DriverInfo: v1beta2.DriverInfo{ - WebUIServiceName: "blah-service", - }, - }, - } - app4 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - DriverIngressOptions: []v1beta2.DriverIngressConfiguration{ - { - ServicePort: &appPort, - IngressAnnotations: map[string]string{ - "kubernetes.io/ingress.class": "nginx", - }, - IngressTLS: []networkingv1.IngressTLS{ - {Hosts: []string{"host1", "host2"}, SecretName: ""}, - }, - }, - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-1", - DriverInfo: v1beta2.DriverInfo{ - WebUIServiceName: "blah-service", - }, - }, - } - - testcases := []testcase{ - { - name: "simple ingress object", - app: app1, - expectedIngresses: []SparkIngress{ - { - ingressName: fmt.Sprintf(ingressNameFormat, app1.GetName(), *app1.Spec.DriverIngressOptions[0].ServicePort), - ingressURL: parseURLAndAssertError(app1.GetName()+".ingress.clusterName.com", t), - }, - }, - expectError: false, - }, - { - name: "ingress with annotations and without tls configuration", - app: app2, - expectedIngresses: []SparkIngress{ - { - ingressName: fmt.Sprintf(ingressNameFormat, app2.GetName(), *app2.Spec.DriverIngressOptions[0].ServicePort), - ingressURL: parseURLAndAssertError(app2.GetName()+".ingress.clusterName.com", t), - annotations: map[string]string{ - "kubernetes.io/ingress.class": "nginx", - "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", - }, - }, - }, - expectError: false, - }, - { - name: "ingress with annotations and tls configuration", - app: app3, - expectedIngresses: []SparkIngress{ - { - ingressName: fmt.Sprintf(ingressNameFormat, app3.GetName(), *app3.Spec.DriverIngressOptions[0].ServicePort), - ingressURL: parseURLAndAssertError(app3.GetName()+".ingress.clusterName.com", t), - annotations: map[string]string{ - "kubernetes.io/ingress.class": "nginx", - "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", - }, - ingressTLS: []networkingv1.IngressTLS{ - {Hosts: []string{"host1", "host2"}, SecretName: "secret"}, - }, - }, - }, - expectError: false, - }, - { - name: "ingress with incomplete list of annotations", - app: app4, - expectedIngresses: []SparkIngress{ - { - ingressName: fmt.Sprintf(ingressNameFormat, app4.GetName(), *app4.Spec.DriverIngressOptions[0].ServicePort), - ingressURL: parseURLAndAssertError(app3.GetName()+".ingress.clusterName.com", t), - annotations: map[string]string{ - "kubernetes.io/ingress.class": "nginx", - "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", - }, - ingressTLS: []networkingv1.IngressTLS{ - {Hosts: []string{"host1", "host2"}, SecretName: ""}, - }, - }, - }, - expectError: true, - }, - } - - for _, test := range testcases { - testFn(test, t, "{{$appName}}.ingress.clusterName.com", "") - } - - testcases = []testcase{ - { - name: "simple ingress object with ingress URL Format with path", - app: app1, - expectedIngresses: 
[]SparkIngress{ - { - ingressName: fmt.Sprintf(ingressNameFormat, app1.GetName(), *app1.Spec.DriverIngressOptions[0].ServicePort), - ingressURL: parseURLAndAssertError("ingress.clusterName.com/"+app1.GetNamespace()+"/"+app1.GetName(), t), - annotations: map[string]string{ - "nginx.ingress.kubernetes.io/rewrite-target": "/$2", - }, - }, - }, - expectError: false, - }, - } - - for _, test := range testcases { - testFn(test, t, "ingress.clusterName.com/{{$appNamespace}}/{{$appName}}", "") - } - - testcases = []testcase{ - { - name: "simple ingress object with ingressClassName set", - app: app1, - expectedIngresses: []SparkIngress{ - { - ingressName: fmt.Sprintf(ingressNameFormat, app1.GetName(), *app1.Spec.DriverIngressOptions[0].ServicePort), - ingressURL: parseURLAndAssertError(app1.GetName()+".ingress.clusterName.com", t), - ingressClassName: "nginx", - }, - }, - expectError: false, - }, - } - for _, test := range testcases { - testFn(test, t, "{{$appName}}.ingress.clusterName.com", "nginx") - } -} diff --git a/pkg/controller/sparkapplication/monitoring_config_test.go b/pkg/controller/sparkapplication/monitoring_config_test.go deleted file mode 100644 index 3eb20b8f91..0000000000 --- a/pkg/controller/sparkapplication/monitoring_config_test.go +++ /dev/null @@ -1,267 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package sparkapplication - -import ( - "context" - "fmt" - "testing" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes/fake" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - "github.com/kubeflow/spark-operator/pkg/config" -) - -func TestConfigPrometheusMonitoring(t *testing.T) { - type testcase struct { - app *v1beta2.SparkApplication - metricsProperties string - metricsPropertiesFile string - prometheusConfig string - port string - driverJavaOptions string - executorJavaOptions string - } - - fakeClient := fake.NewSimpleClientset() - testFn := func(test testcase, t *testing.T) { - err := configPrometheusMonitoring(test.app, fakeClient) - if err != nil { - t.Errorf("failed to configure Prometheus monitoring: %v", err) - } - - configMapName := config.GetPrometheusConfigMapName(test.app) - configMap, err := fakeClient.CoreV1().ConfigMaps(test.app.Namespace).Get(context.TODO(), configMapName, metav1.GetOptions{}) - if err != nil { - t.Errorf("failed to get ConfigMap %s: %v", configMapName, err) - } - - if test.app.Spec.Monitoring.Prometheus.ConfigFile == nil && - test.app.Spec.Monitoring.MetricsPropertiesFile == nil && - len(configMap.Data) != 2 { - t.Errorf("expected %d data items got %d", 2, len(configMap.Data)) - } - - if test.app.Spec.Monitoring.Prometheus.ConfigFile != nil && - test.app.Spec.Monitoring.MetricsPropertiesFile == nil && - len(configMap.Data) != 1 { - t.Errorf("expected %d data items got %d", 1, len(configMap.Data)) - } - - if test.app.Spec.Monitoring.Prometheus.ConfigFile == nil && - test.app.Spec.Monitoring.MetricsPropertiesFile != nil && - len(configMap.Data) != 1 { - t.Errorf("expected %d data items got %d", 1, len(configMap.Data)) - } - - if test.app.Spec.Monitoring.MetricsPropertiesFile == nil && configMap.Data[metricsPropertiesKey] != test.metricsProperties { - t.Errorf("metrics.properties expected %s got %s", test.metricsProperties, configMap.Data[metricsPropertiesKey]) - } - - if test.app.Spec.Monitoring.Prometheus.ConfigFile == nil && configMap.Data[prometheusConfigKey] != test.prometheusConfig { - t.Errorf("prometheus.yaml expected %s got %s", test.prometheusConfig, configMap.Data[prometheusConfigKey]) - } - - if test.app.Spec.Monitoring.ExposeDriverMetrics { - if len(test.app.Spec.Driver.Annotations) != 3 { - t.Errorf("expected %d driver annotations got %d", 3, len(test.app.Spec.Driver.Annotations)) - } - if test.app.Spec.Driver.Annotations[prometheusPortAnnotation] != test.port { - t.Errorf("java agent port expected %s got %s", test.port, test.app.Spec.Driver.Annotations[prometheusPortAnnotation]) - } - - if *test.app.Spec.Driver.JavaOptions != test.driverJavaOptions { - t.Errorf("driver Java options expected %s got %s", test.driverJavaOptions, *test.app.Spec.Driver.JavaOptions) - } - } - - if test.app.Spec.Monitoring.ExposeExecutorMetrics { - if len(test.app.Spec.Executor.Annotations) != 3 { - t.Errorf("expected %d executor annotations got %d", 3, len(test.app.Spec.Executor.Annotations)) - } - if test.app.Spec.Executor.Annotations[prometheusPortAnnotation] != test.port { - t.Errorf("java agent port expected %s got %s", test.port, test.app.Spec.Executor.Annotations[prometheusPortAnnotation]) - } - - if *test.app.Spec.Executor.JavaOptions != 
test.executorJavaOptions { - t.Errorf("executor Java options expected %s got %s", test.executorJavaOptions, *test.app.Spec.Executor.JavaOptions) - } - } - - if test.app.Spec.Monitoring.MetricsPropertiesFile != nil { - if test.app.Spec.SparkConf["spark.metrics.conf"] != test.metricsPropertiesFile { - t.Errorf("expected sparkConf %s got %s", test.metricsPropertiesFile, test.app.Spec.SparkConf["spark.metrics.conf"]) - } - } - } - - testcases := []testcase{ - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "app1", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - Monitoring: &v1beta2.MonitoringSpec{ - ExposeDriverMetrics: true, - ExposeExecutorMetrics: true, - Prometheus: &v1beta2.PrometheusSpec{ - JmxExporterJar: "/prometheus/exporter.jar", - }, - }, - }, - }, - metricsProperties: config.DefaultMetricsProperties, - prometheusConfig: config.DefaultPrometheusConfiguration, - port: fmt.Sprintf("%d", config.DefaultPrometheusJavaAgentPort), - driverJavaOptions: "-javaagent:/prometheus/exporter.jar=8090:/etc/metrics/conf/prometheus.yaml", - executorJavaOptions: "-javaagent:/prometheus/exporter.jar=8090:/etc/metrics/conf/prometheus.yaml", - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "app2", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - Driver: v1beta2.DriverSpec{ - JavaOptions: stringptr("-XX:+PrintGCDetails -XX:+PrintGCTimeStamps"), - }, - Executor: v1beta2.ExecutorSpec{ - JavaOptions: stringptr("-XX:+PrintGCDetails -XX:+PrintGCTimeStamps"), - }, - Monitoring: &v1beta2.MonitoringSpec{ - ExposeDriverMetrics: true, - ExposeExecutorMetrics: true, - MetricsProperties: stringptr("testcase2dummy"), - Prometheus: &v1beta2.PrometheusSpec{ - JmxExporterJar: "/prometheus/exporter.jar", - Port: int32ptr(8091), - Configuration: stringptr("testcase2dummy"), - }, - }, - }, - }, - metricsProperties: "testcase2dummy", - prometheusConfig: "testcase2dummy", - port: "8091", - driverJavaOptions: "-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -javaagent:/prometheus/exporter.jar=8091:/etc/metrics/conf/prometheus.yaml", - executorJavaOptions: "-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -javaagent:/prometheus/exporter.jar=8091:/etc/metrics/conf/prometheus.yaml", - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "app2", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - Driver: v1beta2.DriverSpec{ - JavaOptions: stringptr("-XX:+PrintGCDetails -XX:+PrintGCTimeStamps"), - }, - Executor: v1beta2.ExecutorSpec{ - JavaOptions: stringptr("-XX:+PrintGCDetails -XX:+PrintGCTimeStamps"), - }, - Monitoring: &v1beta2.MonitoringSpec{ - ExposeDriverMetrics: true, - ExposeExecutorMetrics: true, - MetricsProperties: stringptr("testcase3dummy"), - Prometheus: &v1beta2.PrometheusSpec{ - JmxExporterJar: "/prometheus/exporter.jar", - Port: int32ptr(8091), - ConfigFile: stringptr("testcase3dummy.yaml"), - }, - }, - }, - }, - metricsProperties: "testcase3dummy", - port: "8091", - driverJavaOptions: "-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -javaagent:/prometheus/exporter.jar=8091:testcase3dummy.yaml", - executorJavaOptions: "-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -javaagent:/prometheus/exporter.jar=8091:testcase3dummy.yaml", - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "app2", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - Driver: v1beta2.DriverSpec{ - JavaOptions: stringptr("-XX:+PrintGCDetails -XX:+PrintGCTimeStamps"), - }, 
- Executor: v1beta2.ExecutorSpec{ - JavaOptions: stringptr("-XX:+PrintGCDetails -XX:+PrintGCTimeStamps"), - }, - Monitoring: &v1beta2.MonitoringSpec{ - ExposeDriverMetrics: true, - ExposeExecutorMetrics: true, - MetricsPropertiesFile: stringptr("/testcase4dummy/metrics.properties"), - Prometheus: &v1beta2.PrometheusSpec{ - JmxExporterJar: "/prometheus/exporter.jar", - Port: int32ptr(8091), - ConfigFile: stringptr("testcase4dummy.yaml"), - }, - }, - }, - }, - metricsPropertiesFile: "/testcase4dummy/metrics.properties", - port: "8091", - driverJavaOptions: "-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -javaagent:/prometheus/exporter.jar=8091:testcase4dummy.yaml", - executorJavaOptions: "-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -javaagent:/prometheus/exporter.jar=8091:testcase4dummy.yaml", - }, - { - app: &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "app2", - Namespace: "default", - }, - Spec: v1beta2.SparkApplicationSpec{ - Driver: v1beta2.DriverSpec{ - JavaOptions: stringptr("-XX:+PrintGCDetails -XX:+PrintGCTimeStamps"), - }, - Executor: v1beta2.ExecutorSpec{ - JavaOptions: stringptr("-XX:+PrintGCDetails -XX:+PrintGCTimeStamps"), - }, - Monitoring: &v1beta2.MonitoringSpec{ - ExposeDriverMetrics: true, - ExposeExecutorMetrics: true, - MetricsPropertiesFile: stringptr("/testcase5dummy/metrics.properties"), - Prometheus: &v1beta2.PrometheusSpec{ - JmxExporterJar: "/prometheus/exporter.jar", - Port: int32ptr(8091), - }, - }, - }, - }, - metricsPropertiesFile: "/testcase5dummy/metrics.properties", - prometheusConfig: config.DefaultPrometheusConfiguration, - port: "8091", - driverJavaOptions: "-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -javaagent:/prometheus/exporter.jar=8091:/etc/metrics/conf/prometheus.yaml", - executorJavaOptions: "-XX:+PrintGCDetails -XX:+PrintGCTimeStamps -javaagent:/prometheus/exporter.jar=8091:/etc/metrics/conf/prometheus.yaml", - }, - } - - for _, test := range testcases { - testFn(test, t) - } -} diff --git a/pkg/controller/sparkapplication/spark_pod_eventhandler.go b/pkg/controller/sparkapplication/spark_pod_eventhandler.go deleted file mode 100644 index 8ebb398cbb..0000000000 --- a/pkg/controller/sparkapplication/spark_pod_eventhandler.go +++ /dev/null @@ -1,97 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package sparkapplication - -import ( - "github.com/golang/glog" - - apiv1 "k8s.io/api/core/v1" - "k8s.io/client-go/tools/cache" - - crdlisters "github.com/kubeflow/spark-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta2" - "github.com/kubeflow/spark-operator/pkg/config" -) - -// sparkPodEventHandler monitors Spark executor pods and updates the SparkApplication objects accordingly. -type sparkPodEventHandler struct { - applicationLister crdlisters.SparkApplicationLister - // callback function to enqueue the SparkApp key for processing. - enqueueFunc func(appKey interface{}) -} - -// newSparkPodEventHandler creates a new sparkPodEventHandler instance. 
-func newSparkPodEventHandler(enqueueFunc func(appKey interface{}), lister crdlisters.SparkApplicationLister) *sparkPodEventHandler { - monitor := &sparkPodEventHandler{ - enqueueFunc: enqueueFunc, - applicationLister: lister, - } - return monitor -} - -func (s *sparkPodEventHandler) onPodAdded(obj interface{}) { - pod := obj.(*apiv1.Pod) - glog.V(2).Infof("Pod %s added in namespace %s.", pod.GetName(), pod.GetNamespace()) - s.enqueueSparkAppForUpdate(pod) -} - -func (s *sparkPodEventHandler) onPodUpdated(old, updated interface{}) { - oldPod := old.(*apiv1.Pod) - updatedPod := updated.(*apiv1.Pod) - - if updatedPod.ResourceVersion == oldPod.ResourceVersion { - return - } - glog.V(2).Infof("Pod %s updated in namespace %s.", updatedPod.GetName(), updatedPod.GetNamespace()) - s.enqueueSparkAppForUpdate(updatedPod) - -} - -func (s *sparkPodEventHandler) onPodDeleted(obj interface{}) { - var deletedPod *apiv1.Pod - - switch obj.(type) { - case *apiv1.Pod: - deletedPod = obj.(*apiv1.Pod) - case cache.DeletedFinalStateUnknown: - deletedObj := obj.(cache.DeletedFinalStateUnknown).Obj - deletedPod = deletedObj.(*apiv1.Pod) - } - - if deletedPod == nil { - return - } - glog.V(2).Infof("Pod %s deleted in namespace %s.", deletedPod.GetName(), deletedPod.GetNamespace()) - s.enqueueSparkAppForUpdate(deletedPod) -} - -func (s *sparkPodEventHandler) enqueueSparkAppForUpdate(pod *apiv1.Pod) { - appName, exists := getAppName(pod) - if !exists { - return - } - - if submissionID, exists := pod.Labels[config.SubmissionIDLabel]; exists { - app, err := s.applicationLister.SparkApplications(pod.GetNamespace()).Get(appName) - if err != nil || app.Status.SubmissionID != submissionID { - return - } - } - - appKey := createMetaNamespaceKey(pod.GetNamespace(), appName) - glog.V(2).Infof("Enqueuing SparkApplication %s for app update processing.", appKey) - s.enqueueFunc(appKey) -} diff --git a/pkg/controller/sparkapplication/spark_pod_eventhandler_test.go b/pkg/controller/sparkapplication/spark_pod_eventhandler_test.go deleted file mode 100644 index 2fa8360221..0000000000 --- a/pkg/controller/sparkapplication/spark_pod_eventhandler_test.go +++ /dev/null @@ -1,288 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package sparkapplication - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - apiv1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/util/workqueue" - - "github.com/kubeflow/spark-operator/pkg/config" -) - -func TestOnPodAdded(t *testing.T) { - monitor, queue := newMonitor() - - appName := "foo-1" - namespace := "foo-namespace" - driverPod := &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo-driver", - Namespace: namespace, - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.SparkApplicationSelectorLabel: "foo-123", - config.SparkAppNameLabel: appName, - }, - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodPending, - }, - } - go monitor.onPodAdded(driverPod) - - key, _ := queue.Get() - actualNamespace, actualAppName, err := cache.SplitMetaNamespaceKey(key.(string)) - assert.Nil(t, err) - - assert.Equal( - t, - appName, - actualAppName, - "wanted app name %s got %s", - appName, - actualAppName) - - assert.Equal( - t, - namespace, - actualNamespace, - "wanted app namespace %s got %s", - namespace, - actualNamespace) - - appName = "foo-2" - executorPod := &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo-driver", - Namespace: "foo-namespace", - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.SparkApplicationSelectorLabel: "foo-123", - config.SparkAppNameLabel: appName, - sparkExecutorIDLabel: "1", - }, - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodRunning, - }, - } - go monitor.onPodAdded(executorPod) - - key, _ = queue.Get() - - actualNamespace, actualAppName, err = cache.SplitMetaNamespaceKey(key.(string)) - assert.Nil(t, err) - - assert.Equal( - t, - appName, - actualAppName, - "wanted app name %s got %s", - appName, - actualAppName) - - assert.Equal( - t, - namespace, - actualNamespace, - "wanted app namespace %s got %s", - namespace, - actualNamespace) -} - -func TestOnPodUpdated(t *testing.T) { - monitor, queue := newMonitor() - - appName := "foo-3" - namespace := "foo-namespace" - oldDriverPod := &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo-driver", - Namespace: namespace, - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.SparkApplicationSelectorLabel: "foo-123", - config.SparkAppNameLabel: appName, - }, - ResourceVersion: "1", - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodPending, - }, - } - newDriverPod := oldDriverPod.DeepCopy() - newDriverPod.ResourceVersion = "2" - newDriverPod.Status.Phase = apiv1.PodSucceeded - go monitor.onPodUpdated(oldDriverPod, newDriverPod) - - key, _ := queue.Get() - - actualNamespace, actualAppName, err := cache.SplitMetaNamespaceKey(key.(string)) - assert.Nil(t, err) - - assert.Equal( - t, - appName, - actualAppName, - "wanted app name %s got %s", - appName, - actualAppName) - - assert.Equal( - t, - namespace, - actualNamespace, - "wanted app namespace %s got %s", - namespace, - actualNamespace) - - appName = "foo-4" - oldExecutorPod := &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo-driver", - Namespace: namespace, - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.SparkApplicationSelectorLabel: "foo-123", - config.SparkAppNameLabel: appName, - sparkExecutorIDLabel: "1", - }, - ResourceVersion: "1", - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodRunning, - }, - } - newExecutorPod := oldExecutorPod.DeepCopy() - newExecutorPod.ResourceVersion = "2" - 
newExecutorPod.Status.Phase = apiv1.PodFailed - go monitor.onPodUpdated(oldExecutorPod, newExecutorPod) - - key, _ = queue.Get() - - actualNamespace, actualAppName, err = cache.SplitMetaNamespaceKey(key.(string)) - assert.Nil(t, err) - - assert.Equal( - t, - appName, - actualAppName, - "wanted app name %s got %s", - appName, - actualAppName) - - assert.Equal( - t, - namespace, - actualNamespace, - "wanted app namespace %s got %s", - namespace, - actualNamespace) -} - -func TestOnPodDeleted(t *testing.T) { - monitor, queue := newMonitor() - - appName := "foo-5" - namespace := "foo-namespace" - driverPod := &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo-driver", - Namespace: namespace, - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.SparkApplicationSelectorLabel: "foo-123", - config.SparkAppNameLabel: appName, - }, - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodRunning, - }, - } - go monitor.onPodDeleted(driverPod) - - key, _ := queue.Get() - actualNamespace, actualAppName, err := cache.SplitMetaNamespaceKey(key.(string)) - assert.Nil(t, err) - - assert.Equal( - t, - appName, - actualAppName, - "wanted app name %s got %s", - appName, - actualAppName) - - assert.Equal( - t, - namespace, - actualNamespace, - "wanted app namespace %s got %s", - namespace, - actualNamespace) - - appName = "foo-6" - executorPod := &apiv1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo-exec-1", - Namespace: namespace, - Labels: map[string]string{ - config.SparkRoleLabel: config.SparkExecutorRole, - config.SparkApplicationSelectorLabel: "foo-123", - config.SparkAppNameLabel: appName, - sparkExecutorIDLabel: "1", - }, - }, - Status: apiv1.PodStatus{ - Phase: apiv1.PodSucceeded, - }, - } - go monitor.onPodDeleted(executorPod) - - key, _ = queue.Get() - actualNamespace, actualAppName, err = cache.SplitMetaNamespaceKey(key.(string)) - assert.Nil(t, err) - - assert.Equal( - t, - appName, - actualAppName, - "wanted app name %s got %s", - appName, - actualAppName) - - assert.Equal( - t, - namespace, - actualNamespace, - "wanted app namespace %s got %s", - namespace, - actualNamespace) -} - -func newMonitor() (*sparkPodEventHandler, workqueue.RateLimitingInterface) { - queue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), - "spark-application-controller-test") - monitor := newSparkPodEventHandler(queue.AddRateLimited, nil) - return monitor, queue -} diff --git a/pkg/controller/sparkapplication/sparkapp_metrics.go b/pkg/controller/sparkapplication/sparkapp_metrics.go deleted file mode 100644 index 1dfb309b1c..0000000000 --- a/pkg/controller/sparkapplication/sparkapp_metrics.go +++ /dev/null @@ -1,336 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package sparkapplication - -import ( - "time" - - "github.com/golang/glog" - "github.com/prometheus/client_golang/prometheus" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - "github.com/kubeflow/spark-operator/pkg/util" -) - -type sparkAppMetrics struct { - labels []string - prefix string - - sparkAppCount *prometheus.CounterVec - sparkAppSubmitCount *prometheus.CounterVec - sparkAppSuccessCount *prometheus.CounterVec - sparkAppFailureCount *prometheus.CounterVec - sparkAppFailedSubmissionCount *prometheus.CounterVec - sparkAppRunningCount *util.PositiveGauge - - sparkAppSuccessExecutionTime *prometheus.SummaryVec - sparkAppFailureExecutionTime *prometheus.SummaryVec - sparkAppStartLatency *prometheus.SummaryVec - sparkAppStartLatencyHistogram *prometheus.HistogramVec - - sparkAppExecutorRunningCount *util.PositiveGauge - sparkAppExecutorFailureCount *prometheus.CounterVec - sparkAppExecutorSuccessCount *prometheus.CounterVec -} - -func newSparkAppMetrics(metricsConfig *util.MetricConfig) *sparkAppMetrics { - prefix := metricsConfig.MetricsPrefix - labels := metricsConfig.MetricsLabels - validLabels := make([]string, len(labels)) - for i, label := range labels { - validLabels[i] = util.CreateValidMetricNameLabel("", label) - } - - sparkAppCount := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: util.CreateValidMetricNameLabel(prefix, "spark_app_count"), - Help: "Total Number of Spark Apps Handled by the Operator", - }, - validLabels, - ) - sparkAppSubmitCount := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: util.CreateValidMetricNameLabel(prefix, "spark_app_submit_count"), - Help: "Spark App Submits via the Operator", - }, - validLabels, - ) - sparkAppSuccessCount := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: util.CreateValidMetricNameLabel(prefix, "spark_app_success_count"), - Help: "Spark App Success Count via the Operator", - }, - validLabels, - ) - sparkAppFailureCount := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: util.CreateValidMetricNameLabel(prefix, "spark_app_failure_count"), - Help: "Spark App Failure Count via the Operator", - }, - validLabels, - ) - sparkAppFailedSubmissionCount := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: util.CreateValidMetricNameLabel(prefix, "spark_app_failed_submission_count"), - Help: "Spark App Failed Submission Count via the Operator", - }, - validLabels, - ) - sparkAppSuccessExecutionTime := prometheus.NewSummaryVec( - prometheus.SummaryOpts{ - Name: util.CreateValidMetricNameLabel(prefix, "spark_app_success_execution_time_microseconds"), - Help: "Spark App Successful Execution Runtime via the Operator", - }, - validLabels, - ) - sparkAppFailureExecutionTime := prometheus.NewSummaryVec( - prometheus.SummaryOpts{ - Name: util.CreateValidMetricNameLabel(prefix, "spark_app_failure_execution_time_microseconds"), - Help: "Spark App Failed Execution Runtime via the Operator", - }, - validLabels, - ) - sparkAppStartLatency := prometheus.NewSummaryVec( - prometheus.SummaryOpts{ - Name: util.CreateValidMetricNameLabel(prefix, "spark_app_start_latency_microseconds"), - Help: "Spark App Start Latency via the Operator", - }, - validLabels, - ) - sparkAppStartLatencyHistogram := prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Name: util.CreateValidMetricNameLabel(prefix, "spark_app_start_latency_seconds"), - Help: "Spark App Start Latency counts in buckets via the Operator", - Buckets: metricsConfig.MetricsJobStartLatencyBuckets, - }, - 
validLabels, - ) - sparkAppExecutorSuccessCount := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: util.CreateValidMetricNameLabel(prefix, "spark_app_executor_success_count"), - Help: "Spark App Successful Executor Count via the Operator", - }, - validLabels, - ) - sparkAppExecutorFailureCount := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: util.CreateValidMetricNameLabel(prefix, "spark_app_executor_failure_count"), - Help: "Spark App Failed Executor Count via the Operator", - }, - validLabels, - ) - sparkAppRunningCount := util.NewPositiveGauge(util.CreateValidMetricNameLabel(prefix, "spark_app_running_count"), - "Spark App Running Count via the Operator", validLabels) - sparkAppExecutorRunningCount := util.NewPositiveGauge(util.CreateValidMetricNameLabel(prefix, - "spark_app_executor_running_count"), "Spark App Running Executor Count via the Operator", validLabels) - - return &sparkAppMetrics{ - labels: validLabels, - prefix: prefix, - sparkAppCount: sparkAppCount, - sparkAppSubmitCount: sparkAppSubmitCount, - sparkAppRunningCount: sparkAppRunningCount, - sparkAppSuccessCount: sparkAppSuccessCount, - sparkAppFailureCount: sparkAppFailureCount, - sparkAppFailedSubmissionCount: sparkAppFailedSubmissionCount, - sparkAppSuccessExecutionTime: sparkAppSuccessExecutionTime, - sparkAppFailureExecutionTime: sparkAppFailureExecutionTime, - sparkAppStartLatency: sparkAppStartLatency, - sparkAppStartLatencyHistogram: sparkAppStartLatencyHistogram, - sparkAppExecutorRunningCount: sparkAppExecutorRunningCount, - sparkAppExecutorSuccessCount: sparkAppExecutorSuccessCount, - sparkAppExecutorFailureCount: sparkAppExecutorFailureCount, - } -} - -func (sm *sparkAppMetrics) registerMetrics() { - util.RegisterMetric(sm.sparkAppCount) - util.RegisterMetric(sm.sparkAppSubmitCount) - util.RegisterMetric(sm.sparkAppSuccessCount) - util.RegisterMetric(sm.sparkAppFailureCount) - util.RegisterMetric(sm.sparkAppSuccessExecutionTime) - util.RegisterMetric(sm.sparkAppFailureExecutionTime) - util.RegisterMetric(sm.sparkAppStartLatency) - util.RegisterMetric(sm.sparkAppStartLatencyHistogram) - util.RegisterMetric(sm.sparkAppExecutorSuccessCount) - util.RegisterMetric(sm.sparkAppExecutorFailureCount) - sm.sparkAppRunningCount.Register() - sm.sparkAppExecutorRunningCount.Register() -} - -func (sm *sparkAppMetrics) exportMetricsOnDelete(oldApp *v1beta2.SparkApplication) { - metricLabels := fetchMetricLabels(oldApp, sm.labels) - oldState := oldApp.Status.AppState.State - if oldState == v1beta2.RunningState { - sm.sparkAppRunningCount.Dec(metricLabels) - } - for executor, oldExecState := range oldApp.Status.ExecutorState { - if oldExecState == v1beta2.ExecutorRunningState { - glog.V(2).Infof("Application is deleted. 
Decreasing Running Count for Executor %s.", executor) - sm.sparkAppExecutorRunningCount.Dec(metricLabels) - } - } -} - -func (sm *sparkAppMetrics) exportMetrics(oldApp, newApp *v1beta2.SparkApplication) { - metricLabels := fetchMetricLabels(newApp, sm.labels) - - oldState := oldApp.Status.AppState.State - newState := newApp.Status.AppState.State - if newState != oldState { - if oldState == v1beta2.NewState { - if m, err := sm.sparkAppCount.GetMetricWith(metricLabels); err != nil { - glog.Errorf("Error while exporting metrics: %v", err) - } else { - m.Inc() - } - } - - switch newState { - case v1beta2.SubmittedState: - if m, err := sm.sparkAppSubmitCount.GetMetricWith(metricLabels); err != nil { - glog.Errorf("Error while exporting metrics: %v", err) - } else { - m.Inc() - } - case v1beta2.RunningState: - sm.sparkAppRunningCount.Inc(metricLabels) - sm.exportJobStartLatencyMetrics(newApp, metricLabels) - case v1beta2.SucceedingState: - if !newApp.Status.LastSubmissionAttemptTime.Time.IsZero() && !newApp.Status.TerminationTime.Time.IsZero() { - d := newApp.Status.TerminationTime.Time.Sub(newApp.Status.LastSubmissionAttemptTime.Time) - if m, err := sm.sparkAppSuccessExecutionTime.GetMetricWith(metricLabels); err != nil { - glog.Errorf("Error while exporting metrics: %v", err) - } else { - m.Observe(float64(d / time.Microsecond)) - } - } - sm.sparkAppRunningCount.Dec(metricLabels) - if m, err := sm.sparkAppSuccessCount.GetMetricWith(metricLabels); err != nil { - glog.Errorf("Error while exporting metrics: %v", err) - } else { - m.Inc() - } - case v1beta2.FailingState: - if !newApp.Status.LastSubmissionAttemptTime.Time.IsZero() && !newApp.Status.TerminationTime.Time.IsZero() { - d := newApp.Status.TerminationTime.Time.Sub(newApp.Status.LastSubmissionAttemptTime.Time) - if m, err := sm.sparkAppFailureExecutionTime.GetMetricWith(metricLabels); err != nil { - glog.Errorf("Error while exporting metrics: %v", err) - } else { - m.Observe(float64(d / time.Microsecond)) - } - } - sm.sparkAppRunningCount.Dec(metricLabels) - if m, err := sm.sparkAppFailureCount.GetMetricWith(metricLabels); err != nil { - glog.Errorf("Error while exporting metrics: %v", err) - } else { - m.Inc() - } - case v1beta2.FailedSubmissionState: - if m, err := sm.sparkAppFailedSubmissionCount.GetMetricWith(metricLabels); err != nil { - glog.Errorf("Error while exporting metrics: %v", err) - } else { - m.Inc() - } - } - } - - // In the event that state transitions happened too quickly and the spark app skipped the RUNNING state, the job - // start latency should still be captured. - // Note: There is an edge case that a Submitted state can go directly to a Failing state if the driver pod is - // deleted. This is very unlikely if not being done intentionally, so we choose not to handle it. - if newState != oldState { - if (newState == v1beta2.FailingState || newState == v1beta2.SucceedingState) && oldState == v1beta2.SubmittedState { - // TODO: remove this log once we've gathered some data in prod fleets. 
- glog.V(2).Infof("Calculating job start latency metrics for edge case transition from %v to %v in app %v in namespace %v.", oldState, newState, newApp.Name, newApp.Namespace) - sm.exportJobStartLatencyMetrics(newApp, metricLabels) - } - } - - oldExecutorStates := oldApp.Status.ExecutorState - // Potential Executor status updates - for executor, newExecState := range newApp.Status.ExecutorState { - switch newExecState { - case v1beta2.ExecutorRunningState: - if oldExecutorStates[executor] != newExecState { - glog.V(2).Infof("Exporting Metrics for Executor %s. OldState: %v NewState: %v", executor, - oldExecutorStates[executor], newExecState) - sm.sparkAppExecutorRunningCount.Inc(metricLabels) - } - case v1beta2.ExecutorCompletedState: - if oldExecutorStates[executor] != newExecState { - glog.V(2).Infof("Exporting Metrics for Executor %s. OldState: %v NewState: %v", executor, - oldExecutorStates[executor], newExecState) - sm.sparkAppExecutorRunningCount.Dec(metricLabels) - if m, err := sm.sparkAppExecutorSuccessCount.GetMetricWith(metricLabels); err != nil { - glog.Errorf("Error while exporting metrics: %v", err) - } else { - m.Inc() - } - } - case v1beta2.ExecutorFailedState: - if oldExecutorStates[executor] != newExecState { - glog.V(2).Infof("Exporting Metrics for Executor %s. OldState: %v NewState: %v", executor, - oldExecutorStates[executor], newExecState) - sm.sparkAppExecutorRunningCount.Dec(metricLabels) - if m, err := sm.sparkAppExecutorFailureCount.GetMetricWith(metricLabels); err != nil { - glog.Errorf("Error while exporting metrics: %v", err) - } else { - m.Inc() - } - } - } - } -} - -func (sm *sparkAppMetrics) exportJobStartLatencyMetrics(app *v1beta2.SparkApplication, labels map[string]string) { - // Expose the job start latency related metrics of an SparkApp only once when it runs for the first time - if app.Status.ExecutionAttempts == 1 { - latency := time.Now().Sub(app.CreationTimestamp.Time) - if m, err := sm.sparkAppStartLatency.GetMetricWith(labels); err != nil { - glog.Errorf("Error while exporting metrics: %v", err) - } else { - m.Observe(float64(latency / time.Microsecond)) - } - if m, err := sm.sparkAppStartLatencyHistogram.GetMetricWith(labels); err != nil { - glog.Errorf("Error while exporting metrics: %v", err) - } else { - m.Observe(float64(latency / time.Second)) - } - } -} - -func fetchMetricLabels(app *v1beta2.SparkApplication, labels []string) map[string]string { - // Convert app labels into ones that can be used as metric labels. - validLabels := make(map[string]string) - for labelKey, v := range app.Labels { - newKey := util.CreateValidMetricNameLabel("", labelKey) - validLabels[newKey] = v - } - - metricLabels := make(map[string]string) - for _, label := range labels { - if value, ok := validLabels[label]; ok { - metricLabels[label] = value - } else if label == "namespace" { // If the "namespace" label is in the metrics config, use it. - metricLabels[label] = app.Namespace - } else { - metricLabels[label] = "Unknown" - } - } - return metricLabels -} diff --git a/pkg/controller/sparkapplication/sparkapp_metrics_test.go b/pkg/controller/sparkapplication/sparkapp_metrics_test.go deleted file mode 100644 index a860d7f411..0000000000 --- a/pkg/controller/sparkapplication/sparkapp_metrics_test.go +++ /dev/null @@ -1,74 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package sparkapplication - -import ( - "github.com/kubeflow/spark-operator/pkg/util" - "net/http" - "sync" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestSparkAppMetrics(t *testing.T) { - http.DefaultServeMux = new(http.ServeMux) - // Test with label containing "-". Expect them to be converted to "_". - metricsConfig := &util.MetricConfig{ - MetricsPrefix: "", - MetricsLabels: []string{"app-id", "namespace"}, - MetricsJobStartLatencyBuckets: []float64{30, 60, 90, 120}, - } - metrics := newSparkAppMetrics(metricsConfig) - app1 := map[string]string{"app_id": "test1", "namespace": "default"} - - var wg sync.WaitGroup - wg.Add(1) - go func() { - for i := 0; i < 10; i++ { - metrics.sparkAppCount.With(app1).Inc() - metrics.sparkAppSubmitCount.With(app1).Inc() - metrics.sparkAppRunningCount.Inc(app1) - metrics.sparkAppSuccessCount.With(app1).Inc() - metrics.sparkAppFailureCount.With(app1).Inc() - metrics.sparkAppFailedSubmissionCount.With(app1).Inc() - metrics.sparkAppSuccessExecutionTime.With(app1).Observe(float64(100 * i)) - metrics.sparkAppFailureExecutionTime.With(app1).Observe(float64(500 * i)) - metrics.sparkAppStartLatency.With(app1).Observe(float64(10 * i)) - metrics.sparkAppStartLatencyHistogram.With(app1).Observe(float64(10 * i)) - metrics.sparkAppExecutorRunningCount.Inc(app1) - metrics.sparkAppExecutorSuccessCount.With(app1).Inc() - metrics.sparkAppExecutorFailureCount.With(app1).Inc() - } - for i := 0; i < 5; i++ { - metrics.sparkAppRunningCount.Dec(app1) - metrics.sparkAppExecutorRunningCount.Dec(app1) - } - wg.Done() - }() - - wg.Wait() - assert.Equal(t, float64(10), fetchCounterValue(metrics.sparkAppCount, app1)) - assert.Equal(t, float64(10), fetchCounterValue(metrics.sparkAppSubmitCount, app1)) - assert.Equal(t, float64(5), metrics.sparkAppRunningCount.Value(app1)) - assert.Equal(t, float64(10), fetchCounterValue(metrics.sparkAppSuccessCount, app1)) - assert.Equal(t, float64(10), fetchCounterValue(metrics.sparkAppFailureCount, app1)) - assert.Equal(t, float64(10), fetchCounterValue(metrics.sparkAppFailedSubmissionCount, app1)) - assert.Equal(t, float64(5), metrics.sparkAppExecutorRunningCount.Value(app1)) - assert.Equal(t, float64(10), fetchCounterValue(metrics.sparkAppExecutorFailureCount, app1)) - assert.Equal(t, float64(10), fetchCounterValue(metrics.sparkAppExecutorSuccessCount, app1)) -} diff --git a/pkg/controller/sparkapplication/sparkapp_util.go b/pkg/controller/sparkapplication/sparkapp_util.go deleted file mode 100644 index 3d2a58f07c..0000000000 --- a/pkg/controller/sparkapplication/sparkapp_util.go +++ /dev/null @@ -1,218 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
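The deleted test configures metric labels as "app-id" and then addresses them as "app_id", relying on label sanitization. A hypothetical sketch of that conversion; the operator's real util.CreateValidMetricNameLabel may differ in detail:

```go
package main

import (
	"fmt"
	"regexp"
)

// invalidChars is an illustrative pattern for characters that are not
// valid in Prometheus label names.
var invalidChars = regexp.MustCompile(`[^a-zA-Z0-9_]`)

// sanitize is a hypothetical stand-in showing the conversion the deleted
// test relies on: "app-id" in the metrics config becomes the label "app_id".
func sanitize(prefix, name string) string {
	return invalidChars.ReplaceAllString(prefix+name, "_")
}

func main() {
	fmt.Println(sanitize("", "app-id")) // app_id
}
```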
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package sparkapplication - -import ( - "encoding/json" - "fmt" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - "github.com/kubeflow/spark-operator/pkg/config" - apiv1 "k8s.io/api/core/v1" - networkingv1 "k8s.io/api/networking/v1" -) - -// Helper method to create a key with namespace and appName -func createMetaNamespaceKey(namespace, name string) string { - return fmt.Sprintf("%s/%s", namespace, name) -} - -func getAppName(pod *apiv1.Pod) (string, bool) { - appName, ok := pod.Labels[config.SparkAppNameLabel] - return appName, ok -} - -func getSparkApplicationID(pod *apiv1.Pod) string { - return pod.Labels[config.SparkApplicationSelectorLabel] -} - -func getDriverPodName(app *v1beta2.SparkApplication) string { - name := app.Spec.Driver.PodName - if name != nil && len(*name) > 0 { - return *name - } - - sparkConf := app.Spec.SparkConf - if sparkConf[config.SparkDriverPodNameKey] != "" { - return sparkConf[config.SparkDriverPodNameKey] - } - - return fmt.Sprintf("%s-driver", app.Name) -} - -func getUIServiceType(app *v1beta2.SparkApplication) apiv1.ServiceType { - if app.Spec.SparkUIOptions != nil && app.Spec.SparkUIOptions.ServiceType != nil { - return *app.Spec.SparkUIOptions.ServiceType - } - return apiv1.ServiceTypeClusterIP -} - -func getDefaultUIServiceName(app *v1beta2.SparkApplication) string { - return fmt.Sprintf("%s-ui-svc", app.Name) -} - -func getDefaultUIIngressName(app *v1beta2.SparkApplication) string { - return fmt.Sprintf("%s-ui-ingress", app.Name) -} - -func getResourceLabels(app *v1beta2.SparkApplication) map[string]string { - labels := map[string]string{config.SparkAppNameLabel: app.Name} - if app.Status.SubmissionID != "" { - labels[config.SubmissionIDLabel] = app.Status.SubmissionID - } - return labels -} - -func getServiceAnnotations(app *v1beta2.SparkApplication) map[string]string { - serviceAnnotations := map[string]string{} - if app.Spec.SparkUIOptions != nil && app.Spec.SparkUIOptions.ServiceAnnotations != nil { - for key, value := range app.Spec.SparkUIOptions.ServiceAnnotations { - serviceAnnotations[key] = value - } - } - return serviceAnnotations -} - -func getServiceLabels(app *v1beta2.SparkApplication) map[string]string { - serviceLabels := map[string]string{} - if app.Spec.SparkUIOptions != nil && app.Spec.SparkUIOptions.ServiceLabels != nil { - for key, value := range app.Spec.SparkUIOptions.ServiceLabels { - serviceLabels[key] = value - } - } - return serviceLabels -} - -func getIngressResourceAnnotations(app *v1beta2.SparkApplication) map[string]string { - ingressAnnotations := map[string]string{} - if app.Spec.SparkUIOptions != nil && app.Spec.SparkUIOptions.IngressAnnotations != nil { - for key, value := range app.Spec.SparkUIOptions.IngressAnnotations { - ingressAnnotations[key] = value - } - } - return ingressAnnotations -} - -func getIngressTlsHosts(app *v1beta2.SparkApplication) []networkingv1.IngressTLS { - var ingressTls []networkingv1.IngressTLS - if app.Spec.SparkUIOptions != nil && app.Spec.SparkUIOptions.IngressTLS != nil { - for _, ingTls := range app.Spec.SparkUIOptions.IngressTLS { - ingressTls = append(ingressTls, ingTls) - } - } - return ingressTls -} - -func podPhaseToExecutorState(podPhase apiv1.PodPhase) v1beta2.ExecutorState { - switch podPhase { - case apiv1.PodPending: - return v1beta2.ExecutorPendingState - case apiv1.PodRunning: - return v1beta2.ExecutorRunningState - case apiv1.PodSucceeded: 
- return v1beta2.ExecutorCompletedState - case apiv1.PodFailed: - return v1beta2.ExecutorFailedState - default: - return v1beta2.ExecutorUnknownState - } -} - -func isExecutorTerminated(executorState v1beta2.ExecutorState) bool { - return executorState == v1beta2.ExecutorCompletedState || executorState == v1beta2.ExecutorFailedState -} - -func isDriverRunning(app *v1beta2.SparkApplication) bool { - return app.Status.AppState.State == v1beta2.RunningState -} - -func getDriverContainerTerminatedState(podStatus apiv1.PodStatus) *apiv1.ContainerStateTerminated { - return getContainerTerminatedState(config.SparkDriverContainerName, podStatus) -} - -func getExecutorContainerTerminatedState(podStatus apiv1.PodStatus) *apiv1.ContainerStateTerminated { - state := getContainerTerminatedState(config.Spark3DefaultExecutorContainerName, podStatus) - if state == nil { - state = getContainerTerminatedState(config.SparkExecutorContainerName, podStatus) - } - return state -} - -func getContainerTerminatedState(name string, podStatus apiv1.PodStatus) *apiv1.ContainerStateTerminated { - for _, c := range podStatus.ContainerStatuses { - if c.Name == name { - if c.State.Terminated != nil { - return c.State.Terminated - } - return nil - } - } - return nil -} - -func podStatusToDriverState(podStatus apiv1.PodStatus) v1beta2.DriverState { - switch podStatus.Phase { - case apiv1.PodPending: - return v1beta2.DriverPendingState - case apiv1.PodRunning: - state := getDriverContainerTerminatedState(podStatus) - if state != nil { - if state.ExitCode == 0 { - return v1beta2.DriverCompletedState - } - return v1beta2.DriverFailedState - } - return v1beta2.DriverRunningState - case apiv1.PodSucceeded: - return v1beta2.DriverCompletedState - case apiv1.PodFailed: - state := getDriverContainerTerminatedState(podStatus) - if state != nil && state.ExitCode == 0 { - return v1beta2.DriverCompletedState - } - return v1beta2.DriverFailedState - default: - return v1beta2.DriverUnknownState - } -} - -func hasDriverTerminated(driverState v1beta2.DriverState) bool { - return driverState == v1beta2.DriverCompletedState || driverState == v1beta2.DriverFailedState -} - -func driverStateToApplicationState(driverState v1beta2.DriverState) v1beta2.ApplicationStateType { - switch driverState { - case v1beta2.DriverPendingState: - return v1beta2.SubmittedState - case v1beta2.DriverCompletedState: - return v1beta2.SucceedingState - case v1beta2.DriverFailedState: - return v1beta2.FailingState - case v1beta2.DriverRunningState: - return v1beta2.RunningState - default: - return v1beta2.UnknownState - } -} - -func printStatus(status *v1beta2.SparkApplicationStatus) (string, error) { - marshalled, err := json.MarshalIndent(status, "", " ") - if err != nil { - return "", err - } - return string(marshalled), nil -} diff --git a/pkg/controller/sparkapplication/sparkapp_util_test.go b/pkg/controller/sparkapplication/sparkapp_util_test.go deleted file mode 100644 index c1605656ed..0000000000 --- a/pkg/controller/sparkapplication/sparkapp_util_test.go +++ /dev/null @@ -1,59 +0,0 @@ -/* -Copyright 2020 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
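The deleted helpers above form a two-step pipeline: pod phase determines the driver state, and the driver state determines the application state. A minimal standalone sketch of the second step, with local string constants standing in for the v1beta2 types:

```go
package main

import "fmt"

// Local stand-ins for the v1beta2 state constants, so the mapping can be
// illustrated without importing the operator packages.
type driverState string
type appState string

const (
	driverPending   driverState = "PENDING"
	driverRunning   driverState = "RUNNING"
	driverCompleted driverState = "COMPLETED"
	driverFailed    driverState = "FAILED"
)

// driverToApp mirrors driverStateToApplicationState: the driver pod's
// state drives the application-level state machine.
func driverToApp(s driverState) appState {
	switch s {
	case driverPending:
		return "SUBMITTED"
	case driverCompleted:
		return "SUCCEEDING"
	case driverFailed:
		return "FAILING"
	case driverRunning:
		return "RUNNING"
	default:
		return "UNKNOWN"
	}
}

func main() {
	fmt.Println(driverToApp(driverCompleted)) // SUCCEEDING
}
```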
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package sparkapplication - -import ( - "testing" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" -) - -var expectedStatusString = `{ - "sparkApplicationId": "test-app", - "submissionID": "test-app-submission", - "lastSubmissionAttemptTime": null, - "terminationTime": null, - "driverInfo": {}, - "applicationState": { - "state": "COMPLETED" - }, - "executorState": { - "executor-1": "COMPLETED" - } -}` - -func TestPrintStatus(t *testing.T) { - status := &v1beta2.SparkApplicationStatus{ - SparkApplicationID: "test-app", - SubmissionID: "test-app-submission", - AppState: v1beta2.ApplicationState{ - State: v1beta2.CompletedState, - }, - ExecutorState: map[string]v1beta2.ExecutorState{ - "executor-1": v1beta2.ExecutorCompletedState, - }, - } - - statusString, err := printStatus(status) - if err != nil { - t.Fail() - } - - if statusString != expectedStatusString { - t.Errorf("status string\n %s is different from expected status string\n %s", statusString, expectedStatusString) - } -} diff --git a/pkg/controller/sparkapplication/sparkui.go b/pkg/controller/sparkapplication/sparkui.go deleted file mode 100644 index b247974da8..0000000000 --- a/pkg/controller/sparkapplication/sparkui.go +++ /dev/null @@ -1,96 +0,0 @@ -/* -Copyright 2017 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/
-
-package sparkapplication
-
-import (
- "fmt"
- "net/url"
- "strconv"
-
- clientset "k8s.io/client-go/kubernetes"
-
- "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
- "github.com/kubeflow/spark-operator/pkg/util"
-)
-
-const (
- sparkUIPortConfigurationKey = "spark.ui.port"
- defaultSparkWebUIPort int32 = 4040
- defaultSparkWebUIPortName string = "spark-driver-ui-port"
-)
-
-func createSparkUIIngress(app *v1beta2.SparkApplication, service SparkService, ingressURL *url.URL, ingressClassName string, kubeClient clientset.Interface) (*SparkIngress, error) {
- ingressName := getDefaultUIIngressName(app)
- if util.IngressCapabilities.Has("networking.k8s.io/v1") {
- return createDriverIngress_v1(app, service, ingressName, ingressURL, ingressClassName, kubeClient)
- } else {
- return createDriverIngress_legacy(app, service, ingressName, ingressURL, kubeClient)
- }
-}
-
-func createSparkUIService(
- app *v1beta2.SparkApplication,
- kubeClient clientset.Interface) (*SparkService, error) {
- portName := getUIServicePortName(app)
- port, err := getUIServicePort(app)
- if err != nil {
- return nil, fmt.Errorf("invalid Spark UI servicePort: %d", port)
- }
- tPort, err := getUITargetPort(app)
- if err != nil {
- return nil, fmt.Errorf("invalid Spark UI targetPort: %d", tPort)
- }
- serviceName := getDefaultUIServiceName(app)
- serviceType := getUIServiceType(app)
- serviceAnnotations := getServiceAnnotations(app)
- serviceLabels := getServiceLabels(app)
- return createDriverIngressService(app, portName, port, tPort, serviceName, serviceType, serviceAnnotations, serviceLabels, kubeClient)
-}
-
-// getUITargetPort attempts to get the Spark web UI port from configuration property spark.ui.port
-// in Spec.SparkConf if it is present, otherwise the default port is returned.
-// Note that we don't attempt to get the port from Spec.SparkConfigMap.
-func getUITargetPort(app *v1beta2.SparkApplication) (int32, error) {
- portStr, ok := app.Spec.SparkConf[sparkUIPortConfigurationKey]
- if ok {
- port, err := strconv.Atoi(portStr)
- return int32(port), err
- }
- return defaultSparkWebUIPort, nil
-}
-
-func getUIServicePort(app *v1beta2.SparkApplication) (int32, error) {
- if app.Spec.SparkUIOptions == nil {
- return getUITargetPort(app)
- }
- port := app.Spec.SparkUIOptions.ServicePort
- if port != nil {
- return *port, nil
- }
- return defaultSparkWebUIPort, nil
-}
-
-func getUIServicePortName(app *v1beta2.SparkApplication) string {
- if app.Spec.SparkUIOptions == nil {
- return defaultSparkWebUIPortName
- }
- portName := app.Spec.SparkUIOptions.ServicePortName
- if portName != nil {
- return *portName
- }
- return defaultSparkWebUIPortName
-}
diff --git a/pkg/controller/sparkapplication/sparkui_test.go b/pkg/controller/sparkapplication/sparkui_test.go
deleted file mode 100644
index 6427aa5304..0000000000
--- a/pkg/controller/sparkapplication/sparkui_test.go
+++ /dev/null
@@ -1,673 +0,0 @@
-/*
-Copyright 2017 Google LLC
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- https://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
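Port resolution in the deleted sparkui.go is worth spelling out: spark.ui.port wins when set, a non-numeric value propagates as an error (the "4041x" case the deleted tests below exercise), and 4040 is the fallback. A standalone sketch under those assumptions, with a local helper name:

```go
package main

import (
	"fmt"
	"strconv"
)

// resolveUIPort mirrors getUITargetPort: prefer spark.ui.port from
// SparkConf, fall back to the 4040 default, surface parse failures.
func resolveUIPort(sparkConf map[string]string) (int32, error) {
	if portStr, ok := sparkConf["spark.ui.port"]; ok {
		port, err := strconv.Atoi(portStr)
		return int32(port), err
	}
	return 4040, nil
}

func main() {
	port, err := resolveUIPort(map[string]string{"spark.ui.port": "4041"})
	fmt.Println(port, err) // 4041 <nil>
	port, err = resolveUIPort(nil)
	fmt.Println(port, err) // 4040 <nil>
	_, err = resolveUIPort(map[string]string{"spark.ui.port": "4041x"})
	fmt.Println(err != nil) // true: non-numeric ports are rejected
}
```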
-*/ - -package sparkapplication - -import ( - "context" - "fmt" - "net/url" - "reflect" - "testing" - - apiv1 "k8s.io/api/core/v1" - networkingv1 "k8s.io/api/networking/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/client-go/kubernetes/fake" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - "github.com/kubeflow/spark-operator/pkg/config" - "github.com/kubeflow/spark-operator/pkg/util" -) - -func TestCreateSparkUIService(t *testing.T) { - type testcase struct { - name string - app *v1beta2.SparkApplication - expectedService SparkService - expectedSelector map[string]string - expectError bool - } - testFn := func(test testcase, t *testing.T) { - fakeClient := fake.NewSimpleClientset() - util.IngressCapabilities = map[string]bool{"networking.k8s.io/v1": true} - sparkService, err := createSparkUIService(test.app, fakeClient) - if err != nil { - if test.expectError { - return - } - t.Fatal(err) - } - if sparkService.serviceName != test.expectedService.serviceName { - t.Errorf("%s: for service name wanted %s got %s", test.name, test.expectedService.serviceName, sparkService.serviceName) - } - service, err := fakeClient.CoreV1(). - Services(test.app.Namespace). - Get(context.TODO(), sparkService.serviceName, metav1.GetOptions{}) - if err != nil { - if test.expectError { - return - } - t.Fatal(err) - } - if service.Labels[config.SparkAppNameLabel] != test.app.Name { - t.Errorf("%s: service of app %s has the wrong labels", test.name, test.app.Name) - } - if !reflect.DeepEqual(test.expectedSelector, service.Spec.Selector) { - t.Errorf("%s: for label selector wanted %s got %s", test.name, test.expectedSelector, service.Spec.Selector) - } - if service.Spec.Type != test.expectedService.serviceType { - t.Errorf("%s: for service type wanted %s got %s", test.name, test.expectedService.serviceType, service.Spec.Type) - } - if len(service.Spec.Ports) != 1 { - t.Errorf("%s: wanted a single port got %d ports", test.name, len(service.Spec.Ports)) - } - port := service.Spec.Ports[0] - if port.Port != test.expectedService.servicePort { - t.Errorf("%s: unexpected port wanted %d got %d", test.name, test.expectedService.servicePort, port.Port) - } - if port.Name != test.expectedService.servicePortName { - t.Errorf("%s: unexpected port name wanted %s got %s", test.name, test.expectedService.servicePortName, port.Name) - } - serviceAnnotations := service.ObjectMeta.Annotations - if !reflect.DeepEqual(serviceAnnotations, test.expectedService.serviceAnnotations) { - t.Errorf("%s: unexpected annotations wanted %s got %s", test.name, test.expectedService.serviceAnnotations, serviceAnnotations) - } - serviceLabels := service.ObjectMeta.Labels - if !reflect.DeepEqual(serviceLabels, test.expectedService.serviceLabels) { - t.Errorf("%s: unexpected labels wanted %s got %s", test.name, test.expectedService.serviceLabels, serviceLabels) - } - } - defaultPort := defaultSparkWebUIPort - defaultPortName := defaultSparkWebUIPortName - app1 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo1", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - SparkConf: map[string]string{ - sparkUIPortConfigurationKey: "4041", - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-1", - ExecutionAttempts: 1, - }, - } - app2 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo2", - Namespace: "default", - UID: "foo-123", - }, - Status: 
v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-2", - ExecutionAttempts: 2, - }, - } - app3 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo3", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - SparkConf: map[string]string{ - sparkUIPortConfigurationKey: "4041x", - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-3", - }, - } - var appPort int32 = 80 - app4 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo4", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - SparkUIOptions: &v1beta2.SparkUIConfiguration{ - ServicePort: &appPort, - IngressAnnotations: nil, - IngressTLS: nil, - }, - SparkConf: map[string]string{ - sparkUIPortConfigurationKey: "4041", - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-3", - }, - } - var serviceTypeNodePort apiv1.ServiceType = apiv1.ServiceTypeNodePort - app5 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo5", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - SparkUIOptions: &v1beta2.SparkUIConfiguration{ - ServiceType: &serviceTypeNodePort, - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-2", - ExecutionAttempts: 2, - }, - } - appPortName := "http-spark-test" - app6 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo6", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - SparkUIOptions: &v1beta2.SparkUIConfiguration{ - ServicePort: &appPort, - ServicePortName: &appPortName, - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-6", - }, - } - app7 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo7", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - SparkUIOptions: &v1beta2.SparkUIConfiguration{ - ServiceAnnotations: map[string]string{ - "key": "value", - }, - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-7", - ExecutionAttempts: 1, - }, - } - app8 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo8", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - SparkUIOptions: &v1beta2.SparkUIConfiguration{ - ServiceLabels: map[string]string{ - "sparkoperator.k8s.io/app-name": "foo8", - "key": "value", - }, - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-8", - ExecutionAttempts: 1, - }, - } - testcases := []testcase{ - { - name: "service with custom serviceport and serviceport and target port are same", - app: app1, - expectedService: SparkService{ - serviceName: fmt.Sprintf("%s-ui-svc", app1.GetName()), - serviceType: apiv1.ServiceTypeClusterIP, - servicePortName: defaultPortName, - servicePort: 4041, - serviceLabels: map[string]string{ - "sparkoperator.k8s.io/app-name": "foo1", - }, - targetPort: intstr.IntOrString{ - Type: intstr.Int, - IntVal: int32(4041), - }, - }, - expectedSelector: map[string]string{ - config.SparkAppNameLabel: "foo1", - config.SparkRoleLabel: config.SparkDriverRole, - }, - expectError: false, - }, - { - name: "service with default port", - app: app2, - expectedService: SparkService{ - serviceName: fmt.Sprintf("%s-ui-svc", app2.GetName()), - serviceType: apiv1.ServiceTypeClusterIP, - servicePortName: defaultPortName, - servicePort: int32(defaultPort), - serviceLabels: map[string]string{ - 
"sparkoperator.k8s.io/app-name": "foo2", - }, - }, - expectedSelector: map[string]string{ - config.SparkAppNameLabel: "foo2", - config.SparkRoleLabel: config.SparkDriverRole, - }, - expectError: false, - }, - { - name: "service with custom serviceport and serviceport and target port are different", - app: app4, - expectedService: SparkService{ - serviceName: fmt.Sprintf("%s-ui-svc", app4.GetName()), - serviceType: apiv1.ServiceTypeClusterIP, - servicePortName: defaultPortName, - servicePort: 80, - serviceLabels: map[string]string{ - "sparkoperator.k8s.io/app-name": "foo4", - }, - targetPort: intstr.IntOrString{ - Type: intstr.Int, - IntVal: int32(4041), - }, - }, - expectedSelector: map[string]string{ - config.SparkAppNameLabel: "foo4", - config.SparkRoleLabel: config.SparkDriverRole, - }, - expectError: false, - }, - { - name: "service with custom servicetype", - app: app5, - expectedService: SparkService{ - serviceName: fmt.Sprintf("%s-ui-svc", app5.GetName()), - serviceType: apiv1.ServiceTypeNodePort, - servicePortName: defaultPortName, - servicePort: int32(defaultPort), - serviceLabels: map[string]string{ - "sparkoperator.k8s.io/app-name": "foo5", - }, - }, - expectedSelector: map[string]string{ - config.SparkAppNameLabel: "foo5", - config.SparkRoleLabel: config.SparkDriverRole, - }, - expectError: false, - }, - { - name: "service with custom serviceportname", - app: app6, - expectedService: SparkService{ - serviceName: fmt.Sprintf("%s-ui-svc", app6.GetName()), - serviceType: apiv1.ServiceTypeClusterIP, - servicePortName: "http-spark-test", - servicePort: int32(80), - serviceLabels: map[string]string{ - "sparkoperator.k8s.io/app-name": "foo6", - }, - }, - expectedSelector: map[string]string{ - config.SparkAppNameLabel: "foo6", - config.SparkRoleLabel: config.SparkDriverRole, - }, - expectError: false, - }, - { - name: "service with annotation", - app: app7, - expectedService: SparkService{ - serviceName: fmt.Sprintf("%s-ui-svc", app7.GetName()), - serviceType: apiv1.ServiceTypeClusterIP, - servicePortName: defaultPortName, - servicePort: defaultPort, - serviceAnnotations: map[string]string{ - "key": "value", - }, - serviceLabels: map[string]string{ - "sparkoperator.k8s.io/app-name": "foo7", - }, - targetPort: intstr.IntOrString{ - Type: intstr.Int, - IntVal: int32(4041), - }, - }, - expectedSelector: map[string]string{ - config.SparkAppNameLabel: "foo7", - config.SparkRoleLabel: config.SparkDriverRole, - }, - expectError: false, - }, - { - name: "service with custom labels", - app: app8, - expectedService: SparkService{ - serviceName: fmt.Sprintf("%s-ui-svc", app8.GetName()), - serviceType: apiv1.ServiceTypeClusterIP, - servicePortName: defaultPortName, - servicePort: defaultPort, - serviceLabels: map[string]string{ - "sparkoperator.k8s.io/app-name": "foo8", - "key": "value", - }, - targetPort: intstr.IntOrString{ - Type: intstr.Int, - IntVal: int32(4041), - }, - }, - expectedSelector: map[string]string{ - config.SparkAppNameLabel: "foo8", - config.SparkRoleLabel: config.SparkDriverRole, - }, - expectError: false, - }, - { - name: "service with bad port configurations", - app: app3, - expectError: true, - }, - } - for _, test := range testcases { - testFn(test, t) - } -} - -func TestCreateSparkUIIngress(t *testing.T) { - type testcase struct { - name string - app *v1beta2.SparkApplication - expectedIngress SparkIngress - expectError bool - } - - testFn := func(test testcase, t *testing.T, ingressURLFormat string, ingressClassName string) { - fakeClient := fake.NewSimpleClientset() - 
sparkService, err := createSparkUIService(test.app, fakeClient) - if err != nil { - t.Fatal(err) - } - ingressURL, err := getDriverIngressURL(ingressURLFormat, test.app.Name, test.app.Namespace) - if err != nil { - t.Fatal(err) - } - sparkIngress, err := createSparkUIIngress(test.app, *sparkService, ingressURL, ingressClassName, fakeClient) - if err != nil { - if test.expectError { - return - } - t.Fatal(err) - } - if sparkIngress.ingressName != test.expectedIngress.ingressName { - t.Errorf("Ingress name wanted %s got %s", test.expectedIngress.ingressName, sparkIngress.ingressName) - } - if sparkIngress.ingressURL.String() != test.expectedIngress.ingressURL.String() { - t.Errorf("Ingress URL wanted %s got %s", test.expectedIngress.ingressURL, sparkIngress.ingressURL) - } - ingress, err := fakeClient.NetworkingV1().Ingresses(test.app.Namespace). - Get(context.TODO(), sparkIngress.ingressName, metav1.GetOptions{}) - if err != nil { - t.Fatal(err) - } - if len(ingress.Annotations) != 0 { - for key, value := range ingress.Annotations { - if test.expectedIngress.annotations[key] != ingress.Annotations[key] { - t.Errorf("Expected annotation: %s=%s but found : %s=%s", key, value, key, ingress.Annotations[key]) - } - } - } - if len(ingress.Spec.TLS) != 0 { - for _, ingressTls := range ingress.Spec.TLS { - if ingressTls.Hosts[0] != test.expectedIngress.ingressTLS[0].Hosts[0] { - t.Errorf("Expected ingressTls host: %s but found : %s", test.expectedIngress.ingressTLS[0].Hosts[0], ingressTls.Hosts[0]) - } - if ingressTls.SecretName != test.expectedIngress.ingressTLS[0].SecretName { - t.Errorf("Expected ingressTls secretName: %s but found : %s", test.expectedIngress.ingressTLS[0].SecretName, ingressTls.SecretName) - } - } - } - if ingress.Labels[config.SparkAppNameLabel] != test.app.Name { - t.Errorf("Ingress of app %s has the wrong labels", test.app.Name) - } - - if len(ingress.Spec.Rules) != 1 { - t.Errorf("No Ingress rules found.") - } - ingressRule := ingress.Spec.Rules[0] - // If we have a path, then the ingress adds capture groups - if ingressRule.IngressRuleValue.HTTP.Paths[0].Path != "" && ingressRule.IngressRuleValue.HTTP.Paths[0].Path != "/" { - test.expectedIngress.ingressURL.Path = test.expectedIngress.ingressURL.Path + "(/|$)(.*)" - } - if ingressRule.Host+ingressRule.IngressRuleValue.HTTP.Paths[0].Path != test.expectedIngress.ingressURL.Host+test.expectedIngress.ingressURL.Path { - - t.Errorf("Ingress of app %s has the wrong host %s", ingressRule.Host+ingressRule.IngressRuleValue.HTTP.Paths[0].Path, test.expectedIngress.ingressURL.Host+test.expectedIngress.ingressURL.Path) - } - - if len(ingressRule.IngressRuleValue.HTTP.Paths) != 1 { - t.Errorf("No Ingress paths found.") - } - ingressPath := ingressRule.IngressRuleValue.HTTP.Paths[0] - if ingressPath.Backend.Service.Name != sparkService.serviceName { - t.Errorf("Service name wanted %s got %s", sparkService.serviceName, ingressPath.Backend.Service.Name) - } - if *ingressPath.PathType != networkingv1.PathTypeImplementationSpecific { - t.Errorf("PathType wanted %s got %s", networkingv1.PathTypeImplementationSpecific, *ingressPath.PathType) - } - if ingressPath.Backend.Service.Port.Number != sparkService.servicePort { - t.Errorf("Service port wanted %v got %v", sparkService.servicePort, ingressPath.Backend.Service.Port.Number) - } - } - - var appPort int32 = 80 - app1 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - UID: "foo-123", - }, - Status: v1beta2.SparkApplicationStatus{ - 
SparkApplicationID: "foo-1", - DriverInfo: v1beta2.DriverInfo{ - WebUIServiceName: "blah-service", - }, - }, - } - app2 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - SparkUIOptions: &v1beta2.SparkUIConfiguration{ - ServicePort: &appPort, - IngressAnnotations: map[string]string{ - "kubernetes.io/ingress.class": "nginx", - "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", - }, - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-1", - DriverInfo: v1beta2.DriverInfo{ - WebUIServiceName: "blah-service", - }, - }, - } - app3 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - SparkUIOptions: &v1beta2.SparkUIConfiguration{ - ServicePort: &appPort, - IngressAnnotations: map[string]string{ - "kubernetes.io/ingress.class": "nginx", - "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", - }, - IngressTLS: []networkingv1.IngressTLS{ - {Hosts: []string{"host1", "host2"}, SecretName: "secret"}, - }, - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-1", - DriverInfo: v1beta2.DriverInfo{ - WebUIServiceName: "blah-service", - }, - }, - } - app4 := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - UID: "foo-123", - }, - Spec: v1beta2.SparkApplicationSpec{ - SparkUIOptions: &v1beta2.SparkUIConfiguration{ - ServicePort: &appPort, - IngressAnnotations: map[string]string{ - "kubernetes.io/ingress.class": "nginx", - }, - IngressTLS: []networkingv1.IngressTLS{ - {Hosts: []string{"host1", "host2"}, SecretName: ""}, - }, - }, - }, - Status: v1beta2.SparkApplicationStatus{ - SparkApplicationID: "foo-1", - DriverInfo: v1beta2.DriverInfo{ - WebUIServiceName: "blah-service", - }, - }, - } - - testcases := []testcase{ - { - name: "simple ingress object", - app: app1, - expectedIngress: SparkIngress{ - ingressName: fmt.Sprintf("%s-ui-ingress", app1.GetName()), - ingressURL: parseURLAndAssertError(app1.GetName()+".ingress.clusterName.com", t), - }, - expectError: false, - }, - { - name: "ingress with annotations and without tls configuration", - app: app2, - expectedIngress: SparkIngress{ - ingressName: fmt.Sprintf("%s-ui-ingress", app2.GetName()), - ingressURL: parseURLAndAssertError(app2.GetName()+".ingress.clusterName.com", t), - annotations: map[string]string{ - "kubernetes.io/ingress.class": "nginx", - "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", - }, - }, - expectError: false, - }, - { - name: "ingress with annotations and tls configuration", - app: app3, - expectedIngress: SparkIngress{ - ingressName: fmt.Sprintf("%s-ui-ingress", app3.GetName()), - ingressURL: parseURLAndAssertError(app3.GetName()+".ingress.clusterName.com", t), - annotations: map[string]string{ - "kubernetes.io/ingress.class": "nginx", - "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", - }, - ingressTLS: []networkingv1.IngressTLS{ - {Hosts: []string{"host1", "host2"}, SecretName: "secret"}, - }, - }, - expectError: false, - }, - { - name: "ingress with incomplete list of annotations", - app: app4, - expectedIngress: SparkIngress{ - ingressName: fmt.Sprintf("%s-ui-ingress", app4.GetName()), - ingressURL: parseURLAndAssertError(app3.GetName()+".ingress.clusterName.com", t), - annotations: map[string]string{ - "kubernetes.io/ingress.class": "nginx", - 
"nginx.ingress.kubernetes.io/force-ssl-redirect": "true", - }, - ingressTLS: []networkingv1.IngressTLS{ - {Hosts: []string{"host1", "host2"}, SecretName: ""}, - }, - }, - expectError: true, - }, - } - - for _, test := range testcases { - testFn(test, t, "{{$appName}}.ingress.clusterName.com", "") - } - - testcases = []testcase{ - { - name: "simple ingress object with ingress URL Format with path", - app: app1, - expectedIngress: SparkIngress{ - ingressName: fmt.Sprintf("%s-ui-ingress", app1.GetName()), - ingressURL: parseURLAndAssertError("ingress.clusterName.com/"+app1.GetNamespace()+"/"+app1.GetName(), t), - annotations: map[string]string{ - "nginx.ingress.kubernetes.io/rewrite-target": "/$2", - }, - }, - expectError: false, - }, - } - - for _, test := range testcases { - testFn(test, t, "ingress.clusterName.com/{{$appNamespace}}/{{$appName}}", "") - } - - testcases = []testcase{ - { - name: "simple ingress object with ingressClassName set", - app: app1, - expectedIngress: SparkIngress{ - ingressName: fmt.Sprintf("%s-ui-ingress", app1.GetName()), - ingressURL: parseURLAndAssertError(app1.GetName()+".ingress.clusterName.com", t), - ingressClassName: "nginx", - }, - expectError: false, - }, - } - for _, test := range testcases { - testFn(test, t, "{{$appName}}.ingress.clusterName.com", "nginx") - } -} - -func parseURLAndAssertError(testURL string, t *testing.T) *url.URL { - fallbackURL, _ := url.Parse("http://example.com") - parsedURL, err := url.Parse(testURL) - if err != nil { - t.Errorf("failed to parse the url: %s", testURL) - return fallbackURL - } - if parsedURL.Scheme == "" { - //url does not contain any scheme, adding http:// so url.Parse can function correctly - parsedURL, err = url.Parse("http://" + testURL) - if err != nil { - t.Errorf("failed to parse the url: %s", testURL) - return fallbackURL - } - } - return parsedURL -} diff --git a/pkg/controller/sparkapplication/submission.go b/pkg/controller/sparkapplication/submission.go deleted file mode 100644 index 2f3fe1dd7b..0000000000 --- a/pkg/controller/sparkapplication/submission.go +++ /dev/null @@ -1,532 +0,0 @@ -/* -Copyright 2017 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package sparkapplication - -import ( - "fmt" - "os" - "os/exec" - "path/filepath" - "reflect" - "strings" - - "github.com/golang/glog" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - "github.com/kubeflow/spark-operator/pkg/config" -) - -const ( - sparkHomeEnvVar = "SPARK_HOME" - kubernetesServiceHostEnvVar = "KUBERNETES_SERVICE_HOST" - kubernetesServicePortEnvVar = "KUBERNETES_SERVICE_PORT" -) - -// submission includes information of a Spark application to be submitted. 
-type submission struct { - namespace string - name string - args []string -} - -func newSubmission(args []string, app *v1beta2.SparkApplication) *submission { - return &submission{ - namespace: app.Namespace, - name: app.Name, - args: args, - } -} - -func runSparkSubmit(submission *submission) (bool, error) { - sparkHome, present := os.LookupEnv(sparkHomeEnvVar) - if !present { - glog.Error("SPARK_HOME is not specified") - } - command := filepath.Join(sparkHome, "/bin/spark-submit") - - cmd := execCommand(command, submission.args...) - glog.V(2).Infof("spark-submit arguments: %v", cmd.Args) - output, err := cmd.Output() - glog.V(3).Infof("spark-submit output: %s", string(output)) - if err != nil { - var errorMsg string - if exitErr, ok := err.(*exec.ExitError); ok { - errorMsg = string(exitErr.Stderr) - } - // The driver pod of the application already exists. - if strings.Contains(errorMsg, podAlreadyExistsErrorCode) { - glog.Warningf("trying to resubmit an already submitted SparkApplication %s/%s", submission.namespace, submission.name) - return false, nil - } - if errorMsg != "" { - return false, fmt.Errorf("failed to run spark-submit for SparkApplication %s/%s: %s", submission.namespace, submission.name, errorMsg) - } - return false, fmt.Errorf("failed to run spark-submit for SparkApplication %s/%s: %v", submission.namespace, submission.name, err) - } - - return true, nil -} - -func buildSubmissionCommandArgs(app *v1beta2.SparkApplication, driverPodName string, submissionID string) ([]string, error) { - var args []string - if app.Spec.MainClass != nil { - args = append(args, "--class", *app.Spec.MainClass) - } - masterURL, err := getMasterURL() - if err != nil { - return nil, err - } - - args = append(args, "--master", masterURL) - args = append(args, "--deploy-mode", string(app.Spec.Mode)) - - // Add proxy user - if app.Spec.ProxyUser != nil { - args = append(args, "--proxy-user", *app.Spec.ProxyUser) - } - - args = append(args, "--conf", fmt.Sprintf("%s=%s", config.SparkAppNamespaceKey, app.Namespace)) - args = append(args, "--conf", fmt.Sprintf("%s=%s", config.SparkAppNameKey, app.Name)) - args = append(args, "--conf", fmt.Sprintf("%s=%s", config.SparkDriverPodNameKey, driverPodName)) - - // Add application dependencies. - args = append(args, addDependenciesConfOptions(app)...) - - if app.Spec.Image != nil { - args = append(args, "--conf", - fmt.Sprintf("%s=%s", config.SparkContainerImageKey, *app.Spec.Image)) - } - if app.Spec.ImagePullPolicy != nil { - args = append(args, "--conf", - fmt.Sprintf("%s=%s", config.SparkContainerImagePullPolicyKey, *app.Spec.ImagePullPolicy)) - } - if len(app.Spec.ImagePullSecrets) > 0 { - secretNames := strings.Join(app.Spec.ImagePullSecrets, ",") - args = append(args, "--conf", fmt.Sprintf("%s=%s", config.SparkImagePullSecretKey, secretNames)) - } - if app.Spec.PythonVersion != nil { - args = append(args, "--conf", - fmt.Sprintf("%s=%s", config.SparkPythonVersion, *app.Spec.PythonVersion)) - } - if app.Spec.MemoryOverheadFactor != nil { - args = append(args, "--conf", - fmt.Sprintf("%s=%s", config.SparkMemoryOverheadFactor, *app.Spec.MemoryOverheadFactor)) - } - - // Operator triggered spark-submit should never wait for App completion - args = append(args, "--conf", fmt.Sprintf("%s=false", config.SparkWaitAppCompletion)) - - // Add Spark configuration properties. - for key, value := range app.Spec.SparkConf { - // Configuration property for the driver pod name has already been set. 
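For orientation, the argument list being assembled here ends up as an ordinary spark-submit command line. A fabricated minimal example (class, master address, jar path, and the exact set of --conf pairs are illustrative only; the real builder adds many more):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// A hand-rolled approximation of the output of buildSubmissionCommandArgs
	// for a minimal cluster-mode application.
	args := []string{
		"--class", "org.example.SparkPi", // hypothetical main class
		"--master", "k8s://https://10.0.0.1:443",
		"--deploy-mode", "cluster",
		"--conf", "spark.kubernetes.namespace=default",
		"--conf", "spark.kubernetes.driver.pod.name=spark-pi-driver",
		"local:///opt/spark/examples/jars/spark-examples.jar", // hypothetical jar
	}
	fmt.Println("spark-submit " + strings.Join(args, " "))
}
```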
- if key != config.SparkDriverPodNameKey { - args = append(args, "--conf", fmt.Sprintf("%s=%s", key, value)) - } - } - - // Add Hadoop configuration properties. - for key, value := range app.Spec.HadoopConf { - args = append(args, "--conf", fmt.Sprintf("spark.hadoop.%s=%s", key, value)) - } - - // Add the driver and executor configuration options. - // Note that when the controller submits the application, it expects that all dependencies are local - // so init-container is not needed and therefore no init-container image needs to be specified. - options, err := addDriverConfOptions(app, submissionID) - if err != nil { - return nil, err - } - for _, option := range options { - args = append(args, "--conf", option) - } - options, err = addExecutorConfOptions(app, submissionID) - if err != nil { - return nil, err - } - for _, option := range options { - args = append(args, "--conf", option) - } - - options = addDynamicAllocationConfOptions(app) - for _, option := range options { - args = append(args, "--conf", option) - } - - for key, value := range app.Spec.NodeSelector { - conf := fmt.Sprintf("%s%s=%s", config.SparkNodeSelectorKeyPrefix, key, value) - args = append(args, "--conf", conf) - } - - if app.Spec.Volumes != nil { - options, err = addLocalDirConfOptions(app) - if err != nil { - return nil, err - } - - for _, option := range options { - args = append(args, "--conf", option) - } - } - - if app.Spec.MainApplicationFile != nil { - // Add the main application file if it is present. - args = append(args, *app.Spec.MainApplicationFile) - } - - // Add application arguments. - for _, argument := range app.Spec.Arguments { - args = append(args, argument) - } - - return args, nil -} - -func getMasterURL() (string, error) { - kubernetesServiceHost := os.Getenv(kubernetesServiceHostEnvVar) - if kubernetesServiceHost == "" { - return "", fmt.Errorf("environment variable %s is not found", kubernetesServiceHostEnvVar) - } - kubernetesServicePort := os.Getenv(kubernetesServicePortEnvVar) - if kubernetesServicePort == "" { - return "", fmt.Errorf("environment variable %s is not found", kubernetesServicePortEnvVar) - } - // check if the host is IPv6 address - if strings.Contains(kubernetesServiceHost, ":") && !strings.HasPrefix(kubernetesServiceHost, "[") { - return fmt.Sprintf("k8s://https://[%s]:%s", kubernetesServiceHost, kubernetesServicePort), nil - } - return fmt.Sprintf("k8s://https://%s:%s", kubernetesServiceHost, kubernetesServicePort), nil -} - -func getOwnerReference(app *v1beta2.SparkApplication) *metav1.OwnerReference { - controller := true - return &metav1.OwnerReference{ - APIVersion: v1beta2.SchemeGroupVersion.String(), - Kind: reflect.TypeOf(v1beta2.SparkApplication{}).Name(), - Name: app.Name, - UID: app.UID, - Controller: &controller, - } -} - -func addDependenciesConfOptions(app *v1beta2.SparkApplication) []string { - var depsConfOptions []string - - if len(app.Spec.Deps.Jars) > 0 { - depsConfOptions = append(depsConfOptions, "--jars", strings.Join(app.Spec.Deps.Jars, ",")) - } - if len(app.Spec.Deps.Files) > 0 { - depsConfOptions = append(depsConfOptions, "--files", strings.Join(app.Spec.Deps.Files, ",")) - } - if len(app.Spec.Deps.PyFiles) > 0 { - depsConfOptions = append(depsConfOptions, "--py-files", strings.Join(app.Spec.Deps.PyFiles, ",")) - } - if len(app.Spec.Deps.Packages) > 0 { - depsConfOptions = append(depsConfOptions, "--packages", strings.Join(app.Spec.Deps.Packages, ",")) - } - if len(app.Spec.Deps.ExcludePackages) > 0 { - depsConfOptions = append(depsConfOptions, 
"--exclude-packages", strings.Join(app.Spec.Deps.ExcludePackages, ",")) - } - if len(app.Spec.Deps.Repositories) > 0 { - depsConfOptions = append(depsConfOptions, "--repositories", strings.Join(app.Spec.Deps.Repositories, ",")) - } - - return depsConfOptions -} - -func addDriverConfOptions(app *v1beta2.SparkApplication, submissionID string) ([]string, error) { - var driverConfOptions []string - - driverConfOptions = append(driverConfOptions, - fmt.Sprintf("%s%s=%s", config.SparkDriverLabelKeyPrefix, config.SparkAppNameLabel, app.Name)) - driverConfOptions = append(driverConfOptions, - fmt.Sprintf("%s%s=%s", config.SparkDriverLabelKeyPrefix, config.LaunchedBySparkOperatorLabel, "true")) - driverConfOptions = append(driverConfOptions, - fmt.Sprintf("%s%s=%s", config.SparkDriverLabelKeyPrefix, config.SubmissionIDLabel, submissionID)) - - if app.Spec.Driver.Image != nil { - driverConfOptions = append(driverConfOptions, - fmt.Sprintf("%s=%s", config.SparkDriverContainerImageKey, *app.Spec.Driver.Image)) - } - - if app.Spec.Driver.Cores != nil { - driverConfOptions = append(driverConfOptions, - fmt.Sprintf("spark.driver.cores=%d", *app.Spec.Driver.Cores)) - } - if app.Spec.Driver.CoreRequest != nil { - driverConfOptions = append(driverConfOptions, - fmt.Sprintf("%s=%s", config.SparkDriverCoreRequestKey, *app.Spec.Driver.CoreRequest)) - } - if app.Spec.Driver.CoreLimit != nil { - driverConfOptions = append(driverConfOptions, - fmt.Sprintf("%s=%s", config.SparkDriverCoreLimitKey, *app.Spec.Driver.CoreLimit)) - } - if app.Spec.Driver.Memory != nil { - driverConfOptions = append(driverConfOptions, - fmt.Sprintf("spark.driver.memory=%s", *app.Spec.Driver.Memory)) - } - if app.Spec.Driver.MemoryOverhead != nil { - driverConfOptions = append(driverConfOptions, - fmt.Sprintf("spark.driver.memoryOverhead=%s", *app.Spec.Driver.MemoryOverhead)) - } - - if app.Spec.Driver.ServiceAccount != nil { - driverConfOptions = append(driverConfOptions, - fmt.Sprintf("%s=%s", config.SparkDriverServiceAccountName, *app.Spec.Driver.ServiceAccount)) - } - - if app.Spec.Driver.JavaOptions != nil { - driverConfOptions = append(driverConfOptions, - fmt.Sprintf("%s=%s", config.SparkDriverJavaOptions, *app.Spec.Driver.JavaOptions)) - } - - if app.Spec.Driver.KubernetesMaster != nil { - driverConfOptions = append(driverConfOptions, - fmt.Sprintf("%s=%s", config.SparkDriverKubernetesMaster, *app.Spec.Driver.KubernetesMaster)) - } - - // Populate SparkApplication Labels to Driver - driverLabels := make(map[string]string) - for key, value := range app.Labels { - driverLabels[key] = value - } - for key, value := range app.Spec.Driver.Labels { - driverLabels[key] = value - } - - for key, value := range driverLabels { - driverConfOptions = append(driverConfOptions, - fmt.Sprintf("%s%s=%s", config.SparkDriverLabelKeyPrefix, key, value)) - } - - for key, value := range app.Spec.Driver.Annotations { - driverConfOptions = append(driverConfOptions, - fmt.Sprintf("%s%s=%s", config.SparkDriverAnnotationKeyPrefix, key, value)) - } - - for key, value := range app.Spec.Driver.EnvSecretKeyRefs { - driverConfOptions = append(driverConfOptions, - fmt.Sprintf("%s%s=%s:%s", config.SparkDriverSecretKeyRefKeyPrefix, key, value.Name, value.Key)) - } - - for key, value := range app.Spec.Driver.ServiceAnnotations { - driverConfOptions = append(driverConfOptions, - fmt.Sprintf("%s%s=%s", config.SparkDriverServiceAnnotationKeyPrefix, key, value)) - } - - for key, value := range app.Spec.Driver.ServiceLabels { - driverConfOptions = 
append(driverConfOptions, - fmt.Sprintf("%s%s=%s", config.SparkDriverServiceLabelKeyPrefix, key, value)) - } - - driverConfOptions = append(driverConfOptions, config.GetDriverSecretConfOptions(app)...) - driverConfOptions = append(driverConfOptions, config.GetDriverEnvVarConfOptions(app)...) - - return driverConfOptions, nil -} - -func addExecutorConfOptions(app *v1beta2.SparkApplication, submissionID string) ([]string, error) { - var executorConfOptions []string - - executorConfOptions = append(executorConfOptions, - fmt.Sprintf("%s%s=%s", config.SparkExecutorLabelKeyPrefix, config.SparkAppNameLabel, app.Name)) - executorConfOptions = append(executorConfOptions, - fmt.Sprintf("%s%s=%s", config.SparkExecutorLabelKeyPrefix, config.LaunchedBySparkOperatorLabel, "true")) - executorConfOptions = append(executorConfOptions, - fmt.Sprintf("%s%s=%s", config.SparkExecutorLabelKeyPrefix, config.SubmissionIDLabel, submissionID)) - - if app.Spec.Executor.Instances != nil { - conf := fmt.Sprintf("spark.executor.instances=%d", *app.Spec.Executor.Instances) - executorConfOptions = append(executorConfOptions, conf) - } - - if app.Spec.Executor.Image != nil { - executorConfOptions = append(executorConfOptions, - fmt.Sprintf("%s=%s", config.SparkExecutorContainerImageKey, *app.Spec.Executor.Image)) - } - - if app.Spec.Executor.Cores != nil { - // Property "spark.executor.cores" does not allow float values. - executorConfOptions = append(executorConfOptions, - fmt.Sprintf("spark.executor.cores=%d", int32(*app.Spec.Executor.Cores))) - } - if app.Spec.Executor.CoreRequest != nil { - executorConfOptions = append(executorConfOptions, - fmt.Sprintf("%s=%s", config.SparkExecutorCoreRequestKey, *app.Spec.Executor.CoreRequest)) - } - if app.Spec.Executor.CoreLimit != nil { - executorConfOptions = append(executorConfOptions, - fmt.Sprintf("%s=%s", config.SparkExecutorCoreLimitKey, *app.Spec.Executor.CoreLimit)) - } - if app.Spec.Executor.Memory != nil { - executorConfOptions = append(executorConfOptions, - fmt.Sprintf("spark.executor.memory=%s", *app.Spec.Executor.Memory)) - } - if app.Spec.Executor.MemoryOverhead != nil { - executorConfOptions = append(executorConfOptions, - fmt.Sprintf("spark.executor.memoryOverhead=%s", *app.Spec.Executor.MemoryOverhead)) - } - - if app.Spec.Executor.ServiceAccount != nil { - executorConfOptions = append(executorConfOptions, - fmt.Sprintf("%s=%s", config.SparkExecutorAccountName, *app.Spec.Executor.ServiceAccount)) - } - - if app.Spec.Executor.DeleteOnTermination != nil { - executorConfOptions = append(executorConfOptions, - fmt.Sprintf("%s=%t", config.SparkExecutorDeleteOnTermination, *app.Spec.Executor.DeleteOnTermination)) - } - - // Populate SparkApplication Labels to Executors - executorLabels := make(map[string]string) - for key, value := range app.Labels { - executorLabels[key] = value - } - for key, value := range app.Spec.Executor.Labels { - executorLabels[key] = value - } - for key, value := range executorLabels { - executorConfOptions = append(executorConfOptions, - fmt.Sprintf("%s%s=%s", config.SparkExecutorLabelKeyPrefix, key, value)) - } - - for key, value := range app.Spec.Executor.Annotations { - executorConfOptions = append(executorConfOptions, - fmt.Sprintf("%s%s=%s", config.SparkExecutorAnnotationKeyPrefix, key, value)) - } - - for key, value := range app.Spec.Executor.EnvSecretKeyRefs { - executorConfOptions = append(executorConfOptions, - fmt.Sprintf("%s%s=%s:%s", config.SparkExecutorSecretKeyRefKeyPrefix, key, value.Name, value.Key)) - } - - if 
app.Spec.Executor.JavaOptions != nil {
- executorConfOptions = append(executorConfOptions,
- fmt.Sprintf("%s=%s", config.SparkExecutorJavaOptions, *app.Spec.Executor.JavaOptions))
- }
-
- executorConfOptions = append(executorConfOptions, config.GetExecutorSecretConfOptions(app)...)
- executorConfOptions = append(executorConfOptions, config.GetExecutorEnvVarConfOptions(app)...)
-
- return executorConfOptions, nil
-}
-
-func addDynamicAllocationConfOptions(app *v1beta2.SparkApplication) []string {
- if app.Spec.DynamicAllocation == nil {
- return nil
- }
-
- dynamicAllocation := app.Spec.DynamicAllocation
- if !dynamicAllocation.Enabled {
- return nil
- }
-
- var options []string
- options = append(options, fmt.Sprintf("%s=true", config.SparkDynamicAllocationEnabled))
- // Turn on shuffle tracking if dynamic allocation is enabled.
- options = append(options, fmt.Sprintf("%s=true", config.SparkDynamicAllocationShuffleTrackingEnabled))
- if dynamicAllocation.InitialExecutors != nil {
- options = append(options, fmt.Sprintf("%s=%d", config.SparkDynamicAllocationInitialExecutors, *dynamicAllocation.InitialExecutors))
- }
- if dynamicAllocation.MinExecutors != nil {
- options = append(options, fmt.Sprintf("%s=%d", config.SparkDynamicAllocationMinExecutors, *dynamicAllocation.MinExecutors))
- }
- if dynamicAllocation.MaxExecutors != nil {
- options = append(options, fmt.Sprintf("%s=%d", config.SparkDynamicAllocationMaxExecutors, *dynamicAllocation.MaxExecutors))
- }
- if dynamicAllocation.ShuffleTrackingTimeout != nil {
- options = append(options, fmt.Sprintf("%s=%d", config.SparkDynamicAllocationShuffleTrackingTimeout, *dynamicAllocation.ShuffleTrackingTimeout))
- }
-
- return options
-}
-
-// addLocalDirConfOptions filters out local dir volumes, updates the SparkApplication accordingly, and returns the local dir config options
-func addLocalDirConfOptions(app *v1beta2.SparkApplication) ([]string, error) {
- var localDirConfOptions []string
-
- sparkLocalVolumes := map[string]v1.Volume{}
- var mutateVolumes []v1.Volume
-
- // Filter local dir volumes
- for _, volume := range app.Spec.Volumes {
- if strings.HasPrefix(volume.Name, config.SparkLocalDirVolumePrefix) {
- sparkLocalVolumes[volume.Name] = volume
- } else {
- mutateVolumes = append(mutateVolumes, volume)
- }
- }
- app.Spec.Volumes = mutateVolumes
-
- // Filter local dir volumeMounts and set the remaining volume mounts back on the driver and executor
- if app.Spec.Driver.VolumeMounts != nil {
- driverMutateVolumeMounts, driverLocalDirConfConfOptions := filterMutateMountVolumes(app.Spec.Driver.VolumeMounts, config.SparkDriverVolumesPrefix, sparkLocalVolumes)
- app.Spec.Driver.VolumeMounts = driverMutateVolumeMounts
- localDirConfOptions = append(localDirConfOptions, driverLocalDirConfConfOptions...)
- }
-
- if app.Spec.Executor.VolumeMounts != nil {
- executorMutateVolumeMounts, executorLocalDirConfConfOptions := filterMutateMountVolumes(app.Spec.Executor.VolumeMounts, config.SparkExecutorVolumesPrefix, sparkLocalVolumes)
- app.Spec.Executor.VolumeMounts = executorMutateVolumeMounts
- localDirConfOptions = append(localDirConfOptions, executorLocalDirConfConfOptions...)
- } - - return localDirConfOptions, nil -} - -func filterMutateMountVolumes(volumeMounts []v1.VolumeMount, prefix string, sparkLocalVolumes map[string]v1.Volume) ([]v1.VolumeMount, []string) { - var mutateMountVolumes []v1.VolumeMount - var localDirConfOptions []string - for _, volumeMount := range volumeMounts { - if volume, ok := sparkLocalVolumes[volumeMount.Name]; ok { - options := buildLocalVolumeOptions(prefix, volume, volumeMount) - for _, option := range options { - localDirConfOptions = append(localDirConfOptions, option) - } - } else { - mutateMountVolumes = append(mutateMountVolumes, volumeMount) - } - } - - return mutateMountVolumes, localDirConfOptions -} - -func buildLocalVolumeOptions(prefix string, volume v1.Volume, volumeMount v1.VolumeMount) []string { - VolumeMountPathTemplate := prefix + "%s.%s.mount.path=%s" - VolumeMountOptionTemplate := prefix + "%s.%s.options.%s=%s" - - var options []string - switch { - case volume.HostPath != nil: - options = append(options, fmt.Sprintf(VolumeMountPathTemplate, "hostPath", volume.Name, volumeMount.MountPath)) - options = append(options, fmt.Sprintf(VolumeMountOptionTemplate, "hostPath", volume.Name, "path", volume.HostPath.Path)) - if volume.HostPath.Type != nil { - options = append(options, fmt.Sprintf(VolumeMountOptionTemplate, "hostPath", volume.Name, "type", *volume.HostPath.Type)) - } - case volume.EmptyDir != nil: - options = append(options, fmt.Sprintf(VolumeMountPathTemplate, "emptyDir", volume.Name, volumeMount.MountPath)) - if volume.EmptyDir.SizeLimit != nil { - options = append(options, fmt.Sprintf(VolumeMountOptionTemplate, "emptyDir", volume.Name, "sizeLimit", volume.EmptyDir.SizeLimit.String())) - } - case volume.PersistentVolumeClaim != nil: - options = append(options, fmt.Sprintf(VolumeMountPathTemplate, "persistentVolumeClaim", volume.Name, volumeMount.MountPath)) - options = append(options, fmt.Sprintf(VolumeMountOptionTemplate, "persistentVolumeClaim", volume.Name, "claimName", volume.PersistentVolumeClaim.ClaimName)) - } - - return options -} diff --git a/pkg/controller/sparkapplication/submission_test.go b/pkg/controller/sparkapplication/submission_test.go deleted file mode 100644 index 16c6a17161..0000000000 --- a/pkg/controller/sparkapplication/submission_test.go +++ /dev/null @@ -1,695 +0,0 @@ -/* -Copyright 2017 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package sparkapplication - -import ( - "fmt" - "os" - "reflect" - "sort" - "strconv" - "testing" - - "github.com/google/uuid" - "github.com/stretchr/testify/assert" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - "github.com/kubeflow/spark-operator/pkg/config" -) - -const ( - VolumeMountPathTemplate = "spark.kubernetes.%s.volumes.%s.%s.mount.path=%s" - VolumeMountOptionPathTemplate = "spark.kubernetes.%s.volumes.%s.%s.options.%s=%s" - SparkDriverLabelAnnotationTemplate = "spark.kubernetes.driver.label.sparkoperator.k8s.io/%s=%s" - SparkDriverLabelTemplate = "spark.kubernetes.driver.label.%s=%s" - SparkDriverServiceLabelTemplate = "spark.kubernetes.driver.service.label.%s=%s" - SparkExecutorLabelAnnotationTemplate = "spark.kubernetes.executor.label.sparkoperator.k8s.io/%s=%s" - SparkExecutorLabelTemplate = "spark.kubernetes.executor.label.%s=%s" -) - -func TestAddLocalDir_HostPath(t *testing.T) { - volumes := []corev1.Volume{ - { - Name: "spark-local-dir-1", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/tmp/mnt", - }, - }, - }, - } - - volumeMounts := []corev1.VolumeMount{ - { - Name: "spark-local-dir-1", - MountPath: "/tmp/mnt-1", - }, - } - - app := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "spark-test", - UID: "spark-test-1", - }, - Spec: v1beta2.SparkApplicationSpec{ - Volumes: volumes, - Driver: v1beta2.DriverSpec{ - SparkPodSpec: v1beta2.SparkPodSpec{ - VolumeMounts: volumeMounts, - }, - }, - }, - } - - localDirOptions, err := addLocalDirConfOptions(app) - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, 0, len(app.Spec.Volumes)) - assert.Equal(t, 0, len(app.Spec.Driver.VolumeMounts)) - assert.Equal(t, 2, len(localDirOptions)) - assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "driver", "hostPath", volumes[0].Name, volumeMounts[0].MountPath), localDirOptions[0]) - assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "driver", "hostPath", volumes[0].Name, "path", volumes[0].HostPath.Path), localDirOptions[1]) -} - -func TestAddLocalDir_PVC(t *testing.T) { - volumes := []corev1.Volume{ - { - Name: "spark-local-dir-1", - VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ - ClaimName: "/tmp/mnt-1", - }, - }, - }, - } - - volumeMounts := []corev1.VolumeMount{ - { - Name: "spark-local-dir-1", - MountPath: "/tmp/mnt-1", - }, - } - - app := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "spark-test", - UID: "spark-test-1", - }, - Spec: v1beta2.SparkApplicationSpec{ - Volumes: volumes, - Driver: v1beta2.DriverSpec{ - SparkPodSpec: v1beta2.SparkPodSpec{ - VolumeMounts: volumeMounts, - }, - }, - }, - } - - localDirOptions, err := addLocalDirConfOptions(app) - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, 0, len(app.Spec.Volumes)) - assert.Equal(t, 0, len(app.Spec.Driver.VolumeMounts)) - assert.Equal(t, 2, len(localDirOptions)) - assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "driver", "persistentVolumeClaim", volumes[0].Name, volumeMounts[0].MountPath), localDirOptions[0]) - assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "driver", "persistentVolumeClaim", volumes[0].Name, "claimName", volumes[0].PersistentVolumeClaim.ClaimName), localDirOptions[1]) -} - -func TestAddLocalDir_MixedVolumes(t *testing.T) { - volumes := []corev1.Volume{ - { - Name: 
"spark-local-dir-1", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/tmp/mnt-1", - }, - }, - }, - { - Name: "log-dir", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/var/log/spark", - }, - }, - }, - } - - volumeMounts := []corev1.VolumeMount{ - { - Name: "spark-local-dir-1", - MountPath: "/tmp/mnt-1", - }, - { - Name: "log-dir", - MountPath: "/var/log/spark", - }, - } - - app := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "spark-test", - UID: "spark-test-1", - }, - Spec: v1beta2.SparkApplicationSpec{ - Volumes: volumes, - Driver: v1beta2.DriverSpec{ - SparkPodSpec: v1beta2.SparkPodSpec{ - VolumeMounts: volumeMounts, - }, - }, - }, - } - - localDirOptions, err := addLocalDirConfOptions(app) - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, 1, len(app.Spec.Volumes)) - assert.Equal(t, 1, len(app.Spec.Driver.VolumeMounts)) - assert.Equal(t, 2, len(localDirOptions)) - assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "driver", "hostPath", volumes[0].Name, volumeMounts[0].MountPath), localDirOptions[0]) - assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "driver", "hostPath", volumes[0].Name, "path", volumes[0].HostPath.Path), localDirOptions[1]) -} - -func TestAddLocalDir_MultipleScratchVolumes(t *testing.T) { - volumes := []corev1.Volume{ - { - Name: "spark-local-dir-1", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/tmp/mnt-1", - }, - }, - }, - { - Name: "spark-local-dir-2", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/tmp/mnt-2", - }, - }, - }, - } - - volumeMounts := []corev1.VolumeMount{ - { - Name: "spark-local-dir-1", - MountPath: "/tmp/mnt-1", - }, - { - Name: "spark-local-dir-2", - MountPath: "/tmp/mnt-2", - }, - } - - app := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "spark-test", - UID: "spark-test-1", - }, - Spec: v1beta2.SparkApplicationSpec{ - Volumes: volumes, - Driver: v1beta2.DriverSpec{ - SparkPodSpec: v1beta2.SparkPodSpec{ - VolumeMounts: volumeMounts, - }, - }, - }, - } - - localDirOptions, err := addLocalDirConfOptions(app) - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, 0, len(app.Spec.Volumes)) - assert.Equal(t, 0, len(app.Spec.Driver.VolumeMounts)) - assert.Equal(t, 4, len(localDirOptions)) - assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "driver", "hostPath", volumes[0].Name, volumeMounts[0].MountPath), localDirOptions[0]) - assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "driver", "hostPath", volumes[0].Name, "path", volumes[0].HostPath.Path), localDirOptions[1]) - assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "driver", "hostPath", volumes[1].Name, volumeMounts[1].MountPath), localDirOptions[2]) - assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "driver", "hostPath", volumes[1].Name, "path", volumes[1].HostPath.Path), localDirOptions[3]) -} - -func TestAddLocalDir_Executor(t *testing.T) { - volumes := []corev1.Volume{ - { - Name: "spark-local-dir-1", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/tmp/mnt", - }, - }, - }, - } - - volumeMounts := []corev1.VolumeMount{ - { - Name: "spark-local-dir-1", - MountPath: "/tmp/mnt-1", - }, - } - - app := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "spark-test", - UID: "spark-test-1", - }, - Spec: v1beta2.SparkApplicationSpec{ - Volumes: volumes, - Executor: v1beta2.ExecutorSpec{ - 
SparkPodSpec: v1beta2.SparkPodSpec{ - VolumeMounts: volumeMounts, - }, - }, - }, - } - - localDirOptions, err := addLocalDirConfOptions(app) - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, 0, len(app.Spec.Volumes)) - assert.Equal(t, 0, len(app.Spec.Executor.VolumeMounts)) - assert.Equal(t, 2, len(localDirOptions)) - assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "executor", "hostPath", volumes[0].Name, volumeMounts[0].MountPath), localDirOptions[0]) - assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "executor", "hostPath", volumes[0].Name, "path", volumes[0].HostPath.Path), localDirOptions[1]) -} - -func TestAddLocalDir_Driver_Executor(t *testing.T) { - volumes := []corev1.Volume{ - { - Name: "spark-local-dir-1", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/tmp/mnt", - }, - }, - }, - { - Name: "test-volume", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/tmp/test", - }, - }, - }, - } - - volumeMounts := []corev1.VolumeMount{ - { - Name: "spark-local-dir-1", - MountPath: "/tmp/mnt-1", - }, - { - Name: "test-volume", - MountPath: "/tmp/test", - }, - } - - app := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "spark-test", - UID: "spark-test-1", - }, - Spec: v1beta2.SparkApplicationSpec{ - Volumes: volumes, - Driver: v1beta2.DriverSpec{ - SparkPodSpec: v1beta2.SparkPodSpec{ - VolumeMounts: volumeMounts, - }, - }, - Executor: v1beta2.ExecutorSpec{ - SparkPodSpec: v1beta2.SparkPodSpec{ - VolumeMounts: volumeMounts, - }, - }, - }, - } - - localDirOptions, err := addLocalDirConfOptions(app) - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, 1, len(app.Spec.Volumes)) - assert.Equal(t, 1, len(app.Spec.Driver.VolumeMounts)) - assert.Equal(t, 1, len(app.Spec.Executor.VolumeMounts)) - assert.Equal(t, 4, len(localDirOptions)) - assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "driver", "hostPath", volumes[0].Name, volumeMounts[0].MountPath), localDirOptions[0]) - assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "driver", "hostPath", volumes[0].Name, "path", volumes[0].HostPath.Path), localDirOptions[1]) - assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "executor", "hostPath", volumes[0].Name, volumeMounts[0].MountPath), localDirOptions[2]) - assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "executor", "hostPath", volumes[0].Name, "path", volumes[0].HostPath.Path), localDirOptions[3]) -} - -func TestAddEmptyDir_Driver_Executor_WithSizeLimit(t *testing.T) { - sizeLimit := resource.MustParse("5Gi") - volumes := []corev1.Volume{ - { - Name: "spark-local-dir-1", - VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{ - SizeLimit: &sizeLimit, - }, - }, - }, - } - - volumeMounts := []corev1.VolumeMount{ - { - Name: "spark-local-dir-1", - MountPath: "/tmp/mnt-1", - }, - } - - app := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "spark-test", - UID: "spark-test-1", - }, - Spec: v1beta2.SparkApplicationSpec{ - Volumes: volumes, - Driver: v1beta2.DriverSpec{ - SparkPodSpec: v1beta2.SparkPodSpec{ - VolumeMounts: volumeMounts, - }, - }, - Executor: v1beta2.ExecutorSpec{ - SparkPodSpec: v1beta2.SparkPodSpec{ - VolumeMounts: volumeMounts, - }, - }, - }, - } - - localDirOptions, err := addLocalDirConfOptions(app) - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, 0, len(app.Spec.Volumes)) - assert.Equal(t, 0, len(app.Spec.Driver.VolumeMounts)) - assert.Equal(t, 0, len(app.Spec.Executor.VolumeMounts)) - 
assert.Equal(t, 4, len(localDirOptions)) - assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "driver", "emptyDir", volumes[0].Name, volumeMounts[0].MountPath), localDirOptions[0]) - assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "driver", "emptyDir", volumes[0].Name, "sizeLimit", volumes[0].EmptyDir.SizeLimit.String()), localDirOptions[1]) - assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "executor", "emptyDir", volumes[0].Name, volumeMounts[0].MountPath), localDirOptions[2]) - assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "executor", "emptyDir", volumes[0].Name, "sizeLimit", volumes[0].EmptyDir.SizeLimit.String()), localDirOptions[3]) -} - -func TestPopulateLabels_Driver_Executor(t *testing.T) { - const ( - AppLabelKey = "app-label-key" - AppLabelValue = "app-label-value" - DriverLabelKey = "driver-label-key" - DriverLabelValue = "driver-label-key" - DriverServiceLabelKey = "driver-svc-label-key" - DriverServiceLabelValue = "driver-svc-label-value" - ExecutorLabelKey = "executor-label-key" - ExecutorLabelValue = "executor-label-key" - ) - - app := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "spark-test", - UID: "spark-test-1", - Labels: map[string]string{AppLabelKey: AppLabelValue}, - }, - Spec: v1beta2.SparkApplicationSpec{ - Driver: v1beta2.DriverSpec{ - ServiceLabels: map[string]string{DriverServiceLabelKey: DriverServiceLabelValue}, - SparkPodSpec: v1beta2.SparkPodSpec{ - Labels: map[string]string{DriverLabelKey: DriverLabelValue}, - }, - }, - Executor: v1beta2.ExecutorSpec{ - SparkPodSpec: v1beta2.SparkPodSpec{ - Labels: map[string]string{ExecutorLabelKey: ExecutorLabelValue}, - }, - }, - }, - } - - submissionID := uuid.New().String() - driverOptions, err := addDriverConfOptions(app, submissionID) - if err != nil { - t.Fatal(err) - } - assert.Equal(t, 6, len(driverOptions)) - sort.Strings(driverOptions) - expectedDriverLabels := []string{ - fmt.Sprintf(SparkDriverLabelAnnotationTemplate, "launched-by-spark-operator", strconv.FormatBool(true)), - fmt.Sprintf(SparkDriverLabelAnnotationTemplate, "app-name", "spark-test"), - fmt.Sprintf(SparkDriverLabelAnnotationTemplate, "submission-id", submissionID), - fmt.Sprintf(SparkDriverLabelTemplate, AppLabelKey, AppLabelValue), - fmt.Sprintf(SparkDriverLabelTemplate, DriverLabelKey, DriverLabelValue), - fmt.Sprintf(SparkDriverServiceLabelTemplate, DriverServiceLabelKey, DriverServiceLabelValue), - } - sort.Strings(expectedDriverLabels) - - if !reflect.DeepEqual(expectedDriverLabels, driverOptions) { - t.Errorf("Executor labels: wanted %+q got %+q", expectedDriverLabels, driverOptions) - } - - executorOptions, err := addExecutorConfOptions(app, submissionID) - sort.Strings(executorOptions) - if err != nil { - t.Fatal(err) - } - assert.Equal(t, 5, len(executorOptions)) - expectedExecutorLabels := []string{ - fmt.Sprintf(SparkExecutorLabelAnnotationTemplate, "app-name", "spark-test"), - fmt.Sprintf(SparkExecutorLabelAnnotationTemplate, "launched-by-spark-operator", strconv.FormatBool(true)), - fmt.Sprintf(SparkExecutorLabelAnnotationTemplate, "submission-id", submissionID), - fmt.Sprintf(SparkExecutorLabelTemplate, AppLabelKey, AppLabelValue), - fmt.Sprintf(SparkExecutorLabelTemplate, ExecutorLabelKey, ExecutorLabelValue), - } - sort.Strings(expectedExecutorLabels) - - if !reflect.DeepEqual(expectedExecutorLabels, executorOptions) { - t.Errorf("Executor labels: wanted %+q got %+q", expectedExecutorLabels, executorOptions) - } -} - -func TestPopulateLabelsOverride_Driver_Executor(t 
*testing.T) { - const ( - AppLabelKey = "app-label-key" - AppLabelValue = "app-label-value" - DriverLabelKey = "driver-label-key" - DriverLabelValue = "driver-label-key" - DriverAppLabelOverride = "driver-app-label-override" - ExecutorLabelKey = "executor-label-key" - ExecutorLabelValue = "executor-label-key" - ExecutorAppLabelOverride = "executor-app-label-override" - ) - - app := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "spark-test", - UID: "spark-test-1", - Labels: map[string]string{AppLabelKey: AppLabelValue}, - }, - Spec: v1beta2.SparkApplicationSpec{ - Driver: v1beta2.DriverSpec{ - SparkPodSpec: v1beta2.SparkPodSpec{ - Labels: map[string]string{DriverLabelKey: DriverLabelValue, AppLabelKey: DriverAppLabelOverride}, - }, - }, - Executor: v1beta2.ExecutorSpec{ - SparkPodSpec: v1beta2.SparkPodSpec{ - Labels: map[string]string{ExecutorLabelKey: ExecutorLabelValue, AppLabelKey: ExecutorAppLabelOverride}, - }, - }, - }, - } - - submissionID := uuid.New().String() - driverOptions, err := addDriverConfOptions(app, submissionID) - if err != nil { - t.Fatal(err) - } - sort.Strings(driverOptions) - assert.Equal(t, 5, len(driverOptions)) - expectedDriverLabels := []string{ - fmt.Sprintf(SparkDriverLabelTemplate, AppLabelKey, DriverAppLabelOverride), - fmt.Sprintf(SparkDriverLabelTemplate, DriverLabelKey, DriverLabelValue), - fmt.Sprintf(SparkDriverLabelAnnotationTemplate, "app-name", "spark-test"), - fmt.Sprintf(SparkDriverLabelAnnotationTemplate, "launched-by-spark-operator", strconv.FormatBool(true)), - fmt.Sprintf(SparkDriverLabelAnnotationTemplate, "submission-id", submissionID), - } - sort.Strings(expectedDriverLabels) - - if !reflect.DeepEqual(expectedDriverLabels, driverOptions) { - t.Errorf("Executor labels: wanted %+q got %+q", expectedDriverLabels, driverOptions) - } - - executorOptions, err := addExecutorConfOptions(app, submissionID) - if err != nil { - t.Fatal(err) - } - sort.Strings(executorOptions) - assert.Equal(t, 5, len(executorOptions)) - expectedExecutorLabels := []string{ - fmt.Sprintf(SparkExecutorLabelTemplate, AppLabelKey, ExecutorAppLabelOverride), - fmt.Sprintf(SparkExecutorLabelTemplate, ExecutorLabelKey, ExecutorLabelValue), - fmt.Sprintf(SparkExecutorLabelAnnotationTemplate, "launched-by-spark-operator", strconv.FormatBool(true)), - fmt.Sprintf(SparkExecutorLabelAnnotationTemplate, "app-name", "spark-test"), - fmt.Sprintf(SparkExecutorLabelAnnotationTemplate, "submission-id", submissionID), - } - sort.Strings(expectedExecutorLabels) - - if !reflect.DeepEqual(expectedExecutorLabels, executorOptions) { - t.Errorf("Executor labels: wanted %+q got %+q", expectedExecutorLabels, executorOptions) - } -} - -func TestDynamicAllocationOptions(t *testing.T) { - app := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "spark-test", - UID: "spark-test-1", - }, - Spec: v1beta2.SparkApplicationSpec{}, - } - options := addDynamicAllocationConfOptions(app) - assert.Equal(t, 0, len(options)) - - app = &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "spark-test", - UID: "spark-test-1", - }, - Spec: v1beta2.SparkApplicationSpec{ - DynamicAllocation: &v1beta2.DynamicAllocation{ - Enabled: true, - InitialExecutors: int32ptr(2), - MinExecutors: int32ptr(0), - MaxExecutors: int32ptr(10), - ShuffleTrackingTimeout: int64ptr(6000000), - }, - }, - } - - options = addDynamicAllocationConfOptions(app) - assert.Equal(t, 6, len(options)) - assert.Equal(t, fmt.Sprintf("%s=true", config.SparkDynamicAllocationEnabled), options[0]) - 
assert.Equal(t, fmt.Sprintf("%s=true", config.SparkDynamicAllocationShuffleTrackingEnabled), options[1]) - assert.Equal(t, fmt.Sprintf("%s=2", config.SparkDynamicAllocationInitialExecutors), options[2]) - assert.Equal(t, fmt.Sprintf("%s=0", config.SparkDynamicAllocationMinExecutors), options[3]) - assert.Equal(t, fmt.Sprintf("%s=10", config.SparkDynamicAllocationMaxExecutors), options[4]) - assert.Equal(t, fmt.Sprintf("%s=6000000", config.SparkDynamicAllocationShuffleTrackingTimeout), options[5]) -} - -func TestProxyUserArg(t *testing.T) { - const ( - host = "localhost" - port = "6443" - ) - - if err := os.Setenv(kubernetesServiceHostEnvVar, host); err != nil { - t.Fatal(err) - } - if err := os.Setenv(kubernetesServicePortEnvVar, port); err != nil { - t.Fatal(err) - } - - app := &v1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "spark-test", - UID: "spark-test-1", - }, - Spec: v1beta2.SparkApplicationSpec{ - Mode: v1beta2.ClusterMode, - ProxyUser: stringptr("foo"), - }, - } - - submissionID := uuid.New().String() - driverPodName := getDriverPodName(app) - args, err := buildSubmissionCommandArgs(app, driverPodName, submissionID) - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, "--master", args[0]) - assert.Equal(t, fmt.Sprintf("k8s://https://%s:%s", host, port), args[1]) - assert.Equal(t, "--deploy-mode", args[2]) - assert.Equal(t, string(v1beta2.ClusterMode), args[3]) - assert.Equal(t, "--proxy-user", args[4]) - assert.Equal(t, "foo", args[5]) -} - -func Test_getMasterURL(t *testing.T) { - setEnv := func(host string, port string) { - if err := os.Setenv(kubernetesServiceHostEnvVar, host); err != nil { - t.Fatal(err) - } - if err := os.Setenv(kubernetesServicePortEnvVar, port); err != nil { - t.Fatal(err) - } - } - - tests := []struct { - name string - host string - port string - want string - wantErr assert.ErrorAssertionFunc - }{ - { - name: "should return a valid master url when IPv4 address is used", - host: "localhost", - port: "6443", - want: "k8s://https://localhost:6443", - wantErr: assert.NoError, - }, - { - name: "should return a valid master url when IPv6 address is used", - host: "::1", - port: "6443", - want: "k8s://https://[::1]:6443", - wantErr: assert.NoError, - }, - { - name: "should throw an error when the host is empty", - host: "", - port: "6443", - want: "", - wantErr: assert.Error, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - setEnv(tt.host, tt.port) - got, err := getMasterURL() - if !tt.wantErr(t, err, fmt.Sprintf("getMasterURL()")) { - return - } - assert.Equalf(t, tt.want, got, "getMasterURL()") - }) - } -} diff --git a/pkg/util/capabilities.go b/pkg/util/capabilities.go index 5040da6e96..068bdcb4df 100644 --- a/pkg/util/capabilities.go +++ b/pkg/util/capabilities.go @@ -38,6 +38,19 @@ func (c Capabilities) String() string { return strings.Join(keys, ", ") } +var ( + IngressCapabilities Capabilities +) + +func InitializeIngressCapabilities(client kubernetes.Interface) (err error) { + if IngressCapabilities != nil { + return + } + + IngressCapabilities, err = getPreferredAvailableAPIs(client, "Ingress") + return +} + // getPreferredAvailableAPIs queries the cluster for the preferred resources information and returns a Capabilities // instance containing those api groups that support the specified kind. 
diff --git a/pkg/util/capabilities.go b/pkg/util/capabilities.go
index 5040da6e96..068bdcb4df 100644
--- a/pkg/util/capabilities.go
+++ b/pkg/util/capabilities.go
@@ -38,6 +38,19 @@ func (c Capabilities) String() string {
     return strings.Join(keys, ", ")
 }
 
+var (
+    IngressCapabilities Capabilities
+)
+
+func InitializeIngressCapabilities(client kubernetes.Interface) (err error) {
+    if IngressCapabilities != nil {
+        return
+    }
+
+    IngressCapabilities, err = getPreferredAvailableAPIs(client, "Ingress")
+    return
+}
+
 // getPreferredAvailableAPIs queries the cluster for the preferred resources information and returns a Capabilities
 // instance containing those api groups that support the specified kind.
 //
@@ -70,15 +83,3 @@
 
     return caps, nil
 }
-
-var (
-    IngressCapabilities Capabilities
-)
-
-func InitializeIngressCapabilities(client kubernetes.Interface) (err error) {
-    if IngressCapabilities != nil {
-        return
-    }
-    IngressCapabilities, err = getPreferredAvailableAPIs(client, "Ingress")
-    return
-}
diff --git a/pkg/util/cert_test.go b/pkg/util/cert_test.go
deleted file mode 100644
index 700bc234d4..0000000000
--- a/pkg/util/cert_test.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package util
-
-import (
-    "crypto/rand"
-    "crypto/rsa"
-    "crypto/x509"
-    "testing"
-    "time"
-
-    "k8s.io/client-go/util/cert"
-)
-
-func TestNewPrivateKey(t *testing.T) {
-    _, err := NewPrivateKey()
-    if err != nil {
-        t.Errorf("failed to generate private key: %v", err)
-    }
-}
-
-func TestNewSignedServerCert(t *testing.T) {
-    cfg := cert.Config{
-        CommonName: "test-server",
-        Organization: []string{"test-org"},
-        NotBefore: time.Now(),
-    }
-
-    caKey, _ := rsa.GenerateKey(rand.Reader, RSAKeySize)
-    caCert := &x509.Certificate{}
-    serverKey, _ := rsa.GenerateKey(rand.Reader, RSAKeySize)
-
-    serverCert, err := NewSignedServerCert(cfg, caKey, caCert, serverKey)
-    if err != nil {
-        t.Errorf("failed to generate signed server certificate: %v", err)
-    }
-
-    if serverCert == nil {
-        t.Error("server certificate is nil")
-    }
-}
diff --git a/pkg/util/histogram_buckets.go b/pkg/util/histogram_buckets.go
deleted file mode 100644
index 0cdf25da21..0000000000
--- a/pkg/util/histogram_buckets.go
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
-Copyright 2018 Google LLC
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    https://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package util
-
-import (
-    "fmt"
-    "strconv"
-    "strings"
-)
-
-var DefaultJobStartLatencyBuckets = []float64{30, 60, 90, 120, 150, 180, 210, 240, 270, 300}
-
-type HistogramBuckets []float64
-
-func (hb *HistogramBuckets) String() string {
-    return fmt.Sprint(*hb)
-}
-
-func (hb *HistogramBuckets) Set(value string) error {
-    *hb = nil
-    for _, boundaryStr := range strings.Split(value, ",") {
-        boundary, err := strconv.ParseFloat(strings.TrimSpace(boundaryStr), 64)
-        if err != nil {
-            return err
-        }
-        *hb = append(*hb, boundary)
-    }
-    return nil
-}
diff --git a/pkg/util/metrics.go b/pkg/util/metrics.go
index 81cb14573b..fe87508d9d 100644
--- a/pkg/util/metrics.go
+++ b/pkg/util/metrics.go
@@ -17,201 +17,10 @@ limitations under the License.
 package util
 
 import (
-    "fmt"
-    "net/http"
     "strings"
-    "sync"
-
-    "github.com/golang/glog"
-    "github.com/prometheus/client_golang/prometheus"
-    "github.com/prometheus/client_golang/prometheus/promhttp"
-    prometheusmodel "github.com/prometheus/client_model/go"
-
-    "k8s.io/client-go/util/workqueue"
 )
 
 func CreateValidMetricNameLabel(prefix, name string) string {
     // "-" is not a valid character for prometheus metric names or labels.
     return strings.Replace(prefix+name, "-", "_", -1)
 }
-
-// Best effort metric registration with Prometheus.
-func RegisterMetric(metric prometheus.Collector) {
-    if err := prometheus.Register(metric); err != nil {
-        // Ignore AlreadyRegisteredError.
-        if _, ok := err.(prometheus.AlreadyRegisteredError); ok {
-            return
-        }
-        glog.Errorf("failed to register metric: %v", err)
-    }
-}
-
-// MetricConfig is a container of configuration properties for the collection and exporting of
-// application metrics to Prometheus.
-type MetricConfig struct {
-    MetricsEndpoint string
-    MetricsPort string
-    MetricsPrefix string
-    MetricsLabels []string
-    MetricsJobStartLatencyBuckets []float64
-}
-
-// A variant of Prometheus Gauge that only holds non-negative values.
-type PositiveGauge struct {
-    mux sync.RWMutex
-    name string
-    gaugeMetric *prometheus.GaugeVec
-}
-
-func NewPositiveGauge(name string, description string, labels []string) *PositiveGauge {
-    validLabels := make([]string, len(labels))
-    for i, label := range labels {
-        validLabels[i] = CreateValidMetricNameLabel("", label)
-    }
-
-    gauge := prometheus.NewGaugeVec(
-        prometheus.GaugeOpts{
-            Name: name,
-            Help: description,
-        },
-        validLabels,
-    )
-
-    return &PositiveGauge{
-        gaugeMetric: gauge,
-        name: name,
-    }
-}
-
-func fetchGaugeValue(m *prometheus.GaugeVec, labels map[string]string) float64 {
-    // Hack to get the current value of the metric to support PositiveGauge
-    pb := &prometheusmodel.Metric{}
-
-    m.With(labels).Write(pb)
-    return pb.GetGauge().GetValue()
-}
-
-func (p *PositiveGauge) Register() {
-    RegisterMetric(p.gaugeMetric)
-}
-
-func (p *PositiveGauge) Value(labelMap map[string]string) float64 {
-    p.mux.RLock()
-    defer p.mux.RUnlock()
-    return fetchGaugeValue(p.gaugeMetric, labelMap)
-}
-
-// Increment the Metric for the labels specified
-func (p *PositiveGauge) Inc(labelMap map[string]string) {
-    p.mux.Lock()
-    defer p.mux.Unlock()
-
-    if m, err := p.gaugeMetric.GetMetricWith(labelMap); err != nil {
-        glog.Errorf("Error while exporting metrics: %v", err)
-    } else {
-        glog.V(2).Infof("Incrementing %s with labels %s", p.name, labelMap)
-        m.Inc()
-    }
-}
-
-// Decrement the metric only if its positive for the labels specified
-func (p *PositiveGauge) Dec(labelMap map[string]string) {
-    p.mux.Lock()
-    defer p.mux.Unlock()
-
-    // Decrement only if positive
-    val := fetchGaugeValue(p.gaugeMetric, labelMap)
-    if val > 0 {
-        glog.V(2).Infof("Decrementing %s with labels %s metricVal to %v", p.name, labelMap, val-1)
-        if m, err := p.gaugeMetric.GetMetricWith(labelMap); err != nil {
-            glog.Errorf("Error while exporting metrics: %v", err)
-        } else {
-            m.Dec()
-        }
-    }
-}
-
-type WorkQueueMetrics struct {
-    prefix string
-}
-
-func InitializeMetrics(metricsConfig *MetricConfig) {
-    // Start the metrics endpoint for Prometheus to scrape
-    http.Handle(metricsConfig.MetricsEndpoint, promhttp.Handler())
-    go http.ListenAndServe(fmt.Sprintf(":%s", metricsConfig.MetricsPort), nil)
-    glog.Infof("Started Metrics server at localhost:%s%s", metricsConfig.MetricsPort, metricsConfig.MetricsEndpoint)
-
-    workQueueMetrics := WorkQueueMetrics{prefix: metricsConfig.MetricsPrefix}
-    workqueue.SetProvider(&workQueueMetrics)
-}
-
-// Depth Metric for the kubernetes workqueue.
-func (p *WorkQueueMetrics) NewDepthMetric(name string) workqueue.GaugeMetric {
-    depthMetric := prometheus.NewGauge(prometheus.GaugeOpts{
-        Name: CreateValidMetricNameLabel(p.prefix, name+"_depth"),
-        Help: fmt.Sprintf("Current depth of workqueue: %s", name),
-    },
-    )
-    RegisterMetric(depthMetric)
-    return depthMetric
-}
-
-// Adds Count Metrics for the kubernetes workqueue.
-func (p *WorkQueueMetrics) NewAddsMetric(name string) workqueue.CounterMetric {
-    addsMetric := prometheus.NewCounter(prometheus.CounterOpts{
-        Name: CreateValidMetricNameLabel(p.prefix, name+"_adds"),
-        Help: fmt.Sprintf("Total number of adds handled by workqueue: %s", name),
-    })
-    RegisterMetric(addsMetric)
-    return addsMetric
-}
-
-// Latency Metric for the kubernetes workqueue.
-func (p *WorkQueueMetrics) NewLatencyMetric(name string) workqueue.HistogramMetric {
-    latencyMetric := prometheus.NewSummary(prometheus.SummaryOpts{
-        Name: CreateValidMetricNameLabel(p.prefix, name+"_latency"),
-        Help: fmt.Sprintf("Latency for workqueue: %s", name),
-    })
-    RegisterMetric(latencyMetric)
-    return latencyMetric
-}
-
-// WorkDuration Metric for the kubernetes workqueue.
-func (p *WorkQueueMetrics) NewWorkDurationMetric(name string) workqueue.HistogramMetric {
-    workDurationMetric := prometheus.NewSummary(prometheus.SummaryOpts{
-        Name: CreateValidMetricNameLabel(p.prefix, name+"_work_duration"),
-        Help: fmt.Sprintf("How long processing an item from workqueue %s takes.", name),
-    })
-    RegisterMetric(workDurationMetric)
-    return workDurationMetric
-}
-
-// Retry Metric for the kubernetes workqueue.
-func (p *WorkQueueMetrics) NewRetriesMetric(name string) workqueue.CounterMetric {
-    retriesMetrics := prometheus.NewCounter(prometheus.CounterOpts{
-        Name: CreateValidMetricNameLabel(p.prefix, name+"_retries"),
-        Help: fmt.Sprintf("Total number of retries handled by workqueue: %s", name),
-    })
-    RegisterMetric(retriesMetrics)
-    return retriesMetrics
-}
-
-func (p *WorkQueueMetrics) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric {
-    unfinishedWorkSecondsMetric := prometheus.NewGauge(prometheus.GaugeOpts{
-        Name: CreateValidMetricNameLabel(p.prefix, name+"_unfinished_work_seconds"),
-        Help: fmt.Sprintf("Unfinished work seconds: %s", name),
-    },
-    )
-    RegisterMetric(unfinishedWorkSecondsMetric)
-    return unfinishedWorkSecondsMetric
-}
-
-func (p *WorkQueueMetrics) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric {
-    longestRunningProcessorMicrosecondsMetric := prometheus.NewGauge(prometheus.GaugeOpts{
-        Name: CreateValidMetricNameLabel(p.prefix, name+"_longest_running_processor_microseconds"),
-        Help: fmt.Sprintf("Longest running processor microseconds: %s", name),
-    },
-    )
-    RegisterMetric(longestRunningProcessorMicrosecondsMetric)
-    return longestRunningProcessorMicrosecondsMetric
-}
diff --git a/pkg/util/metrics_test.go b/pkg/util/metrics_test.go
deleted file mode 100644
index 4771a08588..0000000000
--- a/pkg/util/metrics_test.go
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
-Copyright 2018 Google LLC
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    https://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package util
-
-import (
-    "sync"
-    "testing"
-
-    "github.com/stretchr/testify/assert"
-)
-
-func TestPositiveGauge_EmptyLabels(t *testing.T) {
-    gauge := NewPositiveGauge("testGauge", "test-description", []string{})
-    emptyMap := map[string]string{}
-    gauge.Dec(emptyMap)
-    assert.Equal(t, fetchGaugeValue(gauge.gaugeMetric, emptyMap), float64(0))
-
-    gauge.Inc(emptyMap)
-    assert.Equal(t, fetchGaugeValue(gauge.gaugeMetric, emptyMap), float64(1))
-    gauge.Dec(map[string]string{})
-    assert.Equal(t, fetchGaugeValue(gauge.gaugeMetric, emptyMap), float64(0))
-}
-
-func TestPositiveGauge_WithLabels(t *testing.T) {
-    gauge := NewPositiveGauge("testGauge1", "test-description-1", []string{"app_id"})
-    app1 := map[string]string{"app_id": "test1"}
-    app2 := map[string]string{"app_id": "test2"}
-
-    var wg sync.WaitGroup
-    wg.Add(2)
-    go func() {
-        for i := 0; i < 10; i++ {
-            gauge.Inc(app1)
-        }
-        for i := 0; i < 5; i++ {
-            gauge.Dec(app1)
-        }
-        wg.Done()
-    }()
-    go func() {
-        for i := 0; i < 5; i++ {
-            gauge.Inc(app2)
-        }
-        for i := 0; i < 10; i++ {
-            gauge.Dec(app2)
-        }
-        wg.Done()
-    }()
-
-    wg.Wait()
-    assert.Equal(t, float64(5), fetchGaugeValue(gauge.gaugeMetric, app1))
-    // Always Positive Gauge.
-    assert.Equal(t, float64(0), fetchGaugeValue(gauge.gaugeMetric, app2))
-}
diff --git a/pkg/webhook/scheme.go b/pkg/util/resourcequota.go
similarity index 57%
rename from pkg/webhook/scheme.go
rename to pkg/util/resourcequota.go
index e9a02c5f04..1b79eea234 100644
--- a/pkg/webhook/scheme.go
+++ b/pkg/util/resourcequota.go
@@ -1,5 +1,5 @@
 /*
-Copyright 2018 Google LLC
+Copyright 2024 The Kubeflow authors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -14,25 +14,24 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package webhook
+package util
 
 import (
-    admissionv1 "k8s.io/api/admission/v1"
     corev1 "k8s.io/api/core/v1"
-    "k8s.io/apimachinery/pkg/runtime"
-    "k8s.io/apimachinery/pkg/runtime/serializer"
 )
 
-var (
-    scheme = runtime.NewScheme()
-    codecs = serializer.NewCodecFactory(scheme)
-)
-
-func init() {
-    addToScheme(scheme)
-}
-
-func addToScheme(scheme *runtime.Scheme) {
-    corev1.AddToScheme(scheme)
-    admissionv1.AddToScheme(scheme)
+// SumResourceList sums the resource list.
+func SumResourceList(lists []corev1.ResourceList) corev1.ResourceList {
+    total := corev1.ResourceList{}
+    for _, list := range lists {
+        for name, quantity := range list {
+            if value, ok := total[name]; !ok {
+                total[name] = quantity.DeepCopy()
+            } else {
+                value.Add(quantity)
+                total[name] = value
+            }
+        }
+    }
+    return total
 }
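SumResourceList above folds a slice of ResourceLists into one total: a quantity is deep-copied the first time its resource name is seen, so the inputs are never mutated, and subsequent occurrences are added to the running total. A short usage sketch; the quantities are illustrative only:

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"

    "github.com/kubeflow/spark-operator/pkg/util"
)

func main() {
    driver := corev1.ResourceList{
        corev1.ResourceCPU:    resource.MustParse("1"),
        corev1.ResourceMemory: resource.MustParse("512Mi"),
    }
    executor := corev1.ResourceList{
        corev1.ResourceCPU:    resource.MustParse("2"),
        corev1.ResourceMemory: resource.MustParse("1Gi"),
    }

    // One driver plus two executors: cpu = 1+2+2 = 5, memory = 512Mi+1Gi+1Gi = 2560Mi.
    total := util.SumResourceList([]corev1.ResourceList{driver, executor, executor})
    fmt.Printf("cpu=%s memory=%s\n", total.Cpu(), total.Memory())
}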
diff --git a/pkg/util/sparkapplication.go b/pkg/util/sparkapplication.go
new file mode 100644
index 0000000000..273ad7401d
--- /dev/null
+++ b/pkg/util/sparkapplication.go
@@ -0,0 +1,430 @@
+/*
+Copyright 2024 The Kubeflow authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+    "fmt"
+    "reflect"
+    "strings"
+    "time"
+
+    corev1 "k8s.io/api/core/v1"
+    networkingv1 "k8s.io/api/networking/v1"
+    "k8s.io/apimachinery/pkg/api/resource"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+    "github.com/kubeflow/spark-operator/api/v1beta2"
+    "github.com/kubeflow/spark-operator/pkg/common"
+)
+
+// GetDriverPodName returns name of the driver pod of the given spark application.
+func GetDriverPodName(app *v1beta2.SparkApplication) string {
+    name := app.Spec.Driver.PodName
+    if name != nil && len(*name) > 0 {
+        return *name
+    }
+
+    sparkConf := app.Spec.SparkConf
+    if sparkConf[common.SparkKubernetesDriverPodName] != "" {
+        return sparkConf[common.SparkKubernetesDriverPodName]
+    }
+
+    return fmt.Sprintf("%s-driver", app.Name)
+}
+
+// GetApplicationState returns the state of the given SparkApplication.
+func GetApplicationState(app *v1beta2.SparkApplication) v1beta2.ApplicationStateType {
+    return app.Status.AppState.State
+}
+
+// IsExpired returns whether the given SparkApplication is expired.
+func IsExpired(app *v1beta2.SparkApplication) bool {
+    // The application has no TTL defined and will never expire.
+    if app.Spec.TimeToLiveSeconds == nil {
+        return false
+    }
+
+    ttl := time.Duration(*app.Spec.TimeToLiveSeconds) * time.Second
+    now := time.Now()
+    if !app.Status.TerminationTime.IsZero() && now.Sub(app.Status.TerminationTime.Time) > ttl {
+        return true
+    }
+
+    return false
+}
+
+// IsDriverRunning returns whether the driver pod of the given SparkApplication is running.
+func IsDriverRunning(app *v1beta2.SparkApplication) bool {
+    return app.Status.AppState.State == v1beta2.ApplicationStateRunning
+}
+
+// ShouldRetry returns whether the given SparkApplication should be retried based on its restart policy and current state.
+func ShouldRetry(app *v1beta2.SparkApplication) bool {
+    switch app.Status.AppState.State {
+    case v1beta2.ApplicationStateSucceeding:
+        return app.Spec.RestartPolicy.Type == v1beta2.RestartPolicyAlways
+    case v1beta2.ApplicationStateFailing:
+        if app.Spec.RestartPolicy.Type == v1beta2.RestartPolicyAlways {
+            return true
+        } else if app.Spec.RestartPolicy.Type == v1beta2.RestartPolicyOnFailure {
+            // We retry if we haven't hit the retry limit.
+            if app.Spec.RestartPolicy.OnFailureRetries != nil && app.Status.ExecutionAttempts <= *app.Spec.RestartPolicy.OnFailureRetries {
+                return true
+            }
+        }
+    case v1beta2.ApplicationStateFailedSubmission:
+        if app.Spec.RestartPolicy.Type == v1beta2.RestartPolicyAlways {
+            return true
+        } else if app.Spec.RestartPolicy.Type == v1beta2.RestartPolicyOnFailure {
+            // We retry if we haven't hit the retry limit.
+            if app.Spec.RestartPolicy.OnSubmissionFailureRetries != nil && app.Status.SubmissionAttempts <= *app.Spec.RestartPolicy.OnSubmissionFailureRetries {
+                return true
+            }
+        }
+    }
+    return false
+}
+
+// GetLocalVolumes returns the volumes whose names carry the Spark local directory volume prefix.
+func GetLocalVolumes(app *v1beta2.SparkApplication) map[string]corev1.Volume {
+    volumes := make(map[string]corev1.Volume)
+    for _, volume := range app.Spec.Volumes {
+        if strings.HasPrefix(volume.Name, common.SparkLocalDirVolumePrefix) {
+            volumes[volume.Name] = volume
+        }
+    }
+    return volumes
+}
+
+// GetDriverLocalVolumeMounts returns the driver volume mounts that reference Spark local directory volumes.
+func GetDriverLocalVolumeMounts(app *v1beta2.SparkApplication) []corev1.VolumeMount {
+    volumeMounts := []corev1.VolumeMount{}
+    for _, volumeMount := range app.Spec.Driver.VolumeMounts {
+        if strings.HasPrefix(volumeMount.Name, common.SparkLocalDirVolumePrefix) {
+            volumeMounts = append(volumeMounts, volumeMount)
+        }
+    }
+    return volumeMounts
+}
+
+// GetExecutorLocalVolumeMounts returns the executor volume mounts that reference Spark local directory volumes.
+func GetExecutorLocalVolumeMounts(app *v1beta2.SparkApplication) []corev1.VolumeMount {
+    volumeMounts := []corev1.VolumeMount{}
+    for _, volumeMount := range app.Spec.Executor.VolumeMounts {
+        if strings.HasPrefix(volumeMount.Name, common.SparkLocalDirVolumePrefix) {
+            volumeMounts = append(volumeMounts, volumeMount)
+        }
+    }
+    return volumeMounts
+}
+
+// GetDefaultUIServiceName returns the default name of the Spark UI service of the given application.
+func GetDefaultUIServiceName(app *v1beta2.SparkApplication) string {
+    return fmt.Sprintf("%s-ui-svc", app.Name)
+}
+
+// GetDefaultUIIngressName returns the default name of the Spark UI ingress of the given application.
+func GetDefaultUIIngressName(app *v1beta2.SparkApplication) string {
+    return fmt.Sprintf("%s-ui-ingress", app.Name)
+}
+
+// GetResourceLabels returns the labels to attach to resources owned by the given application.
+func GetResourceLabels(app *v1beta2.SparkApplication) map[string]string {
+    labels := map[string]string{
+        common.LabelSparkAppName: app.Name,
+    }
+    if app.Status.SubmissionID != "" {
+        labels[common.LabelSubmissionID] = app.Status.SubmissionID
+    }
+    return labels
+}
+
+// GetWebUIServiceLabels returns the labels configured for the Spark web UI service.
+func GetWebUIServiceLabels(app *v1beta2.SparkApplication) map[string]string {
+    labels := map[string]string{}
+    if app.Spec.SparkUIOptions != nil && app.Spec.SparkUIOptions.ServiceLabels != nil {
+        for key, value := range app.Spec.SparkUIOptions.ServiceLabels {
+            labels[key] = value
+        }
+    }
+    return labels
+}
+
+// GetWebUIServiceAnnotations returns the annotations configured for the Spark web UI service.
+func GetWebUIServiceAnnotations(app *v1beta2.SparkApplication) map[string]string {
+    serviceAnnotations := map[string]string{}
+    if app.Spec.SparkUIOptions != nil && app.Spec.SparkUIOptions.ServiceAnnotations != nil {
+        for key, value := range app.Spec.SparkUIOptions.ServiceAnnotations {
+            serviceAnnotations[key] = value
+        }
+    }
+    return serviceAnnotations
+}
+
+// GetWebUIServiceType returns the service type configured for the Spark web UI service, defaulting to ClusterIP.
+func GetWebUIServiceType(app *v1beta2.SparkApplication) corev1.ServiceType {
+    if app.Spec.SparkUIOptions != nil && app.Spec.SparkUIOptions.ServiceType != nil {
+        return *app.Spec.SparkUIOptions.ServiceType
+    }
+    return corev1.ServiceTypeClusterIP
+}
+
+// GetWebUIIngressAnnotations returns the annotations configured for the Spark web UI ingress.
+func GetWebUIIngressAnnotations(app *v1beta2.SparkApplication) map[string]string {
+    annotations := map[string]string{}
+    if app.Spec.SparkUIOptions != nil && app.Spec.SparkUIOptions.IngressAnnotations != nil {
+        for key, value := range app.Spec.SparkUIOptions.IngressAnnotations {
+            annotations[key] = value
+        }
+    }
+    return annotations
+}
+
+// GetWebUIIngressTLS returns the TLS configuration for the Spark web UI ingress.
+func GetWebUIIngressTLS(app *v1beta2.SparkApplication) []networkingv1.IngressTLS {
+    ingressTLSs := []networkingv1.IngressTLS{}
+    if app.Spec.SparkUIOptions != nil && app.Spec.SparkUIOptions.IngressTLS != nil {
+        ingressTLSs = append(ingressTLSs, app.Spec.SparkUIOptions.IngressTLS...)
+    }
+    return ingressTLSs
+}
+
+// GetPrometheusConfigMapName returns the name of the ConfigMap for Prometheus configuration.
+func GetPrometheusConfigMapName(app *v1beta2.SparkApplication) string {
+    return fmt.Sprintf("%s-%s", app.Name, common.PrometheusConfigMapNameSuffix)
+}
+
+// PrometheusMonitoringEnabled returns whether Prometheus monitoring is enabled.
+func PrometheusMonitoringEnabled(app *v1beta2.SparkApplication) bool {
+    return app.Spec.Monitoring != nil && app.Spec.Monitoring.Prometheus != nil
+}
+
+// HasPrometheusConfigFile returns whether Prometheus monitoring uses a configuration file in the container.
+func HasPrometheusConfigFile(app *v1beta2.SparkApplication) bool {
+    return PrometheusMonitoringEnabled(app) &&
+        app.Spec.Monitoring.Prometheus.ConfigFile != nil &&
+        *app.Spec.Monitoring.Prometheus.ConfigFile != ""
+}
+
+// HasMetricsProperties returns whether Monitoring defines metricsProperties in the spec.
+func HasMetricsProperties(app *v1beta2.SparkApplication) bool {
+    return PrometheusMonitoringEnabled(app) &&
+        app.Spec.Monitoring.MetricsProperties != nil &&
+        *app.Spec.Monitoring.MetricsProperties != ""
+}
+
+// HasMetricsPropertiesFile returns whether Monitoring defines metricsPropertiesFile in the spec.
+func HasMetricsPropertiesFile(app *v1beta2.SparkApplication) bool {
+    return PrometheusMonitoringEnabled(app) &&
+        app.Spec.Monitoring.MetricsPropertiesFile != nil &&
+        *app.Spec.Monitoring.MetricsPropertiesFile != ""
+}
+
+// ExposeDriverMetrics returns whether driver metrics should be exposed.
+func ExposeDriverMetrics(app *v1beta2.SparkApplication) bool {
+    return app.Spec.Monitoring != nil && app.Spec.Monitoring.ExposeDriverMetrics
+}
+
+// ExposeExecutorMetrics returns whether executor metrics should be exposed.
+func ExposeExecutorMetrics(app *v1beta2.SparkApplication) bool {
+    return app.Spec.Monitoring != nil && app.Spec.Monitoring.ExposeExecutorMetrics
+}
+
+// GetOwnerReference returns an OwnerReference pointing to the given app.
+func GetOwnerReference(app *v1beta2.SparkApplication) metav1.OwnerReference {
+    return metav1.OwnerReference{
+        APIVersion: v1beta2.SchemeGroupVersion.String(),
+        Kind: reflect.TypeOf(v1beta2.SparkApplication{}).Name(),
+        Name: app.Name,
+        UID: app.UID,
+        Controller: BoolPtr(true),
+        BlockOwnerDeletion: BoolPtr(true),
+    }
+}
+
+// GetDriverState returns the driver state from the given driver pod.
+func GetDriverState(pod *corev1.Pod) v1beta2.DriverState {
+    switch pod.Status.Phase {
+    case corev1.PodPending:
+        return v1beta2.DriverStatePending
+    case corev1.PodRunning:
+        state := GetDriverContainerTerminatedState(pod)
+        if state != nil {
+            if state.ExitCode == 0 {
+                return v1beta2.DriverStateCompleted
+            }
+            return v1beta2.DriverStateFailed
+        }
+        return v1beta2.DriverStateRunning
+    case corev1.PodSucceeded:
+        return v1beta2.DriverStateCompleted
+    case corev1.PodFailed:
+        state := GetDriverContainerTerminatedState(pod)
+        if state != nil && state.ExitCode == 0 {
+            return v1beta2.DriverStateCompleted
+        }
+        return v1beta2.DriverStateFailed
+    default:
+        return v1beta2.DriverStateUnknown
+    }
+}
+
+// GetExecutorState returns the executor state from the given executor pod.
+func GetExecutorState(pod *corev1.Pod) v1beta2.ExecutorState {
+    switch pod.Status.Phase {
+    case corev1.PodPending:
+        return v1beta2.ExecutorStatePending
+    case corev1.PodRunning:
+        return v1beta2.ExecutorStateRunning
+    case corev1.PodSucceeded:
+        return v1beta2.ExecutorStateCompleted
+    case corev1.PodFailed:
+        return v1beta2.ExecutorStateFailed
+    default:
+        return v1beta2.ExecutorStateUnknown
+    }
+}
+
+// GetDriverContainerTerminatedState returns the terminated state of the driver container.
+func GetDriverContainerTerminatedState(pod *corev1.Pod) *corev1.ContainerStateTerminated {
+    return GetContainerTerminatedState(pod, common.SparkDriverContainerName)
+}
+
+// GetExecutorContainerTerminatedState returns the terminated state of the executor container.
+func GetExecutorContainerTerminatedState(pod *corev1.Pod) *corev1.ContainerStateTerminated {
+    state := GetContainerTerminatedState(pod, common.Spark3DefaultExecutorContainerName)
+    if state == nil {
+        state = GetContainerTerminatedState(pod, common.SparkExecutorContainerName)
+    }
+    return state
+}
+
+// GetContainerTerminatedState returns the terminated state of the container.
+func GetContainerTerminatedState(pod *corev1.Pod, container string) *corev1.ContainerStateTerminated {
+    for _, c := range pod.Status.ContainerStatuses {
+        if c.Name == container {
+            if c.State.Terminated != nil {
+                return c.State.Terminated
+            }
+            return nil
+        }
+    }
+    return nil
+}
+
+// IsDriverTerminated returns whether the driver state is a terminated state.
+func IsDriverTerminated(driverState v1beta2.DriverState) bool {
+    return driverState == v1beta2.DriverStateCompleted || driverState == v1beta2.DriverStateFailed
+}
+
+// IsExecutorTerminated returns whether the executor state is a terminated state.
+func IsExecutorTerminated(executorState v1beta2.ExecutorState) bool {
+    return executorState == v1beta2.ExecutorStateCompleted || executorState == v1beta2.ExecutorStateFailed
+}
+
+// DriverStateToApplicationState converts driver state to application state.
+func DriverStateToApplicationState(driverState v1beta2.DriverState) v1beta2.ApplicationStateType {
+    switch driverState {
+    case v1beta2.DriverStatePending:
+        return v1beta2.ApplicationStateSubmitted
+    case v1beta2.DriverStateRunning:
+        return v1beta2.ApplicationStateRunning
+    case v1beta2.DriverStateCompleted:
+        return v1beta2.ApplicationStateSucceeding
+    case v1beta2.DriverStateFailed:
+        return v1beta2.ApplicationStateFailing
+    default:
+        return v1beta2.ApplicationStateUnknown
+    }
+}
+
+// GetDriverRequestResource returns the driver request resource list.
+func GetDriverRequestResource(app *v1beta2.SparkApplication) corev1.ResourceList {
+    minResource := corev1.ResourceList{}
+
+    // Cores corresponds to the driver's core request.
+    if app.Spec.Driver.Cores != nil {
+        if value, err := resource.ParseQuantity(fmt.Sprintf("%d", *app.Spec.Driver.Cores)); err == nil {
+            minResource[corev1.ResourceCPU] = value
+        }
+    }
+
+    // CoreLimit corresponds to the driver's core limit; it is used only when the core request is empty.
+    if app.Spec.Driver.CoreLimit != nil {
+        if _, ok := minResource[corev1.ResourceCPU]; !ok {
+            if value, err := resource.ParseQuantity(*app.Spec.Driver.CoreLimit); err == nil {
+                minResource[corev1.ResourceCPU] = value
+            }
+        }
+    }
+
+    // Memory + MemoryOverhead correspond to the driver's memory request.
+    if app.Spec.Driver.Memory != nil {
+        if value, err := resource.ParseQuantity(*app.Spec.Driver.Memory); err == nil {
+            minResource[corev1.ResourceMemory] = value
+        }
+    }
+    if app.Spec.Driver.MemoryOverhead != nil {
+        if value, err := resource.ParseQuantity(*app.Spec.Driver.MemoryOverhead); err == nil {
+            if existing, ok := minResource[corev1.ResourceMemory]; ok {
+                existing.Add(value)
+                minResource[corev1.ResourceMemory] = existing
+            }
+        }
+    }
+
+    return minResource
+}
+
+// GetExecutorRequestResource returns the executor request resource list.
+func GetExecutorRequestResource(app *v1beta2.SparkApplication) corev1.ResourceList {
+    minResource := corev1.ResourceList{}
+
+    // CoreRequest corresponds to the executor's core request.
+    if app.Spec.Executor.CoreRequest != nil {
+        if value, err := resource.ParseQuantity(*app.Spec.Executor.CoreRequest); err == nil {
+            minResource[corev1.ResourceCPU] = value
+        }
+    }
+
+    // Use the Cores attribute if CoreRequest is empty.
+    if app.Spec.Executor.Cores != nil {
+        if _, ok := minResource[corev1.ResourceCPU]; !ok {
+            if value, err := resource.ParseQuantity(fmt.Sprintf("%d", *app.Spec.Executor.Cores)); err == nil {
+                minResource[corev1.ResourceCPU] = value
+            }
+        }
+    }
+
+    // CoreLimit corresponds to the executor's core limit; it is used only when the core request is empty.
+    if app.Spec.Executor.CoreLimit != nil {
+        if _, ok := minResource[corev1.ResourceCPU]; !ok {
+            if value, err := resource.ParseQuantity(*app.Spec.Executor.CoreLimit); err == nil {
+                minResource[corev1.ResourceCPU] = value
+            }
+        }
+    }
+
+    // Memory + MemoryOverhead correspond to the executor's memory request.
+    if app.Spec.Executor.Memory != nil {
+        if value, err := resource.ParseQuantity(*app.Spec.Executor.Memory); err == nil {
+            minResource[corev1.ResourceMemory] = value
+        }
+    }
+    if app.Spec.Executor.MemoryOverhead != nil {
+        if value, err := resource.ParseQuantity(*app.Spec.Executor.MemoryOverhead); err == nil {
+            if existing, ok := minResource[corev1.ResourceMemory]; ok {
+                existing.Add(value)
+                minResource[corev1.ResourceMemory] = existing
+            }
+        }
+    }
+
+    // Guard against an unset instance count to avoid dereferencing a nil pointer;
+    // with no instances specified, no executor resources are counted.
+    resourceList := []corev1.ResourceList{}
+    if app.Spec.Executor.Instances != nil {
+        for i := int32(0); i < *app.Spec.Executor.Instances; i++ {
+            resourceList = append(resourceList, minResource)
+        }
+    }
+    return SumResourceList(resourceList)
+}
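GetExecutorRequestResource above computes the per-executor minimum (CoreRequest, falling back to Cores and then CoreLimit for CPU; Memory plus MemoryOverhead for memory) and repeats it once per requested instance before summing. A rough worked sketch under those rules; the pointer helpers are hypothetical, and the field placement is assumed to follow the embedded SparkPodSpec:

package main

import (
    "fmt"

    "github.com/kubeflow/spark-operator/api/v1beta2"
    "github.com/kubeflow/spark-operator/pkg/util"
)

func strPtr(s string) *string { return &s }
func int32Ptr(i int32) *int32 { return &i }

func main() {
    app := &v1beta2.SparkApplication{
        Spec: v1beta2.SparkApplicationSpec{
            Executor: v1beta2.ExecutorSpec{
                Instances: int32Ptr(3),
                SparkPodSpec: v1beta2.SparkPodSpec{
                    CoreRequest:    strPtr("500m"),
                    Memory:         strPtr("2Gi"),
                    MemoryOverhead: strPtr("512Mi"),
                },
            },
        },
    }

    // Per executor: cpu=500m, memory=2Gi+512Mi=2560Mi; three instances
    // should therefore sum to cpu=1500m and memory=7680Mi.
    total := util.GetExecutorRequestResource(app)
    fmt.Printf("cpu=%s memory=%s\n", total.Cpu(), total.Memory())
}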
"github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/kubeflow/spark-operator/api/v1beta2" + "github.com/kubeflow/spark-operator/pkg/common" + "github.com/kubeflow/spark-operator/pkg/util" +) + +var _ = Describe("GetDriverPodName", func() { + Context("SparkApplication without driver pod name field and driver pod name conf", func() { + app := &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + }, + } + + It("Should return the default driver pod name", func() { + Expect(util.GetDriverPodName(app)).To(Equal("test-app-driver")) + }) + }) + + Context("SparkApplication with only driver pod name field", func() { + driverPodName := "test-app-driver-pod" + app := &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + }, + Spec: v1beta2.SparkApplicationSpec{ + Driver: v1beta2.DriverSpec{ + PodName: &driverPodName, + }, + }, + } + + It("Should return the driver pod name from driver spec", func() { + Expect(util.GetDriverPodName(app)).To(Equal(driverPodName)) + }) + }) + + Context("SparkApplication with only driver pod name conf", func() { + driverPodName := "test-app-driver-pod" + app := &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + }, + Spec: v1beta2.SparkApplicationSpec{ + SparkConf: map[string]string{ + common.SparkKubernetesDriverPodName: driverPodName, + }, + }, + } + + It("Should return the driver name from spark conf", func() { + Expect(util.GetDriverPodName(app)).To(Equal(driverPodName)) + }) + }) + + Context("SparkApplication with both driver pod name field and driver pod name conf", func() { + driverPodName1 := "test-app-driver-1" + driverPodName2 := "test-app-driver-2" + app := &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + }, + Spec: v1beta2.SparkApplicationSpec{ + SparkConf: map[string]string{ + common.SparkKubernetesDriverPodName: driverPodName1, + }, + Driver: v1beta2.DriverSpec{ + PodName: &driverPodName2, + }, + }, + } + + It("Should return the driver pod name from driver spec", func() { + Expect(util.GetDriverPodName(app)).To(Equal(driverPodName2)) + }) + }) +}) + +var _ = Describe("GetApplicationState", func() { + Context("SparkApplication with completed state", func() { + app := &v1beta2.SparkApplication{ + Status: v1beta2.SparkApplicationStatus{ + AppState: v1beta2.ApplicationState{ + State: v1beta2.ApplicationStateCompleted, + }, + }, + } + + It("Should return completed state", func() { + Expect(util.GetApplicationState(app)).To(Equal(v1beta2.ApplicationStateCompleted)) + }) + }) +}) + +var _ = Describe("IsExpired", func() { + Context("SparkApplication without TTL", func() { + app := &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + }, + } + + It("Should return false", func() { + Expect(util.IsExpired(app)).To(BeFalse()) + }) + }) + + Context("SparkApplication not terminated with TTL", func() { + app := &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + }, + Spec: v1beta2.SparkApplicationSpec{ + TimeToLiveSeconds: util.Int64Ptr(3600), + }, + } + + It("Should return false", func() { + Expect(util.IsExpired(app)).To(BeFalse()) + }) + }) + + Context("SparkApplication terminated with TTL not expired", func() { + now := time.Now() + app := 
&v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + }, + Spec: v1beta2.SparkApplicationSpec{ + TimeToLiveSeconds: util.Int64Ptr(3600), + }, + Status: v1beta2.SparkApplicationStatus{ + TerminationTime: metav1.NewTime(now.Add(-30 * time.Minute)), + }, + } + + It("Should return false", func() { + Expect(util.IsExpired(app)).To(BeFalse()) + }) + }) + + Context("SparkApplication terminated with TTL expired", func() { + now := time.Now() + app := &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + }, + Spec: v1beta2.SparkApplicationSpec{ + TimeToLiveSeconds: util.Int64Ptr(3600), + }, + Status: v1beta2.SparkApplicationStatus{ + TerminationTime: metav1.NewTime(now.Add(-2 * time.Hour)), + }, + } + + It("Should return true", func() { + Expect(util.IsExpired(app)).To(BeTrue()) + }) + }) +}) + +var _ = Describe("IsDriverRunning", func() { + Context("SparkApplication with completed state", func() { + app := &v1beta2.SparkApplication{ + Status: v1beta2.SparkApplicationStatus{ + AppState: v1beta2.ApplicationState{ + State: v1beta2.ApplicationStateCompleted, + }, + }, + } + + It("Should return false", func() { + Expect(util.IsDriverRunning(app)).To(BeFalse()) + }) + }) + + Context("SparkApplication with running state", func() { + app := &v1beta2.SparkApplication{ + Status: v1beta2.SparkApplicationStatus{ + AppState: v1beta2.ApplicationState{ + State: v1beta2.ApplicationStateRunning, + }, + }, + } + + It("Should return true", func() { + Expect(util.IsDriverRunning(app)).To(BeTrue()) + }) + }) +}) + +var _ = Describe("GetLocalVolumes", func() { + Context("SparkApplication with local volumes", func() { + volume1 := corev1.Volume{ + Name: "local-volume", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/tmp", + }, + }, + } + + volume2 := corev1.Volume{ + Name: "spark-local-dir-1", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/mnt/spark-local-dir-1", + }, + }, + } + + volume3 := corev1.Volume{ + Name: "spark-local-dir-2", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/mnt/spark-local-dir-2", + }, + }, + } + + app := &v1beta2.SparkApplication{ + Spec: v1beta2.SparkApplicationSpec{ + Volumes: []corev1.Volume{ + volume1, + volume2, + volume3, + }, + }, + } + + It("Should return volumes with the correct prefix", func() { + volumes := util.GetLocalVolumes(app) + expected := map[string]corev1.Volume{ + volume2.Name: volume2, + volume3.Name: volume3, + } + Expect(volumes).To(Equal(expected)) + }) + }) +}) + +var _ = Describe("GetDefaultUIServiceName", func() { + app := &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + }, + } + + It("Should return the default UI service name", func() { + Expect(util.GetDefaultUIServiceName(app)).To(Equal("test-app-ui-svc")) + }) +}) + +var _ = Describe("GetDefaultUIIngressName", func() { + app := &v1beta2.SparkApplication{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + }, + } + + It("Should return the default UI ingress name", func() { + Expect(util.GetDefaultUIIngressName(app)).To(Equal("test-app-ui-ingress")) + }) +}) + +var _ = Describe("IsDriverTerminated", func() { + It("Should check whether driver is terminated", func() { + Expect(util.IsDriverTerminated(v1beta2.DriverStatePending)).To(BeFalse()) + 
Expect(util.IsDriverTerminated(v1beta2.DriverStateRunning)).To(BeFalse()) + Expect(util.IsDriverTerminated(v1beta2.DriverStateCompleted)).To(BeTrue()) + Expect(util.IsDriverTerminated(v1beta2.DriverStateFailed)).To(BeTrue()) + Expect(util.IsDriverTerminated(v1beta2.DriverStateUnknown)).To(BeFalse()) + }) +}) + +var _ = Describe("IsExecutorTerminated", func() { + It("Should check whether executor is terminated", func() { + Expect(util.IsExecutorTerminated(v1beta2.ExecutorStatePending)).To(BeFalse()) + Expect(util.IsExecutorTerminated(v1beta2.ExecutorStateRunning)).To(BeFalse()) + Expect(util.IsExecutorTerminated(v1beta2.ExecutorStateCompleted)).To(BeTrue()) + Expect(util.IsExecutorTerminated(v1beta2.ExecutorStateFailed)).To(BeTrue()) + Expect(util.IsExecutorTerminated(v1beta2.ExecutorStateUnknown)).To(BeFalse()) + }) +}) + +var _ = Describe("DriverStateToApplicationState", func() { + It("Should convert driver state to application state correctly", func() { + Expect(util.DriverStateToApplicationState(v1beta2.DriverStatePending)).To(Equal(v1beta2.ApplicationStateSubmitted)) + Expect(util.DriverStateToApplicationState(v1beta2.DriverStateRunning)).To(Equal(v1beta2.ApplicationStateRunning)) + Expect(util.DriverStateToApplicationState(v1beta2.DriverStateCompleted)).To(Equal(v1beta2.ApplicationStateSucceeding)) + Expect(util.DriverStateToApplicationState(v1beta2.DriverStateFailed)).To(Equal(v1beta2.ApplicationStateFailing)) + Expect(util.DriverStateToApplicationState(v1beta2.DriverStateUnknown)).To(Equal(v1beta2.ApplicationStateUnknown)) + }) +}) diff --git a/pkg/util/sparkpod.go b/pkg/util/sparkpod.go new file mode 100644 index 0000000000..137fb0d5b9 --- /dev/null +++ b/pkg/util/sparkpod.go @@ -0,0 +1,48 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + corev1 "k8s.io/api/core/v1" + + "github.com/kubeflow/spark-operator/pkg/common" +) + +// IsLaunchedBySparkOperator returns whether the given pod is launched by the Spark Operator. +func IsLaunchedBySparkOperator(pod *corev1.Pod) bool { + return pod.Labels[common.LabelLaunchedBySparkOperator] == "true" +} + +// IsDriverPod returns whether the given pod is a Spark driver Pod. +func IsDriverPod(pod *corev1.Pod) bool { + return pod.Labels[common.LabelSparkRole] == common.SparkRoleDriver +} + +// IsExecutorPod returns whether the given pod is a Spark executor Pod. +func IsExecutorPod(pod *corev1.Pod) bool { + return pod.Labels[common.LabelSparkRole] == common.SparkRoleExecutor +} + +// GetAppName returns the spark application name by checking out pod labels. +func GetAppName(pod *corev1.Pod) string { + return pod.Labels[common.LabelSparkAppName] +} + +// GetSparkApplicationID returns the spark application ID by checking out pod labels. 
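// The selector label is set on driver and executor pods by Spark's own
// Kubernetes backend, so it is expected to be present even on pods that were
// not launched by the operator. A hypothetical lookup might read:
//
//	if id := util.GetSparkApplicationID(pod); id == "" {
//		// pod does not belong to any Spark application
//	}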
+func GetSparkApplicationID(pod *corev1.Pod) string { + return pod.Labels[common.LabelSparkApplicationSelector] +} diff --git a/pkg/util/sparkpod_test.go b/pkg/util/sparkpod_test.go new file mode 100644 index 0000000000..a138f6795a --- /dev/null +++ b/pkg/util/sparkpod_test.go @@ -0,0 +1,301 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util_test + +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/kubeflow/spark-operator/pkg/common" + "github.com/kubeflow/spark-operator/pkg/util" +) + +var _ = Describe("IsLaunchedBySparkOperator", func() { + Context("Pod without labels", func() { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + }, + } + + It("Should return false", func() { + Expect(util.IsLaunchedBySparkOperator(pod)).To(BeFalse()) + }) + }) + + Context("Pod without launched by spark operator label", func() { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + Labels: map[string]string{ + common.LabelSparkAppName: "test-app", + }, + }, + } + + It("Should return false", func() { + Expect(util.IsLaunchedBySparkOperator(pod)).To(BeFalse()) + }) + }) + + Context("Pod with launched by spark operator label", func() { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + Labels: map[string]string{ + common.LabelSparkAppName: "test-app", + common.LabelLaunchedBySparkOperator: "true", + }, + }, + } + + It("Should return true", func() { + Expect(util.IsLaunchedBySparkOperator(pod)).To(BeTrue()) + }) + }) +}) + +var _ = Describe("IsDriverPod", func() { + Context("Pod without labels", func() { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + }, + } + + It("Should return false", func() { + Expect(util.IsDriverPod(pod)).To(BeFalse()) + }) + }) + + Context("Pod without spark role label", func() { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + Labels: map[string]string{ + common.LabelSparkAppName: "test-app", + }, + }, + } + + It("Should return false", func() { + Expect(util.IsDriverPod(pod)).To(BeFalse()) + }) + }) + + Context("Pod with spark role label not equal to driver", func() { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + Labels: map[string]string{ + common.LabelSparkAppName: "test-app", + common.LabelSparkRole: common.SparkRoleExecutor, + }, + }, + } + + It("Should return false", func() { + Expect(util.IsDriverPod(pod)).To(BeFalse()) + }) + }) + + Context("Pod with spark role label equal to driver", func() { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + Labels: map[string]string{ + common.LabelSparkAppName: "test-app", + 
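// The role label below is what IsDriverPod keys on; the app-name
// label alone is not sufficient, as the previous contexts show.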
common.LabelSparkRole: common.SparkRoleDriver, + }, + }, + } + + It("Should return true", func() { + Expect(util.IsDriverPod(pod)).To(BeTrue()) + }) + }) +}) + +var _ = Describe("IsExecutorPod", func() { + Context("Pod without labels", func() { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + }, + } + + It("Should return false", func() { + Expect(util.IsExecutorPod(pod)).To(BeFalse()) + }) + }) + + Context("Pod without spark role label", func() { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + Labels: map[string]string{ + common.LabelSparkAppName: "test-app", + }, + }, + } + + It("Should return false", func() { + Expect(util.IsExecutorPod(pod)).To(BeFalse()) + }) + }) + + Context("Pod with spark role label not equal to executor", func() { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + Labels: map[string]string{ + common.LabelSparkAppName: "test-app", + common.LabelSparkRole: common.SparkRoleDriver, + }, + }, + } + + It("Should return false", func() { + Expect(util.IsExecutorPod(pod)).To(BeFalse()) + }) + }) + + Context("Pod with spark role label equal to executor", func() { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + Labels: map[string]string{ + common.LabelSparkAppName: "test-app", + common.LabelSparkRole: common.SparkRoleExecutor, + }, + }, + } + + It("Should return true", func() { + Expect(util.IsExecutorPod(pod)).To(BeTrue()) + }) + }) +}) + +var _ = Describe("GetAppName", func() { + Context("Pod without labels", func() { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + }, + } + + It("Should return empty application name", func() { + Expect(util.GetAppName(pod)).To(BeEmpty()) + }) + }) + + Context("Pod without app name label", func() { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + Labels: map[string]string{ + common.LabelScheduledSparkAppName: "true", + }, + }, + } + + It("Should return empty application name", func() { + Expect(util.GetAppName(pod)).To(BeEmpty()) + }) + }) + + Context("Pod with app name label", func() { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + Labels: map[string]string{ + common.LabelSparkAppName: "test-app", + }, + }, + } + + It("Should return the application name", func() { + Expect(util.GetAppName(pod)).To(Equal("test-app")) + }) + }) +}) + +var _ = Describe("GetSparkApplicationID", func() { + Context("Pod without labels", func() { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + }, + } + + It("Should return empty application ID", func() { + Expect(util.GetSparkApplicationID(pod)).To(BeEmpty()) + }) + }) + + Context("Pod without spark app selector label", func() { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + Labels: map[string]string{ + common.LabelSparkAppName: "test-app", + }, + }, + } + + It("Should return empty application ID", func() { + Expect(util.GetSparkApplicationID(pod)).To(BeEmpty()) + }) + }) + + Context("Pod with spark app selector label", func() { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + Labels: map[string]string{ + common.LabelSparkAppName: 
"test-app", + common.LabelSparkApplicationSelector: "test-app-id", + }, + }, + } + + It("Should return the application ID", func() { + Expect(util.GetSparkApplicationID(pod)).To(Equal("test-app-id")) + }) + }) +}) diff --git a/pkg/util/suite_test.go b/pkg/util/suite_test.go new file mode 100644 index 0000000000..e442df1464 --- /dev/null +++ b/pkg/util/suite_test.go @@ -0,0 +1,37 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestUtil(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Util Suite") +} + +var _ = BeforeSuite(func() { + log.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) +}) diff --git a/pkg/util/util.go b/pkg/util/util.go index d39e2b19bd..850bc209d0 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -17,45 +17,63 @@ limitations under the License. package util import ( - "hash" - "hash/fnv" - "reflect" + "fmt" + "os" + "strings" - apiv1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - "github.com/kubeflow/spark-operator/pkg/config" + "github.com/kubeflow/spark-operator/pkg/common" ) -// NewHash32 returns a 32-bit hash computed from the given byte slice. -func NewHash32() hash.Hash32 { - return fnv.New32() +func GetMasterURL() (string, error) { + kubernetesServiceHost := os.Getenv(common.EnvKubernetesServiceHost) + if kubernetesServiceHost == "" { + return "", fmt.Errorf("environment variable %s is not found", common.EnvKubernetesServiceHost) + } + + kubernetesServicePort := os.Getenv(common.EnvKubernetesServicePort) + if kubernetesServicePort == "" { + return "", fmt.Errorf("environment variable %s is not found", common.EnvKubernetesServicePort) + } + // check if the host is IPv6 address + if strings.Contains(kubernetesServiceHost, ":") && !strings.HasPrefix(kubernetesServiceHost, "[") { + return fmt.Sprintf("k8s://https://[%s]:%s", kubernetesServiceHost, kubernetesServicePort), nil + } + return fmt.Sprintf("k8s://https://%s:%s", kubernetesServiceHost, kubernetesServicePort), nil +} + +// Helper functions to check and remove a string from a slice of strings. +// ContainsString checks if a given string is present in a slice +func ContainsString(slice []string, s string) bool { + for _, item := range slice { + if item == s { + return true + } + } + return false } -// GetOwnerReference returns an OwnerReference pointing to the given app. 
-func GetOwnerReference(app *v1beta2.SparkApplication) metav1.OwnerReference { - controller := true - return metav1.OwnerReference{ - APIVersion: v1beta2.SchemeGroupVersion.String(), - Kind: reflect.TypeOf(v1beta2.SparkApplication{}).Name(), - Name: app.Name, - UID: app.UID, - Controller: &controller, +// RemoveString removes a given string from a slice, if present +func RemoveString(slice []string, s string) (result []string) { + for _, item := range slice { + if item != s { + result = append(result, item) + } } + return result +} + +func BoolPtr(b bool) *bool { + return &b } -// IsLaunchedBySparkOperator returns whether the given pod is launched by the Spark Operator. -func IsLaunchedBySparkOperator(pod *apiv1.Pod) bool { - return pod.Labels[config.LaunchedBySparkOperatorLabel] == "true" +func Int32Ptr(n int32) *int32 { + return &n } -// IsDriverPod returns whether the given pod is a Spark driver Pod. -func IsDriverPod(pod *apiv1.Pod) bool { - return pod.Labels[config.SparkRoleLabel] == config.SparkDriverRole +func Int64Ptr(n int64) *int64 { + return &n } -// IsExecutorPod returns whether the given pod is a Spark executor Pod. -func IsExecutorPod(pod *apiv1.Pod) bool { - return pod.Labels[config.SparkRoleLabel] == config.SparkExecutorRole +func StringPtr(s string) *string { + return &s } diff --git a/pkg/util/util_test.go b/pkg/util/util_test.go new file mode 100644 index 0000000000..324ed3580f --- /dev/null +++ b/pkg/util/util_test.go @@ -0,0 +1,131 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util_test + +import ( + "os" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/kubeflow/spark-operator/pkg/common" + "github.com/kubeflow/spark-operator/pkg/util" +) + +var _ = Describe("GetMasterURL", func() { + BeforeEach(func() { + os.Setenv(common.EnvKubernetesServiceHost, "127.0.0.1") + os.Setenv(common.EnvKubernetesServicePort, "443") + }) + + AfterEach(func() { + os.Unsetenv(common.EnvKubernetesServiceHost) + os.Unsetenv(common.EnvKubernetesServicePort) + }) + + Context("IPv4 address", func() { + It("Should return correct master URL without error", func() { + masterURL, err := util.GetMasterURL() + Expect(masterURL).To(Equal("k8s://https://127.0.0.1:443")) + Expect(err).NotTo(HaveOccurred()) + }) + }) +}) + +var _ = Describe("GetMasterURL", func() { + BeforeEach(func() { + os.Setenv(common.EnvKubernetesServiceHost, "::1") + os.Setenv(common.EnvKubernetesServicePort, "443") + }) + + AfterEach(func() { + os.Unsetenv(common.EnvKubernetesServiceHost) + os.Unsetenv(common.EnvKubernetesServicePort) + }) + + Context("IPv6 address", func() { + It("Should return correct master URL without error", func() { + masterURL, err := util.GetMasterURL() + Expect(masterURL).To(Equal("k8s://https://[::1]:443")) + Expect(err).NotTo(HaveOccurred()) + }) + }) +}) + +var _ = Describe("ContainsString", func() { + slice := []string{"a", "b", "c"} + + Context("When the string is in the slice", func() { + It("Should return true", func() { + Expect(util.ContainsString(slice, "b")).To(BeTrue()) + }) + }) + + Context("When the string is not in the slice", func() { + It("Should return false", func() { + Expect(util.ContainsString(slice, "d")).To(BeFalse()) + }) + }) +}) + +var _ = Describe("RemoveString", func() { + Context("When the string is in the slice", func() { + slice := []string{"a", "b", "c"} + expected := []string{"a", "c"} + + It("Should remove the string", func() { + Expect(util.RemoveString(slice, "b")).To(Equal(expected)) + }) + }) + + Context("When the string is not in the slice", func() { + slice := []string{"a", "b", "c"} + expected := []string{"a", "b", "c"} + + It("Should do nothing", func() { + Expect(util.RemoveString(slice, "d")).To(Equal(expected)) + }) + }) +}) + +var _ = Describe("BoolPtr", func() { + It("Should return a pointer to the given bool value", func() { + b := true + Expect(util.BoolPtr(b)).To(Equal(&b)) + }) +}) + +var _ = Describe("Int32Ptr", func() { + It("Should return a pointer to the given int32 value", func() { + i := int32(42) + Expect(util.Int32Ptr(i)).To(Equal(&i)) + }) +}) + +var _ = Describe("Int64Ptr", func() { + It("Should return a pointer to the given int64 value", func() { + i := int64(42) + Expect(util.Int64Ptr(i)).To(Equal(&i)) + }) +}) + +var _ = Describe("StringPtr", func() { + It("Should return a pointer to the given string value", func() { + s := "hello" + Expect(util.StringPtr(s)).To(Equal(&s)) + }) +}) diff --git a/pkg/webhook/certs.go b/pkg/webhook/certs.go deleted file mode 100644 index 75e0668332..0000000000 --- a/pkg/webhook/certs.go +++ /dev/null @@ -1,170 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package webhook - -import ( - "crypto/rsa" - "crypto/tls" - "crypto/x509" - "encoding/pem" - "errors" - "fmt" - "net" - - "k8s.io/client-go/util/cert" - - "github.com/kubeflow/spark-operator/pkg/util" -) - -const ( - Organization = "spark-operator" -) - -// certProvider is a container of a X509 certificate file and a corresponding key file for the -// webhook server, and a CA certificate file for the API server to verify the server certificate. -type certProvider struct { - caKey *rsa.PrivateKey - caCert *x509.Certificate - serverKey *rsa.PrivateKey - serverCert *x509.Certificate -} - -// NewCertProvider creates a new CertProvider instance. -func NewCertProvider(name, namespace string) (*certProvider, error) { - commonName := fmt.Sprintf("%s.%s.svc", name, namespace) - - // Generate CA private caKey - caKey, err := util.NewPrivateKey() - if err != nil { - return nil, fmt.Errorf("failed to generate CA private key: %v", err) - } - - // Generate self-signed CA certificate - caCfg := cert.Config{ - CommonName: commonName, - Organization: []string{Organization}, - } - caCert, err := cert.NewSelfSignedCACert(caCfg, caKey) - if err != nil { - return nil, fmt.Errorf("failed to generate self-signed CA certificate: %v", err) - } - - // Generate server private key - serverKey, err := util.NewPrivateKey() - if err != nil { - return nil, fmt.Errorf("failed to generate server private key: %v", err) - } - - // Generate signed server certificate - var ips []net.IP - dnsNames := []string{"localhost"} - hostIP := net.ParseIP(commonName) - if hostIP.To4() != nil { - ips = append(ips, hostIP.To4()) - } else { - dnsNames = append(dnsNames, commonName) - } - serverCfg := cert.Config{ - CommonName: commonName, - Organization: []string{Organization}, - AltNames: cert.AltNames{IPs: ips, DNSNames: dnsNames}, - } - serverCert, err := util.NewSignedServerCert(serverCfg, caKey, caCert, serverKey) - if err != nil { - return nil, fmt.Errorf("failed to generate signed server certificate: %v", err) - } - - certProvider := certProvider{ - caKey: caKey, - caCert: caCert, - serverKey: serverKey, - serverCert: serverCert, - } - - return &certProvider, nil -} - -// CAKey returns the PEM-encoded CA private key. -func (cp *certProvider) CAKey() ([]byte, error) { - if cp.caKey == nil { - return nil, errors.New("CA key is not set") - } - data := pem.EncodeToMemory(&pem.Block{ - Type: "RSA PRIVATE KEY", - Bytes: x509.MarshalPKCS1PrivateKey(cp.caKey), - }) - return data, nil -} - -// CACert returns the PEM-encoded CA certificate. -func (cp *certProvider) CACert() ([]byte, error) { - if cp.caCert == nil { - return nil, errors.New("CA certificate is not set") - } - data := pem.EncodeToMemory(&pem.Block{ - Type: "CERTIFICATE", - Bytes: cp.serverCert.Raw, - }) - return data, nil -} - -// ServerKey returns the PEM-encoded server private key. -func (cp *certProvider) ServerKey() ([]byte, error) { - if cp.serverKey == nil { - return nil, errors.New("server key is not set") - } - data := pem.EncodeToMemory(&pem.Block{ - Type: "RSA PRIVATE KEY", - Bytes: x509.MarshalPKCS1PrivateKey(cp.serverKey), - }) - return data, nil -} - -// ServerCert returns the PEM-encoded server cert. 
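// Like the accessors above, this wraps the certificate's DER bytes in a PEM
// block; the same pattern works for any *x509.Certificate:
//
//	pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})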
-func (cp *certProvider) ServerCert() ([]byte, error) { - if cp.serverCert == nil { - return nil, errors.New("server cert is not set") - } - data := pem.EncodeToMemory(&pem.Block{ - Type: "CERTIFICATE", - Bytes: cp.serverCert.Raw, - }) - return data, nil -} - -// TLSConfig returns the TLS configuration. -func (cp *certProvider) TLSConfig() (*tls.Config, error) { - keyPEMBlock, err := cp.ServerKey() - if err != nil { - return nil, fmt.Errorf("failed to get server key: %v", err) - } - - certPEMBlock, err := cp.ServerCert() - if err != nil { - return nil, fmt.Errorf("failed to get server certificate: %v", err) - } - - tlsCert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock) - if err != nil { - return nil, fmt.Errorf("failed to generate TLS certificate: %v", err) - } - - cfg := &tls.Config{ - Certificates: []tls.Certificate{tlsCert}, - } - return cfg, nil -} diff --git a/pkg/webhook/certs_test.go b/pkg/webhook/certs_test.go deleted file mode 100644 index d8f10ec193..0000000000 --- a/pkg/webhook/certs_test.go +++ /dev/null @@ -1,118 +0,0 @@ -package webhook - -import "testing" - -// TestNewCertProvider tests the NewCertProvider function. -func TestNewCertProvider(t *testing.T) { - name := "test-name" - namespace := "test-namespace" - - cp, err := NewCertProvider(name, namespace) - if err != nil { - t.Errorf("failed to create CertProvider: %v", err) - } - - // Check if the returned CertProvider has non-nil fields. - if cp.caKey == nil { - t.Error("CA key is nil") - } - if cp.caCert == nil { - t.Error("CA certificate is nil") - } - if cp.serverKey == nil { - t.Error("server key is nil") - } - if cp.serverCert == nil { - t.Error("server certificate is nil") - } -} - -// TestCAKey tests the CAKey method of certProvider. -func TestCAKey(t *testing.T) { - cp, err := NewCertProvider("test-name", "test-namespace") - if err != nil { - t.Errorf("failed to create CertProvider: %v", err) - } - - key, err := cp.CAKey() - if err != nil { - t.Errorf("failed to get CA key: %v", err) - } - - // Check if the returned key is not nil. - if key == nil { - t.Error("CA key is nil") - } -} - -// TestCACert tests the CACert method of certProvider. -func TestCACert(t *testing.T) { - cp, err := NewCertProvider("test-name", "test-namespace") - if err != nil { - t.Errorf("failed to create CertProvider: %v", err) - } - - cert, err := cp.CACert() - if err != nil { - t.Errorf("failed to get CA certificate: %v", err) - } - - // Check if the returned certificate is not nil. - if cert == nil { - t.Error("CA certificate is nil") - } -} - -// TestServerKey tests the ServerKey method of certProvider. -func TestServerKey(t *testing.T) { - cp, err := NewCertProvider("test-name", "test-namespace") - if err != nil { - t.Errorf("failed to create CertProvider: %v", err) - } - - key, err := cp.ServerKey() - if err != nil { - t.Errorf("failed to get server key: %v", err) - } - - // Check if the returned key is not nil. - if key == nil { - t.Error("server key is nil") - } -} - -// TestServerCert tests the ServerCert method of certProvider. -func TestServerCert(t *testing.T) { - cp, err := NewCertProvider("test-name", "test-namespace") - if err != nil { - t.Errorf("failed to create CertProvider: %v", err) - } - - cert, err := cp.ServerCert() - if err != nil { - t.Errorf("failed to get server certificate: %v", err) - } - - // Check if the returned certificate is not nil. - if cert == nil { - t.Error("server certificate is nil") - } -} - -// TestTLSConfig tests the TLSConfig method of certProvider. 
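// TLSConfig round-trips the PEM-encoded server key and certificate through
// tls.X509KeyPair, so this test indirectly exercises ServerKey and
// ServerCert as well.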
-func TestTLSConfig(t *testing.T) { - cp, err := NewCertProvider("test-name", "test-namespace") - if err != nil { - t.Errorf("failed to create CertProvider: %v", err) - } - - cfg, err := cp.TLSConfig() - if err != nil { - t.Errorf("failed to get TLS configuration: %v", err) - } - - // Check if the returned configuration is not nil. - if cfg == nil { - t.Error("TLS configuration is nil") - } -} diff --git a/pkg/webhook/patch.go b/pkg/webhook/patch.go deleted file mode 100644 index a7c20a8168..0000000000 --- a/pkg/webhook/patch.go +++ /dev/null @@ -1,856 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package webhook - -import ( - "fmt" - "strings" - - "github.com/golang/glog" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - "github.com/kubeflow/spark-operator/pkg/config" - "github.com/kubeflow/spark-operator/pkg/util" -) - -const ( - maxNameLength = 63 -) - -// patchOperation represents a RFC6902 JSON patch operation. -type patchOperation struct { - Op string `json:"op"` - Path string `json:"path"` - Value interface{} `json:"value,omitempty"` -} - -func patchSparkPod(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { - var patchOps []patchOperation - - if util.IsDriverPod(pod) { - patchOps = append(patchOps, addOwnerReference(pod, app)) - } - - patchOps = append(patchOps, addVolumes(pod, app)...) - patchOps = append(patchOps, addGeneralConfigMaps(pod, app)...) - patchOps = append(patchOps, addSparkConfigMap(pod, app)...) - patchOps = append(patchOps, addHadoopConfigMap(pod, app)...) - patchOps = append(patchOps, getPrometheusConfigPatches(pod, app)...) - patchOps = append(patchOps, addTolerations(pod, app)...) - patchOps = append(patchOps, addSidecarContainers(pod, app)...) - patchOps = append(patchOps, addInitContainers(pod, app)...) - patchOps = append(patchOps, addHostNetwork(pod, app)...) - patchOps = append(patchOps, addNodeSelectors(pod, app)...) - patchOps = append(patchOps, addDNSConfig(pod, app)...) - patchOps = append(patchOps, addEnvVars(pod, app)...) - patchOps = append(patchOps, addEnvFrom(pod, app)...) - patchOps = append(patchOps, addHostAliases(pod, app)...) - patchOps = append(patchOps, addContainerPorts(pod, app)...) - patchOps = append(patchOps, addPriorityClassName(pod, app)...) 
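// Unlike the slice-returning helpers above, the helpers below return a
// *patchOperation that is nil when there is nothing to patch, so each
// result is nil-checked before being appended.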
- - op := addSchedulerName(pod, app) - if op != nil { - patchOps = append(patchOps, *op) - } - - if pod.Spec.Affinity == nil { - op := addAffinity(pod, app) - if op != nil { - patchOps = append(patchOps, *op) - } - } - - op = addPodSecurityContext(pod, app) - if op != nil { - patchOps = append(patchOps, *op) - } - - op = addSecurityContext(pod, app) - if op != nil { - patchOps = append(patchOps, *op) - } - - op = addGPU(pod, app) - if op != nil { - patchOps = append(patchOps, *op) - } - - op = addTerminationGracePeriodSeconds(pod, app) - if op != nil { - patchOps = append(patchOps, *op) - } - - op = addPodLifeCycleConfig(pod, app) - if op != nil { - patchOps = append(patchOps, *op) - } - - op = addShareProcessNamespace(pod, app) - if op != nil { - patchOps = append(patchOps, *op) - } - - return patchOps -} - -func addOwnerReference(pod *corev1.Pod, app *v1beta2.SparkApplication) patchOperation { - ownerReference := util.GetOwnerReference(app) - - path := "/metadata/ownerReferences" - var value interface{} - if len(pod.OwnerReferences) == 0 { - value = []metav1.OwnerReference{ownerReference} - } else { - path += "/-" - value = ownerReference - } - - return patchOperation{Op: "add", Path: path, Value: value} -} - -func addVolumes(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { - volumes := app.Spec.Volumes - - volumeMap := make(map[string]corev1.Volume) - for _, v := range volumes { - volumeMap[v.Name] = v - } - - var volumeMounts []corev1.VolumeMount - if util.IsDriverPod(pod) { - volumeMounts = app.Spec.Driver.VolumeMounts - } else if util.IsExecutorPod(pod) { - volumeMounts = app.Spec.Executor.VolumeMounts - } - - var ops []patchOperation - addedVolumeMap := make(map[string]corev1.Volume) - for _, m := range volumeMounts { - // Skip adding localDirVolumes - if strings.HasPrefix(m.Name, config.SparkLocalDirVolumePrefix) { - continue - } - - if v, ok := volumeMap[m.Name]; ok { - if _, ok := addedVolumeMap[m.Name]; !ok { - ops = append(ops, addVolume(pod, v)) - addedVolumeMap[m.Name] = v - } - vmPatchOp := addVolumeMount(pod, m) - if vmPatchOp == nil { - return nil - } - ops = append(ops, *vmPatchOp) - } - } - - return ops -} - -func addVolume(pod *corev1.Pod, volume corev1.Volume) patchOperation { - path := "/spec/volumes" - var value interface{} - if len(pod.Spec.Volumes) == 0 { - value = []corev1.Volume{volume} - } else { - path += "/-" - value = volume - } - pod.Spec.Volumes = append(pod.Spec.Volumes, volume) - - return patchOperation{Op: "add", Path: path, Value: value} -} - -func addVolumeMount(pod *corev1.Pod, mount corev1.VolumeMount) *patchOperation { - i := findContainer(pod) - if i < 0 { - glog.Warningf("not able to add VolumeMount %s as Spark container was not found in pod %s", mount.Name, pod.Name) - return nil - } - - path := fmt.Sprintf("/spec/containers/%d/volumeMounts", i) - var value interface{} - if len(pod.Spec.Containers[i].VolumeMounts) == 0 { - value = []corev1.VolumeMount{mount} - } else { - path += "/-" - value = mount - } - pod.Spec.Containers[i].VolumeMounts = append(pod.Spec.Containers[i].VolumeMounts, mount) - - return &patchOperation{Op: "add", Path: path, Value: value} -} - -func addEnvVars(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { - var envVars []corev1.EnvVar - if util.IsDriverPod(pod) { - envVars = app.Spec.Driver.Env - } else if util.IsExecutorPod(pod) { - envVars = app.Spec.Executor.Env - } - - i := findContainer(pod) - if i < 0 { - glog.Warningf("not able to add EnvVars as Spark container was not found in pod 
%s", pod.Name) - return nil - } - basePath := fmt.Sprintf("/spec/containers/%d/env", i) - - var value interface{} - var patchOps []patchOperation - - first := false - if len(pod.Spec.Containers[i].Env) == 0 { - first = true - } - - for _, envVar := range envVars { - path := basePath - if first { - value = []corev1.EnvVar{envVar} - first = false - } else { - path += "/-" - value = envVar - } - patchOps = append(patchOps, patchOperation{Op: "add", Path: path, Value: value}) - } - return patchOps -} - -func addEnvFrom(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { - var envFrom []corev1.EnvFromSource - if util.IsDriverPod(pod) { - envFrom = app.Spec.Driver.EnvFrom - } else if util.IsExecutorPod(pod) { - envFrom = app.Spec.Executor.EnvFrom - } - - i := findContainer(pod) - if i < 0 { - glog.Warningf("not able to add EnvFrom as Spark container was not found in pod %s", pod.Name) - return nil - } - basePath := fmt.Sprintf("/spec/containers/%d/envFrom", i) - - var value interface{} - var patchOps []patchOperation - - first := false - if len(pod.Spec.Containers[i].EnvFrom) == 0 { - first = true - } - - for _, ef := range envFrom { - path := basePath - if first { - value = []corev1.EnvFromSource{ef} - first = false - } else { - path += "/-" - value = ef - } - patchOps = append(patchOps, patchOperation{Op: "add", Path: path, Value: value}) - } - return patchOps -} - -func addEnvironmentVariable(pod *corev1.Pod, envName, envValue string) *patchOperation { - i := findContainer(pod) - if i < 0 { - glog.Warningf("not able to add environment variable %s as Spark container was not found in pod %s", envName, pod.Name) - return nil - } - - path := fmt.Sprintf("/spec/containers/%d/env", i) - var value interface{} - if len(pod.Spec.Containers[i].Env) == 0 { - value = []corev1.EnvVar{{Name: envName, Value: envValue}} - } else { - path += "/-" - value = corev1.EnvVar{Name: envName, Value: envValue} - } - - return &patchOperation{Op: "add", Path: path, Value: value} -} - -func addSparkConfigMap(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { - var patchOps []patchOperation - sparkConfigMapName := app.Spec.SparkConfigMap - if sparkConfigMapName != nil { - patchOps = append(patchOps, addConfigMapVolume(pod, *sparkConfigMapName, config.SparkConfigMapVolumeName)) - vmPatchOp := addConfigMapVolumeMount(pod, config.SparkConfigMapVolumeName, config.DefaultSparkConfDir) - if vmPatchOp == nil { - return nil - } - patchOps = append(patchOps, *vmPatchOp) - envPatchOp := addEnvironmentVariable(pod, config.SparkConfDirEnvVar, config.DefaultSparkConfDir) - if envPatchOp == nil { - return nil - } - patchOps = append(patchOps, *envPatchOp) - } - return patchOps -} - -func addHadoopConfigMap(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { - var patchOps []patchOperation - hadoopConfigMapName := app.Spec.HadoopConfigMap - if hadoopConfigMapName != nil { - patchOps = append(patchOps, addConfigMapVolume(pod, *hadoopConfigMapName, config.HadoopConfigMapVolumeName)) - vmPatchOp := addConfigMapVolumeMount(pod, config.HadoopConfigMapVolumeName, config.DefaultHadoopConfDir) - if vmPatchOp == nil { - return nil - } - patchOps = append(patchOps, *vmPatchOp) - envPatchOp := addEnvironmentVariable(pod, config.HadoopConfDirEnvVar, config.DefaultHadoopConfDir) - if envPatchOp == nil { - return nil - } - patchOps = append(patchOps, *envPatchOp) - } - return patchOps -} - -func addGeneralConfigMaps(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { - var configMaps 
[]v1beta2.NamePath - if util.IsDriverPod(pod) { - configMaps = app.Spec.Driver.ConfigMaps - } else if util.IsExecutorPod(pod) { - configMaps = app.Spec.Executor.ConfigMaps - } - - var patchOps []patchOperation - for _, namePath := range configMaps { - volumeName := namePath.Name + "-vol" - if len(volumeName) > maxNameLength { - volumeName = volumeName[0:maxNameLength] - glog.V(2).Infof("ConfigMap volume name is too long. Truncating to length %d. Result: %s.", maxNameLength, volumeName) - } - patchOps = append(patchOps, addConfigMapVolume(pod, namePath.Name, volumeName)) - vmPatchOp := addConfigMapVolumeMount(pod, volumeName, namePath.Path) - if vmPatchOp == nil { - return nil - } - patchOps = append(patchOps, *vmPatchOp) - } - return patchOps -} - -func getPrometheusConfigPatches(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { - // Skip if Prometheus Monitoring is not enabled or an in-container ConfigFile is used, - // in which cases a Prometheus ConfigMap won't be created. - if !app.PrometheusMonitoringEnabled() || (app.HasMetricsPropertiesFile() && app.HasPrometheusConfigFile()) { - return nil - } - - if util.IsDriverPod(pod) && !app.ExposeDriverMetrics() { - return nil - } - if util.IsExecutorPod(pod) && !app.ExposeExecutorMetrics() { - return nil - } - - var patchOps []patchOperation - name := config.GetPrometheusConfigMapName(app) - volumeName := name + "-vol" - mountPath := config.PrometheusConfigMapMountPath - promPort := config.DefaultPrometheusJavaAgentPort - if app.Spec.Monitoring.Prometheus.Port != nil { - promPort = *app.Spec.Monitoring.Prometheus.Port - } - promProtocol := config.DefaultPrometheusPortProtocol - promPortName := config.DefaultPrometheusPortName - if app.Spec.Monitoring.Prometheus.PortName != nil { - promPortName = *app.Spec.Monitoring.Prometheus.PortName - } - - patchOps = append(patchOps, addConfigMapVolume(pod, name, volumeName)) - vmPatchOp := addConfigMapVolumeMount(pod, volumeName, mountPath) - if vmPatchOp == nil { - glog.Warningf("could not mount volume %s in path %s", volumeName, mountPath) - return nil - } - patchOps = append(patchOps, *vmPatchOp) - promPortPatchOp := addContainerPort(pod, promPort, promProtocol, promPortName) - if promPortPatchOp == nil { - glog.Warningf("could not expose port %d to scrape metrics outside the pod", promPort) - return nil - } - patchOps = append(patchOps, *promPortPatchOp) - return patchOps -} - -func addContainerPorts(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { - var ports []v1beta2.Port - - if util.IsDriverPod(pod) { - ports = app.Spec.Driver.Ports - } else if util.IsExecutorPod(pod) { - ports = app.Spec.Executor.Ports - } - - var patchOps []patchOperation - for _, p := range ports { - portPatchOp := addContainerPort(pod, p.ContainerPort, p.Protocol, p.Name) - if portPatchOp == nil { - glog.Warningf("could not expose port named %s", p.Name) - continue - } - patchOps = append(patchOps, *portPatchOp) - } - return patchOps -} - -func addContainerPort(pod *corev1.Pod, port int32, protocol string, portName string) *patchOperation { - i := findContainer(pod) - if i < 0 { - glog.Warningf("not able to add containerPort %d as Spark container was not found in pod %s", port, pod.Name) - return nil - } - - path := fmt.Sprintf("/spec/containers/%d/ports", i) - containerPort := corev1.ContainerPort{ - Name: portName, - ContainerPort: port, - Protocol: corev1.Protocol(protocol), - } - var value interface{} - if len(pod.Spec.Containers[i].Ports) == 0 { - value = 
[]corev1.ContainerPort{containerPort} - } else { - path += "/-" - value = containerPort - } - pod.Spec.Containers[i].Ports = append(pod.Spec.Containers[i].Ports, containerPort) - return &patchOperation{Op: "add", Path: path, Value: value} -} - -func addConfigMapVolume(pod *corev1.Pod, configMapName string, configMapVolumeName string) patchOperation { - volume := corev1.Volume{ - Name: configMapVolumeName, - VolumeSource: corev1.VolumeSource{ - ConfigMap: &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: configMapName, - }, - }, - }, - } - return addVolume(pod, volume) -} - -func addConfigMapVolumeMount(pod *corev1.Pod, configMapVolumeName string, mountPath string) *patchOperation { - mount := corev1.VolumeMount{ - Name: configMapVolumeName, - ReadOnly: true, - MountPath: mountPath, - } - return addVolumeMount(pod, mount) -} - -func addAffinity(pod *corev1.Pod, app *v1beta2.SparkApplication) *patchOperation { - var affinity *corev1.Affinity - if util.IsDriverPod(pod) { - affinity = app.Spec.Driver.Affinity - } else if util.IsExecutorPod(pod) { - affinity = app.Spec.Executor.Affinity - } - - if affinity == nil { - return nil - } - return &patchOperation{Op: "add", Path: "/spec/affinity", Value: *affinity} -} - -func addTolerations(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { - var tolerations []corev1.Toleration - if util.IsDriverPod(pod) { - tolerations = app.Spec.Driver.SparkPodSpec.Tolerations - } else if util.IsExecutorPod(pod) { - tolerations = app.Spec.Executor.SparkPodSpec.Tolerations - } - - first := false - if len(pod.Spec.Tolerations) == 0 { - first = true - } - - var ops []patchOperation - for _, v := range tolerations { - ops = append(ops, addToleration(pod, v, first)) - if first { - first = false - } - } - return ops -} - -func addNodeSelectors(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { - var nodeSelector map[string]string - if util.IsDriverPod(pod) { - nodeSelector = app.Spec.Driver.NodeSelector - } else if util.IsExecutorPod(pod) { - nodeSelector = app.Spec.Executor.NodeSelector - } - - var ops []patchOperation - if len(nodeSelector) > 0 { - ops = append(ops, patchOperation{Op: "add", Path: "/spec/nodeSelector", Value: nodeSelector}) - } - return ops -} - -func addDNSConfig(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { - var dnsConfig *corev1.PodDNSConfig - - if util.IsDriverPod(pod) { - dnsConfig = app.Spec.Driver.DNSConfig - } else if util.IsExecutorPod(pod) { - dnsConfig = app.Spec.Executor.DNSConfig - } - - var ops []patchOperation - if dnsConfig != nil { - ops = append(ops, patchOperation{Op: "add", Path: "/spec/dnsConfig", Value: dnsConfig}) - } - return ops -} - -func addSchedulerName(pod *corev1.Pod, app *v1beta2.SparkApplication) *patchOperation { - var schedulerName *string - - //NOTE: Preferred to use `BatchScheduler` if application spec has it configured. 
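// Precedence: spec.batchScheduler first, then the per-role schedulerName
// from the driver or executor spec; a nil or empty name results in no patch.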
- if app.Spec.BatchScheduler != nil { - schedulerName = app.Spec.BatchScheduler - } else if util.IsDriverPod(pod) { - schedulerName = app.Spec.Driver.SchedulerName - } else if util.IsExecutorPod(pod) { - schedulerName = app.Spec.Executor.SchedulerName - } - if schedulerName == nil || *schedulerName == "" { - return nil - } - return &patchOperation{Op: "add", Path: "/spec/schedulerName", Value: *schedulerName} -} - -func addPriorityClassName(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { - var priorityClassName *string - - if app.Spec.BatchSchedulerOptions != nil { - priorityClassName = app.Spec.BatchSchedulerOptions.PriorityClassName - } - - var ops []patchOperation - if priorityClassName != nil && *priorityClassName != "" { - ops = append(ops, patchOperation{Op: "add", Path: "/spec/priorityClassName", Value: *priorityClassName}) - - if pod.Spec.Priority != nil { - ops = append(ops, patchOperation{Op: "remove", Path: "/spec/priority"}) - } - if pod.Spec.PreemptionPolicy != nil { - ops = append(ops, patchOperation{Op: "remove", Path: "/spec/preemptionPolicy"}) - } - } - - return ops -} - -func addToleration(pod *corev1.Pod, toleration corev1.Toleration, first bool) patchOperation { - path := "/spec/tolerations" - var value interface{} - if first { - value = []corev1.Toleration{toleration} - } else { - path += "/-" - value = toleration - } - - return patchOperation{Op: "add", Path: path, Value: value} -} - -func addPodSecurityContext(pod *corev1.Pod, app *v1beta2.SparkApplication) *patchOperation { - var secContext *corev1.PodSecurityContext - if util.IsDriverPod(pod) { - secContext = app.Spec.Driver.PodSecurityContext - } else if util.IsExecutorPod(pod) { - secContext = app.Spec.Executor.PodSecurityContext - } - - if secContext == nil { - return nil - } - return &patchOperation{Op: "add", Path: "/spec/securityContext", Value: *secContext} -} - -func addSecurityContext(pod *corev1.Pod, app *v1beta2.SparkApplication) *patchOperation { - var secContext *corev1.SecurityContext - if util.IsDriverPod(pod) { - secContext = app.Spec.Driver.SecurityContext - } else if util.IsExecutorPod(pod) { - secContext = app.Spec.Executor.SecurityContext - } - - if secContext == nil { - return nil - } - - i := findContainer(pod) - - if i < 0 { - glog.Warningf("Spark driver/executor container not found in pod %s", pod.Name) - return nil - } - - path := fmt.Sprintf("/spec/containers/%d/securityContext", i) - return &patchOperation{Op: "add", Path: path, Value: *secContext} -} - -func addSidecarContainers(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { - var sidecars []corev1.Container - if util.IsDriverPod(pod) { - sidecars = app.Spec.Driver.Sidecars - } else if util.IsExecutorPod(pod) { - sidecars = app.Spec.Executor.Sidecars - } - - var ops []patchOperation - for _, c := range sidecars { - sd := c - if !hasContainer(pod, &sd) { - ops = append(ops, patchOperation{Op: "add", Path: "/spec/containers/-", Value: &sd}) - } - } - return ops -} - -func addInitContainers(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { - var initContainers []corev1.Container - if util.IsDriverPod(pod) { - initContainers = app.Spec.Driver.InitContainers - } else if util.IsExecutorPod(pod) { - initContainers = app.Spec.Executor.InitContainers - } - - first := false - if len(pod.Spec.InitContainers) == 0 { - first = true - } - - var ops []patchOperation - for _, c := range initContainers { - sd := c - if first { - first = false - value := []corev1.Container{sd} - ops = append(ops, 
patchOperation{Op: "add", Path: "/spec/initContainers", Value: value}) - } else if !hasInitContainer(pod, &sd) { - ops = append(ops, patchOperation{Op: "add", Path: "/spec/initContainers/-", Value: &sd}) - } - - } - return ops -} - -func addGPU(pod *corev1.Pod, app *v1beta2.SparkApplication) *patchOperation { - var gpu *v1beta2.GPUSpec - if util.IsDriverPod(pod) { - gpu = app.Spec.Driver.GPU - } - if util.IsExecutorPod(pod) { - gpu = app.Spec.Executor.GPU - } - if gpu == nil { - return nil - } - if gpu.Name == "" { - glog.V(2).Infof("Please specify GPU resource name, such as: nvidia.com/gpu, amd.com/gpu etc. Current gpu spec: %+v", gpu) - return nil - } - if gpu.Quantity <= 0 { - glog.V(2).Infof("GPU Quantity must be positive. Current gpu spec: %+v", gpu) - return nil - } - - i := findContainer(pod) - if i < 0 { - glog.Warningf("not able to add GPU as Spark container was not found in pod %s", pod.Name) - return nil - } - - path := fmt.Sprintf("/spec/containers/%d/resources/limits", i) - var value interface{} - if len(pod.Spec.Containers[i].Resources.Limits) == 0 { - value = corev1.ResourceList{ - corev1.ResourceName(gpu.Name): *resource.NewQuantity(gpu.Quantity, resource.DecimalSI), - } - } else { - encoder := strings.NewReplacer("~", "~0", "/", "~1") - path += "/" + encoder.Replace(gpu.Name) - value = *resource.NewQuantity(gpu.Quantity, resource.DecimalSI) - } - return &patchOperation{Op: "add", Path: path, Value: value} -} - -func addHostNetwork(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { - var hostNetwork *bool - if util.IsDriverPod(pod) { - hostNetwork = app.Spec.Driver.HostNetwork - } - if util.IsExecutorPod(pod) { - hostNetwork = app.Spec.Executor.HostNetwork - } - - if hostNetwork == nil || *hostNetwork == false { - return nil - } - var ops []patchOperation - ops = append(ops, patchOperation{Op: "add", Path: "/spec/hostNetwork", Value: true}) - // For Pods with hostNetwork, explicitly set its DNS policy to “ClusterFirstWithHostNet” - // Detail: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy - ops = append(ops, patchOperation{Op: "add", Path: "/spec/dnsPolicy", Value: corev1.DNSClusterFirstWithHostNet}) - return ops -} - -func hasContainer(pod *corev1.Pod, container *corev1.Container) bool { - for _, c := range pod.Spec.Containers { - if container.Name == c.Name && container.Image == c.Image { - return true - } - } - return false -} - -func hasInitContainer(pod *corev1.Pod, container *corev1.Container) bool { - for _, c := range pod.Spec.InitContainers { - if container.Name == c.Name && container.Image == c.Image { - return true - } - } - return false -} - -func addTerminationGracePeriodSeconds(pod *corev1.Pod, app *v1beta2.SparkApplication) *patchOperation { - path := "/spec/terminationGracePeriodSeconds" - var gracePeriodSeconds *int64 - - if util.IsDriverPod(pod) { - gracePeriodSeconds = app.Spec.Driver.TerminationGracePeriodSeconds - } else if util.IsExecutorPod(pod) { - gracePeriodSeconds = app.Spec.Executor.TerminationGracePeriodSeconds - } - if gracePeriodSeconds == nil { - return nil - } - return &patchOperation{Op: "add", Path: path, Value: *gracePeriodSeconds} -} - -func addPodLifeCycleConfig(pod *corev1.Pod, app *v1beta2.SparkApplication) *patchOperation { - var lifeCycle *corev1.Lifecycle - var containerName string - if util.IsDriverPod(pod) { - lifeCycle = app.Spec.Driver.Lifecycle - containerName = config.SparkDriverContainerName - } else if util.IsExecutorPod(pod) { - lifeCycle = 
app.Spec.Executor.Lifecycle - containerName = config.SparkExecutorContainerName - } - if lifeCycle == nil { - return nil - } - - i := 0 - // Find the driver container in the pod. - for ; i < len(pod.Spec.Containers); i++ { - if pod.Spec.Containers[i].Name == containerName { - break - } - } - if i == len(pod.Spec.Containers) { - glog.Warningf("Spark container %s not found in pod %s", containerName, pod.Name) - return nil - } - - path := fmt.Sprintf("/spec/containers/%d/lifecycle", i) - return &patchOperation{Op: "add", Path: path, Value: *lifeCycle} -} - -func findContainer(pod *corev1.Pod) int { - var candidateContainerNames []string - if util.IsDriverPod(pod) { - candidateContainerNames = append(candidateContainerNames, config.SparkDriverContainerName) - } else if util.IsExecutorPod(pod) { - // Spark 3.x changed the default executor container name so we need to include both. - candidateContainerNames = append(candidateContainerNames, config.SparkExecutorContainerName, config.Spark3DefaultExecutorContainerName) - } - - if len(candidateContainerNames) == 0 { - return -1 - } - - for i := 0; i < len(pod.Spec.Containers); i++ { - for _, name := range candidateContainerNames { - if pod.Spec.Containers[i].Name == name { - return i - } - } - } - return -1 -} - -func addHostAliases(pod *corev1.Pod, app *v1beta2.SparkApplication) []patchOperation { - var hostAliases []corev1.HostAlias - if util.IsDriverPod(pod) { - hostAliases = app.Spec.Driver.HostAliases - } else if util.IsExecutorPod(pod) { - hostAliases = app.Spec.Executor.HostAliases - } - - first := false - if len(pod.Spec.HostAliases) == 0 { - first = true - } - - var ops []patchOperation - if len(hostAliases) > 0 { - if first { - ops = append(ops, patchOperation{Op: "add", Path: "/spec/hostAliases", Value: hostAliases}) - } else { - ops = append(ops, patchOperation{Op: "add", Path: "/spec/hostAliases/-", Value: hostAliases}) - } - } - return ops -} - -func addShareProcessNamespace(pod *corev1.Pod, app *v1beta2.SparkApplication) *patchOperation { - var shareProcessNamespace *bool - if util.IsDriverPod(pod) { - shareProcessNamespace = app.Spec.Driver.ShareProcessNamespace - } - if util.IsExecutorPod(pod) { - shareProcessNamespace = app.Spec.Executor.ShareProcessNamespace - } - - if shareProcessNamespace == nil || *shareProcessNamespace == false { - return nil - } - return &patchOperation{Op: "add", Path: "/spec/shareProcessNamespace", Value: *shareProcessNamespace} -} diff --git a/pkg/webhook/resourceusage/enforcer.go b/pkg/webhook/resourceusage/enforcer.go deleted file mode 100644 index 87e9bbce06..0000000000 --- a/pkg/webhook/resourceusage/enforcer.go +++ /dev/null @@ -1,95 +0,0 @@ -package resourceusage - -import ( - "fmt" - "github.com/golang/glog" - so "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - crdinformers "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/informers" - corev1informers "k8s.io/client-go/informers/core/v1" - "k8s.io/client-go/tools/cache" -) - -type ResourceQuotaEnforcer struct { - watcher ResourceUsageWatcher - resourceQuotaInformer corev1informers.ResourceQuotaInformer -} - -func NewResourceQuotaEnforcer(crdInformerFactory crdinformers.SharedInformerFactory, coreV1InformerFactory informers.SharedInformerFactory) ResourceQuotaEnforcer { - resourceUsageWatcher := newResourceUsageWatcher(crdInformerFactory, coreV1InformerFactory) - informer := 
coreV1InformerFactory.Core().V1().ResourceQuotas() - informer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{}) - return ResourceQuotaEnforcer{ - watcher: resourceUsageWatcher, - resourceQuotaInformer: informer, - } -} - -func (r ResourceQuotaEnforcer) WaitForCacheSync(stopCh <-chan struct{}) error { - if !cache.WaitForCacheSync(stopCh, func() bool { - return r.resourceQuotaInformer.Informer().HasSynced() - }) { - return fmt.Errorf("cache sync canceled") - } - return nil -} - -func (r *ResourceQuotaEnforcer) admitResource(kind, namespace, name string, requestedResources ResourceList) (string, error) { - glog.V(2).Infof("Processing admission request for %s %s/%s, requesting: %s", kind, namespace, name, requestedResources) - resourceQuotas, err := r.resourceQuotaInformer.Lister().ResourceQuotas(namespace).List(labels.Everything()) - if err != nil { - return "", err - } - if (requestedResources.cpu.IsZero() && requestedResources.memory.IsZero()) || len(resourceQuotas) == 0 { - return "", nil - } - - currentNamespaceUsage, currentApplicationUsage := r.watcher.GetCurrentResourceUsageWithApplication(namespace, kind, name) - - for _, quota := range resourceQuotas { - // Scope selectors not currently supported, ignore any ResourceQuota that does not match everything. - if quota.Spec.ScopeSelector != nil || len(quota.Spec.Scopes) > 0 { - continue - } - - // If an existing application has increased its usage, check it against the quota again. If its usage hasn't increased, always allow it. - if requestedResources.cpu.Cmp(currentApplicationUsage.cpu) == 1 { - if cpuLimit, present := quota.Spec.Hard[corev1.ResourceCPU]; present { - availableCpu := cpuLimit - availableCpu.Sub(currentNamespaceUsage.cpu) - if requestedResources.cpu.Cmp(availableCpu) == 1 { - return fmt.Sprintf("%s %s/%s requests too many cores (%.3f cores requested, %.3f available).", kind, namespace, name, float64(requestedResources.cpu.MilliValue())/1000.0, float64(availableCpu.MilliValue())/1000.0), nil - } - } - } - - if requestedResources.memory.Cmp(currentApplicationUsage.memory) == 1 { - if memoryLimit, present := quota.Spec.Hard[corev1.ResourceMemory]; present { - availableMemory := memoryLimit - availableMemory.Sub(currentNamespaceUsage.memory) - if requestedResources.memory.Cmp(availableMemory) == 1 { - return fmt.Sprintf("%s %s/%s requests too much memory (%dMi requested, %dMi available).", kind, namespace, name, requestedResources.memory.Value()/(1<<20), availableMemory.Value()/(1<<20)), nil - } - } - } - } - return "", nil -} - -func (r *ResourceQuotaEnforcer) AdmitSparkApplication(app so.SparkApplication) (string, error) { - resourceUsage, err := sparkApplicationResourceUsage(app) - if err != nil { - return "", err - } - return r.admitResource(KindSparkApplication, app.ObjectMeta.Namespace, app.ObjectMeta.Name, resourceUsage) -} - -func (r *ResourceQuotaEnforcer) AdmitScheduledSparkApplication(app so.ScheduledSparkApplication) (string, error) { - resourceUsage, err := scheduledSparkApplicationResourceUsage(app) - if err != nil { - return "", err - } - return r.admitResource(KindScheduledSparkApplication, app.ObjectMeta.Namespace, app.ObjectMeta.Name, resourceUsage) -} diff --git a/pkg/webhook/resourceusage/handlers.go b/pkg/webhook/resourceusage/handlers.go deleted file mode 100644 index c4d86fd765..0000000000 --- a/pkg/webhook/resourceusage/handlers.go +++ /dev/null @@ -1,119 +0,0 @@ -package resourceusage - -import ( - so "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - - 
"github.com/golang/glog" - corev1 "k8s.io/api/core/v1" - "k8s.io/client-go/tools/cache" -) - -func (r *ResourceUsageWatcher) onPodAdded(obj interface{}) { - pod := obj.(*corev1.Pod) - // A pod launched by the Spark operator will already be accounted for by the CRD informer callback - if !launchedBySparkOperator(pod.ObjectMeta) { - r.setResources("Pod", namespaceOrDefault(pod.ObjectMeta), pod.ObjectMeta.Name, podResourceUsage(pod), r.usageByNamespacePod) - } -} - -func (r *ResourceUsageWatcher) onPodUpdated(oldObj, newObj interface{}) { - newPod := newObj.(*corev1.Pod) - if !launchedBySparkOperator(newPod.ObjectMeta) { - if newPod.Status.Phase == corev1.PodFailed || newPod.Status.Phase == corev1.PodSucceeded { - r.deleteResources("Pod", namespaceOrDefault(newPod.ObjectMeta), newPod.ObjectMeta.Name, r.usageByNamespacePod) - } else { - r.setResources("Pod", namespaceOrDefault(newPod.ObjectMeta), newPod.ObjectMeta.Name, podResourceUsage(newPod), r.usageByNamespacePod) - } - } -} - -func (r *ResourceUsageWatcher) onPodDeleted(obj interface{}) { - var pod *corev1.Pod - switch o := obj.(type) { - case *corev1.Pod: - pod = o - case cache.DeletedFinalStateUnknown: - pod = o.Obj.(*corev1.Pod) - default: - return - } - if !launchedBySparkOperator(pod.ObjectMeta) { - r.deleteResources("Pod", namespaceOrDefault(pod.ObjectMeta), pod.ObjectMeta.Name, r.usageByNamespacePod) - } -} - -func (r *ResourceUsageWatcher) onSparkApplicationAdded(obj interface{}) { - app := obj.(*so.SparkApplication) - namespace := namespaceOrDefault(app.ObjectMeta) - resources, err := sparkApplicationResourceUsage(*app) - if err != nil { - glog.Errorf("failed to determine resource usage of SparkApplication %s/%s: %v", namespace, app.ObjectMeta.Name, err) - } else { - r.setResources(KindSparkApplication, namespace, app.ObjectMeta.Name, resources, r.usageByNamespaceApplication) - } -} - -func (r *ResourceUsageWatcher) onSparkApplicationUpdated(oldObj, newObj interface{}) { - oldApp := oldObj.(*so.SparkApplication) - newApp := newObj.(*so.SparkApplication) - if oldApp.ResourceVersion == newApp.ResourceVersion { - return - } - namespace := namespaceOrDefault(newApp.ObjectMeta) - newResources, err := sparkApplicationResourceUsage(*newApp) - if err != nil { - glog.Errorf("failed to determine resource usage of SparkApplication %s/%s: %v", namespace, newApp.ObjectMeta.Name, err) - } else { - r.setResources(KindSparkApplication, namespace, newApp.ObjectMeta.Name, newResources, r.usageByNamespaceApplication) - } -} - -func (r *ResourceUsageWatcher) onSparkApplicationDeleted(obj interface{}) { - var app *so.SparkApplication - switch o := obj.(type) { - case *so.SparkApplication: - app = o - case cache.DeletedFinalStateUnknown: - app = o.Obj.(*so.SparkApplication) - default: - return - } - namespace := namespaceOrDefault(app.ObjectMeta) - r.deleteResources(KindSparkApplication, namespace, app.ObjectMeta.Name, r.usageByNamespaceApplication) -} - -func (r *ResourceUsageWatcher) onScheduledSparkApplicationAdded(obj interface{}) { - app := obj.(*so.ScheduledSparkApplication) - namespace := namespaceOrDefault(app.ObjectMeta) - resources, err := scheduledSparkApplicationResourceUsage(*app) - if err != nil { - glog.Errorf("failed to determine resource usage of ScheduledSparkApplication %s/%s: %v", namespace, app.ObjectMeta.Name, err) - } else { - r.setResources(KindScheduledSparkApplication, namespace, app.ObjectMeta.Name, resources, r.usageByNamespaceScheduledApplication) - } -} - -func (r *ResourceUsageWatcher) 
onScheduledSparkApplicationUpdated(oldObj, newObj interface{}) {
-	newApp := newObj.(*so.ScheduledSparkApplication)
-	namespace := namespaceOrDefault(newApp.ObjectMeta)
-	newResources, err := scheduledSparkApplicationResourceUsage(*newApp)
-	if err != nil {
-		glog.Errorf("failed to determine resource usage of ScheduledSparkApplication %s/%s: %v", namespace, newApp.ObjectMeta.Name, err)
-	} else {
-		r.setResources(KindScheduledSparkApplication, namespace, newApp.ObjectMeta.Name, newResources, r.usageByNamespaceScheduledApplication)
-	}
-}
-
-func (r *ResourceUsageWatcher) onScheduledSparkApplicationDeleted(obj interface{}) {
-	var app *so.ScheduledSparkApplication
-	switch o := obj.(type) {
-	case *so.ScheduledSparkApplication:
-		app = o
-	case cache.DeletedFinalStateUnknown:
-		app = o.Obj.(*so.ScheduledSparkApplication)
-	default:
-		return
-	}
-	namespace := namespaceOrDefault(app.ObjectMeta)
-	r.deleteResources(KindScheduledSparkApplication, namespace, app.ObjectMeta.Name, r.usageByNamespaceScheduledApplication)
-}
diff --git a/pkg/webhook/resourceusage/util.go b/pkg/webhook/resourceusage/util.go
deleted file mode 100644
index d256f3a733..0000000000
--- a/pkg/webhook/resourceusage/util.go
+++ /dev/null
@@ -1,241 +0,0 @@
-package resourceusage
-
-import (
-	"fmt"
-	so "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
-	"github.com/kubeflow/spark-operator/pkg/config"
-	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/resource"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"math"
-	"regexp"
-	"strconv"
-	"strings"
-)
-
-// ...are you serious, Go?
-func max(x, y int64) int64 {
-	if x > y {
-		return x
-	}
-	return y
-}
-
-const (
-	// https://spark.apache.org/docs/latest/configuration.html
-	defaultCpuMillicores  = 1000
-	defaultMemoryBytes    = 1 << 30 // 1Gi
-	defaultMemoryOverhead = 0.1
-
-	// https://github.com/apache/spark/blob/c4bbfd177b4e7cb46f47b39df9fd71d2d9a12c6d/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/Constants.scala#L85
-	minMemoryOverhead           = 384 * (1 << 20) // 384Mi
-	nonJvmDefaultMemoryOverhead = 0.4
-)
-
-func namespaceOrDefault(meta metav1.ObjectMeta) string {
-	namespace := meta.Namespace
-	if namespace == "" {
-		namespace = "default"
-	}
-	return namespace
-}
-
-func launchedBySparkOperator(meta metav1.ObjectMeta) bool {
-	val, present := meta.Labels[config.LaunchedBySparkOperatorLabel]
-	return present && val == "true"
-}
-
-func resourcesRequiredToSchedule(resourceRequirements corev1.ResourceRequirements) (cpu int64, memoryBytes int64) {
-	if coresRequest, present := resourceRequirements.Requests[corev1.ResourceCPU]; present {
-		cpu = coresRequest.MilliValue()
-	} else if coresLimit, present := resourceRequirements.Limits[corev1.ResourceCPU]; present {
-		cpu = coresLimit.MilliValue()
-	}
-	if memoryRequest, present := resourceRequirements.Requests[corev1.ResourceMemory]; present {
-		memoryBytes = memoryRequest.Value()
-	} else if memoryLimit, present := resourceRequirements.Limits[corev1.ResourceMemory]; present {
-		memoryBytes = memoryLimit.Value()
-	}
-	return cpu, memoryBytes
-}
-
-func coresRequiredForSparkPod(spec so.SparkPodSpec, instances int64) (int64, error) {
-	var cpu int64
-	if spec.Cores != nil {
-		cpu = int64(*spec.Cores) * 1000
-	} else {
-		cpu = defaultCpuMillicores
-	}
-	return cpu * instances, nil
-}
-
-var javaStringSuffixes = map[string]int64{
-	"b":  1,
-	"kb": 1 << 10,
-	"k":  1 << 10,
-	"mb": 1 << 20,
-	"m":  1 << 20,
-	"gb": 1 << 30,
-	"g":  1 << 30,
-	"tb": 1 << 40,
-	"t":  1 << 40,
-	"pb": 1
<< 50, - "p": 1 << 50, -} - -var javaStringPattern = regexp.MustCompile(`([0-9]+)([a-z]+)?`) -var javaFractionStringPattern = regexp.MustCompile(`([0-9]+\.[0-9]+)([a-z]+)?`) - -// Logic copied from https://github.com/apache/spark/blob/5264164a67df498b73facae207eda12ee133be7d/common/network-common/src/main/java/org/apache/spark/network/util/JavaUtils.java#L276 -func parseJavaMemoryString(str string) (int64, error) { - lower := strings.ToLower(str) - if matches := javaStringPattern.FindStringSubmatch(lower); matches != nil { - value, err := strconv.ParseInt(matches[1], 10, 64) - if err != nil { - return 0, err - } - suffix := matches[2] - if multiplier, present := javaStringSuffixes[suffix]; present { - return multiplier * value, nil - } - } else if matches = javaFractionStringPattern.FindStringSubmatch(lower); matches != nil { - value, err := strconv.ParseFloat(matches[1], 64) - if err != nil { - return 0, err - } - suffix := matches[2] - if multiplier, present := javaStringSuffixes[suffix]; present { - return int64(float64(multiplier) * value), nil - } - } - return 0, fmt.Errorf("could not parse string '%s' as a Java-style memory value. Examples: 100kb, 1.5mb, 1g", str) -} - -// Logic copied from https://github.com/apache/spark/blob/c4bbfd177b4e7cb46f47b39df9fd71d2d9a12c6d/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/features/BasicDriverFeatureStep.scala -func memoryRequiredForSparkPod(spec so.SparkPodSpec, memoryOverheadFactor *string, appType so.SparkApplicationType, replicas int64) (int64, error) { - var memoryBytes int64 - if spec.Memory != nil { - memory, err := parseJavaMemoryString(*spec.Memory) - if err != nil { - return 0, err - } - memoryBytes = memory - } else { - memoryBytes = defaultMemoryBytes - } - var memoryOverheadBytes int64 - if spec.MemoryOverhead != nil { - overhead, err := parseJavaMemoryString(*spec.MemoryOverhead) - if err != nil { - return 0, err - } - memoryOverheadBytes = overhead - } else { - var overheadFactor float64 - if memoryOverheadFactor != nil { - overheadFactorScope, err := strconv.ParseFloat(*memoryOverheadFactor, 64) - if err != nil { - return 0, err - } - overheadFactor = overheadFactorScope - } else { - if appType == so.JavaApplicationType { - overheadFactor = defaultMemoryOverhead - } else { - overheadFactor = nonJvmDefaultMemoryOverhead - } - } - memoryOverheadBytes = int64(math.Max(overheadFactor*float64(memoryBytes), minMemoryOverhead)) - } - return (memoryBytes + memoryOverheadBytes) * replicas, nil -} - -func resourceUsage(spec so.SparkApplicationSpec) (ResourceList, error) { - driverMemoryOverheadFactor := spec.MemoryOverheadFactor - executorMemoryOverheadFactor := spec.MemoryOverheadFactor - driverMemory, err := memoryRequiredForSparkPod(spec.Driver.SparkPodSpec, driverMemoryOverheadFactor, spec.Type, 1) - if err != nil { - return ResourceList{}, err - } - - var instances int64 = 1 - if spec.Executor.Instances != nil { - instances = int64(*spec.Executor.Instances) - } - executorMemory, err := memoryRequiredForSparkPod(spec.Executor.SparkPodSpec, executorMemoryOverheadFactor, spec.Type, instances) - if err != nil { - return ResourceList{}, err - } - - driverCores, err := coresRequiredForSparkPod(spec.Driver.SparkPodSpec, 1) - if err != nil { - return ResourceList{}, err - } - - executorCores, err := coresRequiredForSparkPod(spec.Executor.SparkPodSpec, instances) - if err != nil { - return ResourceList{}, err - } - - return ResourceList{ - cpu: *resource.NewMilliQuantity(driverCores+executorCores, 
resource.DecimalSI), - memory: *resource.NewQuantity(driverMemory+executorMemory, resource.DecimalSI), - }, nil -} - -func sparkApplicationResourceUsage(sparkApp so.SparkApplication) (ResourceList, error) { - // A completed/failed SparkApplication consumes no resources - if !sparkApp.Status.TerminationTime.IsZero() || sparkApp.Status.AppState.State == so.FailedState || sparkApp.Status.AppState.State == so.CompletedState { - return ResourceList{}, nil - } - return resourceUsage(sparkApp.Spec) -} - -func scheduledSparkApplicationResourceUsage(sparkApp so.ScheduledSparkApplication) (ResourceList, error) { - // Failed validation, will consume no resources - if sparkApp.Status.ScheduleState == so.FailedValidationState { - return ResourceList{}, nil - } - return resourceUsage(sparkApp.Spec.Template) -} - -func podResourceUsage(pod *corev1.Pod) ResourceList { - spec := pod.Spec - var initCores int64 - var initMemoryBytes int64 - completed := make(map[string]struct{}) - - for _, containerStatus := range pod.Status.InitContainerStatuses { - if containerStatus.State.Terminated != nil { - completed[containerStatus.Name] = struct{}{} - } - } - for _, containerStatus := range pod.Status.ContainerStatuses { - if containerStatus.State.Terminated != nil { - completed[containerStatus.Name] = struct{}{} - } - } - - for _, container := range spec.InitContainers { - if _, present := completed[container.Name]; !present { - c, m := resourcesRequiredToSchedule(container.Resources) - initCores = max(c, initCores) - initMemoryBytes = max(m, initMemoryBytes) - } - } - var cores int64 - var memoryBytes int64 - for _, container := range spec.Containers { - if _, present := completed[container.Name]; !present { - c, m := resourcesRequiredToSchedule(container.Resources) - cores += c - memoryBytes += m - } - } - cores = max(initCores, cores) - memoryBytes = max(initMemoryBytes, memoryBytes) - return ResourceList{ - cpu: *resource.NewMilliQuantity(cores, resource.DecimalSI), - memory: *resource.NewQuantity(memoryBytes, resource.DecimalSI), - } -} diff --git a/pkg/webhook/resourceusage/util_test.go b/pkg/webhook/resourceusage/util_test.go deleted file mode 100644 index c610136e05..0000000000 --- a/pkg/webhook/resourceusage/util_test.go +++ /dev/null @@ -1,25 +0,0 @@ -package resourceusage - -import ( - "testing" -) - -func assertMemory(memoryString string, expectedBytes int64, t *testing.T) { - m, err := parseJavaMemoryString(memoryString) - if err != nil { - t.Error(err) - return - } - if m != expectedBytes { - t.Errorf("%s: expected %v bytes, got %v bytes", memoryString, expectedBytes, m) - return - } -} - -func TestJavaMemoryString(t *testing.T) { - assertMemory("1b", 1, t) - assertMemory("100k", 100*1024, t) - assertMemory("1gb", 1024*1024*1024, t) - assertMemory("10TB", 10*1024*1024*1024*1024, t) - assertMemory("10PB", 10*1024*1024*1024*1024*1024, t) -} diff --git a/pkg/webhook/resourceusage/watcher.go b/pkg/webhook/resourceusage/watcher.go deleted file mode 100644 index 49395bf11a..0000000000 --- a/pkg/webhook/resourceusage/watcher.go +++ /dev/null @@ -1,157 +0,0 @@ -package resourceusage - -import ( - "fmt" - "sync" - - crdinformers "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions" - - "github.com/golang/glog" - "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/client-go/informers" - corev1informers "k8s.io/client-go/informers/core/v1" - "k8s.io/client-go/tools/cache" -) - -type ResourceUsageWatcher struct { - currentUsageLock *sync.RWMutex - currentUsageByNamespace map[string]*ResourceList 
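[Editor's note: the sizing logic deleted above follows Spark's own rule, copied from `BasicDriverFeatureStep.scala`: a pod's memory request is the requested memory plus either the explicit `memoryOverhead` or `max(overheadFactor × memory, 384Mi)`, with a 0.1 factor for JVM applications and 0.4 for non-JVM ones. A standalone sketch of that arithmetic; names are illustrative, not the operator's API:]

```go
package main

import (
	"fmt"
	"math"
)

// Defaults mirrored from the constants in util.go above.
const (
	defaultMemoryOverhead       = 0.1       // JVM applications
	nonJvmDefaultMemoryOverhead = 0.4       // Python/R applications
	minMemoryOverhead           = 384 << 20 // 384Mi floor
)

// podMemoryBytes sketches the sizing rule: requested memory plus
// max(factor*memory, 384Mi) when no explicit overhead is given.
func podMemoryBytes(memoryBytes int64, jvm bool) int64 {
	factor := nonJvmDefaultMemoryOverhead
	if jvm {
		factor = defaultMemoryOverhead
	}
	overhead := int64(math.Max(factor*float64(memoryBytes), minMemoryOverhead))
	return memoryBytes + overhead
}

func main() {
	// A 1Gi JVM driver hits the 384Mi floor: 1Gi + 384Mi = 1476395008 bytes.
	fmt.Println(podMemoryBytes(1<<30, true))
	// A 4Gi non-JVM executor uses the 0.4 factor: 4Gi + ~1.6Gi.
	fmt.Println(podMemoryBytes(4<<30, false))
}
```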
- usageByNamespacePod map[string]map[string]*ResourceList - usageByNamespaceScheduledApplication map[string]map[string]*ResourceList - usageByNamespaceApplication map[string]map[string]*ResourceList - crdInformerFactory crdinformers.SharedInformerFactory - coreV1InformerFactory informers.SharedInformerFactory - podInformer corev1informers.PodInformer -} - -// more convenient replacement for corev1.ResourceList -type ResourceList struct { - cpu resource.Quantity - memory resource.Quantity -} - -const ( - KindSparkApplication = "SparkApplication" - KindScheduledSparkApplication = "ScheduledSparkApplication" -) - -func (r ResourceList) String() string { - return fmt.Sprintf("cpu: %v mcpu, memory %v bytes", r.cpu.MilliValue(), r.memory.Value()) -} - -func newResourceUsageWatcher(crdInformerFactory crdinformers.SharedInformerFactory, coreV1InformerFactory informers.SharedInformerFactory) ResourceUsageWatcher { - glog.V(2).Infof("Creating new resource usage watcher") - r := ResourceUsageWatcher{ - crdInformerFactory: crdInformerFactory, - currentUsageLock: &sync.RWMutex{}, - coreV1InformerFactory: coreV1InformerFactory, - currentUsageByNamespace: make(map[string]*ResourceList), - usageByNamespacePod: make(map[string]map[string]*ResourceList), - usageByNamespaceScheduledApplication: make(map[string]map[string]*ResourceList), - usageByNamespaceApplication: make(map[string]map[string]*ResourceList), - } - // Note: Events for each handler are processed serially, so no coordination is needed between - // the different callbacks. Coordination is still needed around updating the shared state. - sparkApplicationInformer := r.crdInformerFactory.Sparkoperator().V1beta2().SparkApplications() - sparkApplicationInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: r.onSparkApplicationAdded, - UpdateFunc: r.onSparkApplicationUpdated, - DeleteFunc: r.onSparkApplicationDeleted, - }) - scheduledSparkApplicationInformer := r.crdInformerFactory.Sparkoperator().V1beta2().ScheduledSparkApplications() - scheduledSparkApplicationInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: r.onScheduledSparkApplicationAdded, - UpdateFunc: r.onScheduledSparkApplicationUpdated, - DeleteFunc: r.onScheduledSparkApplicationDeleted, - }) - r.podInformer = r.coreV1InformerFactory.Core().V1().Pods() - r.podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: r.onPodAdded, - UpdateFunc: r.onPodUpdated, - DeleteFunc: r.onPodDeleted, - }) - return r -} - -func (r *ResourceUsageWatcher) GetCurrentResourceUsage(namespace string) ResourceList { - r.currentUsageLock.RLock() - defer r.currentUsageLock.RUnlock() - if resourceUsageInternal, present := r.currentUsageByNamespace[namespace]; present { - return ResourceList{ - cpu: resourceUsageInternal.cpu, - memory: resourceUsageInternal.memory, - } - } - return ResourceList{} -} - -func (r *ResourceUsageWatcher) GetCurrentResourceUsageWithApplication(namespace, kind, name string) (namespaceResources, applicationResources ResourceList) { - r.currentUsageLock.RLock() - defer r.currentUsageLock.RUnlock() - if resourceUsageInternal, present := r.currentUsageByNamespace[namespace]; present { - var applicationResources ResourceList - var namespaceMap map[string]map[string]*ResourceList - switch kind { - case KindSparkApplication: - namespaceMap = r.usageByNamespaceApplication - case KindScheduledSparkApplication: - namespaceMap = r.usageByNamespaceScheduledApplication - } - if applicationMap, present := 
namespaceMap[namespace]; present { - if ar, present := applicationMap[name]; present { - applicationResources = *ar - } - } - currentUsage := *resourceUsageInternal // Creates a copy - currentUsage.cpu.Sub(applicationResources.cpu) - currentUsage.memory.Sub(applicationResources.memory) - return currentUsage, applicationResources - } - return ResourceList{}, ResourceList{} -} - -func (r *ResourceUsageWatcher) unsafeSetResources(namespace, name string, resources ResourceList, resourceMap map[string]map[string]*ResourceList) { - if _, present := resourceMap[namespace]; !present { - resourceMap[namespace] = make(map[string]*ResourceList) - } - // Clear any resource usage currently stored for this object - r.unsafeDeleteResources(namespace, name, resourceMap) - resourceMap[namespace][name] = &resources - if current, present := r.currentUsageByNamespace[namespace]; present { - current.cpu.Add(resources.cpu) - current.memory.Add(resources.memory) - } else { - r.currentUsageByNamespace[namespace] = &ResourceList{ - cpu: resources.cpu, - memory: resources.memory, - } - } -} - -func (r *ResourceUsageWatcher) unsafeDeleteResources(namespace, name string, resourceMap map[string]map[string]*ResourceList) { - if namespaceMap, present := resourceMap[namespace]; present { - if resources, present := namespaceMap[name]; present { - delete(resourceMap[namespace], name) - if current, present := r.currentUsageByNamespace[namespace]; present { - current.cpu.Sub(resources.cpu) - current.memory.Sub(resources.memory) - } - } - } -} - -func (r *ResourceUsageWatcher) setResources(typeName, namespace, name string, resources ResourceList, resourceMap map[string]map[string]*ResourceList) { - glog.V(3).Infof("Updating object %s %s/%s with resources %v", typeName, namespace, name, resources) - r.currentUsageLock.Lock() - r.unsafeSetResources(namespace, name, resources, resourceMap) - r.currentUsageLock.Unlock() - glog.V(3).Infof("Current resources for namespace %s: %v", namespace, r.currentUsageByNamespace[namespace]) -} - -func (r *ResourceUsageWatcher) deleteResources(typeName, namespace, name string, resourceMap map[string]map[string]*ResourceList) { - glog.V(3).Infof("Deleting resources from object %s/%s", namespace, name) - r.currentUsageLock.Lock() - r.unsafeDeleteResources(namespace, name, resourceMap) - r.currentUsageLock.Unlock() - glog.V(3).Infof("Current resources for namespace %s: %v", namespace, r.currentUsageByNamespace[namespace]) -} diff --git a/pkg/webhook/webhook.go b/pkg/webhook/webhook.go deleted file mode 100644 index 2984e4641a..0000000000 --- a/pkg/webhook/webhook.go +++ /dev/null @@ -1,657 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package webhook - -import ( - "context" - "encoding/json" - "flag" - "fmt" - "io" - "net/http" - "strings" - "time" - - "github.com/golang/glog" - admissionv1 "k8s.io/api/admission/v1" - arv1 "k8s.io/api/admissionregistration/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes" - - crdapi "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io" - crdv1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - crinformers "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions" - crdlisters "github.com/kubeflow/spark-operator/pkg/client/listers/sparkoperator.k8s.io/v1beta2" - "github.com/kubeflow/spark-operator/pkg/config" - "github.com/kubeflow/spark-operator/pkg/util" - "github.com/kubeflow/spark-operator/pkg/webhook/resourceusage" -) - -const ( - webhookName = "webhook.sparkoperator.k8s.io" - quotaWebhookName = "quotaenforcer.sparkoperator.k8s.io" -) - -var podResource = metav1.GroupVersionResource{ - Group: corev1.SchemeGroupVersion.Group, - Version: corev1.SchemeGroupVersion.Version, - Resource: "pods", -} - -var sparkApplicationResource = metav1.GroupVersionResource{ - Group: crdapi.GroupName, - Version: crdv1beta2.Version, - Resource: "sparkapplications", -} - -var scheduledSparkApplicationResource = metav1.GroupVersionResource{ - Group: crdapi.GroupName, - Version: crdv1beta2.Version, - Resource: "scheduledsparkapplications", -} - -// WebHook encapsulates things needed to run the webhook. -type WebHook struct { - clientset kubernetes.Interface - informerFactory crinformers.SharedInformerFactory - lister crdlisters.SparkApplicationLister - server *http.Server - certProvider *certProvider - serviceRef *arv1.ServiceReference - failurePolicy arv1.FailurePolicyType - selector *metav1.LabelSelector - objectSelector *metav1.LabelSelector - sparkJobNamespace string - deregisterOnExit bool - enableResourceQuotaEnforcement bool - resourceQuotaEnforcer resourceusage.ResourceQuotaEnforcer - coreV1InformerFactory informers.SharedInformerFactory - timeoutSeconds *int32 -} - -// Configuration parsed from command-line flags -type webhookFlags struct { - webhookSecretName string - webhookSecretNamespace string - webhookServiceName string - webhookServiceNamespace string - webhookConfigName string - webhookPort int - webhookFailOnError bool - webhookNamespaceSelector string - webhookObjectSelector string -} - -var userConfig webhookFlags - -func init() { - flag.StringVar(&userConfig.webhookSecretName, "webhook-secret-name", "spark-operator-tls", "The name of the secret that contains the webhook server's TLS certificate and key.") - flag.StringVar(&userConfig.webhookSecretNamespace, "webhook-secret-namespace", "spark-operator", "The namespace of the secret that contains the webhook server's TLS certificate and key.") - flag.StringVar(&userConfig.webhookServiceName, "webhook-svc-name", "spark-webhook", "The name of the Service for the webhook server.") - flag.StringVar(&userConfig.webhookServiceNamespace, "webhook-svc-namespace", "spark-operator", "The namespace of the Service for the webhook server.") - flag.StringVar(&userConfig.webhookConfigName, "webhook-config-name", "spark-webhook-config", "The name of the MutatingWebhookConfiguration object to create.") - flag.IntVar(&userConfig.webhookPort, "webhook-port", 8080, "Service port of the webhook server.") - 
flag.BoolVar(&userConfig.webhookFailOnError, "webhook-fail-on-error", false, "Whether Kubernetes should reject requests when the webhook fails.") - flag.StringVar(&userConfig.webhookNamespaceSelector, "webhook-namespace-selector", "", "The webhook will only operate on namespaces with this label, specified in the form key1=value1,key2=value2. Required if webhook-fail-on-error is true.") - flag.StringVar(&userConfig.webhookObjectSelector, "webhook-object-selector", "", "The webhook will only operate on pods with this label/s, specified in the form key1=value1,key2=value2, OR key in (value1,value2).") -} - -// New creates a new WebHook instance. -func New( - clientset kubernetes.Interface, - informerFactory crinformers.SharedInformerFactory, - jobNamespace string, - deregisterOnExit bool, - enableResourceQuotaEnforcement bool, - coreV1InformerFactory informers.SharedInformerFactory, - webhookTimeout *int, -) (*WebHook, error) { - certProvider, err := NewCertProvider( - userConfig.webhookServiceName, - userConfig.webhookServiceNamespace, - ) - if err != nil { - return nil, fmt.Errorf("failed to create certificate provider: %v", err) - } - - path := "/webhook" - serviceRef := &arv1.ServiceReference{ - Namespace: userConfig.webhookServiceNamespace, - Name: userConfig.webhookServiceName, - Path: &path, - } - - hook := &WebHook{ - clientset: clientset, - informerFactory: informerFactory, - lister: informerFactory.Sparkoperator().V1beta2().SparkApplications().Lister(), - certProvider: certProvider, - serviceRef: serviceRef, - sparkJobNamespace: jobNamespace, - deregisterOnExit: deregisterOnExit, - failurePolicy: arv1.Ignore, - coreV1InformerFactory: coreV1InformerFactory, - enableResourceQuotaEnforcement: enableResourceQuotaEnforcement, - timeoutSeconds: func(b int32) *int32 { return &b }(int32(*webhookTimeout)), - } - - if userConfig.webhookFailOnError { - hook.failurePolicy = arv1.Fail - } - - if userConfig.webhookNamespaceSelector == "" { - if userConfig.webhookFailOnError { - return nil, fmt.Errorf("webhook-namespace-selector must be set when webhook-fail-on-error is true") - } - } else { - selector, err := parseSelector(userConfig.webhookNamespaceSelector) - if err != nil { - return nil, err - } - hook.selector = selector - } - - if userConfig.webhookObjectSelector != "" { - selector, err := metav1.ParseToLabelSelector(userConfig.webhookObjectSelector) - if err != nil { - return nil, err - } - hook.objectSelector = selector - } - - if enableResourceQuotaEnforcement { - hook.resourceQuotaEnforcer = resourceusage.NewResourceQuotaEnforcer(informerFactory, coreV1InformerFactory) - } - - mux := http.NewServeMux() - mux.HandleFunc(path, hook.serve) - hook.server = &http.Server{ - Addr: fmt.Sprintf(":%d", userConfig.webhookPort), - Handler: mux, - } - - return hook, nil -} - -func parseSelector(selectorArg string) (*metav1.LabelSelector, error) { - selector := &metav1.LabelSelector{ - MatchLabels: make(map[string]string), - } - - selectorStrs := strings.Split(selectorArg, ",") - for _, selectorStr := range selectorStrs { - kv := strings.SplitN(selectorStr, "=", 2) - if len(kv) != 2 || kv[0] == "" || kv[1] == "" { - return nil, fmt.Errorf("webhook selector must be in the form key1=value1,key2=value2") - } - selector.MatchLabels[kv[0]] = kv[1] - } - - return selector, nil -} - -// Start starts the admission webhook server and registers itself to the API server. 
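[Editor's note: the two selector flags handled in `New` above are parsed differently: the namespace selector goes through the strict `key=value` list parser (`parseSelector`), while the object selector is handed to `metav1.ParseToLabelSelector` and therefore also accepts set-based expressions. A small standalone sketch of what each accepted form yields:]

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Equality-based form, also accepted by the stricter namespace-selector flag:
	sel1, err := metav1.ParseToLabelSelector("key1=value1,key2=value2")
	if err != nil {
		panic(err)
	}
	fmt.Println(sel1.MatchLabels) // map[key1:value1 key2:value2]

	// Set-based form, valid only for the object-selector flag:
	sel2, err := metav1.ParseToLabelSelector("spark-role in (driver,executor)")
	if err != nil {
		panic(err)
	}
	fmt.Println(sel2.MatchExpressions) // [{spark-role In [driver executor]}]
}
```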
-func (wh *WebHook) Start(stopCh <-chan struct{}) error { - wh.updateSecret(userConfig.webhookSecretName, userConfig.webhookSecretNamespace) - - tlsCfg, err := wh.certProvider.TLSConfig() - if err != nil { - return fmt.Errorf("failed to get TLS config: %v", err) - } - wh.server.TLSConfig = tlsCfg - - if wh.enableResourceQuotaEnforcement { - err := wh.resourceQuotaEnforcer.WaitForCacheSync(stopCh) - if err != nil { - return err - } - } - - go func() { - glog.Info("Starting the Spark admission webhook server") - if err := wh.server.ListenAndServeTLS("", ""); err != nil && err != http.ErrServerClosed { - glog.Errorf("error while serving the Spark admission webhook: %v\n", err) - } - }() - - return wh.selfRegistration(userConfig.webhookConfigName) -} - -// Stop deregisters itself with the API server and stops the admission webhook server. -func (wh *WebHook) Stop() error { - // Do not deregister if strict error handling is enabled; pod deletions are common, and we - // don't want to create windows where pods can be created without being subject to the webhook. - if wh.failurePolicy != arv1.Fail { - if err := wh.selfDeregistration(userConfig.webhookConfigName); err != nil { - return err - } - glog.Infof("Webhook %s deregistered", userConfig.webhookConfigName) - } - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - glog.Info("Stopping the Spark pod admission webhook server") - return wh.server.Shutdown(ctx) -} - -func (wh *WebHook) serve(w http.ResponseWriter, r *http.Request) { - glog.V(2).Info("Serving admission request") - var body []byte - if r.Body != nil { - data, err := io.ReadAll(r.Body) - if err != nil { - internalError(w, fmt.Errorf("failed to read the request body")) - return - } - body = data - } - - if len(body) == 0 { - denyRequest(w, "empty request body", http.StatusBadRequest) - return - } - - contentType := r.Header.Get("Content-Type") - if contentType != "application/json" { - denyRequest(w, "invalid Content-Type, expected `application/json`", http.StatusUnsupportedMediaType) - return - } - - review := &admissionv1.AdmissionReview{} - deserializer := codecs.UniversalDeserializer() - if _, _, err := deserializer.Decode(body, nil, review); err != nil { - internalError(w, err) - return - } - var whErr error - var reviewResponse *admissionv1.AdmissionResponse - switch review.Request.Resource { - case podResource: - reviewResponse, whErr = mutatePods(review, wh.lister, wh.sparkJobNamespace) - case sparkApplicationResource: - if !wh.enableResourceQuotaEnforcement { - unexpectedResourceType(w, review.Request.Resource.String()) - return - } - reviewResponse, whErr = admitSparkApplications(review, wh.resourceQuotaEnforcer) - case scheduledSparkApplicationResource: - if !wh.enableResourceQuotaEnforcement { - unexpectedResourceType(w, review.Request.Resource.String()) - return - } - reviewResponse, whErr = admitScheduledSparkApplications(review, wh.resourceQuotaEnforcer) - default: - unexpectedResourceType(w, review.Request.Resource.String()) - return - } - if whErr != nil { - internalError(w, whErr) - return - } - - response := admissionv1.AdmissionReview{ - TypeMeta: metav1.TypeMeta{APIVersion: "admission.k8s.io/v1", Kind: "AdmissionReview"}, - Response: reviewResponse, - } - - if reviewResponse != nil { - if review.Request != nil { - response.Response.UID = review.Request.UID - } - } - - resp, err := json.Marshal(response) - if err != nil { - internalError(w, err) - return - } - if _, err := w.Write(resp); err != nil { - internalError(w, err) - 
} -} - -func (wh *WebHook) updateSecret(name, namespace string) error { - secret, err := wh.clientset.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) - if err != nil { - return fmt.Errorf("failed to get webhook secret: %v", err) - } - - caKey, err := wh.certProvider.CAKey() - if err != nil { - return fmt.Errorf("failed to get CA key: %v", err) - } - - caCert, err := wh.certProvider.CACert() - if err != nil { - return fmt.Errorf("failed to get CA cert: %v", err) - } - - serverKey, err := wh.certProvider.ServerKey() - if err != nil { - return fmt.Errorf("failed to get server key: %v", err) - } - - serverCert, err := wh.certProvider.ServerCert() - if err != nil { - return fmt.Errorf("failed to get server cert: %v", err) - } - - newSecret := corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Data: map[string][]byte{ - "ca-key.pem": caKey, - "ca-cert.pem": caCert, - "server-key.pem": serverKey, - "server-cert.pem": serverCert, - }, - } - - if !equality.Semantic.DeepEqual(newSecret, secret) { - secret.Data = newSecret.Data - _, err := wh.clientset.CoreV1().Secrets(namespace).Update(context.TODO(), secret, metav1.UpdateOptions{}) - if err != nil { - return fmt.Errorf("failed to update webhook secret: %v", err) - } - } - - glog.Infof("Updated webhook secret %s/%s", namespace, name) - return nil -} - -func unexpectedResourceType(w http.ResponseWriter, kind string) { - denyRequest(w, fmt.Sprintf("unexpected resource type: %v", kind), http.StatusUnsupportedMediaType) -} - -func internalError(w http.ResponseWriter, err error) { - glog.Errorf("internal error: %v", err) - denyRequest(w, err.Error(), 500) -} - -func denyRequest(w http.ResponseWriter, reason string, code int) { - response := &admissionv1.AdmissionReview{ - Response: &admissionv1.AdmissionResponse{ - Allowed: false, - Result: &metav1.Status{ - Code: int32(code), - Message: reason, - }, - }, - } - resp, err := json.Marshal(response) - if err != nil { - glog.Error(err) - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - w.WriteHeader(code) - _, err = w.Write(resp) - if err != nil { - glog.Errorf("failed to write response body: %v", err) - } -} - -func (wh *WebHook) selfRegistration(webhookConfigName string) error { - caBundle, err := wh.certProvider.CACert() - if err != nil { - return fmt.Errorf("failed to get CA certificate: %v", err) - } - - mwcClient := wh.clientset.AdmissionregistrationV1().MutatingWebhookConfigurations() - vwcClient := wh.clientset.AdmissionregistrationV1().ValidatingWebhookConfigurations() - - mutatingRules := []arv1.RuleWithOperations{ - { - Operations: []arv1.OperationType{arv1.Create}, - Rule: arv1.Rule{ - APIGroups: []string{""}, - APIVersions: []string{"v1"}, - Resources: []string{"pods"}, - }, - }, - } - - validatingRules := []arv1.RuleWithOperations{ - { - Operations: []arv1.OperationType{arv1.Create, arv1.Update}, - Rule: arv1.Rule{ - APIGroups: []string{crdapi.GroupName}, - APIVersions: []string{crdv1beta2.Version}, - Resources: []string{sparkApplicationResource.Resource, scheduledSparkApplicationResource.Resource}, - }, - }, - } - - sideEffect := arv1.SideEffectClassNoneOnDryRun - - mutatingWebhook := arv1.MutatingWebhook{ - Name: webhookName, - Rules: mutatingRules, - ClientConfig: arv1.WebhookClientConfig{ - Service: wh.serviceRef, - CABundle: caBundle, - }, - FailurePolicy: &wh.failurePolicy, - NamespaceSelector: wh.selector, - ObjectSelector: wh.objectSelector, - TimeoutSeconds: wh.timeoutSeconds, - SideEffects: 
&sideEffect, - AdmissionReviewVersions: []string{"v1"}, - } - - validatingWebhook := arv1.ValidatingWebhook{ - Name: quotaWebhookName, - Rules: validatingRules, - ClientConfig: arv1.WebhookClientConfig{ - Service: wh.serviceRef, - CABundle: caBundle, - }, - FailurePolicy: &wh.failurePolicy, - NamespaceSelector: wh.selector, - ObjectSelector: wh.objectSelector, - TimeoutSeconds: wh.timeoutSeconds, - SideEffects: &sideEffect, - AdmissionReviewVersions: []string{"v1"}, - } - - mutatingWebhooks := []arv1.MutatingWebhook{mutatingWebhook} - validatingWebhooks := []arv1.ValidatingWebhook{validatingWebhook} - - mutatingExisting, mutatingGetErr := mwcClient.Get(context.TODO(), webhookConfigName, metav1.GetOptions{}) - if mutatingGetErr != nil { - if !errors.IsNotFound(mutatingGetErr) { - return mutatingGetErr - } - // Create case. - glog.Info("Creating a MutatingWebhookConfiguration for the Spark pod admission webhook") - webhookConfig := &arv1.MutatingWebhookConfiguration{ - ObjectMeta: metav1.ObjectMeta{ - Name: webhookConfigName, - }, - Webhooks: mutatingWebhooks, - } - if _, err := mwcClient.Create(context.TODO(), webhookConfig, metav1.CreateOptions{}); err != nil { - return err - } - } else { - // Update case. - glog.Info("Updating existing MutatingWebhookConfiguration for the Spark pod admission webhook") - if !equality.Semantic.DeepEqual(mutatingWebhooks, mutatingExisting.Webhooks) { - mutatingExisting.Webhooks = mutatingWebhooks - if _, err := mwcClient.Update(context.TODO(), mutatingExisting, metav1.UpdateOptions{}); err != nil { - return err - } - } - } - - if wh.enableResourceQuotaEnforcement { - validatingExisting, validatingGetErr := vwcClient.Get(context.TODO(), webhookConfigName, metav1.GetOptions{}) - if validatingGetErr != nil { - if !errors.IsNotFound(validatingGetErr) { - return validatingGetErr - } - // Create case. - glog.Info("Creating a ValidatingWebhookConfiguration for the SparkApplication resource quota enforcement webhook") - webhookConfig := &arv1.ValidatingWebhookConfiguration{ - ObjectMeta: metav1.ObjectMeta{ - Name: webhookConfigName, - }, - Webhooks: validatingWebhooks, - } - if _, err := vwcClient.Create(context.TODO(), webhookConfig, metav1.CreateOptions{}); err != nil { - return err - } - - } else { - // Update case. 
- glog.Info("Updating existing ValidatingWebhookConfiguration for the SparkApplication resource quota enforcement webhook") - if !equality.Semantic.DeepEqual(validatingWebhooks, validatingExisting.Webhooks) { - validatingExisting.Webhooks = validatingWebhooks - if _, err := vwcClient.Update(context.TODO(), validatingExisting, metav1.UpdateOptions{}); err != nil { - return err - } - } - } - } - return nil -} - -func (wh *WebHook) selfDeregistration(webhookConfigName string) error { - mutatingConfigs := wh.clientset.AdmissionregistrationV1().MutatingWebhookConfigurations() - validatingConfigs := wh.clientset.AdmissionregistrationV1().ValidatingWebhookConfigurations() - if wh.enableResourceQuotaEnforcement { - err := validatingConfigs.Delete(context.TODO(), webhookConfigName, metav1.DeleteOptions{GracePeriodSeconds: int64ptr(0)}) - if err != nil { - return err - } - } - return mutatingConfigs.Delete(context.TODO(), webhookConfigName, metav1.DeleteOptions{GracePeriodSeconds: int64ptr(0)}) -} - -func admitSparkApplications(review *admissionv1.AdmissionReview, enforcer resourceusage.ResourceQuotaEnforcer) (*admissionv1.AdmissionResponse, error) { - if review.Request.Resource != sparkApplicationResource { - return nil, fmt.Errorf("expected resource to be %s, got %s", sparkApplicationResource, review.Request.Resource) - } - - raw := review.Request.Object.Raw - app := &crdv1beta2.SparkApplication{} - if err := json.Unmarshal(raw, app); err != nil { - return nil, fmt.Errorf("failed to unmarshal a SparkApplication from the raw data in the admission request: %v", err) - } - - reason, err := enforcer.AdmitSparkApplication(*app) - if err != nil { - return nil, fmt.Errorf("resource quota enforcement failed for SparkApplication: %v", err) - } - response := &admissionv1.AdmissionResponse{Allowed: reason == ""} - if reason != "" { - response.Result = &metav1.Status{ - Message: reason, - Code: 400, - } - } - return response, nil -} - -func admitScheduledSparkApplications(review *admissionv1.AdmissionReview, enforcer resourceusage.ResourceQuotaEnforcer) (*admissionv1.AdmissionResponse, error) { - if review.Request.Resource != scheduledSparkApplicationResource { - return nil, fmt.Errorf("expected resource to be %s, got %s", scheduledSparkApplicationResource, review.Request.Resource) - } - - raw := review.Request.Object.Raw - app := &crdv1beta2.ScheduledSparkApplication{} - if err := json.Unmarshal(raw, app); err != nil { - return nil, fmt.Errorf("failed to unmarshal a ScheduledSparkApplication from the raw data in the admission request: %v", err) - } - - response := &admissionv1.AdmissionResponse{Allowed: true} - reason, err := enforcer.AdmitScheduledSparkApplication(*app) - if err != nil { - return nil, fmt.Errorf("resource quota enforcement failed for ScheduledSparkApplication: %v", err) - } else if reason != "" { - response.Allowed = false - response.Result = &metav1.Status{ - Message: reason, - Code: 400, - } - } - return response, nil -} - -func mutatePods( - review *admissionv1.AdmissionReview, - lister crdlisters.SparkApplicationLister, - sparkJobNs string, -) (*admissionv1.AdmissionResponse, error) { - raw := review.Request.Object.Raw - pod := &corev1.Pod{} - if err := json.Unmarshal(raw, pod); err != nil { - return nil, fmt.Errorf("failed to unmarshal a Pod from the raw data in the admission request: %v", err) - } - - response := &admissionv1.AdmissionResponse{Allowed: true} - - if !isSparkPod(pod) || !inSparkJobNamespace(review.Request.Namespace, sparkJobNs) { - glog.V(2).Infof("Pod %s in namespace 
%s is not subject to mutation", pod.GetObjectMeta().GetName(), review.Request.Namespace) - return response, nil - } - - // Try getting the SparkApplication name from the annotation for that. - appName := pod.Labels[config.SparkAppNameLabel] - if appName == "" { - return response, nil - } - app, err := lister.SparkApplications(review.Request.Namespace).Get(appName) - if err != nil { - return nil, fmt.Errorf("failed to get SparkApplication %s/%s: %v", review.Request.Namespace, appName, err) - } - - patchOps := patchSparkPod(pod, app) - if len(patchOps) > 0 { - glog.V(2).Infof("Pod %s in namespace %s is subject to mutation", pod.GetObjectMeta().GetName(), review.Request.Namespace) - patchBytes, err := json.Marshal(patchOps) - if err != nil { - return nil, fmt.Errorf("failed to marshal patch operations %v: %v", patchOps, err) - } - glog.V(3).Infof("Pod %s mutation/patch result %s", pod.GetObjectMeta().GetName(), patchBytes) - response.Patch = patchBytes - patchType := admissionv1.PatchTypeJSONPatch - response.PatchType = &patchType - } - - return response, nil -} - -func inSparkJobNamespace(podNs string, sparkJobNamespace string) bool { - if sparkJobNamespace == corev1.NamespaceAll { - return true - } - return podNs == sparkJobNamespace -} - -func isSparkPod(pod *corev1.Pod) bool { - return util.IsLaunchedBySparkOperator(pod) && (util.IsDriverPod(pod) || util.IsExecutorPod(pod)) -} - -func int64ptr(n int64) *int64 { - return &n -} diff --git a/pkg/webhook/webhook_test.go b/pkg/webhook/webhook_test.go deleted file mode 100644 index 6f2e2f088a..0000000000 --- a/pkg/webhook/webhook_test.go +++ /dev/null @@ -1,310 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package webhook - -import ( - "context" - "encoding/json" - "testing" - "time" - - "github.com/stretchr/testify/assert" - admissionv1 "k8s.io/api/admission/v1" - arv1 "k8s.io/api/admissionregistration/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/equality" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes/fake" - gotest "k8s.io/client-go/testing" - - spov1beta2 "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - crdclientfake "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned/fake" - crdinformers "github.com/kubeflow/spark-operator/pkg/client/informers/externalversions" - "github.com/kubeflow/spark-operator/pkg/config" -) - -func TestMutatePod(t *testing.T) { - crdClient := crdclientfake.NewSimpleClientset() - informerFactory := crdinformers.NewSharedInformerFactory(crdClient, 0*time.Second) - informer := informerFactory.Sparkoperator().V1beta2().SparkApplications() - lister := informer.Lister() - - pod1 := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "spark-driver", - Namespace: "default", - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: config.SparkDriverContainerName, - Image: "spark-driver:latest", - }, - }, - }, - } - - // 1. 
Testing processing non-Spark pod. - podBytes, err := serializePod(pod1) - if err != nil { - t.Error(err) - } - review := &admissionv1.AdmissionReview{ - Request: &admissionv1.AdmissionRequest{ - Resource: metav1.GroupVersionResource{ - Group: corev1.SchemeGroupVersion.Group, - Version: corev1.SchemeGroupVersion.Version, - Resource: "pods", - }, - Object: runtime.RawExtension{ - Raw: podBytes, - }, - Namespace: "default", - }, - } - response, _ := mutatePods(review, lister, "default") - assert.True(t, response.Allowed) - - // 2. Test processing Spark pod with only one patch: adding an OwnerReference. - app1 := &spov1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "spark-app1", - Namespace: "default", - }, - } - crdClient.SparkoperatorV1beta2().SparkApplications(app1.Namespace).Create(context.TODO(), app1, metav1.CreateOptions{}) - informer.Informer().GetIndexer().Add(app1) - pod1.Labels = map[string]string{ - config.SparkRoleLabel: config.SparkDriverRole, - config.LaunchedBySparkOperatorLabel: "true", - config.SparkAppNameLabel: app1.Name, - } - podBytes, err = serializePod(pod1) - if err != nil { - t.Error(err) - } - review.Request.Object.Raw = podBytes - response, _ = mutatePods(review, lister, "default") - assert.True(t, response.Allowed) - assert.Equal(t, admissionv1.PatchTypeJSONPatch, *response.PatchType) - assert.True(t, len(response.Patch) > 0) - - // 3. Test processing Spark pod with patches. - var user int64 = 1000 - app2 := &spov1beta2.SparkApplication{ - ObjectMeta: metav1.ObjectMeta{ - Name: "spark-app2", - Namespace: "default", - }, - Spec: spov1beta2.SparkApplicationSpec{ - Volumes: []corev1.Volume{ - { - Name: "spark", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/spark", - }, - }, - }, - { - Name: "unused", // Expect this to not be added to the driver. 
- VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{}, - }, - }, - }, - Driver: spov1beta2.DriverSpec{ - SparkPodSpec: spov1beta2.SparkPodSpec{ - VolumeMounts: []corev1.VolumeMount{ - { - Name: "spark", - MountPath: "/mnt/spark", - }, - }, - Affinity: &corev1.Affinity{ - PodAffinity: &corev1.PodAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{ - { - LabelSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{config.SparkRoleLabel: config.SparkDriverRole}, - }, - TopologyKey: "kubernetes.io/hostname", - }, - }, - }, - }, - Tolerations: []corev1.Toleration{ - { - Key: "Key", - Operator: "Equal", - Value: "Value", - Effect: "NoEffect", - }, - }, - SecurityContext: &corev1.SecurityContext{ - RunAsUser: &user, - }, - }, - }, - }, - } - crdClient.SparkoperatorV1beta2().SparkApplications(app2.Namespace).Update(context.TODO(), app2, metav1.UpdateOptions{}) - informer.Informer().GetIndexer().Add(app2) - - pod1.Labels[config.SparkAppNameLabel] = app2.Name - podBytes, err = serializePod(pod1) - if err != nil { - t.Error(err) - } - review.Request.Object.Raw = podBytes - response, _ = mutatePods(review, lister, "default") - assert.True(t, response.Allowed) - assert.Equal(t, admissionv1.PatchTypeJSONPatch, *response.PatchType) - assert.True(t, len(response.Patch) > 0) - var patchOps []*patchOperation - json.Unmarshal(response.Patch, &patchOps) - assert.Equal(t, 6, len(patchOps)) -} - -func serializePod(pod *corev1.Pod) ([]byte, error) { - return json.Marshal(pod) -} - -func TestSelfRegistrationWithObjectSelector(t *testing.T) { - clientset := fake.NewSimpleClientset() - informerFactory := crdinformers.NewSharedInformerFactory(nil, 0) - coreV1InformerFactory := informers.NewSharedInformerFactory(nil, 0) - - // Setup userConfig with object selector - userConfig.webhookObjectSelector = "spark-role in (driver,executor)" - webhookTimeout := 30 - - // Create webhook instance - webhook, err := New(clientset, informerFactory, "default", false, false, coreV1InformerFactory, &webhookTimeout) - assert.NoError(t, err) - - // Mock the clientset's Create function to capture the MutatingWebhookConfiguration object - var createdWebhookConfig *arv1.MutatingWebhookConfiguration - clientset.PrependReactor("create", "mutatingwebhookconfigurations", func(action gotest.Action) (handled bool, ret runtime.Object, err error) { - createAction := action.(gotest.CreateAction) - createdWebhookConfig = createAction.GetObject().(*arv1.MutatingWebhookConfiguration) - return true, createdWebhookConfig, nil - }) - - // Call the selfRegistration method - err = webhook.selfRegistration("test-webhook-config") - assert.NoError(t, err) - - // Verify the MutatingWebhookConfiguration was created with the expected object selector - assert.NotNil(t, createdWebhookConfig, "MutatingWebhookConfiguration should have been created") - - expectedSelector := &metav1.LabelSelector{ - MatchExpressions: []metav1.LabelSelectorRequirement{ - { - Key: "spark-role", - Operator: metav1.LabelSelectorOpIn, - Values: []string{"driver", "executor"}, - }, - }, - } - actualSelector := createdWebhookConfig.Webhooks[0].ObjectSelector - - assert.True(t, labelSelectorsEqual(expectedSelector, actualSelector), "ObjectSelectors should be equal") -} - -func labelSelectorsEqual(expected, actual *metav1.LabelSelector) bool { - if expected == nil || actual == nil { - return expected == nil && actual == nil - } - - if len(expected.MatchLabels) != len(actual.MatchLabels) { - return false - } - - for k, v := 
range expected.MatchLabels {
-		if actual.MatchLabels[k] != v {
-			return false
-		}
-	}
-
-	if len(expected.MatchExpressions) != len(actual.MatchExpressions) {
-		return false
-	}
-
-	for i, expr := range expected.MatchExpressions {
-		if expr.Key != actual.MatchExpressions[i].Key ||
-			expr.Operator != actual.MatchExpressions[i].Operator ||
-			!equalStringSlices(expr.Values, actual.MatchExpressions[i].Values) {
-			return false
-		}
-	}
-
-	return true
-}
-
-func equalStringSlices(a, b []string) bool {
-	if len(a) != len(b) {
-		return false
-	}
-	for i := range a {
-		if a[i] != b[i] {
-			return false
-		}
-	}
-	return true
-}
-
-func testSelector(input string, expected *metav1.LabelSelector, t *testing.T) {
-	selector, err := parseSelector(input)
-
-	if expected == nil {
-		if err == nil {
-			t.Errorf("Expected error parsing '%s', but got %v", input, selector)
-		}
-	} else {
-		if err != nil {
-			t.Errorf("Parsing '%s' failed: %v", input, err)
-			return
-		}
-		if !equality.Semantic.DeepEqual(*selector, *expected) {
-			t.Errorf("Parsing '%s' failed: expected %v, got %v", input, expected, selector)
-		}
-	}
-}
-
-func TestNamespaceSelectorParsing(t *testing.T) {
-	testSelector("invalid", nil, t)
-	testSelector("=invalid", nil, t)
-	testSelector("invalid=", nil, t)
-	testSelector("in,val,id", nil, t)
-	testSelector(",inval=id,inval2=id2", nil, t)
-	testSelector("inval=id,inval2=id2,", nil, t)
-	testSelector("val=id,invalid", nil, t)
-	testSelector("val=id", &metav1.LabelSelector{
-		MatchLabels: map[string]string{
-			"val": "id",
-		},
-	}, t)
-	testSelector("val=id,val2=id2", &metav1.LabelSelector{
-		MatchLabels: map[string]string{
-			"val":  "id",
-			"val2": "id2",
-		},
-	}, t)
-}
diff --git a/sparkctl/README.md b/sparkctl/README.md
index 70bd03535e..188006e135 100644
--- a/sparkctl/README.md
+++ b/sparkctl/README.md
@@ -5,14 +5,15 @@
To build `sparkctl`, make sure you followed build steps [here](https://github.com/kubeflow/spark-operator/blob/master/docs/developer-guide.md#build-the-operator) and have all the dependencies, then run the following command from within `sparkctl/`:

```bash
-$ go build -o sparkctl
+go build -o sparkctl
```

## Flags

The following global flags are available for all the sub commands:
+
* `--namespace`: the Kubernetes namespace of the `SparkApplication`(s). Defaults to `default`.
-* `--kubeconfig`: the path to the file storing configuration for accessing the Kubernetes API server. Defaults to
+* `--kubeconfig`: the path to the file storing configuration for accessing the Kubernetes API server. Defaults to
`$HOME/.kube/config`

## Available Commands

@@ -22,18 +23,21 @@ The following global flags are available for all the sub commands:
`create` is a sub command of `sparkctl` for creating a `SparkApplication` object. There are two ways to create a `SparkApplication` object. One is parsing and creating a `SparkApplication` object in the namespace specified by `--namespace` from a given YAML file. In this way, `create` parses the YAML file and sends the parsed `SparkApplication` object to the Kubernetes API server. Usage of this way looks like the following:

Usage:
+
```bash
-$ sparkctl create
+sparkctl create
```
+
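[Editor's note: under the hood, `create` amounts to decoding the YAML into a `SparkApplication` and posting it with the generated clientset, the same call pattern the webhook tests in this patch use. A hedged sketch; the kubeconfig resolution and the object are illustrative, and later commits in this series move the types to `api/v1beta2`:]

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"

	"github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
	crdclientset "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned"
)

func main() {
	// Resolve the REST config the way sparkctl's --kubeconfig flag does,
	// defaulting to $HOME/.kube/config.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := crdclientset.NewForConfigOrDie(cfg)

	// Stand-in for the object normally decoded from the YAML file.
	app := &v1beta2.SparkApplication{
		ObjectMeta: metav1.ObjectMeta{Name: "demo-app", Namespace: "default"},
	}
	created, err := client.SparkoperatorV1beta2().
		SparkApplications(app.Namespace).
		Create(context.TODO(), app, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("created SparkApplication %s/%s\n", created.Namespace, created.Name)
}
```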
The other way is creating a `SparkApplication` object from a named `ScheduledSparkApplication` to manually force a run of the `ScheduledSparkApplication`. Usage of this way looks like the following:

Usage:
+
```bash
-$ sparkctl create --from
+sparkctl create --from
```

-The `create` command also supports shipping local Hadoop configuration files into the driver and executor pods. Specifically, it detects local Hadoop configuration files located at the path specified by the
-environment variable `HADOOP_CONF_DIR`, create a Kubernetes `ConfigMap` from the files, and adds the `ConfigMap` to the `SparkApplication` object so it gets mounted into the driver and executor pods by the operator. The environment variable `HADOOP_CONF_DIR` is also set in the driver and executor containers.
+The `create` command also supports shipping local Hadoop configuration files into the driver and executor pods. Specifically, it detects local Hadoop configuration files located at the path specified by the
+environment variable `HADOOP_CONF_DIR`, creates a Kubernetes `ConfigMap` from the files, and adds the `ConfigMap` to the `SparkApplication` object so it gets mounted into the driver and executor pods by the operator. The environment variable `HADOOP_CONF_DIR` is also set in the driver and executor containers.

#### Staging local dependencies

@@ -41,26 +45,27 @@ The `create` command also supports staging local application dependencies, thoug

##### Uploading to GCS

-For uploading to GCS, the value should be in the form of `gs://`. The bucket must exist and uploading fails if otherwise. The local dependencies will be uploaded to the path
-`spark-app-dependencies//` in the given bucket. It replaces the file path of each local dependency with the URI of the remote copy in the parsed `SparkApplication` object if uploading is successful.
+For uploading to GCS, the value should be in the form of `gs://`. The bucket must exist and uploading fails if otherwise. The local dependencies will be uploaded to the path
+`spark-app-dependencies//` in the given bucket. It replaces the file path of each local dependency with the URI of the remote copy in the parsed `SparkApplication` object if uploading is successful.

-Note that uploading to GCS requires a GCP service account with the necessary IAM permission to use the GCP project specified by service account JSON key file (`serviceusage.services.use`) and the permission to create GCS objects (`storage.object.create`).
-The service account JSON key file must be locally available and be pointed to by the environment variable
-`GOOGLE_APPLICATION_CREDENTIALS`. For more information on IAM authentication, please check
+Note that uploading to GCS requires a GCP service account with the necessary IAM permission to use the GCP project specified by service account JSON key file (`serviceusage.services.use`) and the permission to create GCS objects (`storage.object.create`).
+The service account JSON key file must be locally available and be pointed to by the environment variable
+`GOOGLE_APPLICATION_CREDENTIALS`. For more information on IAM authentication, please check
[Getting Started with Authentication](https://cloud.google.com/docs/authentication/getting-started).

Usage:
+
```bash
-$ export GOOGLE_APPLICATION_CREDENTIALS="[PATH]/[FILE_NAME].json"
-$ sparkctl create --upload-to gs://
+export GOOGLE_APPLICATION_CREDENTIALS="[PATH]/[FILE_NAME].json"
+sparkctl create --upload-to gs://
```

-By default, the uploaded dependencies are not made publicly accessible and are referenced using URIs in the form of `gs://bucket/path/to/file`. Such dependencies are referenced through URIs of the form `gs://bucket/path/to/file`. To download the dependencies from GCS, a custom-built Spark init-container with the [GCS connector](https://cloud.google.com/dataproc/docs/concepts/connectors/cloud-storage) installed and necessary Hadoop configuration properties specified is needed. An example Docker file of such an init-container can be found [here](https://gist.github.com/liyinan926/f9e81f7b54d94c05171a663345eb58bf).
+By default, the uploaded dependencies are not made publicly accessible and are referenced using URIs in the form of `gs://bucket/path/to/file`. To download the dependencies from GCS, a custom-built Spark init-container with the [GCS connector](https://cloud.google.com/dataproc/docs/concepts/connectors/cloud-storage) installed and necessary Hadoop configuration properties specified is needed. An example Docker file of such an init-container can be found [here](https://gist.github.com/liyinan926/f9e81f7b54d94c05171a663345eb58bf).

If you want to make uploaded dependencies publicly available so they can be downloaded by the built-in init-container, simply add `--public` to the `create` command, as the following example shows:

```bash
-$ sparkctl create --upload-to gs:// --public
+sparkctl create --upload-to gs:// --public
```

Publicly available files are referenced through URIs of the form `https://storage.googleapis.com/bucket/path/to/file`.

@@ -71,67 +76,71 @@ For uploading to S3, the value should be in the form of `s3://`. The buc
`spark-app-dependencies//` in the given bucket. It replaces the file path of each local dependency with the URI of the remote copy in the parsed `SparkApplication` object if uploading is successful.

Note that uploading to S3 with [AWS SDK](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html) requires credentials to be specified. For GCP, the S3 Interoperability credentials can be retrieved as described [here](https://cloud.google.com/storage/docs/migrating#keys).
-SDK uses the default credential provider chain to find AWS credentials.
+SDK uses the default credential provider chain to find AWS credentials.
The SDK uses the first provider in the chain that returns credentials without an error. The default provider chain looks for credentials in the following order:

-- Environment variables
+* Environment variables
+
```
AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY
```
+
- Shared credentials file (.aws/credentials)

For more information about AWS SDK authentication, please check
[Specifying Credentials](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials).

Usage:
+
```bash
-$ export AWS_ACCESS_KEY_ID=[KEY]
-$ export AWS_SECRET_ACCESS_KEY=[SECRET]
-$ sparkctl create --upload-to s3://
+export AWS_ACCESS_KEY_ID=[KEY]
+export AWS_SECRET_ACCESS_KEY=[SECRET]
+sparkctl create --upload-to s3://
```

By default, the uploaded dependencies are not made publicly accessible and are referenced using URIs in the form of `s3a://bucket/path/to/file`.
To download the dependencies from S3, a custom-built Spark Docker image with the required jars for `S3A Connector` (`hadoop-aws-2.7.6.jar`, `aws-java-sdk-1.7.6.jar` for Spark build with Hadoop2.7 profile, or `hadoop-aws-3.1.0.jar`, `aws-java-sdk-bundle-1.11.271.jar` for Hadoop3.1) need to be available in the classpath, and `spark-default.conf` with the AWS keys and the S3A FileSystemClass needs to be set (you can also use `spec.hadoopConf` in the SparkApplication YAML): -``` +```properties spark.hadoop.fs.s3a.endpoint https://storage.googleapis.com spark.hadoop.fs.s3a.access.key [KEY] spark.hadoop.fs.s3a.secret.key [SECRET] spark.hadoop.fs.s3a.impl org.apache.hadoop.fs.s3a.S3AFileSystem ``` -NOTE: In Spark 2.3 init-containers are used for downloading remote application dependencies. In future versions, init-containers are removed. -It is recommended to use Apache Spark 2.4 for staging local dependencies with `s3`, which currently requires building a custom Docker image from the Spark master branch. Additionally, since Spark 2.4.0 +NOTE: In Spark 2.3 init-containers are used for downloading remote application dependencies. In future versions, init-containers are removed. +It is recommended to use Apache Spark 2.4 for staging local dependencies with `s3`, which currently requires building a custom Docker image from the Spark master branch. Additionally, since Spark 2.4.0 there are two available build profiles, Hadoop2.7 and Hadoop3.1. For use of Spark with `S3A Connector`, Hadoop3.1 profile is recommended as this allows to use newer version of `aws-java-sdk-bundle`. -If you want to use custom S3 endpoint or region, add `--upload-to-endpoint` and `--upload-to-region`: +If you want to use custom S3 endpoint or region, add `--upload-to-endpoint` and `--upload-to-region`: ```bash -$ sparkctl create --upload-to-endpoint https:// --upload-to-region --upload-to s3:// +sparkctl create --upload-to-endpoint https:// --upload-to-region --upload-to s3:// ``` -If you want to force path style URLs for S3 objects add `--s3-force-path-style`: +If you want to force path style URLs for S3 objects add `--s3-force-path-style`: ```bash -$ sparkctl create --s3-force-path-style +sparkctl create --s3-force-path-style ``` If you want to make uploaded dependencies publicly available, add `--public` to the `create` command, as the following example shows: ```bash -$ sparkctl create --upload-to s3:// --public +sparkctl create --upload-to s3:// --public ``` Publicly available files are referenced through URIs in the default form `https:///bucket/path/to/file`. ### List -`list` is a sub command of `sparkctl` for listing `SparkApplication` objects in the namespace specified by +`list` is a sub command of `sparkctl` for listing `SparkApplication` objects in the namespace specified by `--namespace`. Usage: + ```bash -$ sparkctl list +sparkctl list ``` ### Status @@ -139,21 +148,23 @@ $ sparkctl list `status` is a sub command of `sparkctl` for checking and printing the status of a `SparkApplication` in the namespace specified by `--namespace`. Usage: + ```bash -$ sparkctl status +sparkctl status ``` ### Event -`event` is a sub command of `sparkctl` for listing `SparkApplication` events in the namespace -specified by `--namespace`. +`event` is a sub command of `sparkctl` for listing `SparkApplication` events in the namespace +specified by `--namespace`. -The `event` command also supports streaming the events with the `--follow` or `-f` flag. 
+The `event` command also supports streaming the events with the `--follow` or `-f` flag. The command displays events since the last creation of the `SparkApplication` with the given `name`, and continues to stream events even if the `ResourceVersion` changes.

Usage:
+
```bash
-$ sparkctl event [-f]
+sparkctl event [-f]
```

### Log

@@ -163,8 +174,9 @@ $ sparkctl event [-f]
The `log` command also supports streaming the driver or executor logs with the `--follow` or `-f` flag. It works in the same way as `kubectl logs -f`, i.e., it streams logs until no more logs are available.

Usage:
+
```bash
-$ sparkctl log [-e ] [-f]
+sparkctl log [-e ] [-f]
```

### Delete

@@ -172,17 +184,19 @@ $ sparkctl log [-e ] [-f]
`delete` is a sub command of `sparkctl` for deleting a `SparkApplication` with the given name in the namespace specified by `--namespace`.

Usage:
+
```bash
-$ sparkctl delete
+sparkctl delete
```

### Forward

-`forward` is a sub command of `sparkctl` for doing port forwarding from a local port to the Spark web UI port on the driver. It allows the Spark web UI served in the driver pod to be accessed locally. By default, it forwards from local port `4040` to remote port `4040`, which is the default Spark web UI port. Users can specify different local port and remote port using the flags `--local-port` and `--remote-port`, respectively. 
+`forward` is a sub command of `sparkctl` for port forwarding from a local port to the Spark web UI port on the driver. It allows the Spark web UI served in the driver pod to be accessed locally. By default, it forwards from local port `4040` to remote port `4040`, which is the default Spark web UI port. Users can specify a different local port and remote port using the flags `--local-port` and `--remote-port`, respectively.

Usage:
+
```bash
-$ sparkctl forward [--local-port ] [--remote-port ]
+sparkctl forward [--local-port ] [--remote-port ]
```

Once port forwarding starts, users can open `127.0.0.1:` or `localhost:` in a browser to access the Spark web UI. Forwarding continues until it is interrupted or the driver pod terminates.
diff --git a/sparkctl/build.sh b/sparkctl/build.sh
index 669a86ce78..f4cca33ae2 100755
--- a/sparkctl/build.sh
+++ b/sparkctl/build.sh
@@ -13,15 +13,15 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-SCRIPT=`basename ${BASH_SOURCE[0]}` -DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd -P )" + +SCRIPT=$(basename ${BASH_SOURCE[0]}) +DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd -P)" set -e platforms=("linux:amd64" "darwin:amd64") -for platform in "${platforms[@]}" -do - GOOS="${platform%%:*}" - GOARCH="${platform#*:}" - echo $GOOS - echo $GOARCH - CGO_ENABLED=0 GOOS=$GOOS GOARCH=$GOARCH go build -buildvcs=false -o sparkctl-${GOOS}-${GOARCH} +for platform in "${platforms[@]}"; do + GOOS="${platform%%:*}" + GOARCH="${platform#*:}" + echo $GOOS + echo $GOARCH + CGO_ENABLED=0 GOOS=$GOOS GOARCH=$GOARCH go build -buildvcs=false -o sparkctl-${GOOS}-${GOARCH} done diff --git a/sparkctl/cmd/client.go b/sparkctl/cmd/client.go index b280045045..e22d26afa1 100644 --- a/sparkctl/cmd/client.go +++ b/sparkctl/cmd/client.go @@ -25,7 +25,7 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + "github.com/kubeflow/spark-operator/api/v1beta2" crdclientset "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned" ) diff --git a/sparkctl/cmd/create.go b/sparkctl/cmd/create.go index 1809b3d8ff..49ac4bee0f 100644 --- a/sparkctl/cmd/create.go +++ b/sparkctl/cmd/create.go @@ -19,7 +19,6 @@ package cmd import ( "context" "fmt" - "io/ioutil" "net/url" "os" "path/filepath" @@ -36,7 +35,7 @@ import ( "k8s.io/apimachinery/pkg/util/yaml" clientset "k8s.io/client-go/kubernetes" - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + "github.com/kubeflow/spark-operator/api/v1beta2" crdclientset "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned" ) @@ -293,12 +292,12 @@ func filterLocalFiles(files []string) ([]string, error) { } func isLocalFile(file string) (bool, error) { - fileUrl, err := url.Parse(file) + fileURL, err := url.Parse(file) if err != nil { return false, err } - if fileUrl.Scheme == "file" || fileUrl.Scheme == "" { + if fileURL.Scheme == "file" || fileURL.Scheme == "" { return true, nil } @@ -332,7 +331,7 @@ func (uh uploadHandler) uploadToBucket(uploadPath, localFilePath string) (string fmt.Printf("uploading local file: %s\n", fileName) // Prepare the file for upload. 
- data, err := ioutil.ReadFile(localFilePath) + data, err := os.ReadFile(localFilePath) if err != nil { return "", fmt.Errorf("failed to read file: %s", err) } @@ -387,21 +386,21 @@ func uploadLocalDependencies(app *v1beta2.SparkApplication, files []string) ([]s "unable to upload local dependencies: no upload location specified via --upload-to") } - uploadLocationUrl, err := url.Parse(UploadToPath) + uploadLocationURL, err := url.Parse(UploadToPath) if err != nil { return nil, err } - uploadBucket := uploadLocationUrl.Host + uploadBucket := uploadLocationURL.Host var uh *uploadHandler ctx := context.Background() - switch uploadLocationUrl.Scheme { + switch uploadLocationURL.Scheme { case "gs": uh, err = newGCSBlob(ctx, uploadBucket, UploadToEndpoint, UploadToRegion) case "s3": uh, err = newS3Blob(ctx, uploadBucket, UploadToEndpoint, UploadToRegion, S3ForcePathStyle) default: - return nil, fmt.Errorf("unsupported upload location URL scheme: %s", uploadLocationUrl.Scheme) + return nil, fmt.Errorf("unsupported upload location URL scheme: %s", uploadLocationURL.Scheme) } // Check if bucket has been successfully setup @@ -457,7 +456,7 @@ func buildHadoopConfigMap(appName string, hadoopConfDir string) (*apiv1.ConfigMa return nil, fmt.Errorf("%s is not a directory", hadoopConfDir) } - files, err := ioutil.ReadDir(hadoopConfDir) + files, err := os.ReadDir(hadoopConfDir) if err != nil { return nil, err } @@ -472,7 +471,7 @@ func buildHadoopConfigMap(appName string, hadoopConfDir string) (*apiv1.ConfigMa if file.IsDir() { continue } - content, err := ioutil.ReadFile(filepath.Join(hadoopConfDir, file.Name())) + content, err := os.ReadFile(filepath.Join(hadoopConfDir, file.Name())) if err != nil { return nil, err } diff --git a/sparkctl/cmd/create_test.go b/sparkctl/cmd/create_test.go index e319ddfb2e..aa3d89615d 100644 --- a/sparkctl/cmd/create_test.go +++ b/sparkctl/cmd/create_test.go @@ -22,7 +22,7 @@ import ( "github.com/stretchr/testify/assert" - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + "github.com/kubeflow/spark-operator/api/v1beta2" ) func TestIsLocalFile(t *testing.T) { @@ -84,9 +84,9 @@ func TestValidateSpec(t *testing.T) { testFn := func(test testcase, t *testing.T) { err := validateSpec(test.spec) if test.expectsValidationError { - assert.True(t, err != nil, "%s: expected error got nothing", test.name) + assert.Error(t, err, "%s: expected error got nothing", test.name) } else { - assert.True(t, err == nil, "%s: did not expect error got %v", test.name, err) + assert.NoError(t, err, "%s: did not expect error got %v", test.name, err) } } @@ -161,12 +161,12 @@ func TestLoadFromYAML(t *testing.T) { t.Fatal(err) } - assert.Equal(t, app.Name, "example") - assert.Equal(t, *app.Spec.MainClass, "org.examples.SparkExample") - assert.Equal(t, *app.Spec.MainApplicationFile, "local:///path/to/example.jar") - assert.Equal(t, *app.Spec.Driver.Image, "spark") - assert.Equal(t, *app.Spec.Executor.Image, "spark") - assert.Equal(t, int(*app.Spec.Executor.Instances), 1) + assert.Equal(t, "example", app.Name) + assert.Equal(t, "org.examples.SparkExample", *app.Spec.MainClass) + assert.Equal(t, "local:///path/to/example.jar", *app.Spec.MainApplicationFile) + assert.Equal(t, "spark", *app.Spec.Driver.Image) + assert.Equal(t, "spark", *app.Spec.Executor.Image) + assert.Equal(t, 1, int(*app.Spec.Executor.Instances)) } func TestHandleHadoopConfiguration(t *testing.T) { @@ -175,8 +175,8 @@ func TestHandleHadoopConfiguration(t *testing.T) { t.Fatal(err) } - assert.Equal(t, 
configMap.Name, "test-hadoop-config") - assert.Equal(t, len(configMap.BinaryData), 1) - assert.Equal(t, len(configMap.Data), 1) + assert.Equal(t, "test-hadoop-config", configMap.Name) + assert.Len(t, configMap.BinaryData, 1) + assert.Len(t, configMap.Data, 1) assert.True(t, strings.Contains(configMap.Data["core-site.xml"], "fs.gs.impl")) } diff --git a/sparkctl/cmd/delete.go b/sparkctl/cmd/delete.go index d6366c7472..f75dc65df5 100644 --- a/sparkctl/cmd/delete.go +++ b/sparkctl/cmd/delete.go @@ -22,7 +22,6 @@ import ( "os" "github.com/spf13/cobra" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" crdclientset "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned" @@ -32,7 +31,7 @@ var deleteCmd = &cobra.Command{ Use: "delete ", Short: "Delete a SparkApplication object", Long: `Delete a SparkApplication object with a given name`, - Run: func(cmd *cobra.Command, args []string) { + Run: func(_ *cobra.Command, args []string) { if len(args) != 1 { fmt.Fprintln(os.Stderr, "must specify a SparkApplication name") return diff --git a/sparkctl/cmd/event.go b/sparkctl/cmd/event.go index 5553c9c276..38559b505f 100644 --- a/sparkctl/cmd/event.go +++ b/sparkctl/cmd/event.go @@ -25,8 +25,7 @@ import ( "github.com/olekukonko/tablewriter" "github.com/spf13/cobra" - - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes" @@ -147,7 +146,7 @@ func streamEvents(events watch.Interface, streamSince int64) error { table.Render() // Set 10 minutes inactivity timeout - watchExpire := time.Duration(10 * time.Minute) + watchExpire := 10 * time.Minute intr := interrupt.New(nil, events.Stop) return intr.Run(func() error { // Start rendering contents of the table without table header as it is already printed diff --git a/sparkctl/cmd/forward.go b/sparkctl/cmd/forward.go index dbaeb9c673..6af80de3eb 100644 --- a/sparkctl/cmd/forward.go +++ b/sparkctl/cmd/forward.go @@ -69,7 +69,7 @@ var forwardCmd = &cobra.Command{ } restClient := kubeClientset.CoreV1().RESTClient() - driverPodUrl, driverPodName, err := getDriverPodUrlAndName(args[0], restClient, crdClientset) + driverPodURL, driverPodName, err := getDriverPodURLAndName(args[0], restClient, crdClientset) if err != nil { fmt.Fprintf(os.Stderr, "failed to get an API server URL of the driver pod of SparkApplication %s: %v\n", @@ -80,7 +80,7 @@ var forwardCmd = &cobra.Command{ stopCh := make(chan struct{}, 1) readyCh := make(chan struct{}) - forwarder, err := newPortForwarder(config, driverPodUrl, stopCh, readyCh) + forwarder, err := newPortForwarder(config, driverPodURL, stopCh, readyCh) if err != nil { fmt.Fprintf(os.Stderr, "failed to get a port forwarder: %v\n", err) return @@ -120,7 +120,7 @@ func newPortForwarder( return fw, nil } -func getDriverPodUrlAndName( +func getDriverPodURLAndName( name string, restClient rest.Interface, crdClientset crdclientset.Interface) (*url.URL, string, error) { diff --git a/sparkctl/cmd/gcs.go b/sparkctl/cmd/gcs.go index 3fa2c35ff1..fc807f8927 100644 --- a/sparkctl/cmd/gcs.go +++ b/sparkctl/cmd/gcs.go @@ -26,7 +26,7 @@ import ( ) type blobGCS struct { - projectId string + projectID string endpoint string region string } @@ -41,7 +41,7 @@ func (blob blobGCS) setPublicACL( } defer client.Close() - handle := client.Bucket(bucket).UserProject(blob.projectId) + handle := client.Bucket(bucket).UserProject(blob.projectID) if err = handle.Object(filePath).ACL().Set(ctx, storage.AllUsers, storage.RoleReader); err != nil { return 
fmt.Errorf("failed to set ACL on GCS object %s: %v", filePath, err) } @@ -58,7 +58,7 @@ func newGCSBlob( return nil, err } - projectId, err := gcp.DefaultProjectID(creds) + projectID, err := gcp.DefaultProjectID(creds) if err != nil { return nil, err } @@ -70,7 +70,7 @@ func newGCSBlob( b, err := gcsblob.OpenBucket(ctx, c, bucket, nil) return &uploadHandler{ - blob: blobGCS{endpoint: endpoint, region: region, projectId: string(projectId)}, + blob: blobGCS{endpoint: endpoint, region: region, projectID: string(projectID)}, ctx: ctx, b: b, blobUploadBucket: bucket, diff --git a/sparkctl/cmd/list.go b/sparkctl/cmd/list.go index 0ecbe16bb7..5777d14db9 100644 --- a/sparkctl/cmd/list.go +++ b/sparkctl/cmd/list.go @@ -23,7 +23,6 @@ import ( "github.com/olekukonko/tablewriter" "github.com/spf13/cobra" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" crdclientset "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned" @@ -33,7 +32,7 @@ var listCmd = &cobra.Command{ Use: "list", Short: "List SparkApplication objects", Long: `List SparkApplication objects in a given namespaces.`, - Run: func(cmd *cobra.Command, args []string) { + Run: func(_ *cobra.Command, args []string) { crdClientset, err := getSparkApplicationClient() if err != nil { fmt.Fprintf(os.Stderr, "failed to get SparkApplication client: %v\n", err) @@ -56,7 +55,7 @@ func doList(crdClientset crdclientset.Interface) error { table.SetHeader([]string{"Name", "State", "Submission Age", "Termination Age"}) for _, app := range apps.Items { table.Append([]string{ - string(app.Name), + app.Name, string(app.Status.AppState.State), getSinceTime(app.Status.LastSubmissionAttemptTime), getSinceTime(app.Status.TerminationTime), diff --git a/sparkctl/cmd/log.go b/sparkctl/cmd/log.go index 764c21484c..f86b25ecd0 100644 --- a/sparkctl/cmd/log.go +++ b/sparkctl/cmd/log.go @@ -24,15 +24,14 @@ import ( "time" "github.com/spf13/cobra" - - apiv1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" crdclientset "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned" ) -var ExecutorId int32 +var ExecutorID int32 var FollowLogs bool var logCommand = &cobra.Command{ @@ -64,7 +63,7 @@ var logCommand = &cobra.Command{ } func init() { - logCommand.Flags().Int32VarP(&ExecutorId, "executor", "e", -1, + logCommand.Flags().Int32VarP(&ExecutorID, "executor", "e", -1, "id of the executor to fetch logs for") logCommand.Flags().BoolVarP(&FollowLogs, "follow", "f", false, "whether to stream the logs") } @@ -96,9 +95,8 @@ func doLog( if followLogs { return streamLogs(os.Stdout, kubeClient, podName) - } else { - return printLogs(os.Stdout, kubeClient, podName) } + return printLogs(os.Stdout, kubeClient, podName) } func getPodNameChannel( @@ -107,7 +105,7 @@ func getPodNameChannel( channel := make(chan string, 1) go func() { - for true { + for { app, _ := crdClient.SparkoperatorV1beta2().SparkApplications(Namespace).Get( context.TODO(), sparkApplicationName, @@ -125,12 +123,12 @@ func getPodNameChannel( func waitForLogsFromPodChannel( podName string, kubeClient clientset.Interface, - crdClient crdclientset.Interface) chan bool { + _ crdclientset.Interface) chan bool { channel := make(chan bool, 1) go func() { - for true { - _, err := kubeClient.CoreV1().Pods(Namespace).GetLogs(podName, &apiv1.PodLogOptions{}).Do(context.TODO()).Raw() + for { + _, err := kubeClient.CoreV1().Pods(Namespace).GetLogs(podName, &corev1.PodLogOptions{}).Do(context.TODO()).Raw() if err == nil { 
channel <- true @@ -143,7 +141,7 @@ func waitForLogsFromPodChannel( // printLogs is a one time operation that prints the fetched logs of the given pod. func printLogs(out io.Writer, kubeClientset clientset.Interface, podName string) error { - rawLogs, err := kubeClientset.CoreV1().Pods(Namespace).GetLogs(podName, &apiv1.PodLogOptions{}).Do(context.TODO()).Raw() + rawLogs, err := kubeClientset.CoreV1().Pods(Namespace).GetLogs(podName, &corev1.PodLogOptions{}).Do(context.TODO()).Raw() if err != nil { return err } @@ -153,7 +151,7 @@ func printLogs(out io.Writer, kubeClientset clientset.Interface, podName string) // streamLogs streams the logs of the given pod until there are no more logs available. func streamLogs(out io.Writer, kubeClientset clientset.Interface, podName string) error { - request := kubeClientset.CoreV1().Pods(Namespace).GetLogs(podName, &apiv1.PodLogOptions{Follow: true}) + request := kubeClientset.CoreV1().Pods(Namespace).GetLogs(podName, &corev1.PodLogOptions{Follow: true}) reader, err := request.Stream(context.TODO()) if err != nil { return err diff --git a/sparkctl/cmd/s3.go b/sparkctl/cmd/s3.go index abc92cc4c9..28e9350ae1 100644 --- a/sparkctl/cmd/s3.go +++ b/sparkctl/cmd/s3.go @@ -53,7 +53,7 @@ func newS3Blob( if region == "" { region = "us-east1" } - endpointResolver := aws.EndpointResolverWithOptionsFunc(func(service, region string, options ...interface{}) (aws.Endpoint, error) { + endpointResolver := aws.EndpointResolverWithOptionsFunc(func(service, region string, _ ...interface{}) (aws.Endpoint, error) { if service == s3.ServiceID && endpoint != "" { return aws.Endpoint{ PartitionID: "aws", diff --git a/sparkctl/cmd/status.go b/sparkctl/cmd/status.go index 8502e72b03..cd773454a0 100644 --- a/sparkctl/cmd/status.go +++ b/sparkctl/cmd/status.go @@ -23,7 +23,7 @@ import ( "github.com/olekukonko/tablewriter" "github.com/spf13/cobra" - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + "github.com/kubeflow/spark-operator/api/v1beta2" crdclientset "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned" ) @@ -31,7 +31,7 @@ var statusCmd = &cobra.Command{ Use: "status ", Short: "Check status of a SparkApplication", Long: `Check status of a SparkApplication with a given name`, - Run: func(cmd *cobra.Command, args []string) { + Run: func(_ *cobra.Command, args []string) { if len(args) != 1 { fmt.Fprintln(os.Stderr, "must specify a SparkApplication name") return diff --git a/test/e2e/README.md b/test/e2e/README.md deleted file mode 100644 index d29f882aa5..0000000000 --- a/test/e2e/README.md +++ /dev/null @@ -1,33 +0,0 @@ -# E2E Testing - -End-to-end (e2e) testing is automated testing for real user scenarios. - -## Build and Run Tests - -Prerequisites: -- A running k8s cluster and kube config. We will need to pass kube config as arguments. -- Have kubeconfig file ready. -- Have a Kubernetes Operator for Spark image ready. - -e2e tests are written as Go test. All go test techniques apply (e.g. picking what to run, timeout length). Let's say I want to run all tests in "test/e2e/": - -```bash -$ docker build -t gcr.io/spark-operator/spark-operator:local . -$ go test -v ./test/e2e/ --kubeconfig "$HOME/.kube/config" --operator-image=gcr.io/spark-operator/spark-operator:local -``` - -### Available Tests - -Note that all tests are run on a live Kubernetes cluster. After the tests are done, the Spark Operator deployment and associated resources (e.g. ClusterRole and ClusterRoleBinding) are deleted from the cluster. 
- -* `basic_test.go` - - This test submits `spark-pi.yaml` contained in `\examples`. It then checks that the Spark job successfully completes with the correct result of Pi. - -* `volume_mount_test.go` - - This test submits `spark-pi-configmap.yaml` contained in `\examples`. It verifies that a dummy ConfigMap can be mounted in the Spark pods. - -* `lifecycle_test.go` - - This test submits `spark-pi.yaml` contained in `\examples`. It verifies that the created SparkApplication CRD object goes through the correct series of states as dictated by the controller. Once the job is finished, an update operation is performed on the CRD object to trigger a re-run. The transition from a completed job to a new running job is verified for correctness. diff --git a/test/e2e/basic_test.go b/test/e2e/basic_test.go deleted file mode 100644 index f6e2edf216..0000000000 --- a/test/e2e/basic_test.go +++ /dev/null @@ -1,118 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package e2e - -import ( - "context" - "log" - "strings" - "testing" - - "github.com/stretchr/testify/assert" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/wait" - - appFramework "github.com/kubeflow/spark-operator/test/e2e/framework" -) - -func TestSubmitSparkPiYaml(t *testing.T) { - t.Parallel() - - appName := "spark-pi" - sa, err := appFramework.MakeSparkApplicationFromYaml("../../examples/spark-pi.yaml") - assert.Equal(t, nil, err) - - if appFramework.SparkTestNamespace != "" { - sa.ObjectMeta.Namespace = appFramework.SparkTestNamespace - } - - if appFramework.SparkTestServiceAccount != "" { - sa.Spec.Driver.ServiceAccount = &appFramework.SparkTestServiceAccount - } - - if appFramework.SparkTestImage != "" { - sa.Spec.Image = &appFramework.SparkTestImage - } - - err = appFramework.CreateSparkApplication(framework.SparkApplicationClient, appFramework.SparkTestNamespace, sa) - assert.Equal(t, nil, err) - - status := GetJobStatus(t, appName) - - err = wait.Poll(INTERVAL, TIMEOUT, func() (done bool, err error) { - if status == "COMPLETED" { - return true, nil - } - status = GetJobStatus(t, appName) - return false, nil - }) - assert.Equal(t, nil, err) - - app, _ := appFramework.GetSparkApplication(framework.SparkApplicationClient, appFramework.SparkTestNamespace, appName) - podName := app.Status.DriverInfo.PodName - log.Printf("LABELS: %v", app.ObjectMeta.GetLabels()) - rawLogs, err := framework.KubeClient.CoreV1().Pods(appFramework.SparkTestNamespace).GetLogs(podName, &v1.PodLogOptions{}).Do(context.TODO()).Raw() - assert.Equal(t, nil, err) - assert.NotEqual(t, -1, strings.Index(string(rawLogs), "Pi is roughly 3")) - - err = appFramework.DeleteSparkApplication(framework.SparkApplicationClient, appFramework.SparkTestNamespace, appName) - assert.Equal(t, nil, err) -} - -func TestSubmitSparkPiCustomResourceYaml(t *testing.T) { - t.Parallel() - - appName := "spark-pi-custom-resource" - sa, err := appFramework.MakeSparkApplicationFromYaml("../../examples/spark-pi-custom-resource.yaml") - assert.Equal(t, nil, err) - 
- if appFramework.SparkTestNamespace != "" { - sa.ObjectMeta.Namespace = appFramework.SparkTestNamespace - } - - if appFramework.SparkTestServiceAccount != "" { - sa.Spec.Driver.ServiceAccount = &appFramework.SparkTestServiceAccount - } - - if appFramework.SparkTestImage != "" { - sa.Spec.Image = &appFramework.SparkTestImage - } - - err = appFramework.CreateSparkApplication(framework.SparkApplicationClient, appFramework.SparkTestNamespace, sa) - assert.Equal(t, nil, err) - - status := GetJobStatus(t, appName) - - err = wait.Poll(INTERVAL, TIMEOUT, func() (done bool, err error) { - if status == "COMPLETED" { - return true, nil - } - status = GetJobStatus(t, appName) - return false, nil - }) - assert.Equal(t, nil, err) - - app, _ := appFramework.GetSparkApplication(framework.SparkApplicationClient, appFramework.SparkTestNamespace, appName) - podName := app.Status.DriverInfo.PodName - rawLogs, err := framework.KubeClient.CoreV1().Pods(appFramework.SparkTestNamespace).GetLogs(podName, &v1.PodLogOptions{}).Do(context.TODO()).Raw() - assert.Equal(t, nil, err) - assert.NotEqual(t, -1, strings.Index(string(rawLogs), "Pi is roughly 3")) - - err = appFramework.DeleteSparkApplication(framework.SparkApplicationClient, appFramework.SparkTestNamespace, appName) - assert.Equal(t, nil, err) -} diff --git a/test/e2e/framework/cluster_role.go b/test/e2e/framework/cluster_role.go deleted file mode 100644 index df7adf2f6d..0000000000 --- a/test/e2e/framework/cluster_role.go +++ /dev/null @@ -1,101 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package framework - -import ( - "context" - "encoding/json" - "io" - "os" - - rbacv1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/client-go/kubernetes" -) - -func CreateClusterRole(kubeClient kubernetes.Interface, relativePath string) error { - clusterRole, err := parseClusterRoleYaml(relativePath) - if err != nil { - return err - } - - _, err = kubeClient.RbacV1().ClusterRoles().Get(context.TODO(), clusterRole.Name, metav1.GetOptions{}) - - if err == nil { - // ClusterRole already exists -> Update - _, err = kubeClient.RbacV1().ClusterRoles().Update(context.TODO(), clusterRole, metav1.UpdateOptions{}) - if err != nil { - return err - } - - } else { - // ClusterRole doesn't exists -> Create - _, err = kubeClient.RbacV1().ClusterRoles().Create(context.TODO(), clusterRole, metav1.CreateOptions{}) - if err != nil { - return err - } - } - - return nil -} - -func DeleteClusterRole(kubeClient kubernetes.Interface, relativePath string) error { - clusterRole, err := parseClusterRoleYaml(relativePath) - if err != nil { - return err - } - - if err := kubeClient.RbacV1().ClusterRoles().Delete(context.TODO(), clusterRole.Name, metav1.DeleteOptions{}); err != nil { - return err - } - - return nil -} - -func parseClusterRoleYaml(relativePath string) (*rbacv1.ClusterRole, error) { - var manifest *os.File - var err error - - var clusterRole rbacv1.ClusterRole - if manifest, err = PathToOSFile(relativePath); err != nil { - return nil, err - } - - decoder := yaml.NewYAMLOrJSONDecoder(manifest, 100) - for { - var out unstructured.Unstructured - err = decoder.Decode(&out) - if err != nil { - // this would indicate it's malformed YAML. - break - } - - if out.GetKind() == "ClusterRole" { - var marshaled []byte - marshaled, err = out.MarshalJSON() - json.Unmarshal(marshaled, &clusterRole) - break - } - } - - if err != io.EOF && err != nil { - return nil, err - } - return &clusterRole, nil -} diff --git a/test/e2e/framework/cluster_role_binding.go b/test/e2e/framework/cluster_role_binding.go deleted file mode 100644 index e3224c3aaf..0000000000 --- a/test/e2e/framework/cluster_role_binding.go +++ /dev/null @@ -1,103 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package framework - -import ( - "context" - "encoding/json" - "io" - "os" - - rbacv1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/client-go/kubernetes" -) - -func CreateClusterRoleBinding(kubeClient kubernetes.Interface, relativePath string) (finalizerFn, error) { - finalizerFn := func() error { - return DeleteClusterRoleBinding(kubeClient, relativePath) - } - clusterRoleBinding, err := parseClusterRoleBindingYaml(relativePath) - if err != nil { - return finalizerFn, err - } - - _, err = kubeClient.RbacV1().ClusterRoleBindings().Get(context.TODO(), clusterRoleBinding.Name, metav1.GetOptions{}) - - if err == nil { - // ClusterRoleBinding already exists -> Update - _, err = kubeClient.RbacV1().ClusterRoleBindings().Update(context.TODO(), clusterRoleBinding, metav1.UpdateOptions{}) - if err != nil { - return finalizerFn, err - } - } else { - // ClusterRoleBinding doesn't exists -> Create - _, err = kubeClient.RbacV1().ClusterRoleBindings().Create(context.TODO(), clusterRoleBinding, metav1.CreateOptions{}) - if err != nil { - return finalizerFn, err - } - } - - return finalizerFn, err -} - -func DeleteClusterRoleBinding(kubeClient kubernetes.Interface, relativePath string) error { - clusterRoleBinding, err := parseClusterRoleYaml(relativePath) - if err != nil { - return err - } - - if err := kubeClient.RbacV1().ClusterRoleBindings().Delete(context.TODO(), clusterRoleBinding.Name, metav1.DeleteOptions{}); err != nil { - return err - } - - return nil -} - -func parseClusterRoleBindingYaml(relativePath string) (*rbacv1.ClusterRoleBinding, error) { - var manifest *os.File - var err error - - var clusterRoleBinding rbacv1.ClusterRoleBinding - if manifest, err = PathToOSFile(relativePath); err != nil { - return nil, err - } - - decoder := yaml.NewYAMLOrJSONDecoder(manifest, 100) - for { - var out unstructured.Unstructured - err = decoder.Decode(&out) - if err != nil { - // this would indicate it's malformed YAML. - break - } - - if out.GetKind() == "ClusterRoleBinding" { - var marshaled []byte - marshaled, err = out.MarshalJSON() - json.Unmarshal(marshaled, &clusterRoleBinding) - break - } - } - - if err != io.EOF && err != nil { - return nil, err - } - return &clusterRoleBinding, nil -} diff --git a/test/e2e/framework/config_map.go b/test/e2e/framework/config_map.go deleted file mode 100644 index 01061b8277..0000000000 --- a/test/e2e/framework/config_map.go +++ /dev/null @@ -1,62 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package framework - -import ( - "context" - "fmt" - "github.com/pkg/errors" - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" -) - -func CreateConfigMap(kubeClient kubernetes.Interface, name string, namespace string) (*v1.ConfigMap, error) { - configMap := &v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Data: map[string]string{ - "testKey": "testValue", - }, - } - - _, err := kubeClient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, metav1.GetOptions{}) - - if err == nil { - // ConfigMap already exists -> Update - configMap, err = kubeClient.CoreV1().ConfigMaps(namespace).Update(context.TODO(), configMap, metav1.UpdateOptions{}) - if err != nil { - return nil, err - } - } else { - // ConfigMap doesn't exists -> Create - configMap, err = kubeClient.CoreV1().ConfigMaps(namespace).Create(context.TODO(), configMap, metav1.CreateOptions{}) - if err != nil { - return nil, err - } - } - - if err != nil { - return nil, errors.Wrap(err, fmt.Sprintf("failed to create ConfigMap with name %v", name)) - } - return configMap, nil -} - -func DeleteConfigMap(kubeClient kubernetes.Interface, name string, namespace string) error { - return kubeClient.CoreV1().ConfigMaps(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}) -} diff --git a/test/e2e/framework/context.go b/test/e2e/framework/context.go deleted file mode 100644 index 4d422cb825..0000000000 --- a/test/e2e/framework/context.go +++ /dev/null @@ -1,75 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package framework - -import ( - "strconv" - "strings" - "testing" - "time" - - "golang.org/x/sync/errgroup" -) - -type TestCtx struct { - ID string - cleanUpFns []finalizerFn -} - -type finalizerFn func() error - -func (f *Framework) NewTestCtx(t *testing.T) TestCtx { - // TestCtx is used among others for namespace names where '/' is forbidden - prefix := strings.TrimPrefix( - strings.Replace( - strings.ToLower(t.Name()), - "/", - "-", - -1, - ), - "test", - ) - - id := prefix + "-" + strconv.FormatInt(time.Now().Unix(), 36) - return TestCtx{ - ID: id, - } -} - -// GetObjID returns an ascending ID based on the length of cleanUpFns. It is -// based on the premise that every new object also appends a new finalizerFn on -// cleanUpFns. This can e.g. be used to create multiple namespaces in the same -// test context. 
-func (ctx *TestCtx) GetObjID() string { - return ctx.ID + "-" + strconv.Itoa(len(ctx.cleanUpFns)) -} - -func (ctx *TestCtx) Cleanup(t *testing.T) { - var eg errgroup.Group - - for i := len(ctx.cleanUpFns) - 1; i >= 0; i-- { - eg.Go(ctx.cleanUpFns[i]) - } - - if err := eg.Wait(); err != nil { - t.Fatal(err) - } -} - -func (ctx *TestCtx) AddFinalizerFn(fn finalizerFn) { - ctx.cleanUpFns = append(ctx.cleanUpFns, fn) -} diff --git a/test/e2e/framework/deployment.go b/test/e2e/framework/deployment.go deleted file mode 100644 index e79c5c635d..0000000000 --- a/test/e2e/framework/deployment.go +++ /dev/null @@ -1,86 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package framework - -import ( - "context" - "fmt" - "time" - - "github.com/pkg/errors" - appsv1 "k8s.io/api/apps/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/client-go/kubernetes" -) - -func MakeDeployment(pathToYaml string) (*appsv1.Deployment, error) { - manifest, err := PathToOSFile(pathToYaml) - if err != nil { - return nil, err - } - deployment := appsv1.Deployment{} - if err := yaml.NewYAMLOrJSONDecoder(manifest, 100).Decode(&deployment); err != nil { - return nil, errors.Wrap(err, fmt.Sprintf("failed to decode file %s", pathToYaml)) - } - - return &deployment, nil -} - -func CreateDeployment(kubeClient kubernetes.Interface, namespace string, d *appsv1.Deployment) error { - _, err := kubeClient.AppsV1().Deployments(namespace).Create(context.TODO(), d, metav1.CreateOptions{}) - if err != nil { - return errors.Wrap(err, fmt.Sprintf("failed to create deployment %s", d.Name)) - } - return nil -} - -func DeleteDeployment(kubeClient kubernetes.Interface, namespace, name string) error { - d, err := kubeClient.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{}) - if err != nil { - return err - } - - zero := int32(0) - d.Spec.Replicas = &zero - - d, err = kubeClient.AppsV1().Deployments(namespace).Update(context.TODO(), d, metav1.UpdateOptions{}) - if err != nil { - return err - } - return kubeClient.AppsV1().Deployments(namespace).Delete(context.TODO(), d.Name, metav1.DeleteOptions{}) -} - -func WaitUntilDeploymentGone(kubeClient kubernetes.Interface, namespace, name string, timeout time.Duration) error { - return wait.Poll(time.Second, timeout, func() (bool, error) { - _, err := kubeClient. - AppsV1().Deployments(namespace). 
- Get(context.TODO(), name, metav1.GetOptions{}) - - if err != nil { - if apierrors.IsNotFound(err) { - return true, nil - } - - return false, err - } - - return false, nil - }) -} diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go deleted file mode 100644 index a3d7c17a78..0000000000 --- a/test/e2e/framework/framework.go +++ /dev/null @@ -1,216 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package framework - -import ( - "context" - "fmt" - "time" - - crdclientset "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned" - "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/client-go/kubernetes" - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - "k8s.io/client-go/tools/clientcmd" - - "github.com/pkg/errors" -) - -// Framework contains all components required to run the test framework. -type Framework struct { - KubeClient kubernetes.Interface - SparkApplicationClient crdclientset.Interface - MasterHost string - Namespace *v1.Namespace - SparkTestNamespace *v1.Namespace - OperatorPod *v1.Pod - DefaultTimeout time.Duration -} - -var SparkTestNamespace = "" -var SparkTestServiceAccount = "" -var SparkTestImage = "" - -// Sets up a test framework and returns it. 
-func New(ns, sparkNs, kubeconfig, opImage string, opImagePullPolicy string) (*Framework, error) { - config, err := clientcmd.BuildConfigFromFlags("", kubeconfig) - if err != nil { - return nil, errors.Wrap(err, "build config from flags failed") - } - - cli, err := kubernetes.NewForConfig(config) - if err != nil { - return nil, errors.Wrap(err, "creating new kube-client failed") - } - - namespace, err := CreateNamespace(cli, ns) - if err != nil { - fmt.Println(nil, err, namespace) - } - - sparkTestNamespace, err := CreateNamespace(cli, sparkNs) - if err != nil { - fmt.Println(nil, err, sparkNs) - } - - saClient, err := crdclientset.NewForConfig(config) - if err != nil { - return nil, errors.Wrap(err, "failed to create SparkApplication client") - } - - f := &Framework{ - MasterHost: config.Host, - KubeClient: cli, - SparkApplicationClient: saClient, - Namespace: namespace, - SparkTestNamespace: sparkTestNamespace, - DefaultTimeout: time.Minute, - } - - err = f.Setup(sparkNs, opImage, opImagePullPolicy) - if err != nil { - return nil, errors.Wrap(err, "setup test environment failed") - } - - return f, nil -} - -func (f *Framework) Setup(sparkNs, opImage string, opImagePullPolicy string) error { - if err := f.setupOperator(sparkNs, opImage, opImagePullPolicy); err != nil { - return errors.Wrap(err, "setup operator failed") - } - - return nil -} - -func (f *Framework) setupOperator(sparkNs, opImage string, opImagePullPolicy string) error { - if _, err := CreateServiceAccount(f.KubeClient, f.Namespace.Name, "../../manifest/spark-operator-install/spark-operator-rbac.yaml"); err != nil && !apierrors.IsAlreadyExists(err) { - return errors.Wrap(err, "failed to create operator service account") - } - - if err := CreateClusterRole(f.KubeClient, "../../manifest/spark-operator-install/spark-operator-rbac.yaml"); err != nil && !apierrors.IsAlreadyExists(err) { - return errors.Wrap(err, "failed to create cluster role") - } - - if _, err := CreateClusterRoleBinding(f.KubeClient, "../../manifest/spark-operator-install/spark-operator-rbac.yaml"); err != nil && !apierrors.IsAlreadyExists(err) { - return errors.Wrap(err, "failed to create cluster role binding") - } - - if _, err := CreateServiceAccount(f.KubeClient, f.SparkTestNamespace.Name, "../../manifest/spark-application-rbac/spark-application-rbac.yaml"); err != nil && !apierrors.IsAlreadyExists(err) { - return errors.Wrap(err, "failed to create Spark service account") - } - - if err := CreateRole(f.KubeClient, f.SparkTestNamespace.Name, "../../manifest/spark-application-rbac/spark-application-rbac.yaml"); err != nil && !apierrors.IsAlreadyExists(err) { - return errors.Wrap(err, "failed to create role") - } - - if _, err := CreateRoleBinding(f.KubeClient, f.SparkTestNamespace.Name, "../../manifest/spark-application-rbac/spark-application-rbac.yaml"); err != nil && !apierrors.IsAlreadyExists(err) { - return errors.Wrap(err, "failed to create role binding") - } - - job, err := MakeJob("../../manifest/spark-operator-with-webhook-install/spark-operator-webhook.yaml") - if err != nil { - return err - } - - if opImage != "" { - // Override operator image used, if specified when running tests. 
- job.Spec.Template.Spec.Containers[0].Image = opImage - } - - for _, container := range job.Spec.Template.Spec.Containers { - container.ImagePullPolicy = v1.PullPolicy(opImagePullPolicy) - } - - err = CreateJob(f.KubeClient, f.Namespace.Name, job) - if err != nil { - return errors.Wrap(err, "failed to create job that creates the webhook secret") - } - - err = WaitUntilJobCompleted(f.KubeClient, f.Namespace.Name, job.Name, time.Minute) - if err != nil { - return errors.Wrap(err, "The gencert job failed or timed out") - } - - if err := DeleteJob(f.KubeClient, f.Namespace.Name, job.Name); err != nil { - return errors.Wrap(err, "failed to delete the init job") - } - - if _, err := CreateService(f.KubeClient, f.Namespace.Name, "../../manifest/spark-operator-with-webhook-install/spark-operator-webhook.yaml"); err != nil { - return errors.Wrap(err, "failed to create webhook service") - } - - deploy, err := MakeDeployment("../../manifest/spark-operator-with-webhook-install/spark-operator-with-webhook.yaml") - if err != nil { - return err - } - - if opImage != "" { - // Override operator image used, if specified when running tests. - deploy.Spec.Template.Spec.Containers[0].Image = opImage - } - - for _, container := range deploy.Spec.Template.Spec.Containers { - container.ImagePullPolicy = v1.PullPolicy(opImagePullPolicy) - } - - err = CreateDeployment(f.KubeClient, f.Namespace.Name, deploy) - if err != nil { - return err - } - - opts := metav1.ListOptions{LabelSelector: fields.SelectorFromSet(fields.Set(deploy.Spec.Template.ObjectMeta.Labels)).String()} - err = WaitForPodsReady(f.KubeClient, f.Namespace.Name, f.DefaultTimeout, 1, opts) - if err != nil { - return errors.Wrap(err, "failed to wait for operator to become ready") - } - - pl, err := f.KubeClient.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), opts) - if err != nil { - return err - } - f.OperatorPod = &pl.Items[0] - return nil -} - -// Teardown tears down a previously initialized test environment. -func (f *Framework) Teardown() error { - if err := DeleteClusterRole(f.KubeClient, "../../manifest/spark-operator-install/spark-operator-rbac.yaml"); err != nil && !apierrors.IsAlreadyExists(err) { - return errors.Wrap(err, "failed to delete operator cluster role") - } - - if err := DeleteClusterRoleBinding(f.KubeClient, "../../manifest/spark-operator-install/spark-operator-rbac.yaml"); err != nil && !apierrors.IsAlreadyExists(err) { - return errors.Wrap(err, "failed to delete operator cluster role binding") - } - - if err := f.KubeClient.AppsV1().Deployments(f.Namespace.Name).Delete(context.TODO(), "sparkoperator", metav1.DeleteOptions{}); err != nil { - return err - } - - if err := DeleteNamespace(f.KubeClient, f.Namespace.Name); err != nil { - return err - } - - if err := DeleteNamespace(f.KubeClient, f.SparkTestNamespace.Name); err != nil { - return err - } - - return nil -} diff --git a/test/e2e/framework/helpers.go b/test/e2e/framework/helpers.go deleted file mode 100644 index 16e60a7667..0000000000 --- a/test/e2e/framework/helpers.go +++ /dev/null @@ -1,144 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package framework - -import ( - "context" - "fmt" - "net/http" - "os" - "path/filepath" - "time" - - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - - "github.com/pkg/errors" -) - -// PathToOSFile gets the absolute path from relative path. -func PathToOSFile(relativePath string) (*os.File, error) { - path, err := filepath.Abs(relativePath) - if err != nil { - return nil, errors.Wrap(err, fmt.Sprintf("failed generate absolute file path of %s", relativePath)) - } - - manifest, err := os.Open(path) - if err != nil { - return nil, errors.Wrap(err, fmt.Sprintf("failed to open file %s", path)) - } - - return manifest, nil -} - -// WaitForPodsReady waits for a selection of Pods to be running and each -// container to pass its readiness check. -func WaitForPodsReady(kubeClient kubernetes.Interface, namespace string, timeout time.Duration, expectedReplicas int, opts metav1.ListOptions) error { - return wait.Poll(time.Second, timeout, func() (bool, error) { - pl, err := kubeClient.CoreV1().Pods(namespace).List(context.TODO(), opts) - if err != nil { - return false, err - } - - runningAndReady := 0 - for _, p := range pl.Items { - isRunningAndReady, err := PodRunningAndReady(p) - if err != nil { - return false, err - } - - if isRunningAndReady { - runningAndReady++ - } - } - - if runningAndReady == expectedReplicas { - return true, nil - } - return false, nil - }) -} - -func WaitForPodsRunImage(kubeClient kubernetes.Interface, namespace string, expectedReplicas int, image string, opts metav1.ListOptions) error { - return wait.Poll(time.Second, time.Minute*5, func() (bool, error) { - pl, err := kubeClient.CoreV1().Pods(namespace).List(context.TODO(), opts) - if err != nil { - return false, err - } - - runningImage := 0 - for _, p := range pl.Items { - if podRunsImage(p, image) { - runningImage++ - } - } - - if runningImage == expectedReplicas { - return true, nil - } - return false, nil - }) -} - -func WaitForHTTPSuccessStatusCode(timeout time.Duration, url string) error { - var resp *http.Response - err := wait.Poll(time.Second, timeout, func() (bool, error) { - var err error - resp, err = http.Get(url) - if err == nil && resp.StatusCode == 200 { - return true, nil - } - return false, nil - }) - - return errors.Wrap(err, fmt.Sprintf( - "waiting for %v to return a successful status code timed out. Last response from server was: %v", - url, - resp, - )) -} - -func podRunsImage(p v1.Pod, image string) bool { - for _, c := range p.Spec.Containers { - if image == c.Image { - return true - } - } - - return false -} - -func GetLogs(kubeClient kubernetes.Interface, namespace string, podName, containerName string) (string, error) { - logs, err := kubeClient.CoreV1().RESTClient().Get(). - Resource("pods"). - Namespace(namespace). - Name(podName).SubResource("log"). - Param("container", containerName). - Do(context.TODO()). 
- Raw() - if err != nil { - return "", err - } - return string(logs), err -} - -func ProxyGetPod(kubeClient kubernetes.Interface, namespace string, podName string, port string, path string) *rest.Request { - return kubeClient.CoreV1().RESTClient().Get().Prefix("proxy").Namespace(namespace).Resource("pods").Name(podName + ":" + port).Suffix(path) -} diff --git a/test/e2e/framework/job.go b/test/e2e/framework/job.go deleted file mode 100644 index 52d2d4ec27..0000000000 --- a/test/e2e/framework/job.go +++ /dev/null @@ -1,106 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package framework - -import ( - "context" - "encoding/json" - "fmt" - "github.com/pkg/errors" - "io" - "k8s.io/apimachinery/pkg/util/wait" - "os" - "time" - - batchv1 "k8s.io/api/batch/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/client-go/kubernetes" -) - -func MakeJob(pathToYaml string) (*batchv1.Job, error) { - job, err := parseJobYaml(pathToYaml) - if err != nil { - return nil, err - } - - return job, nil -} - -func CreateJob(kubeClient kubernetes.Interface, namespace string, job *batchv1.Job) error { - _, err := kubeClient.BatchV1().Jobs(namespace).Create(context.TODO(), job, metav1.CreateOptions{}) - if err != nil { - return errors.Wrap(err, fmt.Sprintf("failed to create job %s", job.Name)) - } - return nil -} - -func DeleteJob(kubeClient kubernetes.Interface, namespace, name string) error { - deleteProp := metav1.DeletePropagationForeground - return kubeClient.BatchV1().Jobs(namespace).Delete( - context.TODO(), - name, - metav1.DeleteOptions{PropagationPolicy: &deleteProp}, - ) -} - -func parseJobYaml(relativePath string) (*batchv1.Job, error) { - var manifest *os.File - var err error - - var job batchv1.Job - if manifest, err = PathToOSFile(relativePath); err != nil { - return nil, err - } - - decoder := yaml.NewYAMLOrJSONDecoder(manifest, 100) - for { - var out unstructured.Unstructured - err = decoder.Decode(&out) - if err != nil { - // this would indicate it's malformed YAML. - break - } - - if out.GetKind() == "Job" { - var marshaled []byte - marshaled, err = out.MarshalJSON() - json.Unmarshal(marshaled, &job) - break - } - } - - if err != io.EOF && err != nil { - return nil, err - } - return &job, nil -} - -func WaitUntilJobCompleted(kubeClient kubernetes.Interface, namespace, name string, timeout time.Duration) error { - return wait.Poll(time.Second, timeout, func() (bool, error) { - job, _ := kubeClient. - BatchV1().Jobs(namespace). 
- Get(context.TODO(), name, metav1.GetOptions{}) - - if job.Status.Succeeded == 1 { - return true, nil - } else { - return false, nil - } - }) -} diff --git a/test/e2e/framework/namespace.go b/test/e2e/framework/namespace.go deleted file mode 100644 index cdd15476e8..0000000000 --- a/test/e2e/framework/namespace.go +++ /dev/null @@ -1,64 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package framework - -import ( - "context" - "fmt" - "testing" - - "github.com/pkg/errors" - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" -) - -func CreateNamespace(kubeClient kubernetes.Interface, name string) (*v1.Namespace, error) { - namespace, err := kubeClient.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - }, - metav1.CreateOptions{}, - ) - if err != nil { - return nil, errors.Wrap(err, fmt.Sprintf("failed to create namespace with name %v", name)) - } - return namespace, nil -} - -func (ctx *TestCtx) CreateNamespace(t *testing.T, kubeClient kubernetes.Interface) string { - name := ctx.GetObjID() - if _, err := CreateNamespace(kubeClient, name); err != nil { - t.Fatal(err) - } - - namespaceFinalizerFn := func() error { - if err := DeleteNamespace(kubeClient, name); err != nil { - return err - } - return nil - } - - ctx.AddFinalizerFn(namespaceFinalizerFn) - - return name -} - -func DeleteNamespace(kubeClient kubernetes.Interface, name string) error { - return kubeClient.CoreV1().Namespaces().Delete(context.TODO(), name, metav1.DeleteOptions{}) -} diff --git a/test/e2e/framework/operator.go b/test/e2e/framework/operator.go deleted file mode 100644 index 0b1d6467d2..0000000000 --- a/test/e2e/framework/operator.go +++ /dev/null @@ -1,47 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package framework - -import ( - "fmt" - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// CustomResourceDefinitionTypeMeta sets the default kind/apiversion of CRD -var CustomResourceDefinitionTypeMeta = metav1.TypeMeta{ - Kind: "CustomResourceDefinition", - APIVersion: "apiextensions.k8s.io/v1beta1", -} - -// PodRunningAndReady returns whether a pod is running and each container has -// passed it's ready state. 
-func PodRunningAndReady(pod v1.Pod) (bool, error) { - switch pod.Status.Phase { - case v1.PodFailed, v1.PodSucceeded: - return false, fmt.Errorf("pod completed") - case v1.PodRunning: - for _, cond := range pod.Status.Conditions { - if cond.Type != v1.PodReady { - continue - } - return cond.Status == v1.ConditionTrue, nil - } - return false, fmt.Errorf("pod ready condition not found") - } - return false, nil -} diff --git a/test/e2e/framework/role.go b/test/e2e/framework/role.go deleted file mode 100644 index db4064f3db..0000000000 --- a/test/e2e/framework/role.go +++ /dev/null @@ -1,100 +0,0 @@ -/* -Copyright 2019 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package framework - -import ( - "context" - "encoding/json" - "io" - rbacv1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/client-go/kubernetes" - "os" -) - -func CreateRole(kubeClient kubernetes.Interface, ns string, relativePath string) error { - role, err := parseRoleYaml(relativePath) - if err != nil { - return err - } - - _, err = kubeClient.RbacV1().Roles(ns).Get(context.TODO(), role.Name, metav1.GetOptions{}) - - if err == nil { - // Role already exists -> Update - _, err = kubeClient.RbacV1().Roles(ns).Update(context.TODO(), role, metav1.UpdateOptions{}) - if err != nil { - return err - } - - } else { - // Role doesn't exists -> Create - _, err = kubeClient.RbacV1().Roles(ns).Create(context.TODO(), role, metav1.CreateOptions{}) - if err != nil { - return err - } - } - - return nil -} - -func DeleteRole(kubeClient kubernetes.Interface, ns string, relativePath string) error { - role, err := parseRoleYaml(relativePath) - if err != nil { - return err - } - - if err := kubeClient.RbacV1().Roles(ns).Delete(context.TODO(), role.Name, metav1.DeleteOptions{}); err != nil { - return err - } - - return nil -} - -func parseRoleYaml(relativePath string) (*rbacv1.Role, error) { - var manifest *os.File - var err error - - var role rbacv1.Role - if manifest, err = PathToOSFile(relativePath); err != nil { - return nil, err - } - - decoder := yaml.NewYAMLOrJSONDecoder(manifest, 100) - for { - var out unstructured.Unstructured - err = decoder.Decode(&out) - if err != nil { - // this would indicate it's malformed YAML. - break - } - - if out.GetKind() == "Role" { - var marshaled []byte - marshaled, err = out.MarshalJSON() - json.Unmarshal(marshaled, &role) - break - } - } - - if err != io.EOF && err != nil { - return nil, err - } - return &role, nil -} diff --git a/test/e2e/framework/role_binding.go b/test/e2e/framework/role_binding.go deleted file mode 100644 index 955ad96193..0000000000 --- a/test/e2e/framework/role_binding.go +++ /dev/null @@ -1,108 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package framework - -import ( - "context" - "encoding/json" - "io" - rbacv1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/client-go/kubernetes" - "os" -) - -func CreateRoleBinding(kubeClient kubernetes.Interface, ns string, relativePath string) (finalizerFn, error) { - finalizerFn := func() error { - return DeleteRoleBinding(kubeClient, ns, relativePath) - } - roleBinding, err := parseRoleBindingYaml(relativePath) - if err != nil { - return finalizerFn, err - } - - roleBinding.Namespace = ns - - _, err = kubeClient.RbacV1().RoleBindings(ns).Get(context.TODO(), roleBinding.Name, metav1.GetOptions{}) - - if err == nil { - // RoleBinding already exists -> Update - _, err = kubeClient.RbacV1().RoleBindings(ns).Update(context.TODO(), roleBinding, metav1.UpdateOptions{}) - if err != nil { - return finalizerFn, err - } - } else { - // RoleBinding doesn't exists -> Create - _, err = kubeClient.RbacV1().RoleBindings(ns).Create(context.TODO(), roleBinding, metav1.CreateOptions{}) - if err != nil { - return finalizerFn, err - } - } - - return finalizerFn, err -} - -func DeleteRoleBinding(kubeClient kubernetes.Interface, ns string, relativePath string) error { - roleBinding, err := parseRoleBindingYaml(relativePath) - if err != nil { - return err - } - - if err := kubeClient.RbacV1().RoleBindings(ns).Delete( - context.TODO(), - roleBinding.Name, - metav1.DeleteOptions{}, - ); err != nil { - return err - } - - return nil -} - -func parseRoleBindingYaml(relativePath string) (*rbacv1.RoleBinding, error) { - var manifest *os.File - var err error - - var roleBinding rbacv1.RoleBinding - if manifest, err = PathToOSFile(relativePath); err != nil { - return nil, err - } - - decoder := yaml.NewYAMLOrJSONDecoder(manifest, 100) - for { - var out unstructured.Unstructured - err = decoder.Decode(&out) - if err != nil { - // this would indicate it's malformed YAML. - break - } - - if out.GetKind() == "RoleBinding" { - var marshaled []byte - marshaled, err = out.MarshalJSON() - json.Unmarshal(marshaled, &roleBinding) - break - } - } - - if err != io.EOF && err != nil { - return nil, err - } - return &roleBinding, nil -} diff --git a/test/e2e/framework/service.go b/test/e2e/framework/service.go deleted file mode 100644 index 09810bbc1e..0000000000 --- a/test/e2e/framework/service.go +++ /dev/null @@ -1,122 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package framework - -import ( - "context" - "encoding/json" - "fmt" - "io" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/util/yaml" - "os" - "time" - - "github.com/pkg/errors" - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" -) - -func CreateService(kubeClient kubernetes.Interface, ns string, relativePath string) (finalizerFn, error) { - finalizerFn := func() error { - return DeleteService(kubeClient, ns, relativePath) - } - service, err := parseServiceYaml(relativePath) - if err != nil { - return finalizerFn, err - } - - service.Namespace = ns - - _, err = kubeClient.CoreV1().Services(ns).Get(context.TODO(), service.Name, metav1.GetOptions{}) - - if err == nil { - // Service already exists -> Update - _, err = kubeClient.CoreV1().Services(ns).Update(context.TODO(), service, metav1.UpdateOptions{}) - if err != nil { - return finalizerFn, err - } - } else { - // Service doesn't exists -> Create - _, err = kubeClient.CoreV1().Services(ns).Create(context.TODO(), service, metav1.CreateOptions{}) - if err != nil { - return finalizerFn, err - } - } - - return finalizerFn, err -} - -func WaitForServiceReady(kubeClient kubernetes.Interface, namespace string, serviceName string) error { - err := wait.Poll(time.Second, time.Minute*5, func() (bool, error) { - endpoints, err := getEndpoints(kubeClient, namespace, serviceName) - if err != nil { - return false, err - } - if len(endpoints.Subsets) != 0 && len(endpoints.Subsets[0].Addresses) > 0 { - return true, nil - } - return false, nil - }) - return err -} - -func getEndpoints(kubeClient kubernetes.Interface, namespace, serviceName string) (*v1.Endpoints, error) { - endpoints, err := kubeClient.CoreV1().Endpoints(namespace).Get(context.TODO(), serviceName, metav1.GetOptions{}) - if err != nil { - return nil, errors.Wrap(err, fmt.Sprintf("requesting endpoints for service %v failed", serviceName)) - } - return endpoints, nil -} - -func parseServiceYaml(relativePath string) (*v1.Service, error) { - var manifest *os.File - var err error - - var service v1.Service - if manifest, err = PathToOSFile(relativePath); err != nil { - return nil, err - } - - decoder := yaml.NewYAMLOrJSONDecoder(manifest, 100) - for { - var out unstructured.Unstructured - err = decoder.Decode(&out) - if err != nil { - // this would indicate it's malformed YAML. - break - } - - if out.GetKind() == "Service" { - var marshaled []byte - marshaled, err = out.MarshalJSON() - json.Unmarshal(marshaled, &service) - break - } - } - - if err != io.EOF && err != nil { - return nil, err - } - return &service, nil -} - -func DeleteService(kubeClient kubernetes.Interface, name string, namespace string) error { - return kubeClient.CoreV1().Services(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}) -} diff --git a/test/e2e/framework/service_account.go b/test/e2e/framework/service_account.go deleted file mode 100644 index ceae8187d6..0000000000 --- a/test/e2e/framework/service_account.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package framework - -import ( - "context" - "encoding/json" - "io" - "os" - - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/client-go/kubernetes" -) - -func CreateServiceAccount(kubeClient kubernetes.Interface, namespace string, relativePath string) (finalizerFn, error) { - finalizerFn := func() error { - return DeleteServiceAccount(kubeClient, namespace, relativePath) - } - - serviceAccount, err := parseServiceAccountYaml(relativePath) - if err != nil { - return finalizerFn, err - } - serviceAccount.Namespace = namespace - _, err = kubeClient.CoreV1().ServiceAccounts(namespace).Create(context.TODO(), serviceAccount, metav1.CreateOptions{}) - if err != nil { - return finalizerFn, err - } - - return finalizerFn, nil -} - -func parseServiceAccountYaml(relativePath string) (*v1.ServiceAccount, error) { - var manifest *os.File - var err error - - var serviceAccount v1.ServiceAccount - if manifest, err = PathToOSFile(relativePath); err != nil { - return nil, err - } - - decoder := yaml.NewYAMLOrJSONDecoder(manifest, 100) - for { - var out unstructured.Unstructured - err = decoder.Decode(&out) - if err != nil { - // this would indicate it's malformed YAML. - break - } - - if out.GetKind() == "ServiceAccount" { - var marshaled []byte - marshaled, err = out.MarshalJSON() - json.Unmarshal(marshaled, &serviceAccount) - break - } - } - - if err != io.EOF && err != nil { - return nil, err - } - return &serviceAccount, nil -} - -func DeleteServiceAccount(kubeClient kubernetes.Interface, namespace string, relativePath string) error { - serviceAccount, err := parseServiceAccountYaml(relativePath) - if err != nil { - return err - } - - return kubeClient.CoreV1().ServiceAccounts(namespace).Delete(context.TODO(), serviceAccount.Name, metav1.DeleteOptions{}) -} diff --git a/test/e2e/framework/sparkapplication.go b/test/e2e/framework/sparkapplication.go deleted file mode 100644 index b9adab0eb4..0000000000 --- a/test/e2e/framework/sparkapplication.go +++ /dev/null @@ -1,75 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package framework - -import ( - "context" - "fmt" - - "github.com/pkg/errors" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/yaml" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - crdclientset "github.com/kubeflow/spark-operator/pkg/client/clientset/versioned" -) - -func MakeSparkApplicationFromYaml(pathToYaml string) (*v1beta2.SparkApplication, error) { - manifest, err := PathToOSFile(pathToYaml) - if err != nil { - return nil, err - } - tectonicPromOp := v1beta2.SparkApplication{} - if err := yaml.NewYAMLOrJSONDecoder(manifest, 100).Decode(&tectonicPromOp); err != nil { - return nil, errors.Wrap(err, fmt.Sprintf("failed to decode file %s", pathToYaml)) - } - - return &tectonicPromOp, nil -} - -func CreateSparkApplication(crdclientset crdclientset.Interface, namespace string, sa *v1beta2.SparkApplication) error { - _, err := crdclientset.SparkoperatorV1beta2().SparkApplications(namespace).Create(context.TODO(), sa, metav1.CreateOptions{}) - if err != nil { - return errors.Wrap(err, fmt.Sprintf("failed to create SparkApplication %s", sa.Name)) - } - return nil -} - -func UpdateSparkApplication(crdclientset crdclientset.Interface, namespace string, sa *v1beta2.SparkApplication) error { - _, err := crdclientset.SparkoperatorV1beta2().SparkApplications(namespace).Update(context.TODO(), sa, metav1.UpdateOptions{}) - if err != nil { - return errors.Wrap(err, fmt.Sprintf("failed to update SparkApplication %s", sa.Name)) - } - return nil -} - -func GetSparkApplication(crdclientset crdclientset.Interface, namespace, name string) (*v1beta2.SparkApplication, error) { - sa, err := crdclientset.SparkoperatorV1beta2().SparkApplications(namespace).Get(context.TODO(), name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - return sa, nil -} - -func DeleteSparkApplication(crdclientset crdclientset.Interface, namespace, name string) error { - err := crdclientset.SparkoperatorV1beta2().SparkApplications(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}) - if err != nil { - return err - } - return nil -} diff --git a/test/e2e/lifecycle_test.go b/test/e2e/lifecycle_test.go deleted file mode 100644 index 95b93a76b4..0000000000 --- a/test/e2e/lifecycle_test.go +++ /dev/null @@ -1,103 +0,0 @@ -/* -Copyright 2019 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package e2e - -import ( - "container/list" - "context" - "strings" - "testing" - - "github.com/stretchr/testify/assert" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/wait" - - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - appFramework "github.com/kubeflow/spark-operator/test/e2e/framework" -) - -func TestLifeCycleManagement(t *testing.T) { - appName := "spark-pi" - app, err := appFramework.MakeSparkApplicationFromYaml("../../examples/spark-pi.yaml") - assert.Equal(t, nil, err) - - if appFramework.SparkTestNamespace != "" { - app.ObjectMeta.Namespace = appFramework.SparkTestNamespace - } - - if appFramework.SparkTestServiceAccount != "" { - app.Spec.Driver.ServiceAccount = &appFramework.SparkTestServiceAccount - } - - if appFramework.SparkTestImage != "" { - app.Spec.Image = &appFramework.SparkTestImage - } - - err = appFramework.CreateSparkApplication(framework.SparkApplicationClient, appFramework.SparkTestNamespace, app) - assert.Equal(t, nil, err) - - states := list.New() - status := GetJobStatus(t, appName) - states.PushBack(status) - - app = runApp(t, appName, states) - - newNumExecutors := int32(2) - app.Spec.Executor.Instances = &newNumExecutors - err = appFramework.UpdateSparkApplication(framework.SparkApplicationClient, appFramework.SparkTestNamespace, app) - assert.Equal(t, nil, err) - - status = GetJobStatus(t, appName) - if status != states.Back().Value { - states.PushBack(status) - } - - runApp(t, appName, states) - - assert.Equal(t, len(STATES), states.Len()) - index := 0 - for e := states.Front(); e != nil; e = e.Next() { - assert.Equal(t, STATES[index], string((e.Value).(v1beta2.ApplicationStateType))) - index += 1 - } - - err = appFramework.DeleteSparkApplication(framework.SparkApplicationClient, appFramework.SparkTestNamespace, appName) - assert.Equal(t, nil, err) -} - -func runApp(t *testing.T, appName string, states *list.List) *v1beta2.SparkApplication { - err := wait.Poll(INTERVAL, TIMEOUT, func() (done bool, err error) { - status := GetJobStatus(t, appName) - if status != states.Back().Value { - states.PushBack(status) - } - if status == "COMPLETED" { - return true, nil - } - return false, nil - }) - assert.Equal(t, nil, err) - - app, _ := appFramework.GetSparkApplication(framework.SparkApplicationClient, appFramework.SparkTestNamespace, appName) - podName := app.Status.DriverInfo.PodName - rawLogs, err := framework.KubeClient.CoreV1().Pods(appFramework.SparkTestNamespace).GetLogs(podName, &v1.PodLogOptions{}).Do(context.TODO()).Raw() - assert.Equal(t, nil, err) - assert.NotEqual(t, -1, strings.Index(string(rawLogs), "Pi is roughly 3")) - - return app -} diff --git a/test/e2e/main_test.go b/test/e2e/main_test.go deleted file mode 100644 index 07b0a19ee2..0000000000 --- a/test/e2e/main_test.go +++ /dev/null @@ -1,84 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package e2e - -import ( - "flag" - "github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" - "github.com/stretchr/testify/assert" - "log" - "os" - "testing" - "time" - - operatorFramework "github.com/kubeflow/spark-operator/test/e2e/framework" -) - -var framework *operatorFramework.Framework - -// Wait for test job to finish. Poll for updates once a second. Time out after 240 seconds. -var TIMEOUT = 240 * time.Second -var INTERVAL = 1 * time.Second - -var STATES = [9]string{ - "", - "SUBMITTED", - "RUNNING", - "COMPLETED", - "INVALIDATING", - "PENDING_RERUN", - "SUBMITTED", - "RUNNING", - "COMPLETED", -} - -func GetJobStatus(t *testing.T, sparkAppName string) v1beta2.ApplicationStateType { - app, err := operatorFramework.GetSparkApplication(framework.SparkApplicationClient, operatorFramework.SparkTestNamespace, sparkAppName) - assert.Equal(t, nil, err) - return app.Status.AppState.State -} - -func TestMain(m *testing.M) { - kubeconfig := flag.String("kubeconfig", "", "kube config path, e.g. $HOME/.kube/config") - opImage := flag.String("operator-image", "", "operator image, e.g. image:tag") - opImagePullPolicy := flag.String("operator-image-pullPolicy", "IfNotPresent", "pull policy, e.g. Always") - ns := flag.String("namespace", "spark-operator", "e2e test namespace") - sparkTestNamespace := flag.String("spark", "spark", "e2e test spark-test-namespace") - sparkTestImage := flag.String("spark-test-image", "", "spark test image, e.g. image:tag") - sparkTestServiceAccount := flag.String("spark-test-service-account", "spark", "e2e test spark test service account") - flag.Parse() - - if *kubeconfig == "" { - log.Printf("No kubeconfig found. Bypassing e2e tests") - os.Exit(0) - } - var err error - if framework, err = operatorFramework.New(*ns, *sparkTestNamespace, *kubeconfig, *opImage, *opImagePullPolicy); err != nil { - log.Fatalf("failed to set up framework: %v\n", err) - } - - operatorFramework.SparkTestNamespace = *sparkTestNamespace - operatorFramework.SparkTestImage = *sparkTestImage - operatorFramework.SparkTestServiceAccount = *sparkTestServiceAccount - code := m.Run() - - if err := framework.Teardown(); err != nil { - log.Fatalf("failed to tear down framework: %v\n", err) - } - - os.Exit(code) -} diff --git a/test/e2e/sparkapplication_test.go b/test/e2e/sparkapplication_test.go new file mode 100644 index 0000000000..a3e8829a0d --- /dev/null +++ b/test/e2e/sparkapplication_test.go @@ -0,0 +1,267 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e_test + +import ( + "context" + "os" + "path/filepath" + "strings" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/util/yaml" + + "github.com/kubeflow/spark-operator/api/v1beta2" + "github.com/kubeflow/spark-operator/pkg/util" +) + +const ( + PollInterval = 1 * time.Second + WaitTimeout = 300 * time.Second +) + +var _ = Describe("Example SparkApplication", func() { + Context("spark-pi", func() { + ctx := context.Background() + path := filepath.Join("..", "..", "examples", "spark-pi.yaml") + app := &v1beta2.SparkApplication{} + + BeforeEach(func() { + By("Parsing SparkApplication from file") + file, err := os.Open(path) + Expect(err).NotTo(HaveOccurred()) + Expect(file).NotTo(BeNil()) + + decoder := yaml.NewYAMLOrJSONDecoder(file, 100) + Expect(decoder).NotTo(BeNil()) + Expect(decoder.Decode(app)).NotTo(HaveOccurred()) + + By("Creating SparkApplication") + Expect(k8sClient.Create(ctx, app)).To(Succeed()) + }) + + AfterEach(func() { + key := types.NamespacedName{Namespace: app.Namespace, Name: app.Name} + Expect(k8sClient.Get(ctx, key, app)).To(Succeed()) + + By("Deleting SparkApplication") + Expect(k8sClient.Delete(ctx, app)).To(Succeed()) + }) + + It("should complete successfully", func() { + By("Waiting for SparkApplication to complete") + key := types.NamespacedName{Namespace: app.Namespace, Name: app.Name} + cancelCtx, cancelFunc := context.WithTimeout(ctx, WaitTimeout) + defer cancelFunc() + Expect(wait.PollUntilContextCancel(cancelCtx, PollInterval, true, func(ctx context.Context) (done bool, err error) { + err = k8sClient.Get(ctx, key, app) + if app.Status.AppState.State == v1beta2.ApplicationStateCompleted { + return true, nil + } + return false, err + })).NotTo(HaveOccurred()) + + By("Checking out driver logs") + driverPodName := util.GetDriverPodName(app) + bytes, err := clientset.CoreV1().Pods(app.Namespace).GetLogs(driverPodName, &corev1.PodLogOptions{}).Do(ctx).Raw() + Expect(err).NotTo(HaveOccurred()) + Expect(bytes).NotTo(BeEmpty()) + Expect(strings.Contains(string(bytes), "Pi is roughly 3")).To(BeTrue()) + }) + }) + + Context("spark-pi-configmap", func() { + ctx := context.Background() + path := filepath.Join("..", "..", "examples", "spark-pi-configmap.yaml") + app := &v1beta2.SparkApplication{} + + BeforeEach(func() { + By("Parsing SparkApplication from file") + file, err := os.Open(path) + Expect(err).NotTo(HaveOccurred()) + Expect(file).NotTo(BeNil()) + + decoder := yaml.NewYAMLOrJSONDecoder(file, 100) + Expect(decoder).NotTo(BeNil()) + Expect(decoder.Decode(app)).NotTo(HaveOccurred()) + + By("Creating ConfigMap") + for _, volume := range app.Spec.Volumes { + if volume.ConfigMap != nil { + configMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: volume.ConfigMap.Name, + Namespace: app.Namespace, + }, + } + Expect(k8sClient.Create(ctx, configMap)).To(Succeed()) + } + } + + By("Creating SparkApplication") + Expect(k8sClient.Create(ctx, app)).To(Succeed()) + }) + + AfterEach(func() { + key := types.NamespacedName{Namespace: app.Namespace, Name: app.Name} + Expect(k8sClient.Get(ctx, key, app)).To(Succeed()) + + volumes := app.Spec.Volumes + By("Deleting SparkApplication") + Expect(k8sClient.Delete(ctx, app)).To(Succeed()) + + By("Deleting ConfigMap") + for _, volume := range volumes { + if volume.ConfigMap != nil { + configMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: volume.ConfigMap.Name, + Namespace: app.Namespace, + }, + } + 
Expect(k8sClient.Delete(ctx, configMap)).To(Succeed()) + } + } + }) + + It("Should complete successfully", func() { + By("Waiting for SparkApplication to complete") + key := types.NamespacedName{Namespace: app.Namespace, Name: app.Name} + cancelCtx, cancelFunc := context.WithTimeout(ctx, WaitTimeout) + defer cancelFunc() + Expect(wait.PollUntilContextCancel(cancelCtx, PollInterval, true, func(ctx context.Context) (done bool, err error) { + err = k8sClient.Get(ctx, key, app) + if app.Status.AppState.State == v1beta2.ApplicationStateCompleted { + return true, nil + } + return false, err + })).NotTo(HaveOccurred()) + + By("Checking out driver logs") + driverPodName := util.GetDriverPodName(app) + bytes, err := clientset.CoreV1().Pods(app.Namespace).GetLogs(driverPodName, &corev1.PodLogOptions{}).Do(ctx).Raw() + Expect(err).NotTo(HaveOccurred()) + Expect(bytes).NotTo(BeEmpty()) + Expect(strings.Contains(string(bytes), "Pi is roughly 3")).To(BeTrue()) + }) + }) + + Context("spark-pi-custom-resource", func() { + ctx := context.Background() + path := filepath.Join("..", "..", "examples", "spark-pi-custom-resource.yaml") + app := &v1beta2.SparkApplication{} + + BeforeEach(func() { + By("Parsing SparkApplication from file") + file, err := os.Open(path) + Expect(err).NotTo(HaveOccurred()) + Expect(file).NotTo(BeNil()) + + decoder := yaml.NewYAMLOrJSONDecoder(file, 100) + Expect(decoder).NotTo(BeNil()) + Expect(decoder.Decode(app)).NotTo(HaveOccurred()) + + By("Creating SparkApplication") + Expect(k8sClient.Create(ctx, app)).To(Succeed()) + }) + + AfterEach(func() { + key := types.NamespacedName{Namespace: app.Namespace, Name: app.Name} + Expect(k8sClient.Get(ctx, key, app)).To(Succeed()) + + By("Deleting SparkApplication") + Expect(k8sClient.Delete(ctx, app)).To(Succeed()) + }) + + It("Should complete successfully", func() { + By("Waiting for SparkApplication to complete") + key := types.NamespacedName{Namespace: app.Namespace, Name: app.Name} + cancelCtx, cancelFunc := context.WithTimeout(ctx, WaitTimeout) + defer cancelFunc() + Expect(wait.PollUntilContextCancel(cancelCtx, PollInterval, true, func(ctx context.Context) (done bool, err error) { + err = k8sClient.Get(ctx, key, app) + if app.Status.AppState.State == v1beta2.ApplicationStateCompleted { + return true, nil + } + return false, err + })).NotTo(HaveOccurred()) + + By("Checking out driver logs") + driverPodName := util.GetDriverPodName(app) + bytes, err := clientset.CoreV1().Pods(app.Namespace).GetLogs(driverPodName, &corev1.PodLogOptions{}).Do(ctx).Raw() + Expect(err).NotTo(HaveOccurred()) + Expect(bytes).NotTo(BeEmpty()) + Expect(strings.Contains(string(bytes), "Pi is roughly 3")).To(BeTrue()) + }) + }) + + Context("spark-pi-python", func() { + ctx := context.Background() + path := filepath.Join("..", "..", "examples", "spark-pi-python.yaml") + app := &v1beta2.SparkApplication{} + + BeforeEach(func() { + By("Parsing SparkApplication from file") + file, err := os.Open(path) + Expect(err).NotTo(HaveOccurred()) + Expect(file).NotTo(BeNil()) + + decoder := yaml.NewYAMLOrJSONDecoder(file, 100) + Expect(decoder).NotTo(BeNil()) + Expect(decoder.Decode(app)).NotTo(HaveOccurred()) + + By("Creating SparkApplication") + Expect(k8sClient.Create(ctx, app)).To(Succeed()) + }) + + AfterEach(func() { + key := types.NamespacedName{Namespace: app.Namespace, Name: app.Name} + Expect(k8sClient.Get(ctx, key, app)).To(Succeed()) + + By("Deleting SparkApplication") + Expect(k8sClient.Delete(ctx, app)).To(Succeed()) + }) + + It("Should complete successfully", 
func() { + By("Waiting for SparkApplication to complete") + key := types.NamespacedName{Namespace: app.Namespace, Name: app.Name} + cancelCtx, cancelFunc := context.WithTimeout(ctx, WaitTimeout) + defer cancelFunc() + Expect(wait.PollUntilContextCancel(cancelCtx, PollInterval, true, func(ctx context.Context) (done bool, err error) { + err = k8sClient.Get(ctx, key, app) + if app.Status.AppState.State == v1beta2.ApplicationStateCompleted { + return true, nil + } + return false, err + })).NotTo(HaveOccurred()) + + By("Checking out driver logs") + driverPodName := util.GetDriverPodName(app) + bytes, err := clientset.CoreV1().Pods(app.Namespace).GetLogs(driverPodName, &corev1.PodLogOptions{}).Do(ctx).Raw() + Expect(err).NotTo(HaveOccurred()) + Expect(bytes).NotTo(BeEmpty()) + Expect(strings.Contains(string(bytes), "Pi is roughly 3")).To(BeTrue()) + }) + }) +}) diff --git a/test/e2e/suit_test.go b/test/e2e/suit_test.go new file mode 100644 index 0000000000..4c60f97622 --- /dev/null +++ b/test/e2e/suit_test.go @@ -0,0 +1,159 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e_test + +import ( + "context" + "fmt" + "os" + "path/filepath" + "runtime" + "testing" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "helm.sh/helm/v3/pkg/action" + "helm.sh/helm/v3/pkg/chart/loader" + "helm.sh/helm/v3/pkg/cli" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + "github.com/kubeflow/spark-operator/api/v1beta1" + "github.com/kubeflow/spark-operator/api/v1beta2" + "github.com/kubeflow/spark-operator/pkg/util" + // +kubebuilder:scaffold:imports +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. + +const ( + ReleaseName = "spark-operator" + ReleaseNamespace = "spark-operator" +) + +var ( + cfg *rest.Config + testEnv *envtest.Environment + k8sClient client.Client + clientset *kubernetes.Clientset +) + +func TestSparkOperator(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Spark Operator Suite") +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + var err error + + By("Bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, + ErrorIfCRDPathMissing: true, + + // The BinaryAssetsDirectory is only required if you want to run the tests directly + // without call the makefile target test. If not informed it will look for the + // default path defined in controller-runtime which is /usr/local/kubebuilder/. 
+ // Note that you must have the required binaries setup under the bin directory to perform + // the tests directly. When we run make test it will be setup and used automatically. + BinaryAssetsDirectory: filepath.Join("..", "..", "bin", "k8s", + fmt.Sprintf("1.29.3-%s-%s", runtime.GOOS, runtime.GOARCH)), + UseExistingCluster: util.BoolPtr(true), + } + + cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + err = v1beta2.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + err = v1beta1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + // +kubebuilder:scaffold:scheme + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + + clientset, err = kubernetes.NewForConfig(cfg) + Expect(err).NotTo(HaveOccurred()) + Expect(clientset).NotTo(BeNil()) + + By("Creating release namespace") + namespace := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ReleaseNamespace}} + Expect(k8sClient.Create(context.TODO(), namespace)).NotTo(HaveOccurred()) + + By("Installing the Spark operator helm chart") + envSettings := cli.New() + envSettings.SetNamespace(ReleaseNamespace) + actionConfig := &action.Configuration{} + Expect(actionConfig.Init(envSettings.RESTClientGetter(), envSettings.Namespace(), os.Getenv("HELM_DRIVER"), func(format string, v ...interface{}) { + logf.Log.Info(fmt.Sprintf(format, v...)) + })).NotTo(HaveOccurred()) + installAction := action.NewInstall(actionConfig) + Expect(installAction).NotTo(BeNil()) + installAction.ReleaseName = ReleaseName + installAction.Namespace = envSettings.Namespace() + installAction.Wait = true + installAction.Timeout = 5 * time.Minute + chartPath := filepath.Join("..", "..", "charts", "spark-operator-chart") + chart, err := loader.Load(chartPath) + Expect(err).NotTo(HaveOccurred()) + Expect(chart).NotTo(BeNil()) + release, err := installAction.Run(chart, nil) + Expect(err).NotTo(HaveOccurred()) + Expect(release).NotTo(BeNil()) +}) + +var _ = AfterSuite(func() { + By("Uninstalling the Spark operator helm chart") + envSettings := cli.New() + envSettings.SetNamespace(ReleaseNamespace) + actionConfig := &action.Configuration{} + Expect(actionConfig.Init(envSettings.RESTClientGetter(), envSettings.Namespace(), os.Getenv("HELM_DRIVER"), func(format string, v ...interface{}) { + logf.Log.Info(fmt.Sprintf(format, v...)) + })).NotTo(HaveOccurred()) + uninstallAction := action.NewUninstall(actionConfig) + Expect(uninstallAction).NotTo(BeNil()) + uninstallAction.Wait = true + uninstallAction.Timeout = 5 * time.Minute + resp, err := uninstallAction.Run(ReleaseName) + Expect(err).To(BeNil()) + Expect(resp).NotTo(BeNil()) + + By("Deleting release namespace") + namespace := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ReleaseNamespace}} + Expect(k8sClient.Delete(context.TODO(), namespace)).NotTo(HaveOccurred()) + + By("Tearing down the test environment") + err = testEnv.Stop() + Expect(err).ToNot(HaveOccurred()) +}) diff --git a/test/e2e/volume_mount_test.go b/test/e2e/volume_mount_test.go deleted file mode 100644 index 2bb78a5012..0000000000 --- a/test/e2e/volume_mount_test.go +++ /dev/null @@ -1,92 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This integration test verifies that a volume can be successfully -// mounted in the driver and executor pods. - -package e2e - -import ( - "regexp" - "testing" - - "github.com/stretchr/testify/assert" - - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" - "k8s.io/kubectl/pkg/describe" - - appFramework "github.com/kubeflow/spark-operator/test/e2e/framework" -) - -type describeClient struct { - T *testing.T - Namespace string - Err error - kubernetes.Interface -} - -func TestMountConfigMap(t *testing.T) { - appName := "spark-pi" - - sa, err := appFramework.MakeSparkApplicationFromYaml("../../examples/spark-pi-configmap.yaml") - assert.Equal(t, nil, err) - - if appFramework.SparkTestNamespace != "" { - sa.ObjectMeta.Namespace = appFramework.SparkTestNamespace - } - - if appFramework.SparkTestServiceAccount != "" { - sa.Spec.Driver.ServiceAccount = &appFramework.SparkTestServiceAccount - } - - if appFramework.SparkTestImage != "" { - sa.Spec.Image = &appFramework.SparkTestImage - } - - _, err = appFramework.CreateConfigMap(framework.KubeClient, "dummy-cm", appFramework.SparkTestNamespace) - assert.Equal(t, nil, err) - - err = appFramework.CreateSparkApplication(framework.SparkApplicationClient, appFramework.SparkTestNamespace, sa) - assert.Equal(t, nil, err) - - status := GetJobStatus(t, appName) - err = wait.Poll(INTERVAL, TIMEOUT, func() (done bool, err error) { - if status == "RUNNING" { - return true, nil - } - status = GetJobStatus(t, appName) - return false, nil - }) - assert.Equal(t, nil, err) - - app, err := appFramework.GetSparkApplication(framework.SparkApplicationClient, appFramework.SparkTestNamespace, appName) - assert.Equal(t, nil, err) - podName := app.Status.DriverInfo.PodName - - describeClient := &describeClient{T: t, Namespace: appFramework.SparkTestNamespace, Interface: framework.KubeClient} - describer := describe.PodDescriber{Interface: describeClient} - - podDesc, err := describer.Describe(appFramework.SparkTestNamespace, podName, describe.DescriberSettings{ShowEvents: true}) - assert.Equal(t, nil, err) - - matched, err := regexp.MatchString(`dummy-cm`, podDesc) - assert.Equal(t, true, matched) - assert.Equal(t, nil, err) - - err = appFramework.DeleteSparkApplication(framework.SparkApplicationClient, appFramework.SparkTestNamespace, appName) - assert.Equal(t, nil, err) -} diff --git a/version.go b/version.go new file mode 100644 index 0000000000..e08232a516 --- /dev/null +++ b/version.go @@ -0,0 +1,90 @@ +/* +Copyright 2024 The Kubeflow authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package sparkoperator + +import ( + "fmt" + "runtime" +) + +type VersionInfo struct { + Version string + BuildDate string + GitCommit string + GitTag string + GitTreeState string + GoVersion string + Compiler string + Platform string +} + +var ( + version = "0.0.0" // value from VERSION file + buildDate = "1970-01-01T00:00:00Z" // output from `date -u +'%Y-%m-%dT%H:%M:%SZ'` + gitCommit = "" // output from `git rev-parse HEAD` + gitTag = "" // output from `git describe --exact-match --tags HEAD` (if clean tree state) + gitTreeState = "" // determined from `git status --porcelain`. either 'clean' or 'dirty' +) + +func getVersion() VersionInfo { + var versionStr string + if gitCommit != "" && gitTag != "" && gitTreeState == "clean" { + // if we have a clean tree state and the current commit is tagged, + // this is an official release. + versionStr = gitTag + } else { + // otherwise formulate a query version string based on as much metadata + // information we have available. + versionStr = version + if len(gitCommit) >= 7 { + versionStr += "+" + gitCommit[0:7] + if gitTreeState != "clean" { + versionStr += ".dirty" + } + } else { + versionStr += "+unknown" + } + } + return VersionInfo{ + Version: versionStr, + BuildDate: buildDate, + GitCommit: gitCommit, + GitTag: gitTag, + GitTreeState: gitTreeState, + GoVersion: runtime.Version(), + Compiler: runtime.Compiler, + Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH), + } +} + +// PrintVersion info directly by command +func PrintVersion(short bool) { + v := getVersion() + fmt.Printf("Spark Operator Version: %s\n", v.Version) + if short { + return + } + fmt.Printf("Build Date: %s\n", v.BuildDate) + fmt.Printf("Git Commit ID: %s\n", v.GitCommit) + if v.GitTag != "" { + fmt.Printf("Git Tag: %s\n", v.GitTag) + } + fmt.Printf("Git Tree State: %s\n", v.GitTreeState) + fmt.Printf("Go Version: %s\n", v.GoVersion) + fmt.Printf("Compiler: %s\n", v.Compiler) + fmt.Printf("Platform: %s\n", v.Platform) +} From 6ff204a6abd2f89172028ff5f2b3e0a1da8192ea Mon Sep 17 00:00:00 2001 From: Yi Chen Date: Fri, 2 Aug 2024 05:27:06 +0800 Subject: [PATCH 85/87] Fix broken integration test CI (#2109) Signed-off-by: Yi Chen --- .github/workflows/integration.yaml | 32 ++++++++++++++++-------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/.github/workflows/integration.yaml b/.github/workflows/integration.yaml index be5200f9e2..2d927f9cac 100644 --- a/.github/workflows/integration.yaml +++ b/.github/workflows/integration.yaml @@ -98,7 +98,7 @@ jobs: run: | BRANCH="" if [ "${{ github.event_name }}" == "push" ]; then - BRANCH=${{ github.ref }} + BRANCH=${{ github.ref_name }} elif [ "${{ github.event_name }}" == "pull_request" ]; then BRANCH=${{ github.base_ref }} fi @@ -115,23 +115,9 @@ jobs: with: version: v3.14.3 - - name: Produce the helm documentation - run: | - make helm-docs - if ! 
git diff --quiet -- charts/spark-operator-chart/README.md; then - echo "Need to re-run 'make helm-docs' and commit the changes" - false - fi - - name: Set up chart-testing uses: helm/chart-testing-action@v2.6.1 - - name: Print chart-testing version information - run: ct version - - - name: Run chart-testing (lint) - run: ct lint --check-version-increment=false - - name: Run chart-testing (list-changed) id: list-changed env: @@ -142,10 +128,25 @@ jobs: echo "changed=true" >> "$GITHUB_OUTPUT" fi + - name: Run chart-testing (lint) + if: steps.list-changed.outputs.changed == 'true' + run: ct lint --check-version-increment=false --target-branch $BRANCH + - name: Detect CRDs drift between chart and manifest + if: steps.list-changed.outputs.changed == 'true' run: make detect-crds-drift + - name: Produce the helm documentation + if: steps.list-changed.outputs.changed == 'true' + run: | + make helm-docs + if ! git diff --quiet -- charts/spark-operator-chart/README.md; then + echo "Need to re-run 'make helm-docs' and commit the changes" + false + fi + - name: setup minikube + if: steps.list-changed.outputs.changed == 'true' uses: manusa/actions-setup-minikube@v2.11.0 with: minikube version: v1.33.0 @@ -154,6 +155,7 @@ jobs: github token: ${{ inputs.github-token }} - name: Run chart-testing (install) + if: steps.list-changed.outputs.changed == 'true' run: | docker build -t docker.io/kubeflow/spark-operator:local . minikube image load docker.io/kubeflow/spark-operator:local From e2693f19c5f06b8e1c4a100a605d1c80a987e140 Mon Sep 17 00:00:00 2001 From: Yi Chen Date: Fri, 2 Aug 2024 11:34:06 +0800 Subject: [PATCH 86/87] Fix CI: environment variable BRANCH is missed (#2111) Signed-off-by: Yi Chen --- .github/workflows/integration.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/integration.yaml b/.github/workflows/integration.yaml index 2d927f9cac..9380dfb2d0 100644 --- a/.github/workflows/integration.yaml +++ b/.github/workflows/integration.yaml @@ -130,6 +130,8 @@ jobs: - name: Run chart-testing (lint) if: steps.list-changed.outputs.changed == 'true' + env: + BRANCH: ${{ steps.get_branch.outputs.BRANCH }} run: ct lint --check-version-increment=false --target-branch $BRANCH - name: Detect CRDs drift between chart and manifest From c42456aa2a98f34817a0784ef9ebe299f869ecc5 Mon Sep 17 00:00:00 2001 From: Sigmar Stefansson Date: Wed, 7 Aug 2024 07:26:04 +0000 Subject: [PATCH 87/87] libcap2 --- Dockerfile | 2 +- Dockerfile.rh | 1 - go.mod | 2 +- 3 files changed, 2 insertions(+), 3 deletions(-) delete mode 100644 Dockerfile.rh diff --git a/Dockerfile b/Dockerfile index 22489e304b..cf85dcd3cb 100644 --- a/Dockerfile +++ b/Dockerfile @@ -18,7 +18,7 @@ ARG SPARK_IMAGE=spark:3.5.0 FROM golang:1.22.5 AS builder -RUN apk update && apk add --no-cache libcap +RUN apt update && apt install libcap2 && apt clean WORKDIR /workspace diff --git a/Dockerfile.rh b/Dockerfile.rh deleted file mode 100644 index 8b13789179..0000000000 --- a/Dockerfile.rh +++ /dev/null @@ -1 +0,0 @@ - diff --git a/go.mod b/go.mod index 72c1d25480..2f849a676d 100644 --- a/go.mod +++ b/go.mod @@ -20,6 +20,7 @@ require ( go.uber.org/zap v1.27.0 gocloud.dev v0.37.0 golang.org/x/net v0.27.0 + golang.org/x/time v0.5.0 helm.sh/helm/v3 v3.15.3 k8s.io/api v0.30.2 k8s.io/apiextensions-apiserver v0.30.2 @@ -187,7 +188,6 @@ require ( golang.org/x/sys v0.22.0 // indirect golang.org/x/term v0.22.0 // indirect golang.org/x/text v0.16.0 // indirect - golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.23.0 // indirect 
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
     (Optional)
-    ServiceLables is a map of key,value pairs of labels that might be added to the service object.
+    ServiceLabels is a map of key,value pairs of labels that might be added to the service object.
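
For reference, a minimal sketch of how the ServiceLabels field documented above might be used, assuming it is surfaced as `spec.sparkUIOptions.serviceLabels` (per the operator's SparkUIConfiguration type); the label keys and values are illustrative only, not taken from this patch series:

```yaml
# Hypothetical usage sketch; assumes ServiceLabels maps to
# spec.sparkUIOptions.serviceLabels. Label values are illustrative.
apiVersion: sparkoperator.k8s.io/v1beta2
kind: SparkApplication
metadata:
  name: spark-pi
  namespace: default
spec:
  sparkUIOptions:
    serviceLabels:
      team: data-platform
      app.kubernetes.io/managed-by: spark-operator
```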