Add spark
parent e96d558fe1
commit 7c6e4e568e
spark/base/charts/spark-operator/.helmignore (new file, 38 lines)
@@ -0,0 +1,38 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.

ci/

# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/

# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~

# Various IDEs
.project
.idea/
*.tmproj
.vscode/

# MacOS
.DS_Store

# helm-unittest
./tests
.debug
__snapshot__

# helm-docs
README.md.gotmpl
spark/base/charts/spark-operator/Chart.yaml (new file, 11 lines)
@@ -0,0 +1,11 @@
apiVersion: v2
appVersion: v1beta2-1.6.1-3.5.0
description: A Helm chart for Spark on Kubernetes operator
home: https://github.com/kubeflow/spark-operator
keywords:
- spark
maintainers:
- email: yuchaoran2011@gmail.com
  name: yuchaoran2011
name: spark-operator
version: 1.4.2
spark/base/charts/spark-operator/README.md (new file, 146 lines)
@@ -0,0 +1,146 @@
# spark-operator

![Version: 1.4.2](https://img.shields.io/badge/Version-1.4.2-informational?style=flat-square) ![AppVersion: v1beta2-1.6.1-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.6.1--3.5.0-informational?style=flat-square)

A Helm chart for Spark on Kubernetes operator

**Homepage:** <https://github.com/kubeflow/spark-operator>

## Introduction

This chart bootstraps a [Kubernetes Operator for Apache Spark](https://github.com/kubeflow/spark-operator) deployment using the [Helm](https://helm.sh) package manager.

## Prerequisites

- Helm >= 3
- Kubernetes >= 1.16

## Previous Helm Chart

The previous `spark-operator` Helm chart hosted at [helm/charts](https://github.com/helm/charts) has been moved to this repository in accordance with the [deprecation timeline](https://github.com/helm/charts#deprecation-timeline). Note that a few things have changed between this version and the old one:

- This repository **only** supports Helm chart installations using Helm 3+, since the `apiVersion` on the chart has been marked as `v2`.
- Previous versions of the Helm chart have not been migrated, and the version has been set to `1.0.0` at the onset. If you are looking for old versions of the chart, it's best to run `helm pull incubator/sparkoperator --version <your-version>` until you are ready to move to this repository's version.
- Several configuration properties have been changed; carefully review the [values](#values) section below to make sure you're aligned with the new values.

## Usage

### Add Helm Repo

```shell
helm repo add spark-operator https://kubeflow.github.io/spark-operator

helm repo update
```

See [helm repo](https://helm.sh/docs/helm/helm_repo) for command documentation.

### Install the chart

```shell
helm install [RELEASE_NAME] spark-operator/spark-operator
```

For example, if you want to create a release with the name `spark-operator` in the `default` namespace:

```shell
helm install spark-operator spark-operator/spark-operator
```

Note that `helm` will fail to install if the namespace doesn't exist. Either create the namespace beforehand or pass the `--create-namespace` flag to the `helm install` command.

```shell
helm install spark-operator spark-operator/spark-operator \
    --namespace spark-operator \
    --create-namespace
```

See [helm install](https://helm.sh/docs/helm/helm_install) for command documentation.

### Upgrade the chart

```shell
helm upgrade [RELEASE_NAME] spark-operator/spark-operator [flags]
```

See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade) for command documentation.

### Uninstall the chart

```shell
helm uninstall [RELEASE_NAME]
```

This removes all the Kubernetes resources associated with the chart and deletes the release. The CRDs are not removed and will have to be deleted manually.

See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall) for command documentation.
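
The chart's RBAC rules reference the `sparkoperator.k8s.io` API group, so the manual CRD cleanup would look roughly like the sketch below; verify the exact CRD names in your cluster with `kubectl get crds` before deleting anything:

```shell
# List the operator's CRDs first
kubectl get crds | grep sparkoperator.k8s.io

# Remove them once no SparkApplication objects depend on them
kubectl delete crd sparkapplications.sparkoperator.k8s.io
kubectl delete crd scheduledsparkapplications.sparkoperator.k8s.io
```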
## Values

| Key | Type | Default | Description |
|-----|------|---------|-------------|
| affinity | object | `{}` | Affinity for pod assignment |
| batchScheduler.enable | bool | `false` | Enable batch scheduler for spark jobs scheduling. If enabled, users can specify batch scheduler name in spark application |
| commonLabels | object | `{}` | Common labels to add to the resources |
| controllerThreads | int | `10` | Operator concurrency; higher values might increase memory usage |
| envFrom | list | `[]` | Pod environment variable sources |
| fullnameOverride | string | `""` | String to override release name |
| image.pullPolicy | string | `"IfNotPresent"` | Image pull policy |
| image.repository | string | `"docker.io/kubeflow/spark-operator"` | Image repository |
| image.tag | string | `""` | If set, override the image tag whose default is the chart appVersion. |
| imagePullSecrets | list | `[]` | Image pull secrets |
| ingressUrlFormat | string | `""` | Ingress URL format. Requires the UI service to be enabled by setting `uiService.enable` to true. |
| istio.enabled | bool | `false` | When using `istio`, spark jobs need to run without a sidecar to properly terminate |
| labelSelectorFilter | string | `""` | A comma-separated list of key=value, or key labels to filter resources during watch and list based on the specified labels. |
| leaderElection.lockName | string | `"spark-operator-lock"` | Leader election lock name. Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-leader-election-for-high-availability. |
| leaderElection.lockNamespace | string | `""` | Optionally store the lock in another namespace. Defaults to operator's namespace |
| logLevel | int | `2` | Set higher levels for more verbose logging |
| metrics.enable | bool | `true` | Enable prometheus metric scraping |
| metrics.endpoint | string | `"/metrics"` | Metrics serving endpoint |
| metrics.port | int | `10254` | Metrics port |
| metrics.portName | string | `"metrics"` | Metrics port name |
| metrics.prefix | string | `""` | Metric prefix, will be added to all exported metrics |
| nameOverride | string | `""` | String to partially override `spark-operator.fullname` template (will maintain the release name) |
| nodeSelector | object | `{}` | Node labels for pod assignment |
| podAnnotations | object | `{}` | Additional annotations to add to the pod |
| podLabels | object | `{}` | Additional labels to add to the pod |
| podMonitor | object | `{"enable":false,"jobLabel":"spark-operator-podmonitor","labels":{},"podMetricsEndpoint":{"interval":"5s","scheme":"http"}}` | Prometheus pod monitor for operator's pod. |
| podMonitor.enable | bool | `false` | If enabled, a pod monitor for operator's pod will be submitted. Note that prometheus metrics should be enabled as well. |
| podMonitor.jobLabel | string | `"spark-operator-podmonitor"` | The label to use to retrieve the job name from |
| podMonitor.labels | object | `{}` | Pod monitor labels |
| podMonitor.podMetricsEndpoint | object | `{"interval":"5s","scheme":"http"}` | Prometheus metrics endpoint properties. `metrics.portName` will be used as a port |
| podSecurityContext | object | `{}` | Pod security context |
| priorityClassName | string | `""` | A priority class to be used for running spark-operator pod. |
| rbac.annotations | object | `{}` | Optional annotations for rbac |
| rbac.create | bool | `false` | **DEPRECATED** use `createRole` and `createClusterRole` |
| rbac.createClusterRole | bool | `true` | Create and use RBAC `ClusterRole` resources |
| rbac.createRole | bool | `true` | Create and use RBAC `Role` resources |
| replicaCount | int | `1` | Desired number of pods; leaderElection will be enabled if this is greater than 1 |
| resourceQuotaEnforcement.enable | bool | `false` | Whether to enable the ResourceQuota enforcement for SparkApplication resources. Requires the webhook to be enabled by setting `webhook.enable` to true. Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-resource-quota-enforcement. |
| resources | object | `{}` | Pod resource requests and limits. Note that each job submission will spawn a JVM within the Spark Operator pod using "/usr/local/openjdk-11/bin/java -Xmx128m". Kubernetes may kill these Java processes at will to enforce resource limits. When that happens, you will see the following error: 'failed to run spark-submit for SparkApplication [...]: signal: killed'. When this happens, you may want to increase memory limits. |
| resyncInterval | int | `30` | Operator resync interval. Note that the operator will respond to events (e.g. create, update) unrelated to this setting |
| securityContext | object | `{}` | Operator container security context |
| serviceAccounts.spark.annotations | object | `{}` | Optional annotations for the spark service account |
| serviceAccounts.spark.create | bool | `true` | Create a service account for spark apps |
| serviceAccounts.spark.name | string | `""` | Optional name for the spark service account |
| serviceAccounts.sparkoperator.annotations | object | `{}` | Optional annotations for the operator service account |
| serviceAccounts.sparkoperator.create | bool | `true` | Create a service account for the operator |
| serviceAccounts.sparkoperator.name | string | `""` | Optional name for the operator service account |
| sidecars | list | `[]` | Sidecar containers |
| sparkJobNamespaces | list | `[""]` | List of namespaces where to run spark jobs |
| tolerations | list | `[]` | List of node taints to tolerate |
| uiService.enable | bool | `true` | Enable UI service creation for Spark application |
| volumeMounts | list | `[]` | Operator volumeMounts |
| volumes | list | `[]` | Operator volumes |
| webhook.enable | bool | `false` | Enable webhook server |
| webhook.namespaceSelector | string | `""` | The webhook server will only operate on namespaces with this label, specified in the form key1=value1,key2=value2. Empty string (default) will operate on all namespaces |
| webhook.objectSelector | string | `""` | The webhook will only operate on resources with this label/s, specified in the form key1=value1,key2=value2, OR key in (value1,value2). Empty string (default) will operate on all objects |
| webhook.port | int | `8080` | Webhook service port |
| webhook.portName | string | `"webhook"` | Webhook container port name and service target port name |
| webhook.timeout | int | `30` | Webhook timeout in seconds |
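
As a quick illustration of how these values compose, a hypothetical override file enabling the webhook and pinning the job namespace might look like this (file and namespace names are examples, not part of the chart):

```shell
cat > my-values.yaml <<'EOF'
webhook:
  enable: true
sparkJobNamespaces:
  - spark-jobs
logLevel: 3
EOF

helm upgrade --install spark-operator spark-operator/spark-operator \
    --namespace spark-operator \
    --create-namespace \
    -f my-values.yaml
```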

## Maintainers

| Name | Email | Url |
| ---- | ------ | --- |
| yuchaoran2011 | <yuchaoran2011@gmail.com> | |
File diff suppressed because it is too large
spark/base/charts/spark-operator/templates/_helpers.tpl (new file, 79 lines)
@@ -0,0 +1,79 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "spark-operator.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "spark-operator.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "spark-operator.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Common labels
*/}}
{{- define "spark-operator.labels" -}}
helm.sh/chart: {{ include "spark-operator.chart" . }}
{{ include "spark-operator.selectorLabels" . }}
{{- if .Values.commonLabels }}
{{ toYaml .Values.commonLabels }}
{{- end }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}

{{/*
Selector labels
*/}}
{{- define "spark-operator.selectorLabels" -}}
app.kubernetes.io/name: {{ include "spark-operator.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Create the name of the service account to be used by the operator
*/}}
{{- define "spark-operator.serviceAccountName" -}}
{{- if .Values.serviceAccounts.sparkoperator.create -}}
{{ default (include "spark-operator.fullname" .) .Values.serviceAccounts.sparkoperator.name }}
{{- else -}}
{{ default "default" .Values.serviceAccounts.sparkoperator.name }}
{{- end -}}
{{- end -}}

{{/*
Create the name of the service account to be used by spark apps
*/}}
{{- define "spark.serviceAccountName" -}}
{{- if .Values.serviceAccounts.spark.create -}}
{{- $sparkServiceaccount := printf "%s-%s" .Release.Name "spark" -}}
{{ default $sparkServiceaccount .Values.serviceAccounts.spark.name }}
{{- else -}}
{{ default "default" .Values.serviceAccounts.spark.name }}
{{- end -}}
{{- end -}}
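
To see what these helpers actually resolve to for a given release, one option is to render the chart locally; a sketch, assuming the repo alias from the README above:

```shell
# Render all templates for a release named "spark-operator";
# "spark-operator.fullname" collapses to just "spark-operator" here
# because the release name already contains the chart name.
helm template spark-operator spark-operator/spark-operator | head -n 40
```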
spark/base/charts/spark-operator/templates/deployment.yaml (new file, 140 lines)
@@ -0,0 +1,140 @@
# If the admission webhook is enabled, then a post-install step is required
# to generate and install the secret in the operator namespace.

# In the post-install hook, the token corresponding to the operator service account
# is used to authenticate with the Kubernetes API server to install the secret bundle.
{{- $jobNamespaces := .Values.sparkJobNamespaces | default list }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "spark-operator.fullname" . }}
  labels:
    {{- include "spark-operator.labels" . | nindent 4 }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      {{- include "spark-operator.selectorLabels" . | nindent 6 }}
  strategy:
    type: Recreate
  template:
    metadata:
      {{- if or .Values.podAnnotations .Values.metrics.enable }}
      annotations:
        {{- if .Values.metrics.enable }}
        prometheus.io/scrape: "true"
        prometheus.io/port: "{{ .Values.metrics.port }}"
        prometheus.io/path: {{ .Values.metrics.endpoint }}
        {{- end }}
        {{- if .Values.podAnnotations }}
        {{- toYaml .Values.podAnnotations | trim | nindent 8 }}
        {{- end }}
      {{- end }}
      labels:
        {{- include "spark-operator.selectorLabels" . | nindent 8 }}
        {{- with .Values.podLabels }}
        {{- toYaml . | trim | nindent 8 }}
        {{- end }}
    spec:
      serviceAccountName: {{ include "spark-operator.serviceAccountName" . }}
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      containers:
      - name: {{ .Chart.Name }}
        image: {{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}
        imagePullPolicy: {{ .Values.image.pullPolicy }}
        {{- if gt (int .Values.replicaCount) 1 }}
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.name
        {{- end }}
        envFrom:
          {{- toYaml .Values.envFrom | nindent 10 }}
        securityContext:
          {{- toYaml .Values.securityContext | nindent 10 }}
        {{- if or .Values.metrics.enable .Values.webhook.enable }}
        ports:
          {{ if .Values.metrics.enable -}}
          - name: {{ .Values.metrics.portName | quote }}
            containerPort: {{ .Values.metrics.port }}
          {{- end }}
          {{ if .Values.webhook.enable -}}
          - name: {{ .Values.webhook.portName | quote }}
            containerPort: {{ .Values.webhook.port }}
          {{- end }}
        {{ end -}}
        args:
        - -v={{ .Values.logLevel }}
        - -logtostderr
        {{- if eq (len $jobNamespaces) 1 }}
        - -namespace={{ index $jobNamespaces 0 }}
        {{- end }}
        - -enable-ui-service={{ .Values.uiService.enable }}
        - -ingress-url-format={{ .Values.ingressUrlFormat }}
        - -controller-threads={{ .Values.controllerThreads }}
        - -resync-interval={{ .Values.resyncInterval }}
        - -enable-batch-scheduler={{ .Values.batchScheduler.enable }}
        - -label-selector-filter={{ .Values.labelSelectorFilter }}
        {{- if .Values.metrics.enable }}
        - -enable-metrics=true
        - -metrics-labels=app_type
        - -metrics-port={{ .Values.metrics.port }}
        - -metrics-endpoint={{ .Values.metrics.endpoint }}
        - -metrics-prefix={{ .Values.metrics.prefix }}
        {{- end }}
        {{- if .Values.webhook.enable }}
        - -enable-webhook=true
        - -webhook-secret-name={{ include "spark-operator.webhookSecretName" . }}
        - -webhook-secret-namespace={{ .Release.Namespace }}
        - -webhook-svc-name={{ include "spark-operator.webhookServiceName" . }}
        - -webhook-svc-namespace={{ .Release.Namespace }}
        - -webhook-config-name={{ include "spark-operator.fullname" . }}-webhook-config
        - -webhook-port={{ .Values.webhook.port }}
        - -webhook-timeout={{ .Values.webhook.timeout }}
        - -webhook-namespace-selector={{ .Values.webhook.namespaceSelector }}
        - -webhook-object-selector={{ .Values.webhook.objectSelector }}
        {{- end }}
        - -enable-resource-quota-enforcement={{ .Values.resourceQuotaEnforcement.enable }}
        {{- if gt (int .Values.replicaCount) 1 }}
        - -leader-election=true
        - -leader-election-lock-namespace={{ default .Release.Namespace .Values.leaderElection.lockNamespace }}
        - -leader-election-lock-name={{ .Values.leaderElection.lockName }}
        {{- end }}
        {{- with .Values.resources }}
        resources:
          {{- toYaml . | nindent 10 }}
        {{- end }}
        {{- with .Values.volumeMounts }}
        volumeMounts:
          {{- toYaml . | nindent 10 }}
        {{- end }}
      {{- with .Values.sidecars }}
      {{- toYaml . | nindent 6 }}
      {{- end }}
      {{- with .Values.volumes }}
      volumes:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- if .Values.priorityClassName }}
      priorityClassName: {{ .Values.priorityClassName }}
      {{- end }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
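
A quick way to sanity-check the conditionals above (leader-election args only appear when replicaCount is greater than 1, webhook args only when webhook.enable is true) is to render the template with different value combinations; a sketch using standard helm flags:

```shell
# Leader-election flags should appear only with more than one replica
helm template spark-operator spark-operator/spark-operator \
    --set replicaCount=2 --show-only templates/deployment.yaml | grep leader-election

# Webhook flags should appear only when the webhook is enabled
helm template spark-operator spark-operator/spark-operator \
    --set webhook.enable=true --show-only templates/deployment.yaml | grep webhook
```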
@@ -0,0 +1,19 @@
{{ if and .Values.metrics.enable .Values.podMonitor.enable }}
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
  name: {{ include "spark-operator.name" . -}}-podmonitor
  labels: {{ toYaml .Values.podMonitor.labels | nindent 4 }}
spec:
  podMetricsEndpoints:
    - interval: {{ .Values.podMonitor.podMetricsEndpoint.interval }}
      port: {{ .Values.metrics.portName | quote }}
      scheme: {{ .Values.podMonitor.podMetricsEndpoint.scheme }}
  jobLabel: {{ .Values.podMonitor.jobLabel }}
  namespaceSelector:
    matchNames:
      - {{ .Release.Namespace }}
  selector:
    matchLabels:
      {{- include "spark-operator.selectorLabels" . | nindent 6 }}
{{ end }}
spark/base/charts/spark-operator/templates/rbac.yaml (new file, 148 lines)
@@ -0,0 +1,148 @@
{{- if or .Values.rbac.create .Values.rbac.createClusterRole -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ include "spark-operator.fullname" . }}
  labels:
    {{- include "spark-operator.labels" . | nindent 4 }}
  {{- with .Values.rbac.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - persistentvolumeclaims
  verbs:
  - "*"
- apiGroups:
  - ""
  resources:
  - services
  - configmaps
  - secrets
  verbs:
  - create
  - get
  - delete
  - update
  - patch
- apiGroups:
  - extensions
  - networking.k8s.io
  resources:
  - ingresses
  verbs:
  - create
  - get
  - delete
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - update
  - patch
- apiGroups:
  - ""
  resources:
  - resourcequotas
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - apiextensions.k8s.io
  resources:
  - customresourcedefinitions
  verbs:
  - get
- apiGroups:
  - admissionregistration.k8s.io
  resources:
  - mutatingwebhookconfigurations
  - validatingwebhookconfigurations
  verbs:
  - create
  - get
  - update
  - delete
- apiGroups:
  - sparkoperator.k8s.io
  resources:
  - sparkapplications
  - sparkapplications/status
  - sparkapplications/finalizers
  - scheduledsparkapplications
  - scheduledsparkapplications/status
  - scheduledsparkapplications/finalizers
  verbs:
  - "*"
{{- if .Values.batchScheduler.enable }}
# required for the `volcano` batch scheduler
- apiGroups:
  - scheduling.incubator.k8s.io
  - scheduling.sigs.dev
  - scheduling.volcano.sh
  resources:
  - podgroups
  verbs:
  - "*"
{{- end }}
{{ if .Values.webhook.enable }}
- apiGroups:
  - batch
  resources:
  - jobs
  verbs:
  - delete
{{- end }}
{{- if gt (int .Values.replicaCount) 1 }}
- apiGroups:
  - coordination.k8s.io
  resources:
  - leases
  resourceNames:
  - {{ .Values.leaderElection.lockName }}
  verbs:
  - get
  - update
  - patch
  - delete
- apiGroups:
  - coordination.k8s.io
  resources:
  - leases
  verbs:
  - create
{{- end }}
---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ include "spark-operator.fullname" . }}
  labels:
    {{- include "spark-operator.labels" . | nindent 4 }}
  {{- with .Values.rbac.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
subjects:
- kind: ServiceAccount
  name: {{ include "spark-operator.serviceAccountName" . }}
  namespace: {{ .Release.Namespace }}
roleRef:
  kind: ClusterRole
  name: {{ include "spark-operator.fullname" . }}
  apiGroup: rbac.authorization.k8s.io
{{- end }}
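
One way to verify the resulting permissions after installation is `kubectl auth can-i` while impersonating the operator's service account; a sketch, assuming a release named `spark-operator` installed in the `spark-operator` namespace:

```shell
# Should print "yes": pods are granted verbs "*" by the ClusterRole
kubectl auth can-i create pods \
    --as=system:serviceaccount:spark-operator:spark-operator

# Should print "no" unless webhook.enable added the batch/jobs delete rule
kubectl auth can-i delete jobs.batch \
    --as=system:serviceaccount:spark-operator:spark-operator
```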
@@ -0,0 +1,12 @@
{{- if .Values.serviceAccounts.sparkoperator.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "spark-operator.serviceAccountName" . }}
  labels:
    {{- include "spark-operator.labels" . | nindent 4 }}
  {{- with .Values.serviceAccounts.sparkoperator.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
{{- end }}
spark/base/charts/spark-operator/templates/spark-rbac.yaml (new file, 39 lines)
@@ -0,0 +1,39 @@
{{- if or .Values.rbac.create .Values.rbac.createRole }}
{{- $jobNamespaces := .Values.sparkJobNamespaces | default list }}
{{- range $jobNamespace := $jobNamespaces }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: spark-role
  namespace: {{ $jobNamespace }}
  labels:
    {{- include "spark-operator.labels" $ | nindent 4 }}
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - services
  - configmaps
  - persistentvolumeclaims
  verbs:
  - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: spark
  namespace: {{ $jobNamespace }}
  labels:
    {{- include "spark-operator.labels" $ | nindent 4 }}
subjects:
- kind: ServiceAccount
  name: {{ include "spark.serviceAccountName" $ }}
  namespace: {{ $jobNamespace }}
roleRef:
  kind: Role
  name: spark-role
  apiGroup: rbac.authorization.k8s.io
{{- end }}
{{- end }}
@@ -0,0 +1,14 @@
{{- if .Values.serviceAccounts.spark.create }}
{{- range $sparkJobNamespace := .Values.sparkJobNamespaces | default (list .Release.Namespace) }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "spark.serviceAccountName" $ }}
  namespace: {{ $sparkJobNamespace }}
  {{- with $.Values.serviceAccounts.spark.annotations }}
  annotations: {{ toYaml . | nindent 4 }}
  {{- end }}
  labels: {{ include "spark-operator.labels" $ | nindent 4 }}
{{- end }}
{{- end }}
@@ -0,0 +1,14 @@
{{/*
Create the name of the secret to be used by webhook
*/}}
{{- define "spark-operator.webhookSecretName" -}}
{{ include "spark-operator.fullname" . }}-webhook-certs
{{- end -}}


{{/*
Create the name of the service to be used by webhook
*/}}
{{- define "spark-operator.webhookServiceName" -}}
{{ include "spark-operator.fullname" . }}-webhook-svc
{{- end -}}
@@ -0,0 +1,13 @@
{{- if .Values.webhook.enable -}}
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "spark-operator.webhookSecretName" . }}
  labels:
    {{- include "spark-operator.labels" . | nindent 4 }}
data:
  ca-key.pem: ""
  ca-cert.pem: ""
  server-key.pem: ""
  server-cert.pem: ""
{{- end }}
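
The secret is created with empty fields on purpose; per the comment in deployment.yaml, the certificate bundle is generated and written into it after installation. A rough check that it was populated, assuming the `spark-operator` namespace and the default fullname:

```shell
kubectl -n spark-operator get secret spark-operator-webhook-certs \
    -o jsonpath='{.data}' | head -c 200
```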
@@ -0,0 +1,15 @@
{{- if .Values.webhook.enable -}}
apiVersion: v1
kind: Service
metadata:
  name: {{ include "spark-operator.webhookServiceName" . }}
  labels:
    {{- include "spark-operator.labels" . | nindent 4 }}
spec:
  selector:
    {{- include "spark-operator.selectorLabels" . | nindent 4 }}
  ports:
  - port: 443
    targetPort: {{ .Values.webhook.portName | quote }}
    name: {{ .Values.webhook.portName }}
{{- end }}
spark/base/charts/spark-operator/tests/deployment_test.yaml (new file, 301 lines)
@@ -0,0 +1,301 @@
suite: Test spark operator deployment

templates:
  - deployment.yaml

release:
  name: spark-operator

tests:
  - it: Should contain namespace arg when sparkJobNamespaces contains exactly one namespace
    set:
      sparkJobNamespaces:
        - ns1
    asserts:
      - contains:
          path: spec.template.spec.containers[0].args
          content: -namespace=ns1

  - it: Should add pod annotations if podAnnotations is set
    set:
      podAnnotations:
        key1: value1
        key2: value2
    asserts:
      - equal:
          path: spec.template.metadata.annotations.key1
          value: value1
      - equal:
          path: spec.template.metadata.annotations.key2
          value: value2

  - it: Should add prometheus annotations if metrics.enable is true
    set:
      metrics:
        enable: true
        port: 10254
        endpoint: /metrics
    asserts:
      - equal:
          path: spec.template.metadata.annotations["prometheus.io/scrape"]
          value: "true"
      - equal:
          path: spec.template.metadata.annotations["prometheus.io/port"]
          value: "10254"
      - equal:
          path: spec.template.metadata.annotations["prometheus.io/path"]
          value: /metrics

  - it: Should add secrets if imagePullSecrets is set
    set:
      imagePullSecrets:
        - name: test-secret1
        - name: test-secret2
    asserts:
      - equal:
          path: spec.template.spec.imagePullSecrets[0].name
          value: test-secret1
      - equal:
          path: spec.template.spec.imagePullSecrets[1].name
          value: test-secret2

  - it: Should add pod securityContext if podSecurityContext is set
    set:
      podSecurityContext:
        runAsUser: 1000
        runAsGroup: 2000
        fsGroup: 3000
    asserts:
      - equal:
          path: spec.template.spec.securityContext.runAsUser
          value: 1000
      - equal:
          path: spec.template.spec.securityContext.runAsGroup
          value: 2000
      - equal:
          path: spec.template.spec.securityContext.fsGroup
          value: 3000

  - it: Should use the specified image repository if image.repository and image.tag is set
    set:
      image:
        repository: test-repository
        tag: test-tag
    asserts:
      - equal:
          path: spec.template.spec.containers[0].image
          value: test-repository:test-tag

  - it: Should use the specified image pull policy if image.pullPolicy is set
    set:
      image:
        pullPolicy: Always
    asserts:
      - equal:
          path: spec.template.spec.containers[0].imagePullPolicy
          value: Always

  - it: Should add container securityContext if securityContext is set
    set:
      securityContext:
        runAsUser: 1000
        runAsGroup: 2000
        fsGroup: 3000
    asserts:
      - equal:
          path: spec.template.spec.containers[0].securityContext.runAsUser
          value: 1000
      - equal:
          path: spec.template.spec.containers[0].securityContext.runAsGroup
          value: 2000
      - equal:
          path: spec.template.spec.containers[0].securityContext.fsGroup
          value: 3000

  - it: Should add metric ports if metrics.enable is true
    set:
      metrics:
        enable: true
        port: 10254
        portName: metrics
    asserts:
      - contains:
          path: spec.template.spec.containers[0].ports
          content:
            name: metrics
            containerPort: 10254
          count: 1

  - it: Should add webhook ports if webhook.enable is true
    set:
      webhook:
        enable: true
        port: 8080
        portName: webhook
    asserts:
      - contains:
          path: spec.template.spec.containers[0].ports
          content:
            name: webhook
            containerPort: 8080
          count: 1

  - it: Should add resources if resources is set
    set:
      resources:
        requests:
          memory: "64Mi"
          cpu: "250m"
        limits:
          memory: "128Mi"
          cpu: "500m"
    asserts:
      - equal:
          path: spec.template.spec.containers[0].resources
          value:
            requests:
              memory: "64Mi"
              cpu: "250m"
            limits:
              memory: "128Mi"
              cpu: "500m"

  - it: Should add sidecars if sidecars is set
    set:
      sidecars:
        - name: sidecar1
          image: sidecar-image1
        - name: sidecar2
          image: sidecar-image2
    asserts:
      - contains:
          path: spec.template.spec.containers
          content:
            name: sidecar1
            image: sidecar-image1
          count: 1
      - contains:
          path: spec.template.spec.containers
          content:
            name: sidecar2
            image: sidecar-image2
          count: 1

  - it: Should add volumes if volumes is set
    set:
      volumes:
        - name: volume1
          emptyDir: {}
        - name: volume2
          emptyDir: {}
    asserts:
      - contains:
          path: spec.template.spec.volumes
          content:
            name: volume1
            emptyDir: {}
          count: 1
      - contains:
          path: spec.template.spec.volumes
          content:
            name: volume2
            emptyDir: {}
          count: 1

  - it: Should add volume mounts if volumeMounts is set
    set:
      volumeMounts:
        - name: volume1
          mountPath: /volume1
        - name: volume2
          mountPath: /volume2
    asserts:
      - contains:
          path: spec.template.spec.containers[0].volumeMounts
          content:
            name: volume1
            mountPath: /volume1
          count: 1
      - contains:
          path: spec.template.spec.containers[0].volumeMounts
          content:
            name: volume2
            mountPath: /volume2
          count: 1

  - it: Should add nodeSelector if nodeSelector is set
    set:
      nodeSelector:
        key1: value1
        key2: value2
    asserts:
      - equal:
          path: spec.template.spec.nodeSelector.key1
          value: value1
      - equal:
          path: spec.template.spec.nodeSelector.key2
          value: value2

  - it: Should add affinity if affinity is set
    set:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: topology.kubernetes.io/zone
                    operator: In
                    values:
                      - antarctica-east1
                      - antarctica-west1
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 1
              preference:
                matchExpressions:
                  - key: another-node-label-key
                    operator: In
                    values:
                      - another-node-label-value
    asserts:
      - equal:
          path: spec.template.spec.affinity
          value:
            nodeAffinity:
              requiredDuringSchedulingIgnoredDuringExecution:
                nodeSelectorTerms:
                  - matchExpressions:
                      - key: topology.kubernetes.io/zone
                        operator: In
                        values:
                          - antarctica-east1
                          - antarctica-west1
              preferredDuringSchedulingIgnoredDuringExecution:
                - weight: 1
                  preference:
                    matchExpressions:
                      - key: another-node-label-key
                        operator: In
                        values:
                          - another-node-label-value

  - it: Should add tolerations if tolerations is set
    set:
      tolerations:
        - key: key1
          operator: Equal
          value: value1
          effect: NoSchedule
        - key: key2
          operator: Exists
          effect: NoSchedule
    asserts:
      - equal:
          path: spec.template.spec.tolerations
          value:
            - key: key1
              operator: Equal
              value: value1
              effect: NoSchedule
            - key: key2
              operator: Exists
              effect: NoSchedule
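
These suites use the helm-unittest plugin (note the `# helm-unittest` entries in the .helmignore above). A sketch of running them locally, assuming the plugin is not yet installed:

```shell
# Install the plugin once, then run every suite under tests/
helm plugin install https://github.com/helm-unittest/helm-unittest
helm unittest spark/base/charts/spark-operator
```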
spark/base/charts/spark-operator/tests/rbac_test.yaml (new file, 90 lines)
@@ -0,0 +1,90 @@
suite: Test spark operator rbac

templates:
  - rbac.yaml

release:
  name: spark-operator

tests:
  - it: Should not render spark operator rbac resources if rbac.create is false and rbac.createClusterRole is false
    set:
      rbac:
        create: false
        createClusterRole: false
    asserts:
      - hasDocuments:
          count: 0

  - it: Should render spark operator cluster role if rbac.create is true
    set:
      rbac:
        create: true
    documentIndex: 0
    asserts:
      - containsDocument:
          apiVersion: rbac.authorization.k8s.io/v1
          kind: ClusterRole
          name: spark-operator

  - it: Should render spark operator cluster role if rbac.createClusterRole is true
    set:
      rbac:
        createClusterRole: true
    documentIndex: 0
    asserts:
      - containsDocument:
          apiVersion: rbac.authorization.k8s.io/v1
          kind: ClusterRole
          name: spark-operator

  - it: Should render spark operator cluster role binding if rbac.create is true
    set:
      rbac:
        create: true
    documentIndex: 1
    asserts:
      - containsDocument:
          apiVersion: rbac.authorization.k8s.io/v1
          kind: ClusterRoleBinding
          name: spark-operator

  - it: Should render spark operator cluster role binding correctly if rbac.createClusterRole is true
    set:
      rbac:
        createClusterRole: true
    documentIndex: 1
    asserts:
      - containsDocument:
          apiVersion: rbac.authorization.k8s.io/v1
          kind: ClusterRoleBinding
          name: spark-operator
      - contains:
          path: subjects
          content:
            kind: ServiceAccount
            name: spark-operator
            namespace: NAMESPACE
          count: 1
      - equal:
          path: roleRef
          value:
            kind: ClusterRole
            name: spark-operator
            apiGroup: rbac.authorization.k8s.io

  - it: Should add extra annotations to spark operator cluster role if rbac.annotations is set
    set:
      rbac:
        annotations:
          key1: value1
          key2: value2
    documentIndex: 0
    asserts:
      - equal:
          path: metadata.annotations.key1
          value: value1
      - equal:
          path: metadata.annotations.key2
          value: value2
@@ -0,0 +1,54 @@
suite: Test spark operator service account

templates:
  - serviceaccount.yaml

release:
  name: spark-operator

tests:
  - it: Should not render service account if serviceAccounts.sparkoperator.create is false
    set:
      serviceAccounts:
        sparkoperator:
          create: false
    asserts:
      - hasDocuments:
          count: 0

  - it: Should render service account if serviceAccounts.sparkoperator.create is true
    set:
      serviceAccounts:
        sparkoperator:
          create: true
    asserts:
      - containsDocument:
          apiVersion: v1
          kind: ServiceAccount
          name: spark-operator

  - it: Should use the specified service account name if serviceAccounts.sparkoperator.name is set
    set:
      serviceAccounts:
        sparkoperator:
          name: custom-service-account
    asserts:
      - containsDocument:
          apiVersion: v1
          kind: ServiceAccount
          name: custom-service-account

  - it: Should add extra annotations if serviceAccounts.sparkoperator.annotations is set
    set:
      serviceAccounts:
        sparkoperator:
          annotations:
            key1: value1
            key2: value2
    asserts:
      - equal:
          path: metadata.annotations.key1
          value: value1
      - equal:
          path: metadata.annotations.key2
          value: value2
spark/base/charts/spark-operator/tests/spark-rbac_test.yaml (new file, 133 lines)
@@ -0,0 +1,133 @@
suite: Test spark rbac

templates:
  - spark-rbac.yaml

release:
  name: spark-operator

tests:
  - it: Should not render spark rbac resources if rbac.create is false and rbac.createRole is false
    set:
      rbac:
        create: false
        createRole: false
    asserts:
      - hasDocuments:
          count: 0

  - it: Should render spark role if rbac.create is true
    set:
      rbac:
        create: true
    documentIndex: 0
    asserts:
      - containsDocument:
          apiVersion: rbac.authorization.k8s.io/v1
          kind: Role
          name: spark-role

  - it: Should render spark role if rbac.createRole is true
    set:
      rbac:
        createRole: true
    documentIndex: 0
    asserts:
      - containsDocument:
          apiVersion: rbac.authorization.k8s.io/v1
          kind: Role
          name: spark-role

  - it: Should render spark role binding if rbac.create is true
    set:
      rbac:
        create: true
    documentIndex: 1
    asserts:
      - containsDocument:
          apiVersion: rbac.authorization.k8s.io/v1
          kind: RoleBinding
          name: spark

  - it: Should render spark role binding if rbac.createRole is true
    set:
      rbac:
        createRole: true
    documentIndex: 1
    asserts:
      - containsDocument:
          apiVersion: rbac.authorization.k8s.io/v1
          kind: RoleBinding
          name: spark

  - it: Should create a single spark role with namespace "" by default
    documentIndex: 0
    asserts:
      - containsDocument:
          apiVersion: rbac.authorization.k8s.io/v1
          kind: Role
          name: spark-role
          namespace: ""

  - it: Should create a single spark role binding with namespace "" by default
    values:
      - ../values.yaml
    documentIndex: 1
    asserts:
      - containsDocument:
          apiVersion: rbac.authorization.k8s.io/v1
          kind: RoleBinding
          name: spark
          namespace: ""

  - it: Should render multiple spark roles if sparkJobNamespaces is set with multiple values
    set:
      sparkJobNamespaces:
        - ns1
        - ns2
    documentIndex: 0
    asserts:
      - containsDocument:
          apiVersion: rbac.authorization.k8s.io/v1
          kind: Role
          name: spark-role
          namespace: ns1

  - it: Should render multiple spark role bindings if sparkJobNamespaces is set with multiple values
    set:
      sparkJobNamespaces:
        - ns1
        - ns2
    documentIndex: 1
    asserts:
      - containsDocument:
          apiVersion: rbac.authorization.k8s.io/v1
          kind: RoleBinding
          name: spark
          namespace: ns1

  - it: Should render multiple spark roles if sparkJobNamespaces is set with multiple values
    set:
      sparkJobNamespaces:
        - ns1
        - ns2
    documentIndex: 2
    asserts:
      - containsDocument:
          apiVersion: rbac.authorization.k8s.io/v1
          kind: Role
          name: spark-role
          namespace: ns2

  - it: Should render multiple spark role bindings if sparkJobNamespaces is set with multiple values
    set:
      sparkJobNamespaces:
        - ns1
        - ns2
    documentIndex: 3
    asserts:
      - containsDocument:
          apiVersion: rbac.authorization.k8s.io/v1
          kind: RoleBinding
          name: spark
          namespace: ns2
@@ -0,0 +1,112 @@
suite: Test spark service account

templates:
  - spark-serviceaccount.yaml

release:
  name: spark-operator

tests:
  - it: Should not render service account if serviceAccounts.spark.create is false
    set:
      serviceAccounts:
        spark:
          create: false
    asserts:
      - hasDocuments:
          count: 0

  - it: Should render service account if serviceAccounts.spark.create is true
    set:
      serviceAccounts:
        spark:
          create: true
    asserts:
      - containsDocument:
          apiVersion: v1
          kind: ServiceAccount
          name: spark-operator-spark

  - it: Should use the specified service account name if serviceAccounts.spark.name is set
    set:
      serviceAccounts:
        spark:
          name: spark
    asserts:
      - containsDocument:
          apiVersion: v1
          kind: ServiceAccount
          name: spark

  - it: Should add extra annotations if serviceAccounts.spark.annotations is set
    set:
      serviceAccounts:
        spark:
          annotations:
            key1: value1
            key2: value2
    asserts:
      - equal:
          path: metadata.annotations.key1
          value: value1
      - equal:
          path: metadata.annotations.key2
          value: value2

  - it: Should create multiple service accounts if sparkJobNamespaces is set
    set:
      serviceAccounts:
        spark:
          name: spark
      sparkJobNamespaces:
        - ns1
        - ns2
        - ns3
    documentIndex: 0
    asserts:
      - hasDocuments:
          count: 3
      - containsDocument:
          apiVersion: v1
          kind: ServiceAccount
          name: spark
          namespace: ns1

  - it: Should create multiple service accounts if sparkJobNamespaces is set
    set:
      serviceAccounts:
        spark:
          name: spark
      sparkJobNamespaces:
        - ns1
        - ns2
        - ns3
    documentIndex: 1
    asserts:
      - hasDocuments:
          count: 3
      - containsDocument:
          apiVersion: v1
          kind: ServiceAccount
          name: spark
          namespace: ns2

  - it: Should create multiple service accounts if sparkJobNamespaces is set
    set:
      serviceAccounts:
        spark:
          name: spark
      sparkJobNamespaces:
        - ns1
        - ns2
        - ns3
    documentIndex: 2
    asserts:
      - hasDocuments:
          count: 3
      - containsDocument:
          apiVersion: v1
          kind: ServiceAccount
          name: spark
          namespace: ns3
@@ -0,0 +1,31 @@
suite: Test spark operator webhook secret

templates:
  - webhook/secret.yaml

release:
  name: spark-operator
  namespace: spark-operator

tests:
  - it: Should not render the webhook secret if webhook.enable is false
    asserts:
      - hasDocuments:
          count: 0

  - it: Should render the webhook secret with empty data fields
    set:
      webhook:
        enable: true
    asserts:
      - containsDocument:
          apiVersion: v1
          kind: Secret
          name: spark-operator-webhook-certs
      - equal:
          path: data
          value:
            ca-key.pem: ""
            ca-cert.pem: ""
            server-key.pem: ""
            server-cert.pem: ""
@@ -0,0 +1,33 @@
suite: Test spark operator webhook service

templates:
  - webhook/service.yaml

release:
  name: spark-operator

tests:
  - it: Should not render the webhook service if webhook.enable is false
    set:
      webhook:
        enable: false
    asserts:
      - hasDocuments:
          count: 0

  - it: Should render the webhook service correctly if webhook.enable is true
    set:
      webhook:
        enable: true
        portName: webhook
    asserts:
      - containsDocument:
          apiVersion: v1
          kind: Service
          name: spark-operator-webhook-svc
      - equal:
          path: spec.ports[0]
          value:
            port: 443
            targetPort: webhook
            name: webhook
spark/base/charts/spark-operator/values.yaml (new file, 189 lines)
@@ -0,0 +1,189 @@
# Default values for spark-operator.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

# -- Common labels to add to the resources
commonLabels: {}

# replicaCount -- Desired number of pods, leaderElection will be enabled
# if this is greater than 1
replicaCount: 1

image:
  # -- Image repository
  repository: docker.io/kubeflow/spark-operator
  # -- Image pull policy
  pullPolicy: IfNotPresent
  # -- If set, override the image tag whose default is the chart appVersion.
  tag: ""

# -- Image pull secrets
imagePullSecrets: []

# -- String to partially override `spark-operator.fullname` template (will maintain the release name)
nameOverride: ""

# -- String to override release name
fullnameOverride: ""

rbac:
  # -- **DEPRECATED** use `createRole` and `createClusterRole`
  create: false
  # -- Create and use RBAC `Role` resources
  createRole: true
  # -- Create and use RBAC `ClusterRole` resources
  createClusterRole: true
  # -- Optional annotations for rbac
  annotations: {}

serviceAccounts:
  spark:
    # -- Create a service account for spark apps
    create: true
    # -- Optional name for the spark service account
    name: ""
    # -- Optional annotations for the spark service account
    annotations: {}
  sparkoperator:
    # -- Create a service account for the operator
    create: true
    # -- Optional name for the operator service account
    name: ""
    # -- Optional annotations for the operator service account
    annotations: {}

# -- List of namespaces where to run spark jobs
sparkJobNamespaces:
  - ""
  # - ns1

# -- Operator concurrency, higher values might increase memory usage
controllerThreads: 10

# -- Operator resync interval. Note that the operator will respond to events (e.g. create, update)
# unrelated to this setting
resyncInterval: 30

uiService:
  # -- Enable UI service creation for Spark application
  enable: true

# -- Ingress URL format.
# Requires the UI service to be enabled by setting `uiService.enable` to true.
ingressUrlFormat: ""

# -- Set higher levels for more verbose logging
logLevel: 2

# -- Pod environment variable sources
envFrom: []

# podSecurityContext -- Pod security context
podSecurityContext: {}

# securityContext -- Operator container security context
securityContext: {}

# sidecars -- Sidecar containers
sidecars: []

# volumes - Operator volumes
volumes: []

# volumeMounts - Operator volumeMounts
volumeMounts: []

webhook:
  # -- Enable webhook server
  enable: false
  # -- Webhook service port
  port: 8080
  # -- Webhook container port name and service target port name
  portName: webhook
  # -- The webhook server will only operate on namespaces with this label, specified in the form key1=value1,key2=value2.
  # Empty string (default) will operate on all namespaces
  namespaceSelector: ""
  # -- The webhook will only operate on resources with this label/s, specified in the form key1=value1,key2=value2, OR key in (value1,value2).
  # Empty string (default) will operate on all objects
  objectSelector: ""
  # -- Webhook timeout in seconds
  timeout: 30

metrics:
  # -- Enable prometheus metric scraping
  enable: true
  # -- Metrics port
  port: 10254
  # -- Metrics port name
  portName: metrics
  # -- Metrics serving endpoint
  endpoint: /metrics
  # -- Metric prefix, will be added to all exported metrics
  prefix: ""

# -- Prometheus pod monitor for operator's pod.
podMonitor:
  # -- If enabled, a pod monitor for operator's pod will be submitted. Note that prometheus metrics should be enabled as well.
  enable: false
  # -- Pod monitor labels
  labels: {}
  # -- The label to use to retrieve the job name from
  jobLabel: spark-operator-podmonitor
  # -- Prometheus metrics endpoint properties. `metrics.portName` will be used as a port
  podMetricsEndpoint:
    scheme: http
    interval: 5s

# nodeSelector -- Node labels for pod assignment
nodeSelector: {}

# tolerations -- List of node taints to tolerate
tolerations: []

# affinity -- Affinity for pod assignment
affinity: {}

# podAnnotations -- Additional annotations to add to the pod
podAnnotations: {}

# podLabels -- Additional labels to add to the pod
|
||||||
|
podLabels: {}
|
||||||
|
|
||||||
|
# resources -- Pod resource requests and limits
|
||||||
|
# Note, that each job submission will spawn a JVM within the Spark Operator Pod using "/usr/local/openjdk-11/bin/java -Xmx128m".
|
||||||
|
# Kubernetes may kill these Java processes at will to enforce resource limits. When that happens, you will see the following error:
|
||||||
|
# 'failed to run spark-submit for SparkApplication [...]: signal: killed' - when this happens, you may want to increase memory limits.
|
||||||
|
resources: {}
|
||||||
|
# limits:
|
||||||
|
# cpu: 100m
|
||||||
|
# memory: 300Mi
|
||||||
|
# requests:
|
||||||
|
# cpu: 100m
|
||||||
|
# memory: 300Mi
|
||||||
|
|
||||||
|
batchScheduler:
|
||||||
|
# -- Enable batch scheduler for spark jobs scheduling. If enabled, users can specify batch scheduler name in spark application
|
||||||
|
enable: false
|
||||||
|
|
||||||
|
resourceQuotaEnforcement:
|
||||||
|
# -- Whether to enable the ResourceQuota enforcement for SparkApplication resources.
|
||||||
|
# Requires the webhook to be enabled by setting `webhook.enable` to true.
|
||||||
|
# Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-resource-quota-enforcement.
|
||||||
|
enable: false
|
||||||
|
|
||||||
|
leaderElection:
|
||||||
|
# -- Leader election lock name.
|
||||||
|
# Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-leader-election-for-high-availability.
|
||||||
|
lockName: "spark-operator-lock"
|
||||||
|
# -- Optionally store the lock in another namespace. Defaults to operator's namespace
|
||||||
|
lockNamespace: ""
|
||||||
|
|
||||||
|
istio:
|
||||||
|
# -- When using `istio`, spark jobs need to run without a sidecar to properly terminate
|
||||||
|
enabled: false
|
||||||
|
|
||||||
|
# labelSelectorFilter -- A comma-separated list of key=value, or key labels to filter resources during watch and list based on the specified labels.
|
||||||
|
labelSelectorFilter: ""
|
||||||
|
|
||||||
|
# priorityClassName -- A priority class to be used for running spark-operator pod.
|
||||||
|
priorityClassName: ""
|
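Once the operator is installed with these values it reconciles `SparkApplication` objects. A minimal sketch of one follows, assuming the `spark` namespace and release name used in the test overlay further below; the image, main class, jar path, and service-account name are illustrative assumptions, not part of this commit:

```yaml
apiVersion: sparkoperator.k8s.io/v1beta2
kind: SparkApplication
metadata:
  name: spark-pi              # hypothetical example application
  namespace: spark
spec:
  type: Scala
  mode: cluster
  image: docker.io/library/spark:3.5.0   # assumed image matching the chart's Spark 3.5.0 appVersion
  mainClass: org.apache.spark.examples.SparkPi
  mainApplicationFile: local:///opt/spark/examples/jars/spark-examples_2.12-3.5.0.jar
  sparkVersion: "3.5.0"
  driver:
    cores: 1
    memory: 512m
    serviceAccount: spark-spark   # assumed; the default name created by serviceAccounts.spark for release "spark"
  executor:
    instances: 1
    cores: 1
    memory: 512m
```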
12
spark/base/kustomization.yaml
Normal file
@@ -0,0 +1,12 @@
---
resources:
  - spark-master-controller.yml
  - spark-master-service.yml
  - spark-ui-proxy-controller.yml
  - spark-ui-proxy-ingress.yml
  - spark-ui-proxy-service.yml
  - spark-worker-controller.yml
  - spark-cert-issuer.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
15
spark/base/spark-cert-issuer.yaml
Normal file
@@ -0,0 +1,15 @@
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
  name: letsencrypt
spec:
  acme:
    server: https://acme-v02.api.letsencrypt.org/directory
    email: drive@sunet.se
    privateKeySecretRef:
      name: letsencrypt
    solvers:
      - http01:
          ingress:
            class: nginx
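Note that this is a namespaced `Issuer` rather than a `ClusterIssuer`, so it can only issue certificates for resources in the namespace it is applied to; the ingress manifests below reference it through the `cert-manager.io/issuer` annotation.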
23
spark/base/spark-master-controller.yml
Normal file
@@ -0,0 +1,23 @@
kind: ReplicationController
apiVersion: v1
metadata:
  name: spark-master-controller
spec:
  replicas: 1
  selector:
    component: spark-master
  template:
    metadata:
      labels:
        component: spark-master
    spec:
      containers:
        - name: spark-master
          image: registry.k8s.io/spark:1.5.2_v1
          command: ["/start-master"]
          ports:
            - containerPort: 7077
            - containerPort: 8080
          resources:
            requests:
              cpu: 100m
14
spark/base/spark-master-service.yml
Normal file
@@ -0,0 +1,14 @@
kind: Service
apiVersion: v1
metadata:
  name: spark-master
spec:
  ports:
    - port: 7077
      targetPort: 7077
      name: spark
    - port: 8080
      targetPort: 8080
      name: http
  selector:
    component: spark-master
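Workers connect to the master through this Service (the usual `spark://spark-master:7077` URL), while port 8080 exposes the master web UI that the proxy below fronts.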
29
spark/base/spark-ui-proxy-controller.yml
Normal file
@@ -0,0 +1,29 @@
kind: ReplicationController
apiVersion: v1
metadata:
  name: spark-ui-proxy-controller
spec:
  replicas: 1
  selector:
    component: spark-ui-proxy
  template:
    metadata:
      labels:
        component: spark-ui-proxy
    spec:
      containers:
        - name: spark-ui-proxy
          image: iguaziodocker/spark-ui-proxy:0.1.0
          ports:
            - containerPort: 80
          resources:
            requests:
              cpu: 100m
          args:
            - spark-master:8080
          livenessProbe:
            httpGet:
              path: /
              port: 80
            initialDelaySeconds: 120
            timeoutSeconds: 5
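The proxy's single argument points it at the `spark-master:8080` Service address defined above, and it serves the consolidated UI on container port 80, which the Service and Ingress below expose.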
32
spark/base/spark-ui-proxy-ingress.yml
Normal file
@@ -0,0 +1,32 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: spark-ui-proxy-ingress
  annotations:
    kubernetes.io/ingress.class: nginx
    nginx.ingress.kubernetes.io/auth-type: basic
    nginx.ingress.kubernetes.io/auth-secret: htpasswd
    nginx.ingress.kubernetes.io/auth-realm: Enter your credentials
    cert-manager.io/issuer: "letsencrypt"
spec:
  ingressClassName: nginx
  defaultBackend:
    service:
      name: spark-ui-proxy
      port:
        number: 80
  tls:
    - hosts: [spark-test.streams.sunet.se]
      secretName: tls-secret
  rules:
    - host: spark-test.streams.sunet.se
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: spark-ui-proxy
                port:
                  number: 80
12
spark/base/spark-ui-proxy-service.yml
Normal file
@@ -0,0 +1,12 @@
---
kind: Service
apiVersion: v1
metadata:
  name: spark-ui-proxy
spec:
  ports:
    - port: 80
      targetPort: 80
  selector:
    component: spark-ui-proxy
  type: LoadBalancer
23
spark/base/spark-worker-controller.yml
Normal file
@@ -0,0 +1,23 @@
kind: ReplicationController
apiVersion: v1
metadata:
  name: spark-worker-controller
spec:
  replicas: 2
  selector:
    component: spark-worker
  template:
    metadata:
      labels:
        component: spark-worker
    spec:
      containers:
        - name: spark-worker
          image: registry.k8s.io/spark:1.5.2_v1
          command: ["/start-worker"]
          ports:
            - containerPort: 8081
          resources:
            requests:
              cpu: 100m
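The workers expose only the per-worker UI port (8081) and, as committed, get no Service of their own; their pages are reached through the master UI behind the proxy.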
15
spark/overlays/test/kustomization.yaml
Normal file
@@ -0,0 +1,15 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources: [../../base/]
helmCharts:
  - includeCRDs: true
    name: spark-operator
    releaseName: spark
    valuesFile: ./values/values.yaml
    version: 1.4.2
    namespace: spark
helmGlobals:
  chartHome: ../../base/charts/
patches:
  - path: spark-ui-proxy-ingress.yml
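The `helmCharts`/`helmGlobals` inflation only runs when kustomize is invoked with Helm support enabled, e.g. `kustomize build --enable-helm spark/overlays/test`; with `chartHome` pointing at the vendored chart, nothing is fetched from a remote repository.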
32
spark/overlays/test/spark-ui-proxy-ingress.yml
Normal file
@@ -0,0 +1,32 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: spark-ui-proxy-ingress
  annotations:
    kubernetes.io/ingress.class: nginx
    nginx.ingress.kubernetes.io/auth-type: basic
    nginx.ingress.kubernetes.io/auth-secret: htpasswd
    nginx.ingress.kubernetes.io/auth-realm: Enter your credentials
    cert-manager.io/issuer: "letsencrypt"
spec:
  ingressClassName: nginx
  defaultBackend:
    service:
      name: spark-ui-proxy
      port:
        number: 80
  tls:
    - hosts: [spark-test.streams.sunet.se]
      secretName: tls-secret
  rules:
    - host: spark-test.streams.sunet.se
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: spark-ui-proxy
                port:
                  number: 80
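As committed, this patch is byte-for-byte identical to the base manifest, so it does not change anything yet. A strategic-merge patch normally carries only the sections an overlay overrides; a minimal sketch (using the host and service already present in this repo) would be:

```yaml
# Sketch of a trimmed-down override patch; only the fields being changed
# (here the TLS hosts and the rule) need to appear.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: spark-ui-proxy-ingress
spec:
  tls:
    - hosts: [spark-test.streams.sunet.se]
      secretName: tls-secret
  rules:
    - host: spark-test.streams.sunet.se
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: spark-ui-proxy
                port:
                  number: 80
```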
163
spark/overlays/test/values/values.yaml
Normal file
@@ -0,0 +1,163 @@
# -- Common labels to add to the resources
commonLabels: {}

# replicaCount -- Desired number of pods, leaderElection will be enabled
# if this is greater than 1
replicaCount: 3

image:
  # -- Image repository
  repository: docker.io/kubeflow/spark-operator
  # -- Image pull policy
  pullPolicy: IfNotPresent
  # -- If set, override the image tag whose default is the chart appVersion.
  tag: ""

# -- Image pull secrets
imagePullSecrets: []

rbac:
  # -- Create and use RBAC `Role` resources
  createRole: true
  # -- Create and use RBAC `ClusterRole` resources
  createClusterRole: true

serviceAccounts:
  spark:
    # -- Create a service account for spark apps
    create: true
    # -- Optional name for the spark service account
    name: ""
    # -- Optional annotations for the spark service account
    annotations: {}
  sparkoperator:
    # -- Create a service account for the operator
    create: true
    # -- Optional name for the operator service account
    name: ""
    # -- Optional annotations for the operator service account
    annotations: {}

# -- List of namespaces in which to run spark jobs
sparkJobNamespaces:
  - "spark"

# -- Operator concurrency, higher values might increase memory usage
controllerThreads: 10

# -- Operator resync interval. Note that the operator will respond to events (e.g. create, update)
# unrelated to this setting
resyncInterval: 30

uiService:
  # -- Enable UI service creation for Spark application
  enable: true

# -- Ingress URL format.
# Requires the UI service to be enabled by setting `uiService.enable` to true.
ingressUrlFormat: ""

# -- Set higher levels for more verbose logging
logLevel: 2

# -- Pod environment variable sources
envFrom: []

# podSecurityContext -- Pod security context
podSecurityContext: {}

# securityContext -- Operator container security context
securityContext: {}

# sidecars -- Sidecar containers
sidecars: []

# volumes -- Operator volumes
volumes: []

# volumeMounts -- Operator volumeMounts
volumeMounts: []

webhook:
  # -- Enable webhook server
  enable: false
  # -- Webhook service port
  port: 8080
  # -- Webhook container port name and service target port name
  portName: webhook
  # -- The webhook server will only operate on namespaces with this label, specified in the form key1=value1,key2=value2.
  # Empty string (default) will operate on all namespaces
  namespaceSelector: ""
  # -- The webhook will only operate on resources with this label(s), specified in the form key1=value1,key2=value2, OR key in (value1,value2).
  # Empty string (default) will operate on all objects
  objectSelector: ""
  # -- Webhook timeout in seconds
  timeout: 30

metrics:
  # -- Enable prometheus metric scraping
  enable: true
  # -- Metrics port
  port: 10254
  # -- Metrics port name
  portName: metrics
  # -- Metrics serving endpoint
  endpoint: /metrics
  # -- Metric prefix, will be added to all exported metrics
  prefix: ""

# -- Prometheus pod monitor for operator's pod.
podMonitor:
  # -- If enabled, a pod monitor for operator's pod will be submitted. Note that prometheus metrics should be enabled as well.
  enable: false
  # -- Pod monitor labels
  labels: {}
  # -- The label to use to retrieve the job name from
  jobLabel: spark-operator-podmonitor
  # -- Prometheus metrics endpoint properties. `metrics.portName` will be used as a port
  podMetricsEndpoint:
    scheme: http
    interval: 5s

# nodeSelector -- Node labels for pod assignment
nodeSelector: {}

# tolerations -- List of node taints to tolerate
tolerations: []

# affinity -- Affinity for pod assignment
affinity: {}

# podAnnotations -- Additional annotations to add to the pod
podAnnotations: {}

# podLabels -- Additional labels to add to the pod
podLabels: {}

# resources -- Pod resource requests and limits
# Note that each job submission will spawn a JVM within the Spark Operator Pod using "/usr/local/openjdk-11/bin/java -Xmx128m".
# Kubernetes may kill these Java processes at will to enforce resource limits. When that happens, you will see the following error:
# 'failed to run spark-submit for SparkApplication [...]: signal: killed' - when this happens, you may want to increase memory limits.
resources: {}
# limits:
#   cpu: 100m
#   memory: 300Mi
# requests:
#   cpu: 100m
#   memory: 300Mi

batchScheduler:
  # -- Enable batch scheduler for spark job scheduling. If enabled, users can specify the batch scheduler name in the spark application
  enable: false

resourceQuotaEnforcement:
  # -- Whether to enable the ResourceQuota enforcement for SparkApplication resources.
  # Requires the webhook to be enabled by setting `webhook.enable` to true.
  # Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-resource-quota-enforcement.
  enable: false

leaderElection:
  # -- Leader election lock name.
  # Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-leader-election-for-high-availability.
  lockName: "spark-operator-lock"