Compare commits

...

7 commits

Author SHA1 Message Date
Micke Nordin a9ec608b3b Cleanup 2024-10-17 14:58:00 +02:00
Micke Nordin 6895552040 Add trino 2024-10-17 14:57:59 +02:00
Micke Nordin dfa7b7dfb6 Add kafka 2024-10-17 14:57:55 +02:00
Micke Nordin 13189ff516 Add spark 2024-10-17 14:57:54 +02:00
Micke Nordin ed160441df Formatting 2024-10-17 14:57:50 +02:00
Micke Nordin d7611b64e4 Add cert-manager 2024-10-17 14:57:49 +02:00
Micke Nordin e2f94cf34f Add argocd and bump-tag 2024-10-17 14:56:50 +02:00
79 changed files with 14980 additions and 106 deletions

View file

@@ -0,0 +1,15 @@
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: letsencrypt
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: drive@sunet.se
privateKeySecretRef:
name: letsencrypt
solvers:
- http01:
ingress:
class: nginx

View file

@@ -3,14 +3,21 @@ kind: Ingress
 metadata:
   name: argocd-ingress
   namespace: argocd
+  annotations:
+    cert-manager.io/issuer: "letsencrypt"
 spec:
+  defaultBackend:
+    service:
+      name: argocd-server
+      port:
+        number: 80
   ingressClassName: nginx
   tls:
   - hosts:
-    - argocd.streams.sunet.se
-    secretName: tls-secret
+    - argocd-test.streams.sunet.se
+    secretName: argocd-tls-secret
   rules:
-  - host: argocd.streams.sunet.se
+  - host: argocd-test.streams.sunet.se
     http:
       paths:
       - path: /
@@ -19,4 +26,4 @@ spec:
       service:
         name: argocd-server
         port:
-          name: https
+          number: 80

View file

@@ -1,2 +1,3 @@
 resources:
 - argocd-ingress.yaml
+- argocd-cert-issuer.yaml

View file

@@ -1,27 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: argocd-ingress
namespace: argocd
spec:
defaultBackend:
service:
name: argocd-server
port:
number: 80
ingressClassName: nginx
tls:
- hosts:
- argocd.streams.sunet.se
secretName: tls-secret
rules:
- host: argocd.streams.sunet.se
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: argocd-server
port:
number: 80

View file

@@ -1,6 +0,0 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources: [../../base]
patches:
- path: argocd-ingress.yaml

View file

@@ -3,6 +3,9 @@ kind: Ingress
 metadata:
   name: argocd-ingress
   namespace: argocd
+  annotations:
+    cert-manager.io/issuer: "letsencrypt"
+    acme.cert-manager.io/http01-edit-in-place: "true"
 spec:
   defaultBackend:
     service:
@@ -13,7 +16,7 @@ spec:
   tls:
   - hosts:
     - argocd-test.streams.sunet.se
-    secretName: tls-secret
+    secretName: argocd-tls-secret
   rules:
   - host: argocd-test.streams.sunet.se
     http:

View file

@@ -1,17 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
labels:
app.kubernetes.io/instance: argocd
app.kubernetes.io/name: argocd-cm
app.kubernetes.io/part-of: argocd
name: argocd-cm
namespace: argocd
data:
resource.customizations: |
networking.k8s.io/Ingress:
health.lua: |
hs = {}
hs.status = "Healthy"
return hs
kustomize.buildOptions: --enable-helm

View file

@@ -1,21 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app.kubernetes.io/component: repo-server
app.kubernetes.io/name: argocd-repo-server
app.kubernetes.io/part-of: argocd
name: argocd-repo-server
namespace: argocd
spec:
template:
metadata:
labels:
app.kubernetes.io/name: argocd-repo-server
spec:
containers:
- name: argocd-repo-server
env:
- name: ARGOCD_EXEC_TIMEOUT
value: "300s"

View file

@@ -1,22 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app.kubernetes.io/component: server
app.kubernetes.io/name: argocd-server
app.kubernetes.io/part-of: argocd
name: argocd-server
namespace: argocd
spec:
template:
metadata:
labels:
app.kubernetes.io/name: argocd-server
spec:
containers:
- args:
- /usr/local/bin/argocd-server
- --insecure
- --redis
- "argocd-redis-ha-haproxy:6379"
name: argocd-server

View file

@@ -1,8 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../base
patches:
- path: argocd-server-deployment.yaml
- path: argocd-repo-server-deployment.yaml
- path: argocd-cm.yaml

View file

@@ -0,0 +1,184 @@
# This YAML file contains RBAC API objects,
# which are necessary to run csi controller plugin
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-cinder-controller-sa
namespace: kube-system
---
# external attacher
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-attacher-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments/status"]
verbs: ["patch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-attacher-binding
subjects:
- kind: ServiceAccount
name: csi-cinder-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: csi-attacher-role
apiGroup: rbac.authorization.k8s.io
---
# external Provisioner
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-provisioner-role
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["get", "list"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-provisioner-binding
subjects:
- kind: ServiceAccount
name: csi-cinder-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: csi-provisioner-role
apiGroup: rbac.authorization.k8s.io
---
# external snapshotter
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-snapshotter-role
rules:
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
# Secret permission is optional.
# Enable it if your driver needs secret.
# For example, `csi.storage.k8s.io/snapshotter-secret-name` is set in VolumeSnapshotClass.
# See https://kubernetes-csi.github.io/docs/secrets-and-credentials.html for more details.
# - apiGroups: [""]
# resources: ["secrets"]
# verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["create", "get", "list", "watch", "update", "delete", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents/status"]
verbs: ["update", "patch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-snapshotter-binding
subjects:
- kind: ServiceAccount
name: csi-cinder-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: csi-snapshotter-role
apiGroup: rbac.authorization.k8s.io
---
# External Resizer
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-resizer-role
rules:
# The following rule should be uncommented for plugins that require secrets
# for provisioning.
# - apiGroups: [""]
# resources: ["secrets"]
# verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "patch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["patch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-resizer-binding
subjects:
- kind: ServiceAccount
name: csi-cinder-controller-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: csi-resizer-role
apiGroup: rbac.authorization.k8s.io

View file

@@ -0,0 +1,141 @@
# This YAML file contains CSI Controller Plugin Sidecars
# external-attacher, external-provisioner, external-snapshotter
# external-resize, liveness-probe
kind: Deployment
apiVersion: apps/v1
metadata:
name: csi-cinder-controllerplugin
namespace: kube-system
spec:
replicas: 1
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 0
maxSurge: 1
selector:
matchLabels:
app: csi-cinder-controllerplugin
template:
metadata:
labels:
app: csi-cinder-controllerplugin
spec:
serviceAccount: csi-cinder-controller-sa
containers:
- name: csi-attacher
image: registry.k8s.io/sig-storage/csi-attacher:v4.2.0
args:
- "--csi-address=$(ADDRESS)"
- "--timeout=3m"
- "--leader-election=true"
- "--default-fstype=ext4"
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: csi-provisioner
image: registry.k8s.io/sig-storage/csi-provisioner:v3.4.1
args:
- "--csi-address=$(ADDRESS)"
- "--timeout=3m"
- "--default-fstype=ext4"
- "--feature-gates=Topology=true"
- "--extra-create-metadata"
- "--leader-election=true"
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: csi-snapshotter
image: registry.k8s.io/sig-storage/csi-snapshotter:v6.2.1
args:
- "--csi-address=$(ADDRESS)"
- "--timeout=3m"
- "--extra-create-metadata"
- "--leader-election=true"
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
imagePullPolicy: Always
volumeMounts:
- mountPath: /var/lib/csi/sockets/pluginproxy/
name: socket-dir
- name: csi-resizer
image: registry.k8s.io/sig-storage/csi-resizer:v1.8.0
args:
- "--csi-address=$(ADDRESS)"
- "--timeout=3m"
- "--handle-volume-inuse-error=false"
- "--leader-election=true"
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: liveness-probe
image: registry.k8s.io/sig-storage/livenessprobe:v2.9.0
args:
- "--csi-address=$(ADDRESS)"
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
volumeMounts:
- mountPath: /var/lib/csi/sockets/pluginproxy/
name: socket-dir
- name: cinder-csi-plugin
image: registry.k8s.io/provider-os/cinder-csi-plugin:v1.28.3
args:
- /bin/cinder-csi-plugin
- "--endpoint=$(CSI_ENDPOINT)"
- "--cloud-config=$(CLOUD_CONFIG)"
- "--cluster=$(CLUSTER_NAME)"
- "--v=1"
env:
- name: CSI_ENDPOINT
value: unix://csi/csi.sock
- name: CLOUD_CONFIG
value: /etc/config/cloud.conf
- name: CLUSTER_NAME
value: kubernetes
imagePullPolicy: "IfNotPresent"
ports:
- containerPort: 9808
name: healthz
protocol: TCP
# The probe
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 10
timeoutSeconds: 10
periodSeconds: 60
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: secret-cinderplugin
mountPath: /etc/config
readOnly: true
# - name: cacert
# mountPath: /etc/cacert
# readOnly: true
volumes:
- name: socket-dir
emptyDir:
- name: secret-cinderplugin
secret:
secretName: cloud-config
# - name: cacert
# hostPath:
# path: /etc/cacert

View file

@@ -0,0 +1,30 @@
# This YAML defines all API objects to create RBAC roles for csi node plugin.
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-cinder-node-sa
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-nodeplugin-role
rules:
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-nodeplugin-binding
subjects:
- kind: ServiceAccount
name: csi-cinder-node-sa
namespace: kube-system
roleRef:
kind: ClusterRole
name: csi-nodeplugin-role
apiGroup: rbac.authorization.k8s.io

View file

@@ -0,0 +1,118 @@
# This YAML file contains driver-registrar & csi driver nodeplugin API objects,
# which are necessary to run csi nodeplugin for cinder.
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: csi-cinder-nodeplugin
namespace: kube-system
spec:
selector:
matchLabels:
app: csi-cinder-nodeplugin
template:
metadata:
labels:
app: csi-cinder-nodeplugin
spec:
tolerations:
- operator: Exists
serviceAccount: csi-cinder-node-sa
hostNetwork: true
containers:
- name: node-driver-registrar
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.6.3
args:
- "--csi-address=$(ADDRESS)"
- "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)"
env:
- name: ADDRESS
value: /csi/csi.sock
- name: DRIVER_REG_SOCK_PATH
value: /var/snap/microk8s/common/var/lib/kubelet/plugins/cinder.csi.openstack.org/csi.sock
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
- name: liveness-probe
image: registry.k8s.io/sig-storage/livenessprobe:v2.9.0
args:
- --csi-address=/csi/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: cinder-csi-plugin
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: registry.k8s.io/provider-os/cinder-csi-plugin:v1.28.3
args:
- /bin/cinder-csi-plugin
- "--endpoint=$(CSI_ENDPOINT)"
- "--cloud-config=$(CLOUD_CONFIG)"
- "--v=1"
env:
- name: CSI_ENDPOINT
value: unix://csi/csi.sock
- name: CLOUD_CONFIG
value: /etc/config/cloud.conf
imagePullPolicy: "IfNotPresent"
ports:
- containerPort: 9808
name: healthz
protocol: TCP
# The probe
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 10
timeoutSeconds: 3
periodSeconds: 10
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: kubelet-dir
mountPath: /var/snap/microk8s/common/var/lib/kubelet
mountPropagation: "Bidirectional"
- name: pods-probe-dir
mountPath: /dev
mountPropagation: "HostToContainer"
- name: secret-cinderplugin
mountPath: /etc/config
readOnly: true
# - name: cacert
# mountPath: /etc/cacert
# readOnly: true
volumes:
- name: socket-dir
hostPath:
path: /var/snap/microk8s/common/var/lib/kubelet/plugins/cinder.csi.openstack.org
type: DirectoryOrCreate
- name: registration-dir
hostPath:
path: /var/snap/microk8s/common/var/lib/kubelet/plugins_registry/
type: Directory
- name: kubelet-dir
hostPath:
path: /var/snap/microk8s/common/var/lib/kubelet
type: Directory
- name: pods-probe-dir
hostPath:
path: /dev
type: Directory
- name: secret-cinderplugin
secret:
secretName: cloud-config
# - name: cacert
# hostPath:
# path: /etc/cacert

View file

@@ -0,0 +1,5 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: csi-sc-cinderplugin
provisioner: cinder.csi.openstack.org
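Once this StorageClass exists, a workload can request a Cinder-backed volume through an ordinary PersistentVolumeClaim. A minimal sketch (the claim name and size are illustrative, not part of this change set):

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-cinder-pvc   # hypothetical name, for illustration only
spec:
  accessModes:
    - ReadWriteOnce          # Cinder volumes attach to a single node
  storageClassName: csi-sc-cinderplugin
  resources:
    requests:
      storage: 1Gi
```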

View file

@@ -0,0 +1,10 @@
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: cinder.csi.openstack.org
spec:
attachRequired: true
podInfoOnMount: true
volumeLifecycleModes:
- Persistent
- Ephemeral
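Because `volumeLifecycleModes` includes `Ephemeral`, pods may also declare inline CSI volumes that are created and destroyed with the pod. A sketch of what that could look like (the pod and the `capacity` attribute are assumptions for illustration, not taken from this change set):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: inline-volume-demo   # hypothetical pod name
spec:
  containers:
    - name: app
      image: busybox
      command: ["sleep", "3600"]
      volumeMounts:
        - name: scratch
          mountPath: /scratch
  volumes:
    - name: scratch
      csi:
        driver: cinder.csi.openstack.org
        volumeAttributes:
          capacity: 1Gi      # size of the ephemeral Cinder volume (assumed attribute)
```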

View file

@@ -0,0 +1,6 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: kafka
resources:
- https://strimzi.io/install/latest?namespace=kafka
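The kustomization above only installs the Strimzi operator; Kafka clusters are then declared as custom resources for it to reconcile. A minimal sketch of a ZooKeeper-based cluster (name, sizing, and storage choices are illustrative):

```yaml
apiVersion: kafka.strimzi.io/v1beta2
kind: Kafka
metadata:
  name: my-cluster           # hypothetical cluster name
  namespace: kafka
spec:
  kafka:
    replicas: 3
    listeners:
      - name: plain
        port: 9092
        type: internal
        tls: false
    storage:
      type: ephemeral        # use persistent-claim storage in production
  zookeeper:
    replicas: 3
    storage:
      type: ephemeral
```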

View file

@@ -0,0 +1,4 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../base

View file

@@ -0,0 +1,38 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
ci/
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
# MacOS
.DS_Store
# helm-unittest
./tests
.debug
__snapshot__
# helm-docs
README.md.gotmpl

View file

@@ -0,0 +1,11 @@
apiVersion: v2
appVersion: v1beta2-1.6.1-3.5.0
description: A Helm chart for Spark on Kubernetes operator
home: https://github.com/kubeflow/spark-operator
keywords:
- spark
maintainers:
- email: yuchaoran2011@gmail.com
name: yuchaoran2011
name: spark-operator
version: 1.4.2

View file

@@ -0,0 +1,146 @@
# spark-operator
![Version: 1.4.2](https://img.shields.io/badge/Version-1.4.2-informational?style=flat-square) ![AppVersion: v1beta2-1.6.1-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.6.1--3.5.0-informational?style=flat-square)
A Helm chart for Spark on Kubernetes operator
**Homepage:** <https://github.com/kubeflow/spark-operator>
## Introduction
This chart bootstraps a [Kubernetes Operator for Apache Spark](https://github.com/kubeflow/spark-operator) deployment using the [Helm](https://helm.sh) package manager.
## Prerequisites
- Helm >= 3
- Kubernetes >= 1.16
## Previous Helm Chart
The previous `spark-operator` Helm chart hosted at [helm/charts](https://github.com/helm/charts) has been moved to this repository in accordance with the [Deprecation timeline](https://github.com/helm/charts#deprecation-timeline). Note that a few things have changed between this version and the old version:
- This repository **only** supports Helm chart installations using Helm 3+ since the `apiVersion` on the chart has been marked as `v2`.
- Previous versions of the Helm chart have not been migrated, and the version has been set to `1.0.0` at the onset. If you are looking for old versions of the chart, it's best to run `helm pull incubator/sparkoperator --version <your-version>` until you are ready to move to this repository's version.
- Several configuration properties have been changed; carefully review the [values](#values) section below to make sure you're aligned with the new values.
## Usage
### Add Helm Repo
```shell
helm repo add spark-operator https://kubeflow.github.io/spark-operator
helm repo update
```
See [helm repo](https://helm.sh/docs/helm/helm_repo) for command documentation.
### Install the chart
```shell
helm install [RELEASE_NAME] spark-operator/spark-operator
```
For example, if you want to create a release with name `spark-operator` in the `default` namespace:
```shell
helm install spark-operator spark-operator/spark-operator
```
Note that `helm` will fail to install if the namespace doesn't exist. Either create the namespace beforehand or pass the `--create-namespace` flag to the `helm install` command.
```shell
helm install spark-operator spark-operator/spark-operator \
--namespace spark-operator \
--create-namespace
```
See [helm install](https://helm.sh/docs/helm/helm_install) for command documentation.
### Upgrade the chart
```shell
helm upgrade [RELEASE_NAME] spark-operator/spark-operator [flags]
```
See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade) for command documentation.
### Uninstall the chart
```shell
helm uninstall [RELEASE_NAME]
```
This removes all the Kubernetes resources associated with the chart and deletes the release, except for the `crds`, which have to be removed manually (a sketch follows below).
See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall) for command documentation.
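If the CRDs should be removed as well, they can be deleted explicitly; a sketch, using the CRD names from the `sparkoperator.k8s.io` API group this chart manages:

```shell
kubectl delete crd sparkapplications.sparkoperator.k8s.io
kubectl delete crd scheduledsparkapplications.sparkoperator.k8s.io
```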
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| affinity | object | `{}` | Affinity for pod assignment |
| batchScheduler.enable | bool | `false` | Enable batch scheduler for spark jobs scheduling. If enabled, users can specify batch scheduler name in spark application |
| commonLabels | object | `{}` | Common labels to add to the resources |
| controllerThreads | int | `10` | Operator concurrency, higher values might increase memory usage |
| envFrom | list | `[]` | Pod environment variable sources |
| fullnameOverride | string | `""` | String to override release name |
| image.pullPolicy | string | `"IfNotPresent"` | Image pull policy |
| image.repository | string | `"docker.io/kubeflow/spark-operator"` | Image repository |
| image.tag | string | `""` | if set, override the image tag whose default is the chart appVersion. |
| imagePullSecrets | list | `[]` | Image pull secrets |
| ingressUrlFormat | string | `""` | Ingress URL format. Requires the UI service to be enabled by setting `uiService.enable` to true. |
| istio.enabled | bool | `false` | When using `istio`, spark jobs need to run without a sidecar to properly terminate |
| labelSelectorFilter | string | `""` | A comma-separated list of key=value, or key labels to filter resources during watch and list based on the specified labels. |
| leaderElection.lockName | string | `"spark-operator-lock"` | Leader election lock name. Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-leader-election-for-high-availability. |
| leaderElection.lockNamespace | string | `""` | Optionally store the lock in another namespace. Defaults to operator's namespace |
| logLevel | int | `2` | Set higher levels for more verbose logging |
| metrics.enable | bool | `true` | Enable prometheus metric scraping |
| metrics.endpoint | string | `"/metrics"` | Metrics serving endpoint |
| metrics.port | int | `10254` | Metrics port |
| metrics.portName | string | `"metrics"` | Metrics port name |
| metrics.prefix | string | `""` | Metric prefix, will be added to all exported metrics |
| nameOverride | string | `""` | String to partially override `spark-operator.fullname` template (will maintain the release name) |
| nodeSelector | object | `{}` | Node labels for pod assignment |
| podAnnotations | object | `{}` | Additional annotations to add to the pod |
| podLabels | object | `{}` | Additional labels to add to the pod |
| podMonitor | object | `{"enable":false,"jobLabel":"spark-operator-podmonitor","labels":{},"podMetricsEndpoint":{"interval":"5s","scheme":"http"}}` | Prometheus pod monitor for operator's pod. |
| podMonitor.enable | bool | `false` | If enabled, a pod monitor for operator's pod will be submitted. Note that prometheus metrics should be enabled as well. |
| podMonitor.jobLabel | string | `"spark-operator-podmonitor"` | The label to use to retrieve the job name from |
| podMonitor.labels | object | `{}` | Pod monitor labels |
| podMonitor.podMetricsEndpoint | object | `{"interval":"5s","scheme":"http"}` | Prometheus metrics endpoint properties. `metrics.portName` will be used as a port |
| podSecurityContext | object | `{}` | Pod security context |
| priorityClassName | string | `""` | A priority class to be used for running spark-operator pod. |
| rbac.annotations | object | `{}` | Optional annotations for rbac |
| rbac.create | bool | `false` | **DEPRECATED** use `createRole` and `createClusterRole` |
| rbac.createClusterRole | bool | `true` | Create and use RBAC `ClusterRole` resources |
| rbac.createRole | bool | `true` | Create and use RBAC `Role` resources |
| replicaCount | int | `1` | Desired number of pods, leaderElection will be enabled if this is greater than 1 |
| resourceQuotaEnforcement.enable | bool | `false` | Whether to enable the ResourceQuota enforcement for SparkApplication resources. Requires the webhook to be enabled by setting `webhook.enable` to true. Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-resource-quota-enforcement. |
| resources | object | `{}` | Pod resource requests and limits. Note that each job submission will spawn a JVM within the Spark Operator Pod using "/usr/local/openjdk-11/bin/java -Xmx128m". Kubernetes may kill these Java processes at will to enforce resource limits. When that happens, you will see the error 'failed to run spark-submit for SparkApplication [...]: signal: killed' and may want to increase memory limits. |
| resyncInterval | int | `30` | Operator resync interval. Note that the operator will respond to events (e.g. create, update) unrelated to this setting |
| securityContext | object | `{}` | Operator container security context |
| serviceAccounts.spark.annotations | object | `{}` | Optional annotations for the spark service account |
| serviceAccounts.spark.create | bool | `true` | Create a service account for spark apps |
| serviceAccounts.spark.name | string | `""` | Optional name for the spark service account |
| serviceAccounts.sparkoperator.annotations | object | `{}` | Optional annotations for the operator service account |
| serviceAccounts.sparkoperator.create | bool | `true` | Create a service account for the operator |
| serviceAccounts.sparkoperator.name | string | `""` | Optional name for the operator service account |
| sidecars | list | `[]` | Sidecar containers |
| sparkJobNamespaces | list | `[""]` | List of namespaces where to run spark jobs |
| tolerations | list | `[]` | List of node taints to tolerate |
| uiService.enable | bool | `true` | Enable UI service creation for Spark application |
| volumeMounts | list | `[]` | Operator volumeMounts |
| volumes | list | `[]` | Operator volumes |
| webhook.enable | bool | `false` | Enable webhook server |
| webhook.namespaceSelector | string | `""` | The webhook server will only operate on namespaces with this label, specified in the form key1=value1,key2=value2. Empty string (default) will operate on all namespaces |
| webhook.objectSelector | string | `""` | The webhook will only operate on resources with this label/s, specified in the form key1=value1,key2=value2, OR key in (value1,value2). Empty string (default) will operate on all objects |
| webhook.port | int | `8080` | Webhook service port |
| webhook.portName | string | `"webhook"` | Webhook container port name and service target port name |
| webhook.timeout | int | `30` | The annotations applied to init job, required to restore certs deleted by the cleanup job during upgrade |
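Any of the values above can be overridden at install time. For example, to enable the admission webhook and run two replicas with leader election (a sketch; the release and namespace names are illustrative):

```shell
helm upgrade --install spark-operator spark-operator/spark-operator \
  --namespace spark-operator \
  --create-namespace \
  --set webhook.enable=true \
  --set replicaCount=2
```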
## Maintainers
| Name | Email | Url |
| ---- | ------ | --- |
| yuchaoran2011 | <yuchaoran2011@gmail.com> | |

File diff suppressed because it is too large

View file

@@ -0,0 +1,79 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "spark-operator.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "spark-operator.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "spark-operator.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "spark-operator.labels" -}}
helm.sh/chart: {{ include "spark-operator.chart" . }}
{{ include "spark-operator.selectorLabels" . }}
{{- if .Values.commonLabels }}
{{ toYaml .Values.commonLabels }}
{{- end }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "spark-operator.selectorLabels" -}}
app.kubernetes.io/name: {{ include "spark-operator.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to be used by the operator
*/}}
{{- define "spark-operator.serviceAccountName" -}}
{{- if .Values.serviceAccounts.sparkoperator.create -}}
{{ default (include "spark-operator.fullname" .) .Values.serviceAccounts.sparkoperator.name }}
{{- else -}}
{{ default "default" .Values.serviceAccounts.sparkoperator.name }}
{{- end -}}
{{- end -}}
{{/*
Create the name of the service account to be used by spark apps
*/}}
{{- define "spark.serviceAccountName" -}}
{{- if .Values.serviceAccounts.spark.create -}}
{{- $sparkServiceaccount := printf "%s-%s" .Release.Name "spark" -}}
{{ default $sparkServiceaccount .Values.serviceAccounts.spark.name }}
{{- else -}}
{{ default "default" .Values.serviceAccounts.spark.name }}
{{- end -}}
{{- end -}}

View file

@@ -0,0 +1,140 @@
# If the admission webhook is enabled, then a post-install step is required
# to generate and install the secret in the operator namespace.
# In the post-install hook, the token corresponding to the operator service account
# is used to authenticate with the Kubernetes API server to install the secret bundle.
{{- $jobNamespaces := .Values.sparkJobNamespaces | default list }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "spark-operator.fullname" . }}
labels:
{{- include "spark-operator.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "spark-operator.selectorLabels" . | nindent 6 }}
strategy:
type: Recreate
template:
metadata:
{{- if or .Values.podAnnotations .Values.metrics.enable }}
annotations:
{{- if .Values.metrics.enable }}
prometheus.io/scrape: "true"
prometheus.io/port: "{{ .Values.metrics.port }}"
prometheus.io/path: {{ .Values.metrics.endpoint }}
{{- end }}
{{- if .Values.podAnnotations }}
{{- toYaml .Values.podAnnotations | trim | nindent 8 }}
{{- end }}
{{- end }}
labels:
{{- include "spark-operator.selectorLabels" . | nindent 8 }}
{{- with .Values.podLabels }}
{{- toYaml . | trim | nindent 8 }}
{{- end }}
spec:
serviceAccountName: {{ include "spark-operator.serviceAccountName" . }}
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
image: {{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- if gt (int .Values.replicaCount) 1 }}
env:
- name: POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
{{- end }}
envFrom:
{{- toYaml .Values.envFrom | nindent 10 }}
securityContext:
{{- toYaml .Values.securityContext | nindent 10 }}
{{- if or .Values.metrics.enable .Values.webhook.enable }}
ports:
{{ if .Values.metrics.enable -}}
- name: {{ .Values.metrics.portName | quote }}
containerPort: {{ .Values.metrics.port }}
{{- end }}
{{ if .Values.webhook.enable -}}
- name: {{ .Values.webhook.portName | quote }}
containerPort: {{ .Values.webhook.port }}
{{- end }}
{{ end -}}
args:
- -v={{ .Values.logLevel }}
- -logtostderr
{{- if eq (len $jobNamespaces) 1 }}
- -namespace={{ index $jobNamespaces 0 }}
{{- end }}
- -enable-ui-service={{ .Values.uiService.enable }}
- -ingress-url-format={{ .Values.ingressUrlFormat }}
- -controller-threads={{ .Values.controllerThreads }}
- -resync-interval={{ .Values.resyncInterval }}
- -enable-batch-scheduler={{ .Values.batchScheduler.enable }}
- -label-selector-filter={{ .Values.labelSelectorFilter }}
{{- if .Values.metrics.enable }}
- -enable-metrics=true
- -metrics-labels=app_type
- -metrics-port={{ .Values.metrics.port }}
- -metrics-endpoint={{ .Values.metrics.endpoint }}
- -metrics-prefix={{ .Values.metrics.prefix }}
{{- end }}
{{- if .Values.webhook.enable }}
- -enable-webhook=true
- -webhook-secret-name={{ include "spark-operator.webhookSecretName" . }}
- -webhook-secret-namespace={{ .Release.Namespace }}
- -webhook-svc-name={{ include "spark-operator.webhookServiceName" . }}
- -webhook-svc-namespace={{ .Release.Namespace }}
- -webhook-config-name={{ include "spark-operator.fullname" . }}-webhook-config
- -webhook-port={{ .Values.webhook.port }}
- -webhook-timeout={{ .Values.webhook.timeout }}
- -webhook-namespace-selector={{ .Values.webhook.namespaceSelector }}
- -webhook-object-selector={{ .Values.webhook.objectSelector }}
{{- end }}
- -enable-resource-quota-enforcement={{ .Values.resourceQuotaEnforcement.enable }}
{{- if gt (int .Values.replicaCount) 1 }}
- -leader-election=true
- -leader-election-lock-namespace={{ default .Release.Namespace .Values.leaderElection.lockNamespace }}
- -leader-election-lock-name={{ .Values.leaderElection.lockName }}
{{- end }}
{{- with .Values.resources }}
resources:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with .Values.volumeMounts }}
volumeMounts:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with .Values.sidecars }}
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.volumes }}
volumes:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}

View file

@@ -0,0 +1,19 @@
{{ if and .Values.metrics.enable .Values.podMonitor.enable }}
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: {{ include "spark-operator.name" . -}}-podmonitor
labels: {{ toYaml .Values.podMonitor.labels | nindent 4 }}
spec:
podMetricsEndpoints:
- interval: {{ .Values.podMonitor.podMetricsEndpoint.interval }}
port: {{ .Values.metrics.portName | quote }}
scheme: {{ .Values.podMonitor.podMetricsEndpoint.scheme }}
jobLabel: {{ .Values.podMonitor.jobLabel }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
selector:
matchLabels:
{{- include "spark-operator.selectorLabels" . | nindent 6 }}
{{ end }}

View file

@@ -0,0 +1,148 @@
{{- if or .Values.rbac.create .Values.rbac.createClusterRole -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "spark-operator.fullname" . }}
labels:
{{- include "spark-operator.labels" . | nindent 4 }}
{{- with .Values.rbac.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
rules:
- apiGroups:
- ""
resources:
- pods
- persistentvolumeclaims
verbs:
- "*"
- apiGroups:
- ""
resources:
- services
- configmaps
- secrets
verbs:
- create
- get
- delete
- update
- patch
- apiGroups:
- extensions
- networking.k8s.io
resources:
- ingresses
verbs:
- create
- get
- delete
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- apiGroups:
- ""
resources:
- events
verbs:
- create
- update
- patch
- apiGroups:
- ""
resources:
- resourcequotas
verbs:
- get
- list
- watch
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- get
- apiGroups:
- admissionregistration.k8s.io
resources:
- mutatingwebhookconfigurations
- validatingwebhookconfigurations
verbs:
- create
- get
- update
- delete
- apiGroups:
- sparkoperator.k8s.io
resources:
- sparkapplications
- sparkapplications/status
- sparkapplications/finalizers
- scheduledsparkapplications
- scheduledsparkapplications/status
- scheduledsparkapplications/finalizers
verbs:
- "*"
{{- if .Values.batchScheduler.enable }}
# required for the `volcano` batch scheduler
- apiGroups:
- scheduling.incubator.k8s.io
- scheduling.sigs.dev
- scheduling.volcano.sh
resources:
- podgroups
verbs:
- "*"
{{- end }}
{{ if .Values.webhook.enable }}
- apiGroups:
- batch
resources:
- jobs
verbs:
- delete
{{- end }}
{{- if gt (int .Values.replicaCount) 1 }}
- apiGroups:
- coordination.k8s.io
resources:
- leases
resourceNames:
- {{ .Values.leaderElection.lockName }}
verbs:
- get
- update
- patch
- delete
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
{{- end }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "spark-operator.fullname" . }}
labels:
{{- include "spark-operator.labels" . | nindent 4 }}
{{- with .Values.rbac.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
subjects:
- kind: ServiceAccount
name: {{ include "spark-operator.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: {{ include "spark-operator.fullname" . }}
apiGroup: rbac.authorization.k8s.io
{{- end }}

View file

@@ -0,0 +1,12 @@
{{- if .Values.serviceAccounts.sparkoperator.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "spark-operator.serviceAccountName" . }}
labels:
{{- include "spark-operator.labels" . | nindent 4 }}
{{- with .Values.serviceAccounts.sparkoperator.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

View file

@@ -0,0 +1,39 @@
{{- if or .Values.rbac.create .Values.rbac.createRole }}
{{- $jobNamespaces := .Values.sparkJobNamespaces | default list }}
{{- range $jobNamespace := $jobNamespaces }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: spark-role
namespace: {{ $jobNamespace }}
labels:
{{- include "spark-operator.labels" $ | nindent 4 }}
rules:
- apiGroups:
- ""
resources:
- pods
- services
- configmaps
- persistentvolumeclaims
verbs:
- "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: spark
namespace: {{ $jobNamespace }}
labels:
{{- include "spark-operator.labels" $ | nindent 4 }}
subjects:
- kind: ServiceAccount
name: {{ include "spark.serviceAccountName" $ }}
namespace: {{ $jobNamespace }}
roleRef:
kind: Role
name: spark-role
apiGroup: rbac.authorization.k8s.io
{{- end }}
{{- end }}

View file

@@ -0,0 +1,14 @@
{{- if .Values.serviceAccounts.spark.create }}
{{- range $sparkJobNamespace := .Values.sparkJobNamespaces | default (list .Release.Namespace) }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "spark.serviceAccountName" $ }}
namespace: {{ $sparkJobNamespace }}
{{- with $.Values.serviceAccounts.spark.annotations }}
annotations: {{ toYaml . | nindent 4 }}
{{- end }}
labels: {{ include "spark-operator.labels" $ | nindent 4 }}
{{- end }}
{{- end }}

View file

@@ -0,0 +1,14 @@
{{/*
Create the name of the secret to be used by webhook
*/}}
{{- define "spark-operator.webhookSecretName" -}}
{{ include "spark-operator.fullname" . }}-webhook-certs
{{- end -}}
{{/*
Create the name of the service to be used by webhook
*/}}
{{- define "spark-operator.webhookServiceName" -}}
{{ include "spark-operator.fullname" . }}-webhook-svc
{{- end -}}

View file

@@ -0,0 +1,13 @@
{{- if .Values.webhook.enable -}}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "spark-operator.webhookSecretName" . }}
labels:
{{- include "spark-operator.labels" . | nindent 4 }}
data:
ca-key.pem: ""
ca-cert.pem: ""
server-key.pem: ""
server-cert.pem: ""
{{- end }}

View file

@@ -0,0 +1,15 @@
{{- if .Values.webhook.enable -}}
apiVersion: v1
kind: Service
metadata:
name: {{ include "spark-operator.webhookServiceName" . }}
labels:
{{- include "spark-operator.labels" . | nindent 4 }}
spec:
selector:
{{- include "spark-operator.selectorLabels" . | nindent 4 }}
ports:
- port: 443
targetPort: {{ .Values.webhook.portName | quote }}
name: {{ .Values.webhook.portName }}
{{- end }}

View file

@@ -0,0 +1,301 @@
suite: Test spark operator deployment
templates:
- deployment.yaml
release:
name: spark-operator
tests:
- it: Should contain namespace arg when sparkJobNamespaces is equal to 1
set:
sparkJobNamespaces:
- ns1
asserts:
- contains:
path: spec.template.spec.containers[0].args
content: -namespace=ns1
- it: Should add pod annotations if podAnnotations is set
set:
podAnnotations:
key1: value1
key2: value2
asserts:
- equal:
path: spec.template.metadata.annotations.key1
value: value1
- equal:
path: spec.template.metadata.annotations.key2
value: value2
- it: Should add prometheus annotations if metrics.enable is true
set:
metrics:
enable: true
port: 10254
endpoint: /metrics
asserts:
- equal:
path: spec.template.metadata.annotations["prometheus.io/scrape"]
value: "true"
- equal:
path: spec.template.metadata.annotations["prometheus.io/port"]
value: "10254"
- equal:
path: spec.template.metadata.annotations["prometheus.io/path"]
value: /metrics
- it: Should add secrets if imagePullSecrets is set
set:
imagePullSecrets:
- name: test-secret1
- name: test-secret2
asserts:
- equal:
path: spec.template.spec.imagePullSecrets[0].name
value: test-secret1
- equal:
path: spec.template.spec.imagePullSecrets[1].name
value: test-secret2
- it: Should add pod securityContext if podSecurityContext is set
set:
podSecurityContext:
runAsUser: 1000
runAsGroup: 2000
fsGroup: 3000
asserts:
- equal:
path: spec.template.spec.securityContext.runAsUser
value: 1000
- equal:
path: spec.template.spec.securityContext.runAsGroup
value: 2000
- equal:
path: spec.template.spec.securityContext.fsGroup
value: 3000
- it: Should use the specified image repository if image.repository and image.tag is set
set:
image:
repository: test-repository
tag: test-tag
asserts:
- equal:
path: spec.template.spec.containers[0].image
value: test-repository:test-tag
- it: Should use the specified image pull policy if image.pullPolicy is set
set:
image:
pullPolicy: Always
asserts:
- equal:
path: spec.template.spec.containers[0].imagePullPolicy
value: Always
- it: Should add container securityContext if securityContext is set
set:
securityContext:
runAsUser: 1000
runAsGroup: 2000
fsGroup: 3000
asserts:
- equal:
path: spec.template.spec.containers[0].securityContext.runAsUser
value: 1000
- equal:
path: spec.template.spec.containers[0].securityContext.runAsGroup
value: 2000
- equal:
path: spec.template.spec.containers[0].securityContext.fsGroup
value: 3000
- it: Should add metric ports if metrics.enable is true
set:
metrics:
enable: true
port: 10254
portName: metrics
asserts:
- contains:
path: spec.template.spec.containers[0].ports
content:
name: metrics
containerPort: 10254
count: 1
- it: Should add webhook ports if webhook.enable is true
set:
webhook:
enable: true
port: 8080
portName: webhook
asserts:
- contains:
path: spec.template.spec.containers[0].ports
content:
name: webhook
containerPort: 8080
count: 1
- it: Should add resources if resources is set
set:
resources:
requests:
memory: "64Mi"
cpu: "250m"
limits:
memory: "128Mi"
cpu: "500m"
asserts:
- equal:
path: spec.template.spec.containers[0].resources
value:
requests:
memory: "64Mi"
cpu: "250m"
limits:
memory: "128Mi"
cpu: "500m"
- it: Should add sidecars if sidecars is set
set:
sidecars:
- name: sidecar1
image: sidecar-image1
- name: sidecar2
image: sidecar-image2
asserts:
- contains:
path: spec.template.spec.containers
content:
name: sidecar1
image: sidecar-image1
count: 1
- contains:
path: spec.template.spec.containers
content:
name: sidecar2
image: sidecar-image2
count: 1
- it: Should add volumes if volumes is set
set:
volumes:
- name: volume1
emptyDir: {}
- name: volume2
emptyDir: {}
asserts:
- contains:
path: spec.template.spec.volumes
content:
name: volume1
emptyDir: {}
count: 1
- contains:
path: spec.template.spec.volumes
content:
name: volume2
emptyDir: {}
count: 1
- it: Should add volume mounts if volumeMounts is set
set:
volumeMounts:
- name: volume1
mountPath: /volume1
- name: volume2
mountPath: /volume2
asserts:
- contains:
path: spec.template.spec.containers[0].volumeMounts
content:
name: volume1
mountPath: /volume1
count: 1
- contains:
path: spec.template.spec.containers[0].volumeMounts
content:
name: volume2
mountPath: /volume2
count: 1
- it: Should add nodeSelector if nodeSelector is set
set:
nodeSelector:
key1: value1
key2: value2
asserts:
- equal:
path: spec.template.spec.nodeSelector.key1
value: value1
- equal:
path: spec.template.spec.nodeSelector.key2
value: value2
- it: Should add affinity if affinity is set
set:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: topology.kubernetes.io/zone
operator: In
values:
- antarctica-east1
- antarctica-west1
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 1
preference:
matchExpressions:
- key: another-node-label-key
operator: In
values:
- another-node-label-value
asserts:
- equal:
path: spec.template.spec.affinity
value:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: topology.kubernetes.io/zone
operator: In
values:
- antarctica-east1
- antarctica-west1
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 1
preference:
matchExpressions:
- key: another-node-label-key
operator: In
values:
- another-node-label-value
- it: Should add tolerations if tolerations is set
set:
tolerations:
- key: key1
operator: Equal
value: value1
effect: NoSchedule
- key: key2
operator: Exists
effect: NoSchedule
asserts:
- equal:
path: spec.template.spec.tolerations
value:
- key: key1
operator: Equal
value: value1
effect: NoSchedule
- key: key2
operator: Exists
effect: NoSchedule

View file

@@ -0,0 +1,90 @@
suite: Test spark operator rbac
templates:
- rbac.yaml
release:
name: spark-operator
tests:
- it: Should not render spark operator rbac resources if rbac.create is false and rbac.createClusterRole is false
set:
rbac:
create: false
createClusterRole: false
asserts:
- hasDocuments:
count: 0
- it: Should render spark operator cluster role if rbac.create is true
set:
rbac:
create: true
documentIndex: 0
asserts:
- containsDocument:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
name: spark-operator
- it: Should render spark operator cluster role if rbac.createClusterRole is true
set:
rbac:
createClusterRole: true
documentIndex: 0
asserts:
- containsDocument:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
name: spark-operator
- it: Should render spark operator cluster role binding if rbac.create is true
set:
rbac:
create: true
documentIndex: 1
asserts:
- containsDocument:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
name: spark-operator
- it: Should render spark operator cluster role binding correctly if rbac.createClusterRole is true
set:
rbac:
createClusterRole: true
release:
documentIndex: 1
asserts:
- containsDocument:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
name: spark-operator
- contains:
path: subjects
content:
kind: ServiceAccount
name: spark-operator
namespace: NAMESPACE
count: 1
- equal:
path: roleRef
value:
kind: ClusterRole
name: spark-operator
apiGroup: rbac.authorization.k8s.io
- it: Should add extra annotations to spark operator cluster role if rbac.annotations is set
set:
rbac:
annotations:
key1: value1
key2: value2
documentIndex: 0
asserts:
- equal:
path: metadata.annotations.key1
value: value1
- equal:
path: metadata.annotations.key2
value: value2

View file

@@ -0,0 +1,54 @@
suite: Test spark operator service account
templates:
- serviceaccount.yaml
release:
name: spark-operator
tests:
- it: Should not render service account if serviceAccounts.sparkoperator.create is false
set:
serviceAccounts:
sparkoperator:
create: false
asserts:
- hasDocuments:
count: 0
- it: Should render service account if serviceAccounts.sparkoperator.create is true
set:
serviceAccounts:
sparkoperator:
create: true
asserts:
- containsDocument:
apiVersion: v1
kind: ServiceAccount
name: spark-operator
- it: Should use the specified service account name if serviceAccounts.sparkoperator.name is set
set:
serviceAccounts:
sparkoperator:
name: custom-service-account
asserts:
- containsDocument:
apiVersion: v1
kind: ServiceAccount
name: custom-service-account
- it: Should add extra annotations if serviceAccounts.sparkoperator.annotations is set
set:
serviceAccounts:
sparkoperator:
annotations:
key1: value1
key2: value2
asserts:
- equal:
path: metadata.annotations.key1
value: value1
- equal:
path: metadata.annotations.key2
value: value2

View file

@@ -0,0 +1,133 @@
suite: Test spark rbac
templates:
- spark-rbac.yaml
release:
name: spark-operator
tests:
- it: Should not render spark rbac resources if rbac.create is false and rbac.createRole is false
set:
rbac:
create: false
createRole: false
asserts:
- hasDocuments:
count: 0
- it: Should render spark role if rbac.create is true
set:
rbac:
create: true
documentIndex: 0
asserts:
- containsDocument:
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
name: spark-role
- it: Should render spark role if rbac.createRole is true
set:
rbac:
createRole: true
documentIndex: 0
asserts:
- containsDocument:
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
name: spark-role
- it: Should render spark role binding if rbac.create is true
set:
rbac:
create: true
documentIndex: 1
asserts:
- containsDocument:
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
name: spark
- it: Should render spark role binding if rbac.createRole is true
set:
rbac:
createRole: true
documentIndex: 1
asserts:
- containsDocument:
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
name: spark
- it: Should create a single spark role with namespace "" by default
documentIndex: 0
asserts:
- containsDocument:
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
name: spark-role
namespace: ""
- it: Should create a single spark role binding with namespace "" by default
values:
- ../values.yaml
documentIndex: 1
asserts:
- containsDocument:
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
name: spark
namespace: ""
- it: Should render multiple spark roles if sparkJobNamespaces is set with multiple values
set:
sparkJobNamespaces:
- ns1
- ns2
documentIndex: 0
asserts:
- containsDocument:
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
name: spark-role
namespace: ns1
- it: Should render multiple spark role bindings if sparkJobNamespaces is set with multiple values
set:
sparkJobNamespaces:
- ns1
- ns2
documentIndex: 1
asserts:
- containsDocument:
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
name: spark
namespace: ns1
- it: Should render multiple spark roles if sparkJobNamespaces is set with multiple values
set:
sparkJobNamespaces:
- ns1
- ns2
documentIndex: 2
asserts:
- containsDocument:
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
name: spark-role
namespace: ns2
- it: Should render multiple spark role bindings if sparkJobNamespaces is set with multiple values
set:
sparkJobNamespaces:
- ns1
- ns2
documentIndex: 3
asserts:
- containsDocument:
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
name: spark
namespace: ns2

View file

@@ -0,0 +1,112 @@
suite: Test spark service account
templates:
- spark-serviceaccount.yaml
release:
name: spark-operator
tests:
- it: Should not render service account if serviceAccounts.spark.create is false
set:
serviceAccounts:
spark:
create: false
asserts:
- hasDocuments:
count: 0
- it: Should render service account if serviceAccounts.spark.create is true
set:
serviceAccounts:
spark:
create: true
asserts:
- containsDocument:
apiVersion: v1
kind: ServiceAccount
name: spark-operator-spark
- it: Should use the specified service account name if serviceAccounts.spark.name is set
set:
serviceAccounts:
spark:
name: spark
asserts:
- containsDocument:
apiVersion: v1
kind: ServiceAccount
name: spark
- it: Should add extra annotations if serviceAccounts.spark.annotations is set
set:
serviceAccounts:
spark:
annotations:
key1: value1
key2: value2
asserts:
- equal:
path: metadata.annotations.key1
value: value1
- equal:
path: metadata.annotations.key2
value: value2
- it: Should create multiple service accounts if sparkJobNamespaces is set
set:
serviceAccounts:
spark:
name: spark
sparkJobNamespaces:
- ns1
- ns2
- ns3
documentIndex: 0
asserts:
- hasDocuments:
count: 3
- containsDocument:
apiVersion: v1
kind: ServiceAccount
name: spark
namespace: ns1
- it: Should create multiple service accounts if sparkJobNamespaces is set
set:
serviceAccounts:
spark:
name: spark
sparkJobNamespaces:
- ns1
- ns2
- ns3
documentIndex: 1
asserts:
- hasDocuments:
count: 3
- containsDocument:
apiVersion: v1
kind: ServiceAccount
name: spark
namespace: ns2
- it: Should create multiple service accounts if sparkJobNamespaces is set
set:
serviceAccounts:
spark:
name: spark
sparkJobNamespaces:
- ns1
- ns2
- ns3
documentIndex: 2
asserts:
- hasDocuments:
count: 3
- containsDocument:
apiVersion: v1
kind: ServiceAccount
name: spark
namespace: ns3

View file

@@ -0,0 +1,31 @@
suite: Test spark operator webhook secret
templates:
- webhook/secret.yaml
release:
name: spark-operator
namespace: spark-operator
tests:
- it: Should not render the webhook secret if webhook.enable is false
asserts:
- hasDocuments:
count: 0
- it: Should render the webhook secret with empty data fields
set:
webhook:
enable: true
asserts:
- containsDocument:
apiVersion: v1
kind: Secret
name: spark-operator-webhook-certs
- equal:
path: data
value:
ca-key.pem: ""
ca-cert.pem: ""
server-key.pem: ""
server-cert.pem: ""

View file

@@ -0,0 +1,33 @@
suite: Test spark operator webhook service
templates:
- webhook/service.yaml
release:
name: spark-operator
tests:
- it: Should not render the webhook service if webhook.enable is false
set:
webhook:
enable: false
asserts:
- hasDocuments:
count: 0
- it: Should render the webhook service correctly if webhook.enable is true
set:
webhook:
enable: true
portName: webhook
asserts:
- containsDocument:
apiVersion: v1
kind: Service
name: spark-operator-webhook-svc
- equal:
path: spec.ports[0]
value:
port: 443
targetPort: webhook
name: webhook

View file

@@ -0,0 +1,189 @@
# Default values for spark-operator.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# -- Common labels to add to the resources
commonLabels: {}
# replicaCount -- Desired number of pods, leaderElection will be enabled
# if this is greater than 1
replicaCount: 1
image:
# -- Image repository
repository: docker.io/kubeflow/spark-operator
# -- Image pull policy
pullPolicy: IfNotPresent
# -- if set, override the image tag whose default is the chart appVersion.
tag: ""
# -- Image pull secrets
imagePullSecrets: []
# -- String to partially override `spark-operator.fullname` template (will maintain the release name)
nameOverride: ""
# -- String to override release name
fullnameOverride: ""
rbac:
# -- **DEPRECATED** use `createRole` and `createClusterRole`
create: false
# -- Create and use RBAC `Role` resources
createRole: true
# -- Create and use RBAC `ClusterRole` resources
createClusterRole: true
# -- Optional annotations for rbac
annotations: {}
serviceAccounts:
spark:
# -- Create a service account for spark apps
create: true
# -- Optional name for the spark service account
name: ""
# -- Optional annotations for the spark service account
annotations: {}
sparkoperator:
# -- Create a service account for the operator
create: true
# -- Optional name for the operator service account
name: ""
# -- Optional annotations for the operator service account
annotations: {}
# -- List of namespaces where to run spark jobs
sparkJobNamespaces:
- ""
# - ns1
# -- Operator concurrency, higher values might increase memory usage
controllerThreads: 10
# -- Operator resync interval. Note that the operator will respond to events (e.g. create, update)
# unrelated to this setting
resyncInterval: 30
uiService:
# -- Enable UI service creation for Spark application
enable: true
# -- Ingress URL format.
# Requires the UI service to be enabled by setting `uiService.enable` to true.
ingressUrlFormat: ""
# -- Set higher levels for more verbose logging
logLevel: 2
# -- Pod environment variable sources
envFrom: []
# podSecurityContext -- Pod security context
podSecurityContext: {}
# securityContext -- Operator container security context
securityContext: {}
# sidecars -- Sidecar containers
sidecars: []
# volumes -- Operator volumes
volumes: []
# volumeMounts -- Operator volumeMounts
volumeMounts: []
webhook:
# -- Enable webhook server
enable: false
# -- Webhook service port
port: 8080
# -- Webhook container port name and service target port name
portName: webhook
# -- The webhook server will only operate on namespaces with this label, specified in the form key1=value1,key2=value2.
# Empty string (default) will operate on all namespaces
namespaceSelector: ""
# -- The webhook will only operate on resources with this label (or labels), specified in the form key1=value1,key2=value2, OR key in (value1,value2).
# Empty string (default) will operate on all objects
objectSelector: ""
# -- Webhook timeout in seconds
timeout: 30
metrics:
# -- Enable prometheus metric scraping
enable: true
# -- Metrics port
port: 10254
# -- Metrics port name
portName: metrics
# -- Metrics serving endpoint
endpoint: /metrics
# -- Metric prefix, will be added to all exported metrics
prefix: ""
# -- Prometheus pod monitor for operator's pod.
podMonitor:
# -- If enabled, a pod monitor for operator's pod will be submitted. Note that prometheus metrics should be enabled as well.
enable: false
# -- Pod monitor labels
labels: {}
# -- The label to use to retrieve the job name from
jobLabel: spark-operator-podmonitor
# -- Prometheus metrics endpoint properties. `metrics.portName` will be used as a port
podMetricsEndpoint:
scheme: http
interval: 5s
# nodeSelector -- Node labels for pod assignment
nodeSelector: {}
# tolerations -- List of node taints to tolerate
tolerations: []
# affinity -- Affinity for pod assignment
affinity: {}
# podAnnotations -- Additional annotations to add to the pod
podAnnotations: {}
# podLabels -- Additional labels to add to the pod
podLabels: {}
# resources -- Pod resource requests and limits
# Note that each job submission will spawn a JVM within the Spark Operator Pod using "/usr/local/openjdk-11/bin/java -Xmx128m".
# Kubernetes may kill these Java processes at will to enforce resource limits. When that happens, you will see the following error:
# 'failed to run spark-submit for SparkApplication [...]: signal: killed' - when this happens, you may want to increase memory limits.
resources: {}
# limits:
# cpu: 100m
# memory: 300Mi
# requests:
# cpu: 100m
# memory: 300Mi
batchScheduler:
# -- Enable batch scheduler for spark jobs scheduling. If enabled, users can specify batch scheduler name in spark application
enable: false
resourceQuotaEnforcement:
# -- Whether to enable the ResourceQuota enforcement for SparkApplication resources.
# Requires the webhook to be enabled by setting `webhook.enable` to true.
# Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-resource-quota-enforcement.
enable: false
leaderElection:
# -- Leader election lock name.
# Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-leader-election-for-high-availability.
lockName: "spark-operator-lock"
# -- Optionally store the lock in another namespace. Defaults to operator's namespace
lockNamespace: ""
istio:
# -- When using `istio`, spark jobs need to run without a sidecar to properly terminate
enabled: false
# labelSelectorFilter -- A comma-separated list of key=value, or key labels to filter resources during watch and list based on the specified labels.
labelSelectorFilter: ""
# priorityClassName -- A priority class to be used for running spark-operator pod.
priorityClassName: ""

View file

@ -0,0 +1,10 @@
---
resources:
- spark-master-controller.yml
- spark-master-service.yml
- spark-ui-proxy-controller.yml
- spark-worker-controller.yml
- spark-cert-issuer.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

View file

@ -0,0 +1,15 @@
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: letsencrypt
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: drive@sunet.se
privateKeySecretRef:
name: letsencrypt
solvers:
- http01:
ingress:
class: nginx

View file

@ -0,0 +1,23 @@
kind: ReplicationController
apiVersion: v1
metadata:
name: spark-master-controller
spec:
replicas: 1
selector:
component: spark-master
template:
metadata:
labels:
component: spark-master
spec:
containers:
- name: spark-master
image: registry.k8s.io/spark:1.5.2_v1
command: ["/start-master"]
ports:
- containerPort: 7077
- containerPort: 8080
resources:
requests:
cpu: 100m

View file

@ -0,0 +1,14 @@
kind: Service
apiVersion: v1
metadata:
name: spark-master
spec:
ports:
- port: 7077
targetPort: 7077
name: spark
- port: 8080
targetPort: 8080
name: http
selector:
component: spark-master

View file

@ -0,0 +1,29 @@
kind: ReplicationController
apiVersion: v1
metadata:
name: spark-ui-proxy-controller
spec:
replicas: 1
selector:
component: spark-ui-proxy
template:
metadata:
labels:
component: spark-ui-proxy
spec:
containers:
- name: spark-ui-proxy
image: iguaziodocker/spark-ui-proxy:0.1.0
ports:
- containerPort: 80
resources:
requests:
cpu: 100m
args:
- spark-master:8080
livenessProbe:
httpGet:
path: /
port: 80
initialDelaySeconds: 120
timeoutSeconds: 5

View file

@ -0,0 +1,23 @@
kind: ReplicationController
apiVersion: v1
metadata:
name: spark-worker-controller
spec:
replicas: 2
selector:
component: spark-worker
template:
metadata:
labels:
component: spark-worker
spec:
containers:
- name: spark-worker
image: registry.k8s.io/spark:1.5.2_v1
command: ["/start-worker"]
ports:
- containerPort: 8081
resources:
requests:
cpu: 100m

View file

@ -0,0 +1,13 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources: [../../base/]
helmCharts:
- includeCRDs: true
name: spark-operator
releaseName: spark
valuesFile: ./values/values.yaml
version: 1.4.2
namespace: spark
helmGlobals:
chartHome: ../../base/charts/

View file

@ -0,0 +1,163 @@
# -- Common labels to add to the resources
commonLabels: {}
# replicaCount -- Desired number of pods; leaderElection will be enabled
# if this is greater than 1
replicaCount: 3
image:
# -- Image repository
repository: docker.io/kubeflow/spark-operator
# -- Image pull policy
pullPolicy: IfNotPresent
# -- if set, override the image tag whose default is the chart appVersion.
tag: ""
# -- Image pull secrets
imagePullSecrets: []
rbac:
# -- Create and use RBAC `Role` resources
createRole: true
# -- Create and use RBAC `ClusterRole` resources
createClusterRole: true
serviceAccounts:
spark:
# -- Create a service account for spark apps
create: true
# -- Optional name for the spark service account
name: ""
# -- Optional annotations for the spark service account
annotations: {}
sparkoperator:
# -- Create a service account for the operator
create: true
# -- Optional name for the operator service account
name: ""
# -- Optional annotations for the operator service account
annotations: {}
# -- List of namespaces where to run spark jobs
sparkJobNamespaces:
- "spark"
# -- Operator concurrency, higher values might increase memory usage
controllerThreads: 10
# -- Operator resync interval. Note that the operator will respond to events (e.g. create, update)
# unrelated to this setting
resyncInterval: 30
uiService:
# -- Enable UI service creation for Spark application
enable: true
# -- Ingress URL format.
# Requires the UI service to be enabled by setting `uiService.enable` to true.
ingressUrlFormat: ""
# -- Set higher levels for more verbose logging
logLevel: 2
# -- Pod environment variable sources
envFrom: []
# podSecurityContext -- Pod security context
podSecurityContext: {}
# securityContext -- Operator container security context
securityContext: {}
# sidecars -- Sidecar containers
sidecars: []
# volumes -- Operator volumes
volumes: []
# volumeMounts -- Operator volumeMounts
volumeMounts: []
webhook:
# -- Enable webhook server
enable: false
# -- Webhook service port
port: 8080
# -- Webhook container port name and service target port name
portName: webhook
# -- The webhook server will only operate on namespaces with this label, specified in the form key1=value1,key2=value2.
# Empty string (default) will operate on all namespaces
namespaceSelector: ""
# -- The webhook will only operate on resources with this label (or labels), specified in the form key1=value1,key2=value2, OR key in (value1,value2).
# Empty string (default) will operate on all objects
objectSelector: ""
# -- Webhook timeout in seconds
timeout: 30
metrics:
# -- Enable prometheus metric scraping
enable: true
# -- Metrics port
port: 10254
# -- Metrics port name
portName: metrics
# -- Metrics serving endpoint
endpoint: /metrics
# -- Metric prefix, will be added to all exported metrics
prefix: ""
# -- Prometheus pod monitor for operator's pod.
podMonitor:
# -- If enabled, a pod monitor for operator's pod will be submitted. Note that prometheus metrics should be enabled as well.
enable: false
# -- Pod monitor labels
labels: {}
# -- The label to use to retrieve the job name from
jobLabel: spark-operator-podmonitor
# -- Prometheus metrics endpoint properties. `metrics.portName` will be used as a port
podMetricsEndpoint:
scheme: http
interval: 5s
# nodeSelector -- Node labels for pod assignment
nodeSelector: {}
# tolerations -- List of node taints to tolerate
tolerations: []
# affinity -- Affinity for pod assignment
affinity: {}
# podAnnotations -- Additional annotations to add to the pod
podAnnotations: {}
# podLabels -- Additional labels to add to the pod
podLabels: {}
# resources -- Pod resource requests and limits
# Note that each job submission will spawn a JVM within the Spark Operator Pod using "/usr/local/openjdk-11/bin/java -Xmx128m".
# Kubernetes may kill these Java processes at will to enforce resource limits. When that happens, you will see the following error:
# 'failed to run spark-submit for SparkApplication [...]: signal: killed' - when this happens, you may want to increase memory limits.
resources: {}
# limits:
# cpu: 100m
# memory: 300Mi
# requests:
# cpu: 100m
# memory: 300Mi
batchScheduler:
# -- Enable batch scheduler for spark jobs scheduling. If enabled, users can specify batch scheduler name in spark application
enable: false
resourceQuotaEnforcement:
# -- Whether to enable the ResourceQuota enforcement for SparkApplication resources.
# Requires the webhook to be enabled by setting `webhook.enable` to true.
# Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-resource-quota-enforcement.
enable: false
leaderElection:
# -- Leader election lock name.
# Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-leader-election-for-high-availability.
lockName: "spark-operator-lock"

View file

@ -0,0 +1,14 @@
apiVersion: v2
appVersion: "461"
description: Fast distributed SQL query engine for big data analytics that helps you
explore your data universe
home: https://trino.io/
icon: https://trino.io/assets/trino.png
maintainers:
- name: Trino community
name: trino
sources:
- https://github.com/trinodb/charts
- https://github.com/trinodb/trino/tree/master/core/docker
type: application
version: 0.32.0

View file

@ -0,0 +1,862 @@
# trino
![Version: 0.32.0](https://img.shields.io/badge/Version-0.32.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 461](https://img.shields.io/badge/AppVersion-461-informational?style=flat-square)
Fast distributed SQL query engine for big data analytics that helps you explore your data universe
**Homepage:** <https://trino.io/>
## Source Code
* <https://github.com/trinodb/charts>
* <https://github.com/trinodb/trino/tree/master/core/docker>
## Values
* `nameOverride` - string, default: `nil`
Override resource names to avoid name conflicts when deploying multiple releases in the same namespace.
Example:
```yaml
coordinatorNameOverride: trino-coordinator-adhoc
workerNameOverride: trino-worker-adhoc
nameOverride: trino-adhoc
```
* `coordinatorNameOverride` - string, default: `nil`
* `workerNameOverride` - string, default: `nil`
* `image.registry` - string, default: `""`
Image registry, defaults to empty, which results in DockerHub usage
* `image.repository` - string, default: `"trinodb/trino"`
Repository location of the Trino image, typically `organization/imagename`
* `image.tag` - string, default: `""`
Image tag, defaults to the Trino release version specified as `appVersion` from Chart.yaml
* `image.digest` - string, default: `""`
Optional digest value of the image specified as `sha256:abcd...`. A specified value overrides `tag`.
* `image.useRepositoryAsSoleImageReference` - bool, default: `false`
When true, only the content in `repository` is used as image reference
* `image.pullPolicy` - string, default: `"IfNotPresent"`
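For instance, a sketch pinning the image by digest instead of tag (the digest value is a hypothetical placeholder):
```yaml
image:
  repository: trinodb/trino
  digest: "sha256:abcd..."  # placeholder, substitute a real image digest
```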
* `imagePullSecrets` - list, default: `[]`
An optional list of references to secrets in the same namespace to use for pulling images.
Example:
```yaml
imagePullSecrets:
- name: registry-credentials
```
* `server.workers` - int, default: `2`
* `server.node.environment` - string, default: `"production"`
* `server.node.dataDir` - string, default: `"/data/trino"`
* `server.node.pluginDir` - string, default: `"/usr/lib/trino/plugin"`
* `server.log.trino.level` - string, default: `"INFO"`
* `server.config.path` - string, default: `"/etc/trino"`
* `server.config.https.enabled` - bool, default: `false`
* `server.config.https.port` - int, default: `8443`
* `server.config.https.keystore.path` - string, default: `""`
* `server.config.authenticationType` - string, default: `""`
Trino supports multiple [authentication types](https://trino.io/docs/current/security/authentication-types.html): PASSWORD, CERTIFICATE, OAUTH2, JWT, KERBEROS.
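A minimal sketch (illustrative only) enabling password authentication:
```yaml
server:
  config:
    authenticationType: PASSWORD
```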
* `server.config.query.maxMemory` - string, default: `"4GB"`
* `server.exchangeManager` - object, default: `{}`
Mandatory [exchange manager configuration](https://trino.io/docs/current/admin/fault-tolerant-execution.html#id1). Used to set the name and location(s) of the spooling storage destination. To enable fault-tolerant execution, set the `retry-policy` property in `additionalConfigProperties`. Additional exchange manager configurations can be added to `additionalExchangeManagerProperties`.
Example:
```yaml
server:
exchangeManager:
name: "filesystem"
baseDir: "/tmp/trino-local-file-system-exchange-manager"
additionalConfigProperties:
- retry-policy=TASK
additionalExchangeManagerProperties:
- exchange.sink-buffer-pool-min-size=10
- exchange.sink-buffers-per-partition=2
- exchange.source-concurrent-readers=4
```
* `server.workerExtraConfig` - string, default: `""`
* `server.coordinatorExtraConfig` - string, default: `""`
* `server.autoscaling.enabled` - bool, default: `false`
* `server.autoscaling.maxReplicas` - int, default: `5`
* `server.autoscaling.targetCPUUtilizationPercentage` - int, default: `50`
Target average CPU utilization, represented as a percentage of requested CPU. To disable scaling based on CPU, set to an empty string.
* `server.autoscaling.targetMemoryUtilizationPercentage` - int, default: `80`
Target average memory utilization, represented as a percentage of requested memory. To disable scaling based on memory, set to an empty string.
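As a sketch (values are illustrative), enabling autoscaling on CPU only and disabling the memory target with an empty string:
```yaml
server:
  autoscaling:
    enabled: true
    maxReplicas: 5
    targetCPUUtilizationPercentage: 50
    targetMemoryUtilizationPercentage: ""
```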
* `server.autoscaling.behavior` - object, default: `{}`
Configuration for scaling up and down.
Example:
```yaml
scaleDown:
stabilizationWindowSeconds: 300
policies:
- type: Percent
value: 100
periodSeconds: 15
scaleUp:
stabilizationWindowSeconds: 0
policies:
- type: Percent
value: 100
periodSeconds: 15
- type: Pods
value: 4
periodSeconds: 15
selectPolicy: Max
```
* `accessControl` - object, default: `{}`
[System access control](https://trino.io/docs/current/security/built-in-system-access-control.html) configuration.
Set the type property to either:
* `configmap`, and provide the rule file contents in `rules`,
* `properties`, and provide configuration properties in `properties`.
Properties example:
```yaml
type: properties
properties: |
access-control.name=custom-access-control
access-control.custom_key=custom_value
```
Config map example:
```yaml
type: configmap
refreshPeriod: 60s
# Rules file is mounted to /etc/trino/access-control
configFile: "rules.json"
rules:
rules.json: |-
{
"catalogs": [
{
"user": "admin",
"catalog": "(mysql|system)",
"allow": "all"
},
{
"group": "finance|human_resources",
"catalog": "postgres",
"allow": true
},
{
"catalog": "hive",
"allow": "all"
},
{
"user": "alice",
"catalog": "postgresql",
"allow": "read-only"
},
{
"catalog": "system",
"allow": "none"
}
],
"schemas": [
{
"user": "admin",
"schema": ".*",
"owner": true
},
{
"user": "guest",
"owner": false
},
{
"catalog": "default",
"schema": "default",
"owner": true
}
]
}
```
* `resourceGroups` - object, default: `{}`
Resource groups file is mounted to /etc/trino/resource-groups/resource-groups.json
Example:
```yaml
resourceGroupsConfig: |-
{
"rootGroups": [
{
"name": "global",
"softMemoryLimit": "80%",
"hardConcurrencyLimit": 100,
"maxQueued": 100,
"schedulingPolicy": "fair",
"jmxExport": true,
"subGroups": [
{
"name": "admin",
"softMemoryLimit": "30%",
"hardConcurrencyLimit": 20,
"maxQueued": 10
},
{
"name": "finance_human_resources",
"softMemoryLimit": "20%",
"hardConcurrencyLimit": 15,
"maxQueued": 10
},
{
"name": "general",
"softMemoryLimit": "30%",
"hardConcurrencyLimit": 20,
"maxQueued": 10
},
{
"name": "readonly",
"softMemoryLimit": "10%",
"hardConcurrencyLimit": 5,
"maxQueued": 5
}
]
}
],
"selectors": [
{
"user": "admin",
"group": "global.admin"
},
{
"group": "finance|human_resources",
"group": "global.finance_human_resources"
},
{
"user": "alice",
"group": "global.readonly"
},
{
"group": "global.general"
}
]
}
```
* `additionalNodeProperties` - list, default: `[]`
[Additional node properties](https://trino.io/docs/current/installation/deployment.html#log-levels).
Example, assuming the NODE_ID environment variable has been set:
```yaml
- node.id=${NODE_ID}
```
* `additionalConfigProperties` - list, default: `[]`
[Additional config properties](https://trino.io/docs/current/admin/properties.html).
Example:
```yaml
- internal-communication.shared-secret=random-value-999
- http-server.process-forwarded=true
```
* `additionalLogProperties` - list, default: `[]`
[Additional log properties](https://trino.io/docs/current/installation/deployment.html#log-levels).
Example:
```yaml
- io.airlift=DEBUG
```
* `additionalExchangeManagerProperties` - list, default: `[]`
[Exchange manager properties](https://trino.io/docs/current/admin/fault-tolerant-execution.html#exchange-manager).
Example:
```yaml
- exchange.s3.region=object-store-region
- exchange.s3.endpoint=your-object-store-endpoint
- exchange.s3.aws-access-key=your-access-key
- exchange.s3.aws-secret-key=your-secret-key
```
* `eventListenerProperties` - list, default: `[]`
[Event listener](https://trino.io/docs/current/develop/event-listener.html#event-listener) properties. To configure multiple event listeners, add them in `coordinator.additionalConfigFiles` and `worker.additionalConfigFiles`, and set the `event-listener.config-files` property in `additionalConfigProperties` to their locations.
Example:
```yaml
- event-listener.name=custom-event-listener
- custom-property1=custom-value1
- custom-property2=custom-value2
```
* `catalogs` - object, default: `{"tpcds":"connector.name=tpcds\ntpcds.splits-per-node=4\n","tpch":"connector.name=tpch\ntpch.splits-per-node=4\n"}`
Configure [catalogs](https://trino.io/docs/current/installation/deployment.html#catalog-properties).
Example:
```yaml
objectstore: |
connector.name=iceberg
iceberg.catalog.type=glue
jmx: |
connector.name=memory
memory: |
connector.name=memory
memory.max-data-per-node=128MB
```
* `additionalCatalogs` - object, default: `{}`
Deprecated, use `catalogs` instead. Configure additional [catalogs](https://trino.io/docs/current/installation/deployment.html#catalog-properties).
* `env` - list, default: `[]`
additional environment variables added to every pod, specified as a list with explicit values
Example:
```yaml
- name: NAME
value: "value"
```
* `envFrom` - list, default: `[]`
additional environment variables added to every pod, specified as a list of either `ConfigMap` or `Secret` references
Example:
```yaml
- secretRef:
name: extra-secret
```
* `initContainers` - object, default: `{}`
Additional [containers that run to completion](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) during pod initialization.
Example:
```yaml
coordinator:
- name: init-coordinator
image: busybox:1.28
imagePullPolicy: IfNotPresent
command: ['sh', '-c', "until nslookup myservice.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for myservice; sleep 2; done"]
worker:
- name: init-worker
image: busybox:1.28
command: ['sh', '-c', 'echo The worker is running! && sleep 3600']
```
* `sidecarContainers` - object, default: `{}`
Additional [containers that start before](https://kubernetes.io/docs/concepts/workloads/pods/sidecar-containers/) the Trino container and continue running alongside it.
Example:
```yaml
coordinator:
- name: side-coordinator
image: busybox:1.28
imagePullPolicy: IfNotPresent
command: ['sleep', '1']
worker:
- name: side-worker
image: busybox:1.28
imagePullPolicy: IfNotPresent
command: ['sleep', '1']
```
* `securityContext` - object, default: `{"runAsGroup":1000,"runAsUser":1000}`
[Pod security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod) configuration. To remove the default, set it to null (or `~`).
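For example, a sketch removing the default entirely, or overriding it:
```yaml
securityContext: ~
# or, e.g.:
# securityContext:
#   runAsUser: 1000
#   runAsGroup: 1000
```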
* `containerSecurityContext` - object, default: `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]}}`
[Container security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container) configuration.
* `containerSecurityContext.allowPrivilegeEscalation` - bool, default: `false`
Control whether a process can gain more privileges than its parent process.
* `containerSecurityContext.capabilities.drop` - list, default: `["ALL"]`
A list of the Linux kernel capabilities that are dropped from every container. Valid values are listed in [the capabilities manual page](https://man7.org/linux/man-pages/man7/capabilities.7.html). Be sure to remove the "CAP_" prefix, which the kernel attaches to the names of its permissions.
* `shareProcessNamespace.coordinator` - bool, default: `false`
* `shareProcessNamespace.worker` - bool, default: `false`
* `service.annotations` - object, default: `{}`
* `service.type` - string, default: `"ClusterIP"`
* `service.port` - int, default: `8080`
* `service.nodePort` - string, default: `""`
The port on which the service is exposed on each host, for the `NodePort` type. If not set, Kubernetes will [allocate a port automatically](https://kubernetes.io/docs/concepts/services-networking/service/#nodeport-custom-port).
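A hypothetical `NodePort` sketch (the port value is an example, not a chart default):
```yaml
service:
  type: NodePort
  nodePort: "30080"
```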
* `auth` - object, default: `{}`
Available authentication methods.
Use username and password provided as a [password file](https://trino.io/docs/current/security/password-file.html#file-format):
```yaml
passwordAuth: "username:encrypted-password-with-htpasswd"
```
Alternatively, set the name of a secret containing this file in its `password.db` key:
```yaml
passwordAuthSecret: "trino-password-authentication"
```
Additionally, set [users' groups](https://trino.io/docs/current/security/group-file.html#file-format):
```yaml
refreshPeriod: 5s
groups: "group_name:user_1,user_2,user_3"
```
* `serviceAccount.create` - bool, default: `false`
Specifies whether a service account should be created
* `serviceAccount.name` - string, default: `""`
The name of the service account to use. If not set and create is true, a name is generated using the fullname template
* `serviceAccount.annotations` - object, default: `{}`
Annotations to add to the service account
* `configMounts` - list, default: `[]`
Allows mounting additional Trino configuration files from Kubernetes config maps on all nodes.
Example:
```yaml
- name: sample-config-mount
configMap: sample-config-map
path: /config-map/sample.json
```
* `secretMounts` - list, default: `[]`
Allows mounting additional Trino configuration files from Kubernetes secrets on all nodes.
Example:
```yaml
- name: sample-secret
secretName: sample-secret
path: /secrets/sample.json
```
* `coordinator.deployment.progressDeadlineSeconds` - int, default: `600`
The maximum time in seconds for a deployment to make progress before it is considered failed. The deployment controller continues to process failed deployments and a condition with a ProgressDeadlineExceeded reason is surfaced in the deployment status.
* `coordinator.deployment.revisionHistoryLimit` - int, default: `10`
The number of old ReplicaSets to retain to allow rollback.
* `coordinator.deployment.strategy` - object, default: `{"rollingUpdate":{"maxSurge":"25%","maxUnavailable":"25%"},"type":"RollingUpdate"}`
The deployment strategy to use to replace existing pods with new ones.
* `coordinator.jvm.maxHeapSize` - string, default: `"8G"`
* `coordinator.jvm.gcMethod.type` - string, default: `"UseG1GC"`
* `coordinator.jvm.gcMethod.g1.heapRegionSize` - string, default: `"32M"`
* `coordinator.config.memory.heapHeadroomPerNode` - string, default: `""`
* `coordinator.config.query.maxMemoryPerNode` - string, default: `"1GB"`
* `coordinator.additionalJVMConfig` - list, default: `[]`
* `coordinator.additionalExposedPorts` - object, default: `{}`
Additional ports configured in the coordinator container and the service.
Example:
```yaml
https:
servicePort: 8443
name: https
port: 8443
protocol: TCP
```
* `coordinator.resources` - object, default: `{}`
It is recommended not to specify default resources and to leave this as a conscious choice for the user. This also increases the chance that the chart runs in environments with limited resources, such as Minikube. If you do want to specify resources, use the following example, and adjust it as necessary.
Example:
```yaml
limits:
cpu: 100m
memory: 128Mi
requests:
cpu: 100m
memory: 128Mi
```
* `coordinator.livenessProbe` - object, default: `{}`
[Liveness probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes) options
Example:
```yaml
initialDelaySeconds: 20
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
```
* `coordinator.readinessProbe` - object, default: `{}`
[Readiness probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes)
Example:
```yaml
initialDelaySeconds: 20
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
```
* `coordinator.lifecycle` - object, default: `{}`
Coordinator container [lifecycle events](https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/)
Example:
```yaml
preStop:
exec:
command: ["/bin/sh", "-c", "sleep 120"]
```
* `coordinator.terminationGracePeriodSeconds` - int, default: `30`
* `coordinator.nodeSelector` - object, default: `{}`
* `coordinator.tolerations` - list, default: `[]`
* `coordinator.affinity` - object, default: `{}`
* `coordinator.additionalConfigFiles` - object, default: `{}`
Additional config files placed in the default configuration directory. Supports templating the files' contents with `tpl`.
Example:
```yaml
secret.txt: |
secret-value={{- .Values.someValue }}
```
* `coordinator.additionalVolumes` - list, default: `[]`
One or more additional volumes to add to the coordinator.
Example:
```yaml
- name: extras
emptyDir: {}
```
* `coordinator.additionalVolumeMounts` - list, default: `[]`
One or more additional volume mounts to add to the coordinator.
Example:
```yaml
- name: extras
  mountPath: /usr/share/extras
  readOnly: true
```
* `coordinator.annotations` - object, default: `{}`
* `coordinator.labels` - object, default: `{}`
* `coordinator.configMounts` - list, default: `[]`
Allows mounting additional Trino configuration files from Kubernetes config maps on the coordinator node.
Example:
```yaml
- name: sample-config-mount
configMap: sample-config-mount
path: /config-mount/sample.json
```
* `coordinator.secretMounts` - list, default: `[]`
Allows mounting additional Trino configuration files from Kubernetes secrets on the coordinator node.
Example:
```yaml
- name: sample-secret
secretName: sample-secret
path: /secrets/sample.json
```
* `worker.deployment.progressDeadlineSeconds` - int, default: `600`
The maximum time in seconds for a deployment to make progress before it is considered failed. The deployment controller continues to process failed deployments and a condition with a ProgressDeadlineExceeded reason is surfaced in the deployment status.
* `worker.deployment.revisionHistoryLimit` - int, default: `10`
The number of old ReplicaSets to retain to allow rollback.
* `worker.deployment.strategy` - object, default: `{"rollingUpdate":{"maxSurge":"25%","maxUnavailable":"25%"},"type":"RollingUpdate"}`
The deployment strategy to use to replace existing pods with new ones.
* `worker.jvm.maxHeapSize` - string, default: `"8G"`
* `worker.jvm.gcMethod.type` - string, default: `"UseG1GC"`
* `worker.jvm.gcMethod.g1.heapRegionSize` - string, default: `"32M"`
* `worker.config.memory.heapHeadroomPerNode` - string, default: `""`
* `worker.config.query.maxMemoryPerNode` - string, default: `"1GB"`
* `worker.additionalJVMConfig` - list, default: `[]`
* `worker.additionalExposedPorts` - object, default: `{}`
Additional container ports configured in all worker pods.
Example:
```yaml
https:
servicePort: 8443
name: https
port: 8443
protocol: TCP
```
* `worker.resources` - object, default: `{}`
It is recommended not to specify default resources and to leave this as a conscious choice for the user. This also increases the chance that the chart runs in environments with limited resources, such as Minikube. If you do want to specify resources, use the following example, and adjust it as necessary.
Example:
```yaml
limits:
cpu: 100m
memory: 128Mi
requests:
cpu: 100m
memory: 128Mi
```
* `worker.livenessProbe` - object, default: `{}`
[Liveness probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes)
Example:
```yaml
initialDelaySeconds: 20
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
```
* `worker.readinessProbe` - object, default: `{}`
[Readiness probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes)
Example:
```yaml
initialDelaySeconds: 20
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
```
* `worker.lifecycle` - object, default: `{}`
Worker container [lifecycle events](https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/) Setting `worker.lifecycle` conflicts with `worker.gracefulShutdown`.
Example:
```yaml
preStop:
exec:
command: ["/bin/sh", "-c", "sleep 120"]
```
* `worker.gracefulShutdown` - object, default: `{"enabled":false,"gracePeriodSeconds":120}`
Configure [graceful shutdown](https://trino.io/docs/current/admin/graceful-shutdown.html) in order to ensure that workers terminate without affecting running queries, given a sufficient grace period. When enabled, the value of `worker.terminationGracePeriodSeconds` must be at least two times greater than the configured `gracePeriodSeconds`. Enabling `worker.gracefulShutdown` conflicts with `worker.lifecycle`. When a custom `worker.lifecycle` configuration needs to be used, graceful shutdown must be configured manually.
Example:
```yaml
gracefulShutdown:
enabled: true
gracePeriodSeconds: 120
```
* `worker.terminationGracePeriodSeconds` - int, default: `30`
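Combining the two settings above, a sketch honoring the 2x rule stated under `worker.gracefulShutdown`:
```yaml
worker:
  gracefulShutdown:
    enabled: true
    gracePeriodSeconds: 120
  terminationGracePeriodSeconds: 240
```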
* `worker.nodeSelector` - object, default: `{}`
* `worker.tolerations` - list, default: `[]`
* `worker.affinity` - object, default: `{}`
* `worker.additionalConfigFiles` - object, default: `{}`
Additional config files placed in the default configuration directory. Supports templating the files' contents with `tpl`.
Example:
```yaml
secret.txt: |
secret-value={{- .Values.someValue }}
```
* `worker.additionalVolumes` - list, default: `[]`
One or more additional volumes to add to all workers.
Example:
```yaml
- name: extras
emptyDir: {}
```
* `worker.additionalVolumeMounts` - list, default: `[]`
One or more additional volume mounts to add to all workers.
Example:
```yaml
- name: extras
mountPath: /usr/share/extras
readOnly: true
```
* `worker.annotations` - object, default: `{}`
* `worker.labels` - object, default: `{}`
* `worker.configMounts` - list, default: `[]`
Allows mounting additional Trino configuration files from Kubernetes config maps on all worker nodes.
Example:
```yaml
- name: sample-config-mount
configMap: sample-config-mount
path: /config-mount/sample.json
```
* `worker.secretMounts` - list, default: `[]`
Allows mounting additional Trino configuration files from Kubernetes secrets on all worker nodes.
Example:
```yaml
- name: sample-secret
secretName: sample-secret
path: /secrets/sample.json
```
* `kafka.mountPath` - string, default: `"/etc/trino/schemas"`
* `kafka.tableDescriptions` - object, default: `{}`
Custom kafka table descriptions that will be mounted in mountPath.
Example:
```yaml
testschema.json: |-
{
"tableName": "testtable",
"schemaName": "testschema",
"topicName": "testtopic",
"key": {
"dataFormat": "json",
"fields": [
{
"name": "_key",
"dataFormat": "VARCHAR",
"type": "VARCHAR",
"hidden": "false"
}
]
},
"message": {
"dataFormat": "json",
"fields": [
{
"name": "id",
"mapping": "id",
"type": "BIGINT"
},
{
"name": "test_field",
"mapping": "test_field",
"type": "VARCHAR"
}
]
}
}
```
* `jmx.enabled` - bool, default: `false`
Set to true to enable the RMI server to expose Trino's [JMX metrics](https://trino.io/docs/current/admin/jmx.html).
* `jmx.registryPort` - int, default: `9080`
* `jmx.serverPort` - int, default: `9081`
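A minimal sketch enabling the JMX RMI server (the ports shown are the chart defaults):
```yaml
jmx:
  enabled: true
  registryPort: 9080
  serverPort: 9081
```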
* `jmx.exporter.enabled` - bool, default: `false`
Set to true to export JMX Metrics via HTTP for [Prometheus](https://github.com/prometheus/jmx_exporter) consumption
* `jmx.exporter.image` - string, default: `"bitnami/jmx-exporter:latest"`
* `jmx.exporter.pullPolicy` - string, default: `"Always"`
* `jmx.exporter.port` - int, default: `5556`
* `jmx.exporter.configProperties` - string, default: `""`
The string value is templated using `tpl`. The JMX config properties file is mounted to `/etc/jmx-exporter/jmx-exporter-config.yaml`.
Example:
```yaml
configProperties: |-
hostPort: localhost:{{- .Values.jmx.registryPort }}
startDelaySeconds: 0
ssl: false
lowercaseOutputName: false
lowercaseOutputLabelNames: false
includeObjectNames: ["java.lang:type=Threading"]
autoExcludeObjectNameAttributes: true
excludeObjectNameAttributes:
"java.lang:type=OperatingSystem":
- "ObjectName"
"java.lang:type=Runtime":
- "ClassPath"
- "SystemProperties"
rules:
- pattern: 'java\.lang<type=Threading><(.*)>ThreadCount: (.*)'
name: java_lang_Threading_ThreadCount
value: '$2'
help: 'ThreadCount (java.lang<type=Threading><>ThreadCount)'
type: UNTYPED
```
* `jmx.exporter.securityContext` - object, default: `{}`
* `jmx.exporter.resources` - object, default: `{}`
It is recommended not to specify default resources and to leave this as a conscious choice for the user. This also increases the chance that the chart runs in environments with limited resources, such as Minikube. If you do want to specify resources, use the following example, and adjust it as necessary.
Example:
```yaml
limits:
cpu: 100m
memory: 128Mi
requests:
cpu: 100m
memory: 128Mi
```
* `jmx.coordinator` - object, default: `{}`
Override JMX configurations for the Trino coordinator.
Example:
```yaml
coordinator:
enabled: true
exporter:
enable: true
configProperties: |-
hostPort: localhost:{{- .Values.jmx.registryPort }}
startDelaySeconds: 0
ssl: false
```
* `jmx.worker` - object, default: `{}`
Override JMX configurations for the Trino workers.
Example:
```yaml
worker:
enabled: true
exporter:
enable: true
```
* `serviceMonitor.enabled` - bool, default: `false`
Set to true to create resources for the [prometheus-operator](https://github.com/prometheus-operator/prometheus-operator).
* `serviceMonitor.labels` - object, default: `{"prometheus":"kube-prometheus"}`
Labels for serviceMonitor, so that Prometheus can select it
* `serviceMonitor.interval` - string, default: `"30s"`
The serviceMonitor web endpoint interval
* `serviceMonitor.coordinator` - object, default: `{}`
Override ServiceMonitor configurations for the Trino coordinator.
Example:
```yaml
coordinator:
enabled: true
labels:
prometheus: my-prometheus
```
* `serviceMonitor.worker` - object, default: `{}`
Override ServiceMonitor configurations for the Trino workers.
Example:
```yaml
worker:
enabled: true
labels:
prometheus: my-prometheus
```
* `commonLabels` - object, default: `{}`
Labels that get applied to every resource's metadata
* `ingress.enabled` - bool, default: `false`
* `ingress.className` - string, default: `""`
* `ingress.annotations` - object, default: `{}`
* `ingress.hosts` - list, default: `[]`
[Ingress rules](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules).
Example:
```yaml
- host: trino.example.com
paths:
- path: /
pathType: ImplementationSpecific
```
* `ingress.tls` - list, default: `[]`
Ingress [TLS](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls) configuration.
Example:
```yaml
- secretName: chart-example-tls
hosts:
- chart-example.local
```
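A combined ingress sketch (the hostname and the cert-manager annotation are illustrative assumptions, not chart defaults):
```yaml
ingress:
  enabled: true
  className: nginx
  annotations:
    cert-manager.io/issuer: letsencrypt
  hosts:
    - host: trino.example.com
      paths:
        - path: /
          pathType: ImplementationSpecific
  tls:
    - secretName: chart-example-tls
      hosts:
        - trino.example.com
```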
* `networkPolicy.enabled` - bool, default: `false`
Set to true to enable Trino pod protection with a [NetworkPolicy](https://kubernetes.io/docs/concepts/services-networking/network-policies/). By default, the NetworkPolicy will only allow Trino pods to communicate with each other.
> [!NOTE]
> - NetworkPolicies cannot block the ingress traffic coming directly
> from the Kubernetes node on which the Pod is running,
> and are thus incompatible with services of type `NodePort`.
> - When using NetworkPolicies together with JMX metrics export,
> additional ingress rules might be required to allow metric scraping.
* `networkPolicy.ingress` - list, default: `[]`
Additional ingress rules to apply to the Trino pods.
Example:
```yaml
- from:
- ipBlock:
cidr: 172.17.0.0/16
except:
- 172.17.1.0/24
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: prometheus
- podSelector:
matchLabels:
role: backend-app
ports:
- protocol: TCP
port: 8080
- protocol: TCP
port: 5556
```
* `networkPolicy.egress` - list, default: `[]`
Egress rules to apply to the Trino pods.
Example:
```yaml
- to:
- podSelector:
matchLabels:
role: log-ingestor
ports:
- protocol: TCP
port: 9999
```
----------------------------------------------
Autogenerated from chart metadata using [helm-docs v1.14.2](https://github.com/norwoodj/helm-docs/releases/v1.14.2)

View file

@ -0,0 +1,25 @@
{{ template "chart.header" . }}
{{ template "chart.deprecationWarning" . }}
{{ template "chart.badgesSection" . }}
{{ template "chart.description" . }}
{{ template "chart.homepageLine" . }}
{{ template "chart.sourcesSection" . }}
{{ template "chart.requirementsSection" . }}
{{ template "chart.valuesHeader" . }}
{{- range .Values }}
* `{{ .Key }}` - {{ .Type }}, default: {{ if .Default }}{{ .Default }}{{ else }}{{ .AutoDefault }}{{ end }}{{ if or .Description .AutoDescription }} {{ end }}
{{- if .Description }}
{{ .Description | nindent 2 }}
{{- else if .AutoDescription }}
{{ .AutoDescription | nindent 2 }}
{{- end }}
{{- end }}
{{ template "helm-docs.versionFooter" . }}

View file

@ -0,0 +1,10 @@
Get the application URL by running these commands:
{{- if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "trino.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} --selector "app.kubernetes.io/name={{ template "trino.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=coordinator" --output name)
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl port-forward $POD_NAME 8080:8080
{{- end }}

View file

@ -0,0 +1,141 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "trino.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "trino.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if hasPrefix .Release.Name $name }}
{{- $name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "trino.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- define "trino.coordinator" -}}
{{- if .Values.coordinatorNameOverride }}
{{- .Values.coordinatorNameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if hasPrefix .Release.Name $name }}
{{- printf "%s-%s" $name "coordinator" | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s-%s" .Release.Name $name "coordinator" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{- define "trino.worker" -}}
{{- if .Values.workerNameOverride }}
{{- .Values.workerNameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if hasPrefix .Release.Name $name }}
{{- printf "%s-%s" $name "worker" | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s-%s" .Release.Name $name "worker" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{- define "trino.catalog" -}}
{{ template "trino.fullname" . }}-catalog
{{- end -}}
{{/*
Common labels
*/}}
{{- define "trino.labels" -}}
helm.sh/chart: {{ include "trino.chart" . }}
{{ include "trino.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- if .Values.commonLabels }}
{{ tpl (toYaml .Values.commonLabels) . }}
{{- end }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "trino.selectorLabels" -}}
app.kubernetes.io/name: {{ include "trino.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "trino.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "trino.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
{{/*
Return the proper image name
{{ include "trino.image" . }}
Code is inspired from bitnami/common
*/}}
{{- define "trino.image" -}}
{{- $repositoryName := .Values.image.repository -}}
{{- if .Values.image.useRepositoryAsSoleImageReference -}}
{{- printf "%s" $repositoryName -}}
{{- else -}}
{{- $repositoryName := .Values.image.repository -}}
{{- $registryName := .Values.image.registry -}}
{{- $separator := ":" -}}
{{- $termination := (default .Chart.AppVersion .Values.image.tag) | toString -}}
{{- if .Values.image.digest }}
{{- $separator = "@" -}}
{{- $termination = .Values.image.digest | toString -}}
{{- end -}}
{{- if $registryName }}
{{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}}
{{- else -}}
{{- printf "%s%s%s" $repositoryName $separator $termination -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create the name of the file auth secret to use
*/}}
{{- define "trino.fileAuthSecretName" -}}
{{- if and .Values.auth .Values.auth.passwordAuthSecret }}
{{- .Values.auth.passwordAuthSecret | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if hasPrefix .Release.Name $name }}
{{- printf "%s-%s" $name "file-authentication" | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s-%s" .Release.Name $name "file-authentication" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

View file

@ -0,0 +1,37 @@
{{- if .Values.server.autoscaling.enabled -}}
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: {{ template "trino.worker" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "trino.labels" . | nindent 4 }}
spec:
maxReplicas: {{ .Values.server.autoscaling.maxReplicas }}
minReplicas: {{ .Values.server.workers }}
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ template "trino.worker" . }}
metrics:
{{- if .Values.server.autoscaling.targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
target:
type: Utilization
averageUtilization: {{ .Values.server.autoscaling.targetMemoryUtilizationPercentage }}
{{- end }}
{{- if .Values.server.autoscaling.targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: {{ .Values.server.autoscaling.targetCPUUtilizationPercentage }}
{{- end }}
{{ if .Values.server.autoscaling.behavior -}}
behavior:
{{- toYaml .Values.server.autoscaling.behavior | nindent 4 }}
{{- end }}
{{- end }}

View file

@ -0,0 +1,14 @@
{{- if eq .Values.accessControl.type "configmap" }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "trino.fullname" . }}-access-control-volume-coordinator
namespace: {{ .Release.Namespace }}
labels:
{{- include "trino.labels" . | nindent 4 }}
app.kubernetes.io/component: coordinator
data:
{{- range $key, $val := .Values.accessControl.rules }}
{{ $key }}: {{ $val | quote }}
{{- end }}
{{- end }}

View file

@ -0,0 +1,22 @@
{{- if .Values.worker.gracefulShutdown.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "trino.fullname" . }}-access-control-volume-worker
namespace: {{ .Release.Namespace }}
labels:
{{- include "trino.labels" . | nindent 4 }}
app.kubernetes.io/component: worker
data:
graceful-shutdown-rules.json: >-
{
"system_information": [
{
"allow": [
"write"
],
"user": "admin"
}
]
}
{{- end }}

View file

@ -0,0 +1,16 @@
{{- if or .Values.catalogs .Values.additionalCatalogs }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "trino.catalog" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "trino.labels" . | nindent 4 }}
app.kubernetes.io/component: catalogs
data:
{{- $merged := merge .Values.catalogs .Values.additionalCatalogs }}
{{- range $catalogName, $catalogProperties := $merged }}
{{ $catalogName }}.properties: |
{{- $catalogProperties | nindent 4 }}
{{- end }}
{{- end }}

View file

@ -0,0 +1,180 @@
{{- $coordinatorJmx := merge .Values.jmx.coordinator (omit .Values.jmx "coordinator" "worker") -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "trino.coordinator" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "trino.labels" . | nindent 4 }}
app.kubernetes.io/component: coordinator
data:
node.properties: |
node.environment={{ .Values.server.node.environment }}
node.data-dir={{ .Values.server.node.dataDir }}
plugin.dir={{ .Values.server.node.pluginDir }}
{{- range $configValue := .Values.additionalNodeProperties }}
{{ $configValue }}
{{- end }}
jvm.config: |
-server
-agentpath:/usr/lib/trino/bin/libjvmkill.so
-Xmx{{ .Values.coordinator.jvm.maxHeapSize }}
-XX:+{{ .Values.coordinator.jvm.gcMethod.type }}
-XX:G1HeapRegionSize={{ .Values.coordinator.jvm.gcMethod.g1.heapRegionSize }}
-XX:+ExplicitGCInvokesConcurrent
-XX:+HeapDumpOnOutOfMemoryError
-XX:+ExitOnOutOfMemoryError
-XX:-OmitStackTraceInFastThrow
-XX:ReservedCodeCacheSize=512M
-XX:PerMethodRecompilationCutoff=10000
-XX:PerBytecodeRecompilationCutoff=10000
-Djdk.attach.allowAttachSelf=true
-Djdk.nio.maxCachedBufferSize=2000000
# Allow loading dynamic agent used by JOL
-XX:+EnableDynamicAgentLoading
{{/* only check the version number if the image is not overridden in any way */}}
{{- with .Values.image -}}
{{- if and (eq .repository "trinodb/trino") (not .useRepositoryAsSoleImageReference) (not .registry) (not .digest) (gt (default $.Chart.AppVersion .tag | int ) 447 ) }}
# https://bugs.openjdk.org/browse/JDK-8329528
-XX:+UnlockDiagnosticVMOptions
-XX:G1NumCollectionsKeepPinned=10000000
{{- end }}{{/* if */}}
{{- end }}{{/* with */}}
{{- range $configValue := .Values.coordinator.additionalJVMConfig }}
{{ $configValue }}
{{- end }}
{{- if $coordinatorJmx.enabled }}
-Dcom.sun.management.jmxremote.rmi.port={{- $coordinatorJmx.serverPort }}
{{- end }}
config.properties: |
coordinator=true
{{- if gt (int .Values.server.workers) 0 }}
node-scheduler.include-coordinator=false
{{- else }}
node-scheduler.include-coordinator=true
{{- end }}
http-server.http.port={{ .Values.service.port }}
query.max-memory={{ .Values.server.config.query.maxMemory }}
query.max-memory-per-node={{ .Values.coordinator.config.query.maxMemoryPerNode }}
{{- if .Values.coordinator.config.memory.heapHeadroomPerNode }}
memory.heap-headroom-per-node={{ .Values.coordinator.config.memory.heapHeadroomPerNode }}
{{- end }}
discovery.uri=http://localhost:{{ .Values.service.port }}
{{- if .Values.server.config.authenticationType }}
http-server.authentication.type={{ .Values.server.config.authenticationType }}
{{- end }}
{{- range $configValue := .Values.additionalConfigProperties }}
{{ $configValue }}
{{- end }}
{{- if .Values.server.config.https.enabled }}
http-server.https.enabled=true
http-server.https.port={{ .Values.server.config.https.port }}
http-server.https.keystore.path={{ .Values.server.config.https.keystore.path }}
{{- end }}
{{- if $coordinatorJmx.enabled }}
jmx.rmiregistry.port={{- $coordinatorJmx.registryPort }}
jmx.rmiserver.port={{- $coordinatorJmx.serverPort }}
{{- end }}
{{- if .Values.worker.gracefulShutdown.enabled }}
shutdown.grace-period={{- .Values.worker.gracefulShutdown.gracePeriodSeconds -}}s
{{- end }}
{{- if .Values.server.coordinatorExtraConfig }}
{{- .Values.server.coordinatorExtraConfig | nindent 4 }}
{{- end }}
{{- if .Values.accessControl }}
{{- if eq .Values.accessControl.type "configmap" }}
access-control.properties: |
access-control.name=file
{{- if .Values.accessControl.refreshPeriod }}
security.refresh-period={{ .Values.accessControl.refreshPeriod }}
{{- end }}
security.config-file={{ .Values.server.config.path }}/access-control/{{ .Values.accessControl.configFile | default "rules.json" }}
{{- else if eq .Values.accessControl.type "properties" }}
access-control.properties: |
{{- if .Values.accessControl.properties }}
{{- .Values.accessControl.properties | nindent 4 }}
{{- else}}
{{- fail "accessControl.properties is required when accessControl.type is 'properties'." }}
{{- end }}
{{- else}}
{{- fail "Invalid accessControl.type value. It must be either 'configmap' or 'properties'." }}
{{- end }}
{{- end }}
{{- if .Values.resourceGroups }}
resource-groups.properties: |
resource-groups.configuration-manager=file
resource-groups.config-file={{ .Values.server.config.path }}/resource-groups/resource-groups.json
{{- end }}
{{- if .Values.server.exchangeManager }}
exchange-manager.properties: |
exchange-manager.name={{ .Values.server.exchangeManager.name }}
exchange.base-directories={{ .Values.server.exchangeManager.baseDir }}
{{- range $configValue := .Values.additionalExchangeManagerProperties }}
{{ $configValue }}
{{- end }}
{{- end }}
log.properties: |
io.trino={{ .Values.server.log.trino.level }}
{{- range $configValue := .Values.additionalLogProperties }}
{{ $configValue }}
{{- end }}
{{- if contains "PASSWORD" .Values.server.config.authenticationType }}{{- if not (index .Values.coordinator.additionalConfigFiles "password-authenticator.properties") }}
password-authenticator.properties: |
password-authenticator.name=file
file.password-file={{ .Values.server.config.path }}/auth/password.db
{{- end }}{{- end }}
{{- if .Values.auth.groups }}{{- if not (index .Values.coordinator.additionalConfigFiles "group-provider.properties") }}
group-provider.properties: |
group-provider.name=file
file.group-file={{ .Values.server.config.path }}/auth/group.db
{{- if .Values.auth.refreshPeriod }}
file.refresh-period={{ .Values.auth.refreshPeriod }}
{{- end }}
{{- end }}{{- end }}
{{ if .Values.eventListenerProperties }}
event-listener.properties: |
{{- range $configValue := .Values.eventListenerProperties }}
{{ $configValue }}
{{- end }}
{{ end }}
{{- range $fileName, $fileContent := .Values.coordinator.additionalConfigFiles }}
{{ $fileName }}: |
{{- tpl $fileContent $ | nindent 4 }}
{{- end }}
{{- if .Values.resourceGroups }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "trino.fullname" . }}-resource-groups-volume-coordinator
namespace: {{ .Release.Namespace }}
labels:
{{- include "trino.labels" . | nindent 4 }}
app.kubernetes.io/component: coordinator
data:
resource-groups.json: |-
{{- .Values.resourceGroups.resourceGroupsConfig | nindent 4 }}
{{- end }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "trino.fullname" . }}-schemas-volume-coordinator
namespace: {{ .Release.Namespace }}
labels:
{{- include "trino.labels" . | nindent 4 }}
app.kubernetes.io/component: coordinator
data:
{{- range $key, $val := .Values.kafka.tableDescriptions }}
{{ $key }}: {{ $val | quote }}
{{- end }}

View file

@ -0,0 +1,29 @@
{{- $coordinatorJmx := merge .Values.jmx.coordinator (omit .Values.jmx "coordinator" "worker") -}}
{{- if $coordinatorJmx.exporter.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "trino.fullname" . }}-jmx-exporter-config-coordinator
namespace: {{ .Release.Namespace }}
labels:
{{- include "trino.labels" . | nindent 4 }}
app.kubernetes.io/component: jmx
data:
jmx-exporter-config.yaml: |-
{{- tpl $coordinatorJmx.exporter.configProperties . | nindent 4 }}
{{- end }}
{{- $workerJmx := merge .Values.jmx.worker (omit .Values.jmx "coordinator" "worker") -}}
{{- if $workerJmx.exporter.enabled }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "trino.fullname" . }}-jmx-exporter-config-worker
namespace: {{ .Release.Namespace }}
labels:
{{- include "trino.labels" . | nindent 4 }}
app.kubernetes.io/component: jmx
data:
jmx-exporter-config.yaml: |-
{{- tpl $workerJmx.exporter.configProperties . | nindent 4 }}
{{- end }}

View file

@ -0,0 +1,120 @@
{{- $workerJmx := merge .Values.jmx.worker (omit .Values.jmx "coordinator" "worker") -}}
{{- if gt (int .Values.server.workers) 0 }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "trino.worker" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "trino.labels" . | nindent 4 }}
app.kubernetes.io/component: worker
data:
node.properties: |
node.environment={{ .Values.server.node.environment }}
node.data-dir={{ .Values.server.node.dataDir }}
plugin.dir={{ .Values.server.node.pluginDir }}
{{- range $configValue := .Values.additionalNodeProperties }}
{{ $configValue }}
{{- end }}
jvm.config: |
-server
-agentpath:/usr/lib/trino/bin/libjvmkill.so
-Xmx{{ .Values.worker.jvm.maxHeapSize }}
-XX:+{{ .Values.worker.jvm.gcMethod.type }}
-XX:G1HeapRegionSize={{ .Values.worker.jvm.gcMethod.g1.heapRegionSize }}
-XX:+ExplicitGCInvokesConcurrent
-XX:+HeapDumpOnOutOfMemoryError
-XX:+ExitOnOutOfMemoryError
-XX:-OmitStackTraceInFastThrow
-XX:ReservedCodeCacheSize=512M
-XX:PerMethodRecompilationCutoff=10000
-XX:PerBytecodeRecompilationCutoff=10000
-Djdk.attach.allowAttachSelf=true
-Djdk.nio.maxCachedBufferSize=2000000
# Allow loading dynamic agent used by JOL
-XX:+EnableDynamicAgentLoading
{{/* only check the version number if the image is not overridden in any way */}}
{{- with .Values.image -}}
{{- if and (eq .repository "trinodb/trino") (not .useRepositoryAsSoleImageReference) (not .registry) (not .digest) (gt (default $.Chart.AppVersion .tag | int ) 447 ) }}
# https://bugs.openjdk.org/browse/JDK-8329528
-XX:+UnlockDiagnosticVMOptions
-XX:G1NumCollectionsKeepPinned=10000000
{{- end }}{{/* if */}}
{{- end }}{{/* with */}}
{{- range $configValue := .Values.worker.additionalJVMConfig }}
{{ $configValue }}
{{- end }}
{{- if $workerJmx.enabled }}
-Dcom.sun.management.jmxremote.rmi.port={{- $workerJmx.serverPort }}
{{- end }}
config.properties: |
coordinator=false
http-server.http.port={{ .Values.service.port }}
query.max-memory={{ .Values.server.config.query.maxMemory }}
query.max-memory-per-node={{ .Values.worker.config.query.maxMemoryPerNode }}
{{- if .Values.worker.config.memory.heapHeadroomPerNode }}
memory.heap-headroom-per-node={{ .Values.worker.config.memory.heapHeadroomPerNode }}
{{- end }}
discovery.uri=http://{{ template "trino.fullname" . }}:{{ .Values.service.port }}
{{- range $configValue := .Values.additionalConfigProperties }}
{{ $configValue }}
{{- end }}
{{- if $workerJmx.enabled }}
jmx.rmiregistry.port={{- $workerJmx.registryPort }}
jmx.rmiserver.port={{- $workerJmx.serverPort }}
{{- end }}
{{- if .Values.worker.gracefulShutdown.enabled }}
shutdown.grace-period={{- .Values.worker.gracefulShutdown.gracePeriodSeconds -}}s
{{- end }}
{{- if .Values.server.workerExtraConfig }}
{{- .Values.server.workerExtraConfig | nindent 4 }}
{{- end }}
{{- if .Values.worker.gracefulShutdown.enabled }}
access-control.properties: |
access-control.name=file
security.config-file={{ .Values.server.config.path }}/access-control/graceful-shutdown-rules.json
{{- end }}
{{- if .Values.server.exchangeManager }}
exchange-manager.properties: |
exchange-manager.name={{ .Values.server.exchangeManager.name }}
exchange.base-directories={{ .Values.server.exchangeManager.baseDir }}
{{- range $configValue := .Values.additionalExchangeManagerProperties }}
{{ $configValue }}
{{- end }}
{{- end }}
log.properties: |
io.trino={{ .Values.server.log.trino.level }}
{{- range $configValue := .Values.additionalLogProperties }}
{{ $configValue }}
{{- end }}
{{ if .Values.eventListenerProperties }}
event-listener.properties: |
{{- range $configValue := .Values.eventListenerProperties }}
{{ $configValue }}
{{- end }}
{{ end }}
{{- range $fileName, $fileContent := .Values.worker.additionalConfigFiles }}
{{ $fileName }}: |
{{- tpl $fileContent $ | nindent 4 }}
{{- end }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "trino.fullname" . }}-schemas-volume-worker
namespace: {{ .Release.Namespace }}
labels:
{{- include "trino.labels" . | nindent 4 }}
app.kubernetes.io/component: worker
data:
{{- range $key, $val := .Values.kafka.tableDescriptions }}
{{ $key }}: {{ $val | quote }}
{{- end }}
{{- end }}

@@ -0,0 +1,247 @@
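{{- /* Coordinator Deployment: checksum annotations roll the pods whenever the referenced ConfigMaps change, and the JMX exporter runs as an optional sidecar container. */}}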
{{- $coordinatorJmx := merge .Values.jmx.coordinator (omit .Values.jmx "coordinator" "worker") -}}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "trino.coordinator" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "trino.labels" . | nindent 4 }}
app.kubernetes.io/component: coordinator
trino.io/network-policy-protection: {{ ternary "enabled" "disabled" .Values.networkPolicy.enabled }}
{{- if .Values.coordinator.labels }}
{{- tpl (toYaml .Values.coordinator.labels) . | nindent 4 }}
{{- end }}
spec:
progressDeadlineSeconds: {{ .Values.coordinator.deployment.progressDeadlineSeconds }}
revisionHistoryLimit: {{ .Values.coordinator.deployment.revisionHistoryLimit }}
strategy:
{{- toYaml .Values.coordinator.deployment.strategy | nindent 4 }}
selector:
matchLabels:
{{- include "trino.selectorLabels" . | nindent 6 }}
app.kubernetes.io/component: coordinator
template:
metadata:
annotations:
{{- if and (eq .Values.accessControl.type "configmap") (not .Values.accessControl.refreshPeriod) }}
checksum/access-control-config: {{ include (print $.Template.BasePath "/configmap-access-control-coordinator.yaml") . | sha256sum }}
{{- end }}
{{- if or .Values.catalogs .Values.additionalCatalogs }}
checksum/catalog-config: {{ include (print $.Template.BasePath "/configmap-catalog.yaml") . | sha256sum }}
{{- end }}
checksum/coordinator-config: {{ include (print $.Template.BasePath "/configmap-coordinator.yaml") . | sha256sum }}
{{- if .Values.coordinator.annotations }}
{{- tpl (toYaml .Values.coordinator.annotations) . | nindent 8 }}
{{- end }}
labels:
{{- include "trino.labels" . | nindent 8 }}
app.kubernetes.io/component: coordinator
trino.io/network-policy-protection: {{ ternary "enabled" "disabled" .Values.networkPolicy.enabled }}
{{- if .Values.coordinator.labels }}
{{- tpl (toYaml .Values.coordinator.labels) . | nindent 8 }}
{{- end }}
spec:
serviceAccountName: {{ include "trino.serviceAccountName" . }}
{{- with .Values.securityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.shareProcessNamespace.coordinator }}
shareProcessNamespace: {{ .Values.shareProcessNamespace.coordinator }}
{{- end }}
volumes:
- name: config-volume
configMap:
name: {{ template "trino.coordinator" . }}
{{- if or .Values.catalogs .Values.additionalCatalogs }}
- name: catalog-volume
configMap:
name: {{ template "trino.catalog" . }}
{{- end }}
- name: schemas-volume
configMap:
name: {{ template "trino.fullname" . }}-schemas-volume-coordinator
{{- if eq .Values.accessControl.type "configmap" }}
- name: access-control-volume
configMap:
name: {{ template "trino.fullname" . }}-access-control-volume-coordinator
{{- end }}
{{- if .Values.resourceGroups }}
- name: resource-groups-volume
configMap:
name: {{ template "trino.fullname" . }}-resource-groups-volume-coordinator
{{- end }}
{{- if or .Values.auth.passwordAuth .Values.auth.passwordAuthSecret .Values.auth.groups }}
- name: file-authentication-volume
secret:
secretName: {{ template "trino.fileAuthSecretName" . }}
items:
{{- if or .Values.auth.passwordAuth .Values.auth.passwordAuthSecret }}
- key: password.db
path: password.db
{{- end }}
{{- if .Values.auth.groups }}
- key: group.db
path: group.db
{{- end }}
{{- end }}
{{- if $coordinatorJmx.exporter.enabled }}
- name: jmx-exporter-config-volume
configMap:
name: {{ template "trino.fullname" . }}-jmx-exporter-config-coordinator
{{- end }}
{{- range .Values.configMounts }}
- name: {{ .name }}
configMap:
name: {{ .configMap }}
{{- end }}
{{- range .Values.coordinator.configMounts }}
- name: {{ .name }}
configMap:
name: {{ .configMap }}
{{- end }}
{{- range .Values.secretMounts }}
- name: {{ .name }}
secret:
secretName: {{ .secretName }}
{{- end }}
{{- range .Values.coordinator.secretMounts }}
- name: {{ .name }}
secret:
secretName: {{ .secretName }}
{{- end }}
{{- with .Values.coordinator.additionalVolumes }}
{{- . | toYaml | nindent 8 }}
{{- end }}
{{- if .Values.initContainers.coordinator }}
initContainers:
{{- tpl (toYaml .Values.initContainers.coordinator) . | nindent 6 }}
{{- end }}
      {{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml .Values.imagePullSecrets | nindent 8 }}
{{- end }}
terminationGracePeriodSeconds: {{ .Values.coordinator.terminationGracePeriodSeconds }}
containers:
- name: {{ .Chart.Name }}-coordinator
image: {{ include "trino.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext:
{{- toYaml .Values.containerSecurityContext | nindent 12 }}
env:
{{- toYaml .Values.env | nindent 12 }}
envFrom:
{{- toYaml .Values.envFrom | nindent 12 }}
volumeMounts:
- mountPath: {{ .Values.server.config.path }}
name: config-volume
{{- if or .Values.catalogs .Values.additionalCatalogs }}
- mountPath: {{ .Values.server.config.path }}/catalog
name: catalog-volume
{{- end }}
- mountPath: {{ .Values.kafka.mountPath }}
name: schemas-volume
{{- if eq .Values.accessControl.type "configmap" }}
- mountPath: {{ .Values.server.config.path }}/access-control
name: access-control-volume
{{- end }}
{{- if .Values.resourceGroups }}
- mountPath: {{ .Values.server.config.path }}/resource-groups
name: resource-groups-volume
{{- end }}
{{- range .Values.configMounts }}
- name: {{ .name }}
mountPath: {{ .path }}
{{- end }}
{{- range .Values.coordinator.configMounts }}
- name: {{ .name }}
mountPath: {{ .path }}
{{- end }}
{{- range .Values.secretMounts }}
- name: {{ .name }}
mountPath: {{ .path }}
{{- end }}
{{- range .Values.coordinator.secretMounts }}
- name: {{ .name }}
mountPath: {{ .path }}
{{- end }}
{{- if or .Values.auth.passwordAuth .Values.auth.passwordAuthSecret .Values.auth.groups }}
- mountPath: {{ .Values.server.config.path }}/auth
name: file-authentication-volume
{{- end }}
{{- with .Values.coordinator.additionalVolumeMounts }}
{{- . | toYaml | nindent 12 }}
{{- end }}
ports:
- name: http
containerPort: {{ .Values.service.port }}
protocol: TCP
{{- if $coordinatorJmx.enabled }}
- name: jmx-registry
containerPort: {{ $coordinatorJmx.registryPort }}
protocol: TCP
- name: jmx-server
containerPort: {{ $coordinatorJmx.serverPort }}
protocol: TCP
{{- end }}
{{- range $key, $value := .Values.coordinator.additionalExposedPorts }}
- name: {{ $value.name }}
containerPort: {{ $value.port }}
protocol: {{ $value.protocol }}
{{- end }}
livenessProbe:
httpGet:
path: /v1/info
port: http
initialDelaySeconds: {{ .Values.coordinator.livenessProbe.initialDelaySeconds | default 30 }}
periodSeconds: {{ .Values.coordinator.livenessProbe.periodSeconds | default 10 }}
timeoutSeconds: {{ .Values.coordinator.livenessProbe.timeoutSeconds | default 5 }}
failureThreshold: {{ .Values.coordinator.livenessProbe.failureThreshold | default 6 }}
successThreshold: {{ .Values.coordinator.livenessProbe.successThreshold | default 1 }}
readinessProbe:
exec:
command: [/usr/lib/trino/bin/health-check]
initialDelaySeconds: {{ .Values.coordinator.readinessProbe.initialDelaySeconds | default 10 }}
periodSeconds: {{ .Values.coordinator.readinessProbe.periodSeconds | default 10 }}
timeoutSeconds: {{ .Values.coordinator.readinessProbe.timeoutSeconds | default 5 }}
failureThreshold: {{ .Values.coordinator.readinessProbe.failureThreshold | default 6 }}
successThreshold: {{ .Values.coordinator.readinessProbe.successThreshold | default 1 }}
lifecycle:
{{- toYaml .Values.coordinator.lifecycle | nindent 12 }}
resources:
{{- toYaml .Values.coordinator.resources | nindent 12 }}
{{- if $coordinatorJmx.exporter.enabled }}
- name: jmx-exporter
image: {{ $coordinatorJmx.exporter.image }}
imagePullPolicy: {{ $coordinatorJmx.exporter.pullPolicy }}
securityContext:
{{- toYaml $coordinatorJmx.exporter.securityContext | nindent 12 }}
args:
- "{{ $coordinatorJmx.exporter.port }}"
- /etc/jmx-exporter/jmx-exporter-config.yaml
volumeMounts:
- mountPath: /etc/jmx-exporter/
name: jmx-exporter-config-volume
resources:
{{- toYaml $coordinatorJmx.exporter.resources | nindent 12 }}
ports:
- name: jmx-exporter
containerPort: {{ $coordinatorJmx.exporter.port }}
protocol: TCP
{{- end }}
{{- if .Values.sidecarContainers.coordinator }}
{{- toYaml .Values.sidecarContainers.coordinator | nindent 8 }}
{{- end }}
{{- with .Values.coordinator.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.coordinator.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.coordinator.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}

@@ -0,0 +1,245 @@
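{{- /* Worker Deployment: replicas follow server.workers unless autoscaling is enabled; graceful shutdown adds a preStop drain hook and a termination grace period sanity check. */}}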
{{- $workerJmx := merge .Values.jmx.worker (omit .Values.jmx "coordinator" "worker") -}}
{{- if gt (int .Values.server.workers) 0 }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "trino.worker" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "trino.labels" . | nindent 4 }}
app.kubernetes.io/component: worker
trino.io/network-policy-protection: {{ ternary "enabled" "disabled" .Values.networkPolicy.enabled }}
{{- if .Values.worker.labels }}
{{- tpl (toYaml .Values.worker.labels) . | nindent 4 }}
{{- end }}
spec:
progressDeadlineSeconds: {{ .Values.worker.deployment.progressDeadlineSeconds }}
revisionHistoryLimit: {{ .Values.worker.deployment.revisionHistoryLimit }}
strategy:
{{- toYaml .Values.worker.deployment.strategy | nindent 4 }}
{{- if not .Values.server.autoscaling.enabled }}
replicas: {{ .Values.server.workers }}
{{- end }}
selector:
matchLabels:
{{- include "trino.selectorLabels" . | nindent 6 }}
app.kubernetes.io/component: worker
template:
metadata:
annotations:
{{- if or .Values.catalogs .Values.additionalCatalogs }}
checksum/catalog-config: {{ include (print $.Template.BasePath "/configmap-catalog.yaml") . | sha256sum }}
{{- end }}
checksum/worker-config: {{ include (print $.Template.BasePath "/configmap-worker.yaml") . | sha256sum }}
{{- if .Values.worker.gracefulShutdown.enabled }}
checksum/access-control-config: {{ include (print $.Template.BasePath "/configmap-access-control-worker.yaml") . | sha256sum }}
{{- end }}
{{- if .Values.worker.annotations }}
{{- tpl (toYaml .Values.worker.annotations) . | nindent 8 }}
{{- end }}
labels:
{{- include "trino.labels" . | nindent 8 }}
app.kubernetes.io/component: worker
trino.io/network-policy-protection: {{ ternary "enabled" "disabled" .Values.networkPolicy.enabled }}
{{- if .Values.worker.labels }}
{{- tpl (toYaml .Values.worker.labels) . | nindent 8 }}
{{- end }}
spec:
serviceAccountName: {{ include "trino.serviceAccountName" . }}
{{- with .Values.securityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.shareProcessNamespace.worker }}
shareProcessNamespace: {{ .Values.shareProcessNamespace.worker }}
{{- end }}
volumes:
- name: config-volume
configMap:
name: {{ template "trino.worker" . }}
{{- if or .Values.catalogs .Values.additionalCatalogs }}
- name: catalog-volume
configMap:
name: {{ template "trino.catalog" . }}
{{- end }}
- name: schemas-volume
configMap:
name: {{ template "trino.fullname" . }}-schemas-volume-worker
{{- if $workerJmx.exporter.enabled }}
- name: jmx-exporter-config-volume
configMap:
name: {{ template "trino.fullname" . }}-jmx-exporter-config-worker
{{- end }}
{{- if .Values.worker.gracefulShutdown.enabled }}
- name: access-control-volume
configMap:
name: {{ template "trino.fullname" . }}-access-control-volume-worker
{{- end }}
{{- range .Values.configMounts }}
- name: {{ .name }}
configMap:
name: {{ .configMap }}
{{- end }}
{{- range .Values.worker.configMounts }}
- name: {{ .name }}
configMap:
name: {{ .configMap }}
{{- end }}
{{- range .Values.secretMounts }}
- name: {{ .name }}
secret:
secretName: {{ .secretName }}
{{- end }}
{{- range .Values.worker.secretMounts }}
- name: {{ .name }}
secret:
secretName: {{ .secretName }}
{{- end }}
{{- with .Values.worker.additionalVolumes }}
{{- . | toYaml | nindent 8 }}
{{- end }}
{{- if .Values.initContainers.worker }}
initContainers:
{{- tpl (toYaml .Values.initContainers.worker) . | nindent 6 }}
{{- end }}
      {{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml .Values.imagePullSecrets | nindent 8 }}
{{- end }}
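      {{- /* Kubernetes must wait at least twice Trino's shutdown grace period before a SIGKILL, so a misconfigured value fails the render instead of cutting queries short. */}}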
{{- if and .Values.worker.gracefulShutdown.enabled (gt (mulf 2.0 .Values.worker.gracefulShutdown.gracePeriodSeconds) .Values.worker.terminationGracePeriodSeconds) }}
{{- fail "The user must set the `worker.terminationGracePeriodSeconds` to a value of at least two times the configured `gracePeriodSeconds`." }}
{{- else }}
terminationGracePeriodSeconds: {{ .Values.worker.terminationGracePeriodSeconds }}
{{- end }}
containers:
- name: {{ .Chart.Name }}-worker
image: {{ include "trino.image" . }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
securityContext:
{{- toYaml .Values.containerSecurityContext | nindent 12 }}
env:
{{- toYaml .Values.env | nindent 12 }}
envFrom:
{{- toYaml .Values.envFrom | nindent 12 }}
volumeMounts:
- mountPath: {{ .Values.server.config.path }}
name: config-volume
{{- if or .Values.catalogs .Values.additionalCatalogs }}
- mountPath: {{ .Values.server.config.path }}/catalog
name: catalog-volume
{{- end }}
- mountPath: {{ .Values.kafka.mountPath }}
name: schemas-volume
{{- if .Values.worker.gracefulShutdown.enabled }}
- mountPath: {{ .Values.server.config.path }}/access-control
name: access-control-volume
{{- end }}
{{- range .Values.configMounts }}
- name: {{ .name }}
mountPath: {{ .path }}
{{- end }}
{{- range .Values.worker.configMounts }}
- name: {{ .name }}
mountPath: {{ .path }}
{{- end }}
{{- range .Values.secretMounts }}
- name: {{ .name }}
mountPath: {{ .path }}
{{- end }}
{{- range .Values.worker.secretMounts }}
- name: {{ .name }}
mountPath: {{ .path }}
{{- end }}
{{- with .Values.worker.additionalVolumeMounts }}
{{- . | toYaml | nindent 12 }}
{{- end }}
ports:
- name: http
containerPort: {{ .Values.service.port }}
protocol: TCP
{{- if $workerJmx.enabled }}
- name: jmx-registry
containerPort: {{ $workerJmx.registryPort }}
protocol: TCP
- name: jmx-server
containerPort: {{ $workerJmx.serverPort }}
protocol: TCP
{{- end }}
{{- range $key, $value := .Values.worker.additionalExposedPorts }}
- name: {{ $value.name }}
containerPort: {{ $value.port }}
protocol: {{ $value.protocol }}
{{- end }}
livenessProbe:
httpGet:
path: /v1/info
port: http
initialDelaySeconds: {{ .Values.worker.livenessProbe.initialDelaySeconds | default 30 }}
periodSeconds: {{ .Values.worker.livenessProbe.periodSeconds | default 10 }}
timeoutSeconds: {{ .Values.worker.livenessProbe.timeoutSeconds | default 5 }}
failureThreshold: {{ .Values.worker.livenessProbe.failureThreshold | default 6 }}
successThreshold: {{ .Values.worker.livenessProbe.successThreshold | default 1 }}
readinessProbe:
exec:
command: [/usr/lib/trino/bin/health-check]
initialDelaySeconds: {{ .Values.worker.readinessProbe.initialDelaySeconds | default 10 }}
periodSeconds: {{ .Values.worker.readinessProbe.periodSeconds | default 10 }}
timeoutSeconds: {{ .Values.worker.readinessProbe.timeoutSeconds | default 5 }}
failureThreshold: {{ .Values.worker.readinessProbe.failureThreshold | default 6 }}
successThreshold: {{ .Values.worker.readinessProbe.successThreshold | default 1 }}
lifecycle:
{{- if .Values.worker.lifecycle }}
{{- if .Values.worker.gracefulShutdown.enabled }}
{{- fail "The `worker.lifecycle` configuration conflicts with `worker.gracefulShutdown`. Either disable `worker.gracefulShutdown` and apply the related configurations manually, or remove `worker.lifecycle`." }}
{{- end }}
{{- toYaml .Values.worker.lifecycle | nindent 12 }}
{{- else if .Values.worker.gracefulShutdown.enabled }}
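            {{- /* Drain the worker: PUT "SHUTTING_DOWN" to /v1/info/state so running tasks finish before the container exits. */}}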
preStop:
exec:
command:
- /bin/sh
- -c
- >-
curl -v -X PUT
-d '"SHUTTING_DOWN"'
-H 'Content-type: application/json'
-H 'X-Trino-User: admin'
http://localhost:{{- .Values.service.port -}}/v1/info/state
{{- end }}
resources:
{{- toYaml .Values.worker.resources | nindent 12 }}
{{- if $workerJmx.exporter.enabled }}
- name: jmx-exporter
image: {{ $workerJmx.exporter.image }}
imagePullPolicy: {{ $workerJmx.exporter.pullPolicy }}
securityContext:
{{- toYaml $workerJmx.exporter.securityContext | nindent 12 }}
args:
- "{{ $workerJmx.exporter.port }}"
- /etc/jmx-exporter/jmx-exporter-config.yaml
volumeMounts:
- mountPath: /etc/jmx-exporter/
name: jmx-exporter-config-volume
resources:
{{- toYaml $workerJmx.exporter.resources | nindent 12 }}
ports:
- name: jmx-exporter
containerPort: {{ $workerJmx.exporter.port }}
protocol: TCP
{{- end }}
{{- if .Values.sidecarContainers.worker }}
{{- toYaml .Values.sidecarContainers.worker | nindent 8 }}
{{- end }}
{{- with .Values.worker.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.worker.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.worker.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}

@@ -0,0 +1,38 @@
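{{- /* Optional Ingress in front of the coordinator; className, TLS, hosts, and annotations all come from .Values.ingress. */}}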
{{- if .Values.ingress.enabled -}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: {{ template "trino.coordinator" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "trino.labels" . | nindent 4 }}
annotations:
{{- toYaml .Values.ingress.annotations | nindent 4 }}
spec:
ingressClassName: {{ .Values.ingress.className }}
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ .path }}
pathType: {{ .pathType }}
backend:
service:
name: {{ include "trino.fullname" $ }}
port:
number: {{ $.Values.service.port }}
{{- end }}
{{- end }}
{{- end }}

@@ -0,0 +1,37 @@
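{{- /* NetworkPolicy: only pods in this namespace labeled trino.io/network-policy-protection=enabled may reach Trino; extra rules come from networkPolicy.ingress and networkPolicy.egress. */}}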
{{- if .Values.networkPolicy.enabled }}
{{- if eq "NodePort" .Values.service.type }}
{{- fail "NetworkPolicy enforcement is not supported with NodePort services, as traffic reaches the Pod through the node itself, bypassing pod-level network controls." }}
{{- end }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: {{ template "trino.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "trino.labels" . | nindent 4 }}
app.kubernetes.io/component: network-policy
spec:
podSelector:
matchLabels:
{{- include "trino.selectorLabels" . | nindent 6 }}
trino.io/network-policy-protection: enabled
policyTypes:
- Ingress
{{- with .Values.networkPolicy.egress }}
- Egress
egress:
{{- toYaml . | nindent 4 }}
{{- end }}
ingress:
- from:
- podSelector:
matchLabels:
{{- include "trino.selectorLabels" . | nindent 14 }}
trino.io/network-policy-protection: enabled
namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: {{ .Release.Namespace }}
{{- with .Values.networkPolicy.ingress }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

@@ -0,0 +1,16 @@
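{{- /* Secret with the file-based authentication databases, rendered only when credentials are given inline via auth.passwordAuth or auth.groups. */}}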
{{- if or .Values.auth.passwordAuth .Values.auth.groups }}
apiVersion: v1
kind: Secret
metadata:
name: {{ template "trino.fileAuthSecretName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "trino.labels" . | nindent 4 }}
data:
{{- if .Values.auth.passwordAuth }}
password.db: {{ .Values.auth.passwordAuth | b64enc }}
{{- end }}
  {{- if .Values.auth.groups }}
group.db: {{ .Values.auth.groups | b64enc }}
{{- end }}
{{- end }}

@@ -0,0 +1,36 @@
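{{- /* Coordinator Service: HTTP, the optional jmx-exporter port, and any coordinator.additionalExposedPorts. */}}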
{{- $coordinatorJmx := merge .Values.jmx.coordinator (omit .Values.jmx "coordinator" "worker") -}}
apiVersion: v1
kind: Service
metadata:
name: {{ template "trino.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "trino.labels" . | nindent 4 }}
app.kubernetes.io/component: coordinator
annotations:
{{- toYaml .Values.service.annotations | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
{{- if .Values.service.nodePort }}
nodePort: {{ .Values.service.nodePort }}
{{- end }}
{{- if $coordinatorJmx.exporter.enabled }}
- port: {{ $coordinatorJmx.exporter.port }}
targetPort: jmx-exporter
protocol: TCP
name: jmx-exporter
{{- end }}
{{- range $key, $value := .Values.coordinator.additionalExposedPorts }}
- port: {{ $value.servicePort }}
name: {{ $value.name }}
targetPort: {{ $value.port }}
protocol: {{ $value.protocol }}
{{- end }}
selector:
{{- include "trino.selectorLabels" . | nindent 4 }}
app.kubernetes.io/component: coordinator

@@ -0,0 +1,27 @@
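{{- /* Headless worker Service (clusterIP: None); the jmx-exporter port is added when the worker exporter is enabled. */}}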
{{- $workerJmx := merge .Values.jmx.worker (omit .Values.jmx "coordinator" "worker") -}}
apiVersion: v1
kind: Service
metadata:
name: {{ template "trino.fullname" . }}-worker
namespace: {{ .Release.Namespace }}
labels:
{{- include "trino.labels" . | nindent 4 }}
app.kubernetes.io/component: worker
annotations:
{{- toYaml .Values.service.annotations | nindent 4 }}
spec:
clusterIP: None
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
{{- if $workerJmx.exporter.enabled }}
    - port: {{ $workerJmx.exporter.port }}
targetPort: jmx-exporter
protocol: TCP
name: jmx-exporter
{{- end }}
selector:
{{- include "trino.selectorLabels" . | nindent 4 }}
app.kubernetes.io/component: worker

@@ -0,0 +1,13 @@
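{{- /* ServiceAccount shared by the coordinator and worker pods, created only when serviceAccount.create is set. */}}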
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "trino.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "trino.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

@@ -0,0 +1,24 @@
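{{- /* Prometheus Operator ServiceMonitor scraping the coordinator's jmx-exporter endpoint. */}}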
{{- $coordinatorServiceMonitor := merge .Values.serviceMonitor.coordinator (omit .Values.serviceMonitor "coordinator" "worker") -}}
{{- if $coordinatorServiceMonitor.enabled -}}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "trino.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "trino.labels" . | nindent 4 }}
{{- if $coordinatorServiceMonitor.labels }}
{{- toYaml $coordinatorServiceMonitor.labels | nindent 4 }}
{{- end }}
spec:
selector:
matchLabels:
{{- include "trino.selectorLabels" . | nindent 6 }}
app.kubernetes.io/component: coordinator
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
endpoints:
- port: jmx-exporter
interval: {{ $coordinatorServiceMonitor.interval }}
{{- end }}

@@ -0,0 +1,24 @@
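{{- /* ServiceMonitor for the worker jmx-exporter endpoint, mirroring the coordinator monitor. */}}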
{{- $workerServiceMonitor := merge .Values.serviceMonitor.worker (omit .Values.serviceMonitor "coordinator" "worker") -}}
{{- if $workerServiceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "trino.fullname" . }}-worker
namespace: {{ .Release.Namespace }}
labels:
{{- include "trino.labels" . | nindent 4 }}
{{- if $workerServiceMonitor.labels }}
{{- toYaml $workerServiceMonitor.labels | nindent 4 }}
{{- end }}
spec:
selector:
matchLabels:
{{- include "trino.selectorLabels" . | nindent 6 }}
app.kubernetes.io/component: worker
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
endpoints:
- port: jmx-exporter
interval: {{ $workerServiceMonitor.interval }}
{{- end }}

@@ -0,0 +1,46 @@
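{{- /* Helm test: the Trino CLI connects to the coordinator (over TLS when https is enabled) and runs SELECT 1. */}}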
apiVersion: v1
kind: Pod
metadata:
name: {{ include "trino.fullname" . }}-test-connection
labels:
{{- include "trino.labels" . | nindent 4 }}
app.kubernetes.io/component: test
test: connection
annotations:
"helm.sh/hook": test
spec:
containers:
- name: cli
image: {{ include "trino.image" . }}
command: ['trino']
args:
{{- if .Values.server.config.https.enabled }}
# port must match coordinator.additionalExposedPorts
- trino://{{ include "trino.fullname" . }}.{{ .Release.Namespace }}:8443?SSL=true&SSLVerification=FULL&SSLTrustStorePath=/etc/trino/certificates/tls.crt
{{- else }}
- trino://{{ include "trino.fullname" . }}:{{ .Values.service.port }}
{{- end }}
- --user=admin
{{- if eq .Values.server.config.authenticationType "PASSWORD" }}
- --password
{{- end }}
- --debug
- --execute=SELECT 1
- --no-progress
{{- if eq .Values.server.config.authenticationType "PASSWORD" }}
env:
- name: TRINO_PASSWORD
# must match test-values.yaml
value: admin123
{{- end }}
{{- if .Values.server.config.https.enabled }}
volumeMounts:
- name: certificates
readOnly: true
mountPath: "/etc/trino/certificates"
volumes:
- name: certificates
secret:
secretName: certificates
{{- end }}
restartPolicy: Never

@@ -0,0 +1,120 @@
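{{- /* Helm test for graceful shutdown: scoped RBAC lets the test pod find a worker, follow its logs for "Shutdown requested", and delete it to exercise the preStop hook. */}}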
{{- if .Values.worker.gracefulShutdown.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ include "trino.fullname" . }}-pod-manager
namespace: {{ .Release.Namespace }}
labels:
{{- include "trino.labels" . | nindent 4 }}
app.kubernetes.io/component: test
test: graceful-shutdown
annotations:
"helm.sh/hook": test
"helm.sh/hook-weight": "0"
"helm.sh/hook-delete-policy": hook-succeeded
rules:
- apiGroups: [ "" ]
resources: [ "pods" ]
verbs: [ "get", "list", "delete" ]
- apiGroups: [ "" ]
resources: [ "pods/log" ]
verbs: [ "get" ]
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "trino.fullname" . }}-pod-manager-sa
namespace: {{ .Release.Namespace }}
labels:
{{- include "trino.labels" . | nindent 4 }}
app.kubernetes.io/component: test
test: graceful-shutdown
annotations:
"helm.sh/hook": test
"helm.sh/hook-weight": "0"
"helm.sh/hook-delete-policy": hook-succeeded
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "trino.fullname" . }}-pod-manager-binding
namespace: {{ .Release.Namespace }}
labels:
{{- include "trino.labels" . | nindent 4 }}
app.kubernetes.io/component: test
test: graceful-shutdown
annotations:
"helm.sh/hook": test
"helm.sh/hook-weight": "1"
"helm.sh/hook-delete-policy": hook-succeeded
subjects:
- kind: ServiceAccount
name: {{ include "trino.fullname" . }}-pod-manager-sa
namespace: {{ .Release.Namespace }}
roleRef:
kind: Role
name: {{ include "trino.fullname" . }}-pod-manager
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Pod
metadata:
name: {{ include "trino.fullname" . }}-test-graceful-shutdown
labels:
{{- include "trino.labels" . | nindent 4 }}
app.kubernetes.io/component: test
test: graceful-shutdown
annotations:
"helm.sh/hook": test
"helm.sh/hook-weight": "2"
"helm.sh/hook-delete-policy": hook-succeeded
spec:
serviceAccountName: {{ include "trino.fullname" . }}-pod-manager-sa
initContainers:
- name: get-worker-pod
image: bitnami/kubectl:latest
command: [ "sh", "-c" ]
args:
- >-
kubectl get pods
--selector="app.kubernetes.io/name={{ include "trino.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=worker"
--output=jsonpath="{.items[0].metadata.name}"
--namespace={{ .Release.Namespace }}
> /pods/worker-pod.txt
volumeMounts:
- mountPath: /pods
name: worker-pod
containers:
- name: check-logs
image: bitnami/kubectl:latest
command: [ "sh", "-c" ]
args:
- >-
WORKER_POD=$(cat /pods/worker-pod.txt) &&
kubectl logs ${WORKER_POD}
--follow
--container=trino-worker
--namespace={{ .Release.Namespace }}
| grep --max-count=1 "Shutdown requested"
volumeMounts:
- mountPath: /pods
name: worker-pod
- name: trigger-graceful-shutdown
image: bitnami/kubectl:latest
command: [ "sh", "-c" ]
args:
- >-
sleep 5 &&
WORKER_POD=$(cat /pods/worker-pod.txt) &&
kubectl delete pod
${WORKER_POD}
--namespace={{ .Release.Namespace }}
volumeMounts:
- mountPath: /pods
name: worker-pod
restartPolicy: Never
volumes:
- name: worker-pod
emptyDir: {}
{{- end }}

@@ -0,0 +1,99 @@
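{{- /* Helm test: curls the jmx-exporter endpoints for Trino metrics and polls Prometheus, via the inline Python script below, until the ServiceMonitor targets appear. */}}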
{{- $coordinatorJmx := merge .Values.jmx.coordinator (omit .Values.jmx "coordinator" "worker") -}}
{{- $workerJmx := merge .Values.jmx.worker (omit .Values.jmx "coordinator" "worker") -}}
{{- $coordinatorServiceMonitor := merge .Values.serviceMonitor.coordinator (omit .Values.serviceMonitor "coordinator" "worker") -}}
{{- $workerServiceMonitor := merge .Values.serviceMonitor.worker (omit .Values.serviceMonitor "coordinator" "worker") -}}
{{- if or $coordinatorJmx.exporter.enabled $coordinatorServiceMonitor.enabled $workerJmx.exporter.enabled $workerServiceMonitor.enabled -}}
apiVersion: v1
kind: Pod
metadata:
name: {{ include "trino.fullname" . }}-test-jmx
labels:
{{- include "trino.labels" . | nindent 4 }}
app.kubernetes.io/component: test
test: jmx
annotations:
"helm.sh/hook": test
"helm.sh/hook-delete-policy": hook-succeeded
spec:
containers:
{{- if $coordinatorJmx.exporter.enabled }}
- name: trino-jmx-coordinator
image: {{ include "trino.image" . }}
command: ["/bin/bash", "-c"]
args:
- curl -s {{ include "trino.fullname" . }}.{{ .Release.Namespace }}:{{ $coordinatorJmx.exporter.port }}/metrics | grep -q trino
{{- end }}
  {{- if $workerJmx.exporter.enabled }}
- name: trino-jmx-worker
image: {{ include "trino.image" . }}
command: ["/bin/bash", "-c"]
args:
- curl -s {{ include "trino.fullname" . }}-worker.{{ .Release.Namespace }}:{{ $workerJmx.exporter.port }}/metrics | grep -q trino
{{- end }}
{{- if $coordinatorServiceMonitor.enabled }}
- name: service-monitor-coordinator
image: python:3-slim
command: [ "python", "/tests/test.py" ]
args: ["{{ include "trino.fullname" . }}"]
volumeMounts:
- name: tests
mountPath: /tests
{{- end }}
{{- if $workerServiceMonitor.enabled }}
- name: service-monitor-worker
image: python:3-slim
command: ["python", "/tests/test.py"]
args: ["{{ include "trino.fullname" . }}-worker"]
volumeMounts:
- name: tests
mountPath: /tests
{{- end }}
volumes:
- name: tests
configMap:
name: {{ include "trino.fullname" . }}-test-jmx
restartPolicy: Never
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "trino.fullname" . }}-test-jmx
labels:
{{- include "trino.labels" . | nindent 4 }}
app.kubernetes.io/component: test
test: jmx
annotations:
"helm.sh/hook": test
"helm.sh/hook-delete-policy": hook-succeeded
data:
test.py: |
from urllib.request import urlopen
from urllib.error import URLError, HTTPError
import json
import logging
import sys
import time
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)
target_service = sys.argv[1]
url = f"http://prometheus-operator-kube-p-prometheus:9090/api/v1/targets?scrapePool=serviceMonitor/{{ .Release.Namespace }}/{target_service}/0&state=active"
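    # Poll Prometheus until the expected target shows up, retrying every 2 seconds on errors or incomplete responses.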
while True:
try:
with urlopen(url) as response:
data = json.load(response)
except (URLError, HTTPError) as e:
        logger.warning("Error fetching targets, Prometheus service might not be ready: %s", e)
time.sleep(2) # Retry after 2 seconds
continue
try:
service_name = data["data"]["activeTargets"][0]["discoveredLabels"]["__meta_kubernetes_service_name"]
except (KeyError, IndexError) as e:
        logger.warning("Invalid Prometheus response: %s", e)
time.sleep(2) # Retry after 2 seconds
continue
if service_name == target_service:
logger.info(f"Found expected service '{service_name}' in Prometheus targets!")
break
{{- end }}

@@ -0,0 +1,28 @@
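{{- /* Helm test: this pod lacks the protection label, so with the NetworkPolicy enforced its request to the coordinator must time out. */}}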
{{- if .Values.networkPolicy.enabled }}
apiVersion: v1
kind: Pod
metadata:
name: {{ include "trino.fullname" . }}-test-networkpolicy
labels:
{{- include "trino.labels" . | nindent 4 }}
app.kubernetes.io/component: test
test: network-policy
annotations:
"helm.sh/hook": test
"helm.sh/hook-delete-policy": hook-succeeded
spec:
containers:
- name: check-connection
image: {{ include "trino.image" . }}
command: [ "/bin/bash", "-c" ]
args:
- >-
curl
{{ include "trino.fullname" . }}.{{ .Release.Namespace }}:{{ .Values.service.port }}
--head
--fail
--connect-timeout 10
--max-time 10
2>&1 | grep -q "timed out"
restartPolicy: Never
{{- end }}

File diff suppressed because it is too large

@@ -0,0 +1,11 @@
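# Inflates the upstream trino chart through kustomize's helmCharts support;
# requires a Helm-enabled build (kustomize build --enable-helm).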
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
helmCharts:
- includeCRDs: true
name: trino
namespace: trino
releaseName: trino
valuesFile: ./values/values.yaml
version: 0.32.0
repo: https://trinodb.github.io/charts

@@ -0,0 +1,10 @@
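# Chart overrides: pin the Trino image tag and size the cluster (three workers, 8G max heap each).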
image:
tag: "461"
server:
workers: 3
coordinator:
jvm:
maxHeapSize: "8G"
worker:
jvm:
maxHeapSize: "8G"

@@ -0,0 +1,5 @@
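# Environment overlay; currently it only pulls in the shared base.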
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../base/