Compare commits


No commits in common. "main" and "mandersson-dorisgu" have entirely different histories.

109 changed files with 225 additions and 12865 deletions

View file

@@ -1,2 +1,2 @@
 # k8s-manifests

View file

@@ -1,15 +0,0 @@
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
  name: letsencrypt
spec:
  acme:
    server: https://acme-v02.api.letsencrypt.org/directory
    email: drive@sunet.se
    privateKeySecretRef:
      name: letsencrypt
    solvers:
    - http01:
        ingress:
          class: nginx

View file

@@ -1,3 +1,2 @@
 resources:
 - argocd-ingress.yaml
-- argocd-cert-issuer.yaml

View file

@@ -3,13 +3,16 @@ kind: Ingress
 metadata:
   name: argocd-ingress
   namespace: argocd
+  annotations:
+    traefik.ingress.kubernetes.io/router.entrypoints: websecure
+    traefik.ingress.kubernetes.io/router.tls: "true"
 spec:
   defaultBackend:
     service:
       name: argocd-server
       port:
-        number: 80
-  ingressClassName: nginx
+        number: 8443
+  ingressClassName: traefik
   tls:
   - hosts:
     - argocd.drive.test.sunet.dev
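
For reference, a sketch of the Ingress that results once kustomize applies the patched fields above over the base (only fields visible in this hunk are shown; the rules section is assumed unchanged):

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: argocd-ingress
  namespace: argocd
  annotations:
    traefik.ingress.kubernetes.io/router.entrypoints: websecure
    traefik.ingress.kubernetes.io/router.tls: "true"
spec:
  ingressClassName: traefik
  defaultBackend:
    service:
      name: argocd-server
      port:
        number: 8443
  tls:
  - hosts:
    - argocd.drive.test.sunet.dev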

View file

@@ -1,30 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: argocd-ingress
  annotations:
    cert-manager.io/issuer: "letsencrypt"
    acme.cert-manager.io/http01-edit-in-place: "true"
  namespace: argocd
spec:
  defaultBackend:
    service:
      name: argocd-server
      port:
        number: 80
  ingressClassName: nginx
  tls:
  - hosts:
    - sunet-argocd.drive.sunet.se
    secretName: tls-secret
  rules:
  - host: sunet-argocd.drive.sunet.se
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: argocd-server
            port:
              number: 80

View file

@@ -1,6 +0,0 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources: [../../../base]
patches:
- path: argocd-ingress.yaml

View file

@@ -1,30 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: argocd-ingress
  namespace: argocd
  annotations:
    cert-manager.io/issuer: "letsencrypt"
    acme.cert-manager.io/http01-edit-in-place: "true"
spec:
  defaultBackend:
    service:
      name: argocd-server
      port:
        number: 80
  ingressClassName: nginx
  tls:
  - hosts:
    - vr-argocd.drive.sunet.se
    secretName: tls-secret
  rules:
  - host: vr-argocd.drive.sunet.se
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: argocd-server
            port:
              number: 80

View file

@@ -1,6 +0,0 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources: [../../../base]
patches:
- path: argocd-ingress.yaml

View file

@@ -1,27 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: argocd-ingress
  namespace: argocd
spec:
  defaultBackend:
    service:
      name: argocd-server
      port:
        number: 80
  ingressClassName: nginx
  tls:
  - hosts:
    - argocd.drive.test.sunet.se
    secretName: tls-secret
  rules:
  - host: argocd.drive.test.sunet.se
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: argocd-server
            port:
              number: 80

View file

@@ -1,6 +0,0 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources: [../../../base]
patches:
- path: argocd-ingress.yaml

View file

@@ -3,9 +3,6 @@ kind: Ingress
 metadata:
   name: argocd-ingress
   namespace: argocd
-  annotations:
-    cert-manager.io/issuer: "letsencrypt"
-    acme.cert-manager.io/http01-edit-in-place: "true"
 spec:
   defaultBackend:
     service:

View file

@@ -1,30 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: argocd-ingress
  namespace: argocd
  annotations:
    cert-manager.io/issuer: "letsencrypt"
    acme.cert-manager.io/http01-edit-in-place: "true"
spec:
  defaultBackend:
    service:
      name: argocd-server
      port:
        number: 80
  ingressClassName: nginx
  tls:
  - hosts:
    - vr-argocd.drive.test.sunet.se
    secretName: tls-secret
  rules:
  - host: vr-argocd.drive.test.sunet.se
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: argocd-server
            port:
              number: 80

View file

@@ -1,6 +0,0 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources: [../../../base]
patches:
- path: argocd-ingress.yaml

View file

@@ -30,7 +30,7 @@ spec:
       restartPolicy: Always
       containers:
       - name: customer
-        image: docker.sunet.se/drive/nextcloud-custom:29.0.9.3-1
+        image: docker.sunet.se/drive/nextcloud-custom:28.0.5.2-2
         volumeMounts:
         - name: nextcloud-data
           mountPath: /var/www/html/config/

View file

@@ -4,13 +4,15 @@ kind: Ingress
 metadata:
   name: customer-ingress
   annotations:
-    kubernetes.io/ingress.class: nginx
+    kubernetes.io/ingress.class: traefik
+    traefik.ingress.kubernetes.io/router.entrypoints: websecure
+    traefik.ingress.kubernetes.io/router.tls: "true"
 spec:
   tls:
   - hosts:
     - nordunet.drive.test.sunet.se
     secretName: tls-secret
-  ingressClassName: nginx
   rules:
   - host: nordunet.drive.test.sunet.se
     http:

View file

@@ -4,13 +4,15 @@ kind: Ingress
 metadata:
   name: customer-ingress
   annotations:
-    kubernetes.io/ingress.class: nginx
+    kubernetes.io/ingress.class: traefik
+    traefik.ingress.kubernetes.io/router.entrypoints: websecure
+    traefik.ingress.kubernetes.io/router.tls: "true"
 spec:
   tls:
   - hosts:
     - vinnova.drive.test.sunet.se
     secretName: tls-secret
-  ingressClassName: nginx
   rules:
   - host: vinnova.drive.test.sunet.se
     http:

View file

@@ -1,30 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: health-ingress
  namespace: health
  annotations:
    kubernetes.io/ingress.class: nginx
spec:
  defaultBackend:
    service:
      name: health-node
      port:
        number: 8443
  ingressClassName: nginx
  tls:
  - hosts:
    - sunet-kube.drive.sunet.se
    secretName: tls-secret
  rules:
  - host: sunet-kube.drive.sunet.se
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: health-node
            port:
              number: 8080

View file

@@ -1,6 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../base
patches:
- path: health-ingress.yml

View file

@@ -5,18 +5,20 @@ metadata:
   name: health-ingress
   namespace: health
   annotations:
-    kubernetes.io/ingress.class: nginx
+    traefik.ingress.kubernetes.io/router.entrypoints: websecure
+    traefik.ingress.kubernetes.io/router.tls: "true"
 spec:
   defaultBackend:
     service:
       name: health-node
       port:
         number: 8443
-  ingressClassName: nginx
+  ingressClassName: traefik
   tls:
   - hosts:
     - kube.drive.test.sunet.se
     secretName: tls-secret
   rules:
   - host: kube.drive.test.sunet.se
     http:

View file

@@ -1,27 +1,27 @@
 annotations:
   artifacthub.io/images: |
-    - image: quay.io/jupyterhub/configurable-http-proxy:4.6.2
+    - image: quay.io/jupyterhub/configurable-http-proxy:4.6.1
       name: configurable-http-proxy
-    - image: quay.io/jupyterhub/k8s-hub:4.0.0
+    - image: quay.io/jupyterhub/k8s-hub:3.2.1
       name: k8s-hub
-    - image: quay.io/jupyterhub/k8s-image-awaiter:4.0.0
+    - image: quay.io/jupyterhub/k8s-image-awaiter:3.2.1
      name: k8s-image-awaiter
-    - image: quay.io/jupyterhub/k8s-network-tools:4.0.0
+    - image: quay.io/jupyterhub/k8s-network-tools:3.2.1
      name: k8s-network-tools
-    - image: quay.io/jupyterhub/k8s-secret-sync:4.0.0
+    - image: quay.io/jupyterhub/k8s-secret-sync:3.2.1
      name: k8s-secret-sync
-    - image: quay.io/jupyterhub/k8s-singleuser-sample:4.0.0
+    - image: quay.io/jupyterhub/k8s-singleuser-sample:3.2.1
      name: k8s-singleuser-sample
-    - image: registry.k8s.io/kube-scheduler:v1.30.6
+    - image: registry.k8s.io/kube-scheduler:v1.26.11
      name: kube-scheduler
-    - image: registry.k8s.io/pause:3.10
+    - image: registry.k8s.io/pause:3.9
      name: pause
-    - image: registry.k8s.io/pause:3.10
+    - image: registry.k8s.io/pause:3.9
      name: pause
-    - image: traefik:v3.2.0
+    - image: traefik:v2.10.5
      name: traefik
 apiVersion: v2
-appVersion: 5.2.1
+appVersion: 4.0.2
 description: Multi-user Jupyter installation
 home: https://z2jh.jupyter.org
 icon: https://hub.jupyter.org/helm-chart/images/hublogo.svg
@@ -29,7 +29,7 @@ keywords:
 - jupyter
 - jupyterhub
 - z2jh
-kubeVersion: '>=1.28.0-0'
+kubeVersion: '>=1.23.0-0'
 maintainers:
 - email: erik@sundellopensource.se
   name: Erik Sundell
@@ -38,4 +38,4 @@ maintainers:
   name: jupyterhub
 sources:
 - https://github.com/jupyterhub/zero-to-jupyterhub-k8s
-version: 4.0.0
+version: 3.2.1

View file

@@ -107,35 +107,28 @@ c.JupyterHub.hub_connect_url = (
 )

 # implement common labels
-# This mimics the jupyterhub.commonLabels helper, but declares managed-by to
-# kubespawner instead of helm.
-#
-# The labels app and release are old labels enabled to be deleted in z2jh 5, but
-# for now retained to avoid a breaking change in z2jh 4 that would force user
-# server restarts. Restarts would be required because NetworkPolicy resources
-# must select old/new pods with labels that then needs to be seen on both
-# old/new pods, and we want these resources to keep functioning for old/new user
-# server pods during an upgrade.
-#
+# this duplicates the jupyterhub.commonLabels helper
 common_labels = c.KubeSpawner.common_labels = {}
-common_labels["app.kubernetes.io/name"] = common_labels["app"] = get_config(
+common_labels["app"] = get_config(
     "nameOverride",
     default=get_config("Chart.Name", "jupyterhub"),
 )
-release = get_config("Release.Name")
-if release:
-    common_labels["app.kubernetes.io/instance"] = common_labels["release"] = release
+common_labels["heritage"] = "jupyterhub"
 chart_name = get_config("Chart.Name")
 chart_version = get_config("Chart.Version")
 if chart_name and chart_version:
-    common_labels["helm.sh/chart"] = common_labels["chart"] = (
-        f"{chart_name}-{chart_version.replace('+', '_')}"
+    common_labels["chart"] = "{}-{}".format(
+        chart_name,
+        chart_version.replace("+", "_"),
     )
-common_labels["app.kubernetes.io/managed-by"] = "kubespawner"
+release = get_config("Release.Name")
+if release:
+    common_labels["release"] = release

 c.KubeSpawner.namespace = os.environ.get("POD_NAMESPACE", "default")

 # Max number of consecutive failures before the Hub restarts itself
+# requires jupyterhub 0.9.2
 set_config_if_not_none(
     c.Spawner,
     "consecutive_failure_limit",
@@ -256,8 +249,7 @@ if tolerations:
 storage_type = get_config("singleuser.storage.type")
 if storage_type == "dynamic":
     pvc_name_template = get_config("singleuser.storage.dynamic.pvcNameTemplate")
-    if pvc_name_template:
-        c.KubeSpawner.pvc_name_template = pvc_name_template
+    c.KubeSpawner.pvc_name_template = pvc_name_template
     volume_name_template = get_config("singleuser.storage.dynamic.volumeNameTemplate")
     c.KubeSpawner.storage_pvc_ensure = True
     set_config_if_not_none(
@@ -276,14 +268,13 @@ if storage_type == "dynamic":
     c.KubeSpawner.volumes = [
         {
             "name": volume_name_template,
-            "persistentVolumeClaim": {"claimName": "{pvc_name}"},
+            "persistentVolumeClaim": {"claimName": pvc_name_template},
         }
     ]
     c.KubeSpawner.volume_mounts = [
         {
             "mountPath": get_config("singleuser.storage.homeMountPath"),
             "name": volume_name_template,
-            "subPath": get_config("singleuser.storage.dynamic.subPath"),
         }
     ]
 elif storage_type == "static":
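
Concretely, with the chart defaults ("jupyterhub", version 3.2.1) and a hypothetical release name of sunet-jupyterhub, the right-hand (3.2.1) code builds this label set for spawned resources (a sketch; the release name is illustrative):

app: jupyterhub
heritage: jupyterhub
chart: jupyterhub-3.2.1
release: sunet-jupyterhub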

View file

@@ -3,7 +3,6 @@ Utility methods for use in jupyterhub_config.py and dynamic subconfigs.

 Methods here can be imported by extraConfig in values.yaml
 """
-
 import os
 from collections.abc import Mapping
 from functools import lru_cache

View file

@@ -48,6 +48,7 @@
 - commonLabels | uses appLabel
 - labels | uses commonLabels
 - matchLabels | uses labels
+- podCullerSelector | uses matchLabels

 ## Example usage
@@ -111,62 +112,31 @@
 {{- /*
   jupyterhub.commonLabels:
     Foundation for "jupyterhub.labels".
-
-    Provides old labels:
-      app
-      release
-      chart (omitted for matchLabels)
-      heritage (omitted for matchLabels)
-
-    Provides modern labels (omitted for matchLabels):
-      app.kubernetes.io/name ("app")
-      app.kubernetes.io/instance ("release")
-      helm.sh/chart ("chart")
-      app.kubernetes.io/managed-by ("heritage")
+    Provides labels: app, release, (chart and heritage).
 */}}
 {{- define "jupyterhub.commonLabels" -}}
-app: {{ .appLabel | default (include "jupyterhub.appLabel" .) | quote }}
-release: {{ .Release.Name | quote }}
+app: {{ .appLabel | default (include "jupyterhub.appLabel" .) }}
+release: {{ .Release.Name }}
 {{- if not .matchLabels }}
 chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
-heritage: {{ .Release.Service }}
-app.kubernetes.io/name: {{ .appLabel | default (include "jupyterhub.appLabel" .) | quote }}
-app.kubernetes.io/instance: {{ .Release.Name | quote }}
-helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
-app.kubernetes.io/managed-by: {{ .Release.Service }}
+heritage: {{ .heritageLabel | default .Release.Service }}
 {{- end }}
 {{- end }}

 {{- /*
   jupyterhub.labels:
-    Provides old labels:
-      component
-      app
-      release
-      chart (omitted for matchLabels)
-      heritage (omitted for matchLabels)
-
-    Provides modern labels (omitted for matchLabels):
-      app.kubernetes.io/component ("component")
-      app.kubernetes.io/name ("app")
-      app.kubernetes.io/instance release ("release")
-      helm.sh/chart ("chart")
-      app.kubernetes.io/managed-by ("heritage")
+    Provides labels: component, app, release, (chart and heritage).
 */}}
 {{- define "jupyterhub.labels" -}}
 component: {{ include "jupyterhub.componentLabel" . }}
-{{- if not .matchLabels }}
-app.kubernetes.io/component: {{ include "jupyterhub.componentLabel" . }}
-{{- end }}
 {{ include "jupyterhub.commonLabels" . }}
 {{- end }}

 {{- /*
   jupyterhub.matchLabels:
-    Provides old labels:
-      component
-      app
-      release
+    Used to provide pod selection labels: component, app, release.
 */}}
 {{- define "jupyterhub.matchLabels" -}}
 {{- $_ := merge (dict "matchLabels" true) . -}}
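
As a sketch of the rendered difference: for a hub pod in a hypothetical release named sunet-jupyterhub, the right-hand (3.2.1) jupyterhub.matchLabels helper merges in matchLabels=true, so chart and heritage are omitted and the selector renders as:

component: hub
app: jupyterhub
release: sunet-jupyterhub

The left-hand (4.0.0) helper additionally emits the app.kubernetes.io/* equivalents on labels while keeping them out of matchLabels, which is why selectors stay compatible across both label families.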

View file

@@ -5,7 +5,7 @@ metadata:
   labels:
     {{- include "jupyterhub.labels" . | nindent 4 }}
 spec:
-  {{- if not (typeIs "<nil>" .Values.hub.revisionHistoryLimit) }}
+  {{- if typeIs "int" .Values.hub.revisionHistoryLimit }}
   revisionHistoryLimit: {{ .Values.hub.revisionHistoryLimit }}
   {{- end }}
   replicas: 1
@@ -97,6 +97,9 @@ spec:
         {{- . | toYaml | nindent 8 }}
         {{- end }}
       containers:
+        {{- with .Values.hub.extraContainers }}
+        {{- . | toYaml | nindent 8 }}
+        {{- end }}
         - name: hub
           image: {{ .Values.hub.image.name }}:{{ .Values.hub.image.tag }}
           {{- with .Values.hub.command }}
@@ -241,9 +244,6 @@ spec:
               path: {{ .Values.hub.baseUrl | trimSuffix "/" }}/hub/health
               port: http
           {{- end }}
-        {{- with .Values.hub.extraContainers }}
-        {{- . | toYaml | nindent 8 }}
-        {{- end }}
       {{- with .Values.hub.extraPodSpec }}
       {{- . | toYaml | nindent 6 }}
       {{- end }}

View file

@@ -8,7 +8,7 @@ type: Opaque
 data:
   {{- $values := merge dict .Values }}
   {{- /* also passthrough subset of Chart / Release */}}
-  {{- $_ := set $values "Chart" (dict "Name" .Chart.Name "Version" .Chart.Version "AppVersion" .Chart.AppVersion) }}
+  {{- $_ := set $values "Chart" (dict "Name" .Chart.Name "Version" .Chart.Version) }}
   {{- $_ := set $values "Release" (pick .Release "Name" "Namespace" "Service") }}
   values.yaml: {{ $values | toYaml | b64enc | quote }}

View file

@@ -31,9 +31,6 @@ spec:
       {{- with .Values.hub.service.ports.nodePort }}
       nodePort: {{ . }}
       {{- end }}
-      {{- with .Values.hub.service.ports.appProtocol }}
-      appProtocol: {{ . }}
-      {{- end }}
   {{- with .Values.hub.service.extraPorts }}
   {{- . | toYaml | nindent 4 }}

View file

@@ -34,7 +34,7 @@ spec:
     type: RollingUpdate
     rollingUpdate:
       maxUnavailable: 100%
-  {{- if not (typeIs "<nil>" .Values.prePuller.revisionHistoryLimit) }}
+  {{- if typeIs "int" .Values.prePuller.revisionHistoryLimit }}
   revisionHistoryLimit: {{ .Values.prePuller.revisionHistoryLimit }}
   {{- end }}
   template:

View file

@@ -24,9 +24,6 @@ spec:
                 name: {{ include "jupyterhub.proxy-public.fullname" $ }}
                 port:
                   name: http
-          {{- with $.Values.ingress.extraPaths }}
-          {{- . | toYaml | nindent 10 }}
-          {{- end }}
       {{- if $host }}
       host: {{ $host | quote }}
       {{- end }}

View file

@@ -8,7 +8,7 @@ metadata:
   labels:
     {{- include "jupyterhub.labels" . | nindent 4 }}
 spec:
-  {{- if not (typeIs "<nil>" .Values.proxy.traefik.revisionHistoryLimit) }}
+  {{- if typeIs "int" .Values.proxy.traefik.revisionHistoryLimit }}
   revisionHistoryLimit: {{ .Values.proxy.traefik.revisionHistoryLimit }}
   {{- end }}
   replicas: 1
@@ -130,10 +130,10 @@ spec:
           {{- end }}
           args:
             - watch-save
-            - --label=app.kubernetes.io/name={{ include "jupyterhub.appLabel" . }}
-            - --label=app.kubernetes.io/instance={{ .Release.Name }}
-            - --label=helm.sh/chart={{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
-            - --label=app.kubernetes.io/managed-by=secret-sync
+            - --label=app={{ include "jupyterhub.appLabel" . }}
+            - --label=release={{ .Release.Name }}
+            - --label=chart={{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+            - --label=heritage=secret-sync
             - {{ include "jupyterhub.proxy-public-tls.fullname" . }}
             - acme.json
             - /etc/acme/acme.json
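
Rendered for a hypothetical release named sunet-jupyterhub, the right-hand (3.2.1) secret-sync sidecar tags the TLS secret with the legacy label keys rather than the app.kubernetes.io/* keys, so its args come out roughly as:

- watch-save
- --label=app=jupyterhub
- --label=release=sunet-jupyterhub
- --label=chart=jupyterhub-3.2.1
- --label=heritage=secret-sync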

View file

@@ -7,7 +7,7 @@ metadata:
   labels:
     {{- include "jupyterhub.labels" . | nindent 4 }}
 spec:
-  {{- if not (typeIs "<nil>" .Values.proxy.chp.revisionHistoryLimit) }}
+  {{- if typeIs "int" .Values.proxy.chp.revisionHistoryLimit }}
   revisionHistoryLimit: {{ .Values.proxy.chp.revisionHistoryLimit }}
   {{- end }}
   replicas: 1
@@ -100,9 +100,6 @@ spec:
             {{- if .Values.debug.enabled }}
             - --log-level=debug
             {{- end }}
-            {{- if .Values.hub.config.JupyterHub.subdomain_host }}
-            - --host-routing
-            {{- end }}
             {{- range .Values.proxy.chp.extraCommandLineFlags }}
             - {{ tpl . $ }}
             {{- end }}

View file

@@ -16,7 +16,7 @@ metadata:
     {{- include "jupyterhub.labels" . | nindent 4 }}
 spec:
   podManagementPolicy: Parallel
-  {{- if not (typeIs "<nil>" .Values.scheduling.userPlaceholder.revisionHistoryLimit) }}
+  {{- if typeIs "int" .Values.scheduling.userPlaceholder.revisionHistoryLimit }}
   revisionHistoryLimit: {{ .Values.scheduling.userPlaceholder.revisionHistoryLimit }}
   {{- end }}
   replicas: {{ .Values.scheduling.userPlaceholder.replicas }}

View file

@@ -10,14 +10,26 @@ data:
     This is configuration of a k8s official kube-scheduler binary running in the
     user-scheduler.

+    The config version and kube-scheduler binary version has a fallback for k8s
+    clusters versioned v1.23 or lower because:
+
+    - v1 / v1beta3 config requires kube-scheduler binary >=1.25 / >=1.23
+    - kube-scheduler binary >=1.25 requires storage.k8s.io/v1/CSIStorageCapacity
+      available first in k8s >=1.24
+
     ref: https://kubernetes.io/docs/reference/scheduling/config/
     ref: https://kubernetes.io/docs/reference/config-api/kube-scheduler-config.v1/
+    ref: https://kubernetes.io/docs/reference/config-api/kube-scheduler-config.v1beta3/
   */}}
   config.yaml: |
+    {{- if semverCompare ">=1.24.0-0" .Capabilities.KubeVersion.Version }}
     apiVersion: kubescheduler.config.k8s.io/v1
+    {{- else }}
+    apiVersion: kubescheduler.config.k8s.io/v1beta3
+    {{- end }}
     kind: KubeSchedulerConfiguration
     leaderElection:
-      resourceLock: leases
+      resourceLock: endpointsleases
       resourceName: {{ include "jupyterhub.user-scheduler-lock.fullname" . }}
       resourceNamespace: "{{ .Release.Namespace }}"
     profiles:
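
On a cluster older than v1.24, the right-hand (3.2.1) template therefore renders the fallback configuration, roughly as below (a sketch; the lock name and namespace are release-derived and illustrative here):

apiVersion: kubescheduler.config.k8s.io/v1beta3
kind: KubeSchedulerConfiguration
leaderElection:
  resourceLock: endpointsleases
  resourceName: user-scheduler-lock
  resourceNamespace: "jupyterhub"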

View file

@@ -6,7 +6,7 @@ metadata:
   labels:
     {{- include "jupyterhub.labels" . | nindent 4 }}
 spec:
-  {{- if not (typeIs "<nil>" .Values.scheduling.userScheduler.revisionHistoryLimit) }}
+  {{- if typeIs "int" .Values.scheduling.userScheduler.revisionHistoryLimit }}
   revisionHistoryLimit: {{ .Values.scheduling.userScheduler.revisionHistoryLimit }}
   {{- end }}
   replicas: {{ .Values.scheduling.userScheduler.replicas }}
@@ -50,7 +50,17 @@ spec:
       {{- end }}
       containers:
         - name: kube-scheduler
+          {{- if semverCompare ">=1.24.0-0" .Capabilities.KubeVersion.Version }}
           image: {{ .Values.scheduling.userScheduler.image.name }}:{{ .Values.scheduling.userScheduler.image.tag }}
+          {{- else }}
+          # WARNING: The tag of this image is hardcoded, and the
+          #          "scheduling.userScheduler.image.tag" configuration of the
+          #          Helm chart that generated this resource manifest isn't
+          #          respected. If you install the Helm chart in a k8s cluster
+          #          versioned 1.24 or higher, your configuration will be
+          #          respected.
+          image: {{ .Values.scheduling.userScheduler.image.name }}:v1.23.14
+          {{- end }}
           {{- with .Values.scheduling.userScheduler.image.pullPolicy }}
           imagePullPolicy: {{ . }}
           {{- end }}

View file

@@ -20,12 +20,8 @@ rules:
   #   - changed in 1.21: get/list/watch permission for namespace,
   #     csidrivers, csistoragecapacities was added.
   #   - unchanged between 1.22 and 1.27
-  #   - changed in 1.28: permissions to get/update lock endpoint resource
-  #     removed
-  #   - unchanged between 1.28 and 1.30
-  #   - (1.31 is known to bring some changes below)
   #
-  # ref: https://github.com/kubernetes/kubernetes/blob/v1.30.0/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml#L721-L862
+  # ref: https://github.com/kubernetes/kubernetes/blob/v1.27.0/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml#L736-L892
   - apiGroups:
       - ""
       - events.k8s.io
@@ -50,6 +46,21 @@ rules:
     verbs:
       - get
       - update
+  - apiGroups:
+      - ""
+    resources:
+      - endpoints
+    verbs:
+      - create
+  - apiGroups:
+      - ""
+    resourceNames:
+      - {{ include "jupyterhub.user-scheduler-lock.fullname" . }}
+    resources:
+      - endpoints
+    verbs:
+      - get
+      - update
   - apiGroups:
       - ""
     resources:
@@ -172,9 +183,9 @@ rules:
   # Copied from the system:volume-scheduler ClusterRole of the k8s version
   # matching the kube-scheduler binary we use.
   #
-  # NOTE: These rules have not changed between 1.12 and 1.29.
+  # NOTE: These rules have not changed between 1.12 and 1.27.
   #
-  # ref: https://github.com/kubernetes/kubernetes/blob/v1.29.0/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml#L1283-L1310
+  # ref: https://github.com/kubernetes/kubernetes/blob/v1.27.0/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml#L1311-L1338
   - apiGroups:
       - ""
     resources:

File diff suppressed because one or more lines are too long

View file

@@ -41,7 +41,6 @@ hub:
     annotations: {}
     ports:
       nodePort:
-      appProtocol:
     extraPorts: []
     loadBalancerIP:
   baseUrl: /
@@ -85,21 +84,16 @@ hub:
   extraVolumeMounts: []
   image:
     name: quay.io/jupyterhub/k8s-hub
-    tag: "4.0.0"
+    tag: "3.2.1"
     pullPolicy:
     pullSecrets: []
   resources: {}
   podSecurityContext:
-    runAsNonRoot: true
     fsGroup: 1000
-    seccompProfile:
-      type: "RuntimeDefault"
   containerSecurityContext:
     runAsUser: 1000
     runAsGroup: 1000
     allowPrivilegeEscalation: false
-    capabilities:
-      drop: ["ALL"]
   lifecycle: {}
   loadRoles: {}
   services: {}
@@ -203,20 +197,15 @@ proxy:
   chp:
     revisionHistoryLimit:
     containerSecurityContext:
-      runAsNonRoot: true
       runAsUser: 65534 # nobody user
       runAsGroup: 65534 # nobody group
       allowPrivilegeEscalation: false
-      capabilities:
-        drop: ["ALL"]
-      seccompProfile:
-        type: "RuntimeDefault"
     image:
       name: quay.io/jupyterhub/configurable-http-proxy
       # tag is automatically bumped to new patch versions by the
       # watch-dependencies.yaml workflow.
       #
-      tag: "4.6.2" # https://github.com/jupyterhub/configurable-http-proxy/tags
+      tag: "4.6.1" # https://github.com/jupyterhub/configurable-http-proxy/tags
       pullPolicy:
       pullSecrets: []
     extraCommandLineFlags: []
@@ -261,20 +250,15 @@ proxy:
   traefik:
     revisionHistoryLimit:
     containerSecurityContext:
-      runAsNonRoot: true
       runAsUser: 65534 # nobody user
       runAsGroup: 65534 # nobody group
       allowPrivilegeEscalation: false
-      capabilities:
-        drop: ["ALL"]
-      seccompProfile:
-        type: "RuntimeDefault"
     image:
       name: traefik
       # tag is automatically bumped to new patch versions by the
       # watch-dependencies.yaml workflow.
       #
-      tag: "v3.2.0" # ref: https://hub.docker.com/_/traefik?tab=tags
+      tag: "v2.10.5" # ref: https://hub.docker.com/_/traefik?tab=tags
       pullPolicy:
       pullSecrets: []
     hsts:
@@ -316,17 +300,12 @@ proxy:
     extraPodSpec: {}
   secretSync:
     containerSecurityContext:
-      runAsNonRoot: true
       runAsUser: 65534 # nobody user
       runAsGroup: 65534 # nobody group
       allowPrivilegeEscalation: false
-      capabilities:
-        drop: ["ALL"]
-      seccompProfile:
-        type: "RuntimeDefault"
     image:
       name: quay.io/jupyterhub/k8s-secret-sync
-      tag: "4.0.0"
+      tag: "3.2.1"
       pullPolicy:
       pullSecrets: []
     resources: {}
@@ -366,7 +345,7 @@ singleuser:
   networkTools:
     image:
       name: quay.io/jupyterhub/k8s-network-tools
-      tag: "4.0.0"
+      tag: "3.2.1"
      pullPolicy:
      pullSecrets: []
     resources: {}
@@ -413,13 +392,12 @@ singleuser:
     homeMountPath: /home/jovyan
     dynamic:
       storageClass:
-      pvcNameTemplate:
-      volumeNameTemplate: volume-{user_server}
+      pvcNameTemplate: claim-{username}{servername}
+      volumeNameTemplate: volume-{username}{servername}
       storageAccessModes: [ReadWriteOnce]
-      subPath:
   image:
     name: quay.io/jupyterhub/k8s-singleuser-sample
-    tag: "4.0.0"
+    tag: "3.2.1"
     pullPolicy:
     pullSecrets: []
   startTimeout: 300
@@ -454,24 +432,19 @@ scheduling:
     #
     plugins:
       score:
-        # We make use of the default scoring plugins, but we re-enable some with
-        # a new priority, leave some enabled with their lower default priority,
-        # and disable some.
-        #
-        # Below are the default scoring plugins as of 2024-09-23 according to
-        # https://kubernetes.io/docs/reference/scheduling/config/#scheduling-plugins.
-        #
-        # Re-enabled with high priority:
+        # These scoring plugins are enabled by default according to
+        # https://kubernetes.io/docs/reference/scheduling/config/#scheduling-plugins
+        # 2022-02-22.
+        #
+        # Enabled with high priority:
         # - NodeAffinity
         # - InterPodAffinity
         # - NodeResourcesFit
         # - ImageLocality
-        #
         # Remains enabled with low default priority:
         # - TaintToleration
         # - PodTopologySpread
         # - VolumeBinding
-        #
         # Disabled for scoring:
         # - NodeResourcesBalancedAllocation
         #
@@ -500,25 +473,20 @@ scheduling:
       - name: NodeResourcesFit
         args:
           scoringStrategy:
-            type: MostAllocated
             resources:
               - name: cpu
                 weight: 1
               - name: memory
                 weight: 1
+            type: MostAllocated
     containerSecurityContext:
-      runAsNonRoot: true
       runAsUser: 65534 # nobody user
       runAsGroup: 65534 # nobody group
       allowPrivilegeEscalation: false
-      capabilities:
-        drop: ["ALL"]
-      seccompProfile:
-        type: "RuntimeDefault"
     image:
       # IMPORTANT: Bumping the minor version of this binary should go hand in
-      #            hand with an inspection of the user-scheduelr's RBAC
-      #            resources that we have forked in
+      #            hand with an inspection of the user-scheduelrs RBAC resources
+      #            that we have forked in
       #            templates/scheduling/user-scheduler/rbac.yaml.
       #
       # Debugging advice:
@@ -551,7 +519,7 @@ scheduling:
       #          here. We aim to stay around 1 minor version behind the latest k8s
       #          version.
       #
-      tag: "v1.30.6" # ref: https://github.com/kubernetes/kubernetes/tree/master/CHANGELOG
+      tag: "v1.26.11" # ref: https://github.com/kubernetes/kubernetes/tree/master/CHANGELOG
       pullPolicy:
       pullSecrets: []
     nodeSelector: {}
@@ -583,7 +551,7 @@ scheduling:
       #
       # If you update this, also update prePuller.pause.image.tag
       #
-      tag: "3.10"
+      tag: "3.9"
       pullPolicy:
       pullSecrets: []
     revisionHistoryLimit:
@@ -591,14 +559,9 @@ scheduling:
     labels: {}
     annotations: {}
     containerSecurityContext:
-      runAsNonRoot: true
       runAsUser: 65534 # nobody user
       runAsGroup: 65534 # nobody group
       allowPrivilegeEscalation: false
-      capabilities:
-        drop: ["ALL"]
-      seccompProfile:
-        type: "RuntimeDefault"
     resources: {}
   corePods:
     tolerations:
@@ -632,14 +595,9 @@ prePuller:
   annotations: {}
   resources: {}
   containerSecurityContext:
-    runAsNonRoot: true
     runAsUser: 65534 # nobody user
     runAsGroup: 65534 # nobody group
     allowPrivilegeEscalation: false
-    capabilities:
-      drop: ["ALL"]
-    seccompProfile:
-      type: "RuntimeDefault"
   extraTolerations: []
   # hook relates to the hook-image-awaiter Job and hook-image-puller DaemonSet
   hook:
@@ -648,18 +606,13 @@ prePuller:
     # image and the configuration below relates to the hook-image-awaiter Job
     image:
       name: quay.io/jupyterhub/k8s-image-awaiter
-      tag: "4.0.0"
+      tag: "3.2.1"
       pullPolicy:
       pullSecrets: []
     containerSecurityContext:
-      runAsNonRoot: true
       runAsUser: 65534 # nobody user
       runAsGroup: 65534 # nobody group
       allowPrivilegeEscalation: false
-      capabilities:
-        drop: ["ALL"]
-      seccompProfile:
-        type: "RuntimeDefault"
     podSchedulingWaitDuration: 10
     nodeSelector: {}
     tolerations: []
@@ -674,14 +627,9 @@ prePuller:
   extraImages: {}
   pause:
     containerSecurityContext:
-      runAsNonRoot: true
       runAsUser: 65534 # nobody user
       runAsGroup: 65534 # nobody group
       allowPrivilegeEscalation: false
-      capabilities:
-        drop: ["ALL"]
-      seccompProfile:
-        type: "RuntimeDefault"
     image:
       name: registry.k8s.io/pause
       # tag is automatically bumped to new patch versions by the
@@ -689,7 +637,7 @@ prePuller:
       #
       # If you update this, also update scheduling.userPlaceholder.image.tag
       #
-      tag: "3.10"
+      tag: "3.9"
       pullPolicy:
       pullSecrets: []
@@ -701,7 +649,6 @@
   pathSuffix:
   pathType: Prefix
   tls: []
-  extraPaths: []

 # cull relates to the jupyterhub-idle-culler service, responsible for evicting
 # inactive singleuser pods.
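
The storage-templating change above is visible in resource names: KubeSpawner expands {username} and {servername} per user, so for a hypothetical user jovyan running the default (unnamed) server, the 3.2.1 defaults resolve roughly to:

pvcNameTemplate: claim-{username}{servername}      # -> PVC claim-jovyan
volumeNameTemplate: volume-{username}{servername}  # -> volume volume-jovyan

while the 4.0.0 side leaves pvcNameTemplate unset (so KubeSpawner's own default applies) and names volumes with the newer {user_server} expansion.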

View file

@@ -1,15 +0,0 @@
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
  name: letsencrypt
spec:
  acme:
    server: https://acme-v02.api.letsencrypt.org/directory
    email: drive@sunet.se
    privateKeySecretRef:
      name: letsencrypt
    solvers:
    - http01:
        ingress:
          class: nginx

View file

@@ -4,6 +4,7 @@ kind: Ingress
 metadata:
   name: jupyterhub-ingress
   annotations:
+    kubernetes.io/ingress.class: traefik
     traefik.ingress.kubernetes.io/router.entrypoints: websecure
     traefik.ingress.kubernetes.io/router.tls: "true"
 spec:

View file

@@ -1,4 +1,4 @@
 ---
-resources: [jupyterhub-ingress.yml, jupyterhub-service.yml, jupyterhub-cert-issuer.yaml]
+resources: [jupyterhub-ingress.yml, jupyterhub-service.yml]
 apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization

View file

@@ -1,20 +1,23 @@
+---
 apiVersion: networking.k8s.io/v1
 kind: Ingress
 metadata:
   name: jupyterhub-ingress
   annotations:
-    kubernetes.io/ingress.class: nginx
+    kubernetes.io/ingress.class: traefik
+    traefik.ingress.kubernetes.io/router.entrypoints: websecure
+    traefik.ingress.kubernetes.io/router.tls: "true"
 spec:
   defaultBackend:
     service:
       name: proxy-public
       port:
-        number: 80
-  ingressClassName: nginx
+        number: 8443
   tls:
   - hosts:
     - vr-jupyter.drive.sunet.se
     secretName: prod-tls-secret
   rules:
   - host: vr-jupyter.drive.sunet.se
     http:

View file

@@ -1,32 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: jupyterhub-ingress
  annotations:
    cert-manager.io/issuer: "letsencrypt"
    acme.cert-manager.io/http01-edit-in-place: "true"
    kubernetes.io/ingress.class: nginx
spec:
  ingressClassName: nginx
  defaultBackend:
    service:
      name: proxy-public
      port:
        number: 80
  tls:
  - hosts:
    - sunet-jupyter.drive.sunet.se
    secretName: tls-secret
  rules:
  - host: sunet-jupyter.drive.sunet.se
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: proxy-public
            port:
              number: 80

View file

@@ -1,16 +0,0 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources: [../../../base/]
helmCharts:
- includeCRDs: true
  name: jupyterhub
  releaseName: sunet-jupyterhub
  valuesFile: ./values/values.yaml
  version: 3.2.1
  namespace: sunet-jupyterhub
helmGlobals:
  chartHome: ../../../base/charts/
patches:
- path: jupyterhub-ingress.yml
- path: jupyterhub-service.yml

View file

@@ -1,337 +0,0 @@
debug:
enabled: true
hub:
config:
Authenticator:
auto_login: true
enable_auth_state: true
JupyterHub:
tornado_settings:
headers: { 'Content-Security-Policy': "frame-ancestors *;" }
db:
pvc:
storageClassName: csi-sc-cinderplugin
extraConfig:
oauthCode: |
import time
import requests
from datetime import datetime
from oauthenticator.generic import GenericOAuthenticator
token_url = 'https://' + os.environ['NEXTCLOUD_HOST'] + '/index.php/apps/oauth2/api/v1/token'
debug = os.environ.get('NEXTCLOUD_DEBUG_OAUTH', 'false').lower() in ['true', '1', 'yes']
def get_nextcloud_access_token(refresh_token):
client_id = os.environ['NEXTCLOUD_CLIENT_ID']
client_secret = os.environ['NEXTCLOUD_CLIENT_SECRET']
code = refresh_token
data = {
'grant_type': 'refresh_token',
'code': code,
'refresh_token': refresh_token,
'client_id': client_id,
'client_secret': client_secret
}
response = requests.post(token_url, data=data)
if debug:
print(response.text)
return response.json()
def post_auth_hook(authenticator, handler, authentication):
user = authentication['auth_state']['oauth_user']['ocs']['data']['id']
auth_state = authentication['auth_state']
auth_state['token_expires'] = time.time() + auth_state['token_response']['expires_in']
authentication['auth_state'] = auth_state
return authentication
class NextcloudOAuthenticator(GenericOAuthenticator):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.user_dict = {}
async def pre_spawn_start(self, user, spawner):
super().pre_spawn_start(user, spawner)
auth_state = await user.get_auth_state()
if not auth_state:
return
access_token = auth_state['access_token']
spawner.environment['NEXTCLOUD_ACCESS_TOKEN'] = access_token
async def refresh_user(self, user, handler=None):
auth_state = await user.get_auth_state()
if not auth_state:
if debug:
print(f'auth_state missing for {user}')
return False
access_token = auth_state['access_token']
refresh_token = auth_state['refresh_token']
token_response = auth_state['token_response']
now = time.time()
now_hr = datetime.fromtimestamp(now)
expires = auth_state['token_expires']
expires_hr = datetime.fromtimestamp(expires)
expires = 0
if debug:
print(f'auth_state for {user}: {auth_state}')
if now >= expires:
if debug:
print(f'Time is: {now_hr}, token expired: {expires_hr}')
print(f'Refreshing token for {user}')
try:
token_response = get_nextcloud_access_token(refresh_token)
auth_state['access_token'] = token_response['access_token']
auth_state['refresh_token'] = token_response['refresh_token']
auth_state['token_expires'] = now + token_response['expires_in']
auth_state['token_response'] = token_response
if debug:
print(f'Successfully refreshed token for {user.name}')
print(f'auth_state for {user.name}: {auth_state}')
return {'name': user.name, 'auth_state': auth_state}
except Exception as e:
if debug:
print(f'Failed to refresh token for {user}')
return False
return False
if debug:
print(f'Time is: {now_hr}, token expires: {expires_hr}')
return True
c.JupyterHub.authenticator_class = NextcloudOAuthenticator
c.NextcloudOAuthenticator.client_id = os.environ['NEXTCLOUD_CLIENT_ID']
c.NextcloudOAuthenticator.client_secret = os.environ['NEXTCLOUD_CLIENT_SECRET']
c.NextcloudOAuthenticator.login_service = 'Sunet Drive'
c.NextcloudOAuthenticator.username_claim = lambda r: r.get('ocs', {}).get('data', {}).get('id')
c.NextcloudOAuthenticator.userdata_url = 'https://' + os.environ['NEXTCLOUD_HOST'] + '/ocs/v2.php/cloud/user?format=json'
c.NextcloudOAuthenticator.authorize_url = 'https://' + os.environ['NEXTCLOUD_HOST'] + '/index.php/apps/oauth2/authorize'
c.NextcloudOAuthenticator.token_url = token_url
c.NextcloudOAuthenticator.oauth_callback_url = 'https://' + os.environ['JUPYTER_HOST'] + '/hub/oauth_callback'
c.NextcloudOAuthenticator.allow_all = True
c.NextcloudOAuthenticator.refresh_pre_spawn = True
c.NextcloudOAuthenticator.enable_auth_state = True
c.NextcloudOAuthenticator.auth_refresh_age = 3600
c.NextcloudOAuthenticator.post_auth_hook = post_auth_hook
serviceCode: |
import sys
c.JupyterHub.load_roles = [
{
"name": "refresh-token",
"services": [
"refresh-token"
],
"scopes": [
"read:users",
"admin:auth_state"
]
},
{
"name": "user",
"scopes": [
"access:services!service=refresh-token",
"read:services!service=refresh-token",
"self",
],
},
{
"name": "server",
"scopes": [
"access:services!service=refresh-token",
"read:services!service=refresh-token",
"inherit",
],
}
]
c.JupyterHub.services = [
{
'name': 'refresh-token',
'url': 'http://' + os.environ.get('HUB_SERVICE_HOST', 'hub') + ':' + os.environ.get('HUB_SERVICE_PORT_REFRESH_TOKEN', '8082'),
'display': False,
'oauth_no_confirm': True,
'api_token': os.environ['JUPYTERHUB_API_KEY'],
'command': [sys.executable, '/usr/local/etc/jupyterhub/refresh-token.py']
}
]
c.JupyterHub.admin_users = {"refresh-token"}
c.JupyterHub.api_tokens = {
os.environ['JUPYTERHUB_API_KEY']: "refresh-token",
}
extraFiles:
refresh-token.py:
mountPath: /usr/local/etc/jupyterhub/refresh-token.py
stringData: |
"""A token refresh service authenticating with the Hub.
This service serves `/services/refresh-token/`,
authenticated with the Hub,
showing the user their own info.
"""
import json
import os
import requests
import socket
from jupyterhub.services.auth import HubAuthenticated
from jupyterhub.utils import url_path_join
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.web import Application, HTTPError, RequestHandler, authenticated
from urllib.parse import urlparse
debug = os.environ.get('NEXTCLOUD_DEBUG_OAUTH', 'false').lower() in ['true', '1', 'yes']
def my_debug(s):
if debug:
with open("/proc/1/fd/1", "a") as stdout:
print(s, file=stdout)
class RefreshHandler(HubAuthenticated, RequestHandler):
def api_request(self, method, url, **kwargs):
my_debug(f'{self.hub_auth}')
url = url_path_join(self.hub_auth.api_url, url)
allow_404 = kwargs.pop('allow_404', False)
headers = kwargs.setdefault('headers', {})
headers.setdefault('Authorization', f'token {self.hub_auth.api_token}')
try:
r = requests.request(method, url, **kwargs)
except requests.ConnectionError as e:
my_debug(f'Error connecting to {url}: {e}')
msg = f'Failed to connect to Hub API at {url}.'
msg += f' Is the Hub accessible at this URL (from host: {socket.gethostname()})?'
if '127.0.0.1' in url:
msg += ' Make sure to set c.JupyterHub.hub_ip to an IP accessible to' + \
' single-user servers if the servers are not on the same host as the Hub.'
raise HTTPError(500, msg)
data = None
if r.status_code == 404 and allow_404:
pass
elif r.status_code == 403:
my_debug(
'Lacking permission to check authorization with JupyterHub,' +
f' my auth token may have expired: [{r.status_code}] {r.reason}'
)
my_debug(r.text)
raise HTTPError(
500,
'Permission failure checking authorization, I may need a new token'
)
elif r.status_code >= 500:
my_debug(f'Upstream failure verifying auth token: [{r.status_code}] {r.reason}')
my_debug(r.text)
raise HTTPError(
502, 'Failed to check authorization (upstream problem)')
elif r.status_code >= 400:
my_debug(f'Failed to check authorization: [{r.status_code}] {r.reason}')
my_debug(r.text)
raise HTTPError(500, 'Failed to check authorization')
else:
data = r.json()
return data
@authenticated
def get(self):
user_model = self.get_current_user()
# Fetch current auth state
user_data = self.api_request('GET', url_path_join('users', user_model['name']))
auth_state = user_data['auth_state']
access_token = auth_state['access_token']
token_expires = auth_state['token_expires']
self.set_header('content-type', 'application/json')
self.write(json.dumps({'access_token': access_token, 'token_expires': token_expires}, indent=1, sort_keys=True))
class PingHandler(RequestHandler):
def get(self):
my_debug(f"DEBUG: In ping get")
self.set_header('content-type', 'application/json')
self.write(json.dumps({'ping': 1}))
def main():
app = Application([
(os.environ['JUPYTERHUB_SERVICE_PREFIX'] + 'tokens', RefreshHandler),
(os.environ['JUPYTERHUB_SERVICE_PREFIX'] + '/?', PingHandler),
])
http_server = HTTPServer(app)
url = urlparse(os.environ['JUPYTERHUB_SERVICE_URL'])
http_server.listen(url.port)
IOLoop.current().start()
if __name__ == '__main__':
main()
networkPolicy:
ingress:
- ports:
- port: 8082
from:
- podSelector:
matchLabels:
hub.jupyter.org/network-access-hub: "true"
service:
extraPorts:
- port: 8082
targetPort: 8082
name: refresh-token
extraEnv:
NEXTCLOUD_DEBUG_OAUTH: "no"
NEXTCLOUD_HOST: sunet.drive.sunet.se
JUPYTER_HOST: sunet-jupyter.drive.sunet.se
JUPYTERHUB_API_KEY:
valueFrom:
secretKeyRef:
name: jupyterhub-secrets
key: api-key
JUPYTERHUB_CRYPT_KEY:
valueFrom:
secretKeyRef:
name: jupyterhub-secrets
key: crypt-key
NEXTCLOUD_CLIENT_ID:
valueFrom:
secretKeyRef:
name: nextcloud-oauth-secrets
key: client-id
NEXTCLOUD_CLIENT_SECRET:
valueFrom:
secretKeyRef:
name: nextcloud-oauth-secrets
key: client-secret
networkPolicy:
enabled: false
proxy:
chp:
networkPolicy:
egress:
- to:
- podSelector:
matchLabels:
app: jupyterhub
component: hub
ports:
- port: 8082
singleuser:
image:
name: docker.sunet.se/drive/jupyter-custom
tag: lab-4.0.10-sunet5
storage:
dynamic:
storageClass: csi-sc-cinderplugin
extraEnv:
JUPYTER_ENABLE_LAB: "yes"
JUPYTER_HOST: sunet-jupyter.drive.sunet.se
NEXTCLOUD_HOST: sunet.drive.sunet.se
extraFiles:
jupyter_notebook_config:
mountPath: /home/jovyan/.jupyter/jupyter_server_config.py
stringData: |
import os
c = get_config()
c.NotebookApp.allow_origin = '*'
c.NotebookApp.tornado_settings = {
'headers': { 'Content-Security-Policy': "frame-ancestors *;" }
}
os.system('/usr/local/bin/nc-sync')
mode: 0644

View file

@@ -1,30 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: jupyterhub-ingress
  annotations:
    cert-manager.io/issuer: "letsencrypt"
    acme.cert-manager.io/http01-edit-in-place: "true"
spec:
  defaultBackend:
    service:
      name: proxy-public
      port:
        number: 8443
  tls:
  - hosts:
    - vr-jupyter.drive.sunet.se
    secretName: tls-secret
  ingressClassName: nginx
  rules:
  - host: vr-jupyter.drive.sunet.se
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: proxy-public
            port:
              number: 80

View file

@@ -1,25 +0,0 @@
---
apiVersion: v1
items:
- apiVersion: v1
  kind: Service
  metadata:
    labels:
      app: jupyterhub-node
    name: jupyterhub-node
  spec:
    ports:
    - port: 8080
      protocol: TCP
      targetPort: 8080
    selector:
      app: jupyterhub-node
    sessionAffinity: None
    type: ClusterIP
  status:
    loadBalancer: {}
kind: List
metadata:
  resourceVersion: ""
  selfLink: ""

View file

@@ -1,16 +0,0 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources: [../../../base/]
helmCharts:
- includeCRDs: true
  name: jupyterhub
  releaseName: vr-jupyterhub
  valuesFile: ./values/values.yaml
  version: 4.0.0
  namespace: vr-jupyterhub
helmGlobals:
  chartHome: ../../../base/charts/
patches:
- path: jupyterhub-ingress.yml
- path: jupyterhub-service.yml

View file

@@ -1,337 +0,0 @@
debug:
enabled: true
hub:
config:
Authenticator:
auto_login: true
enable_auth_state: true
JupyterHub:
tornado_settings:
headers: { 'Content-Security-Policy': "frame-ancestors *;" }
db:
pvc:
storageClassName: csi-sc-cinderplugin
extraConfig:
oauthCode: |
import time
import requests
from datetime import datetime
from oauthenticator.generic import GenericOAuthenticator
token_url = 'https://' + os.environ['NEXTCLOUD_HOST'] + '/index.php/apps/oauth2/api/v1/token'
debug = os.environ.get('NEXTCLOUD_DEBUG_OAUTH', 'false').lower() in ['true', '1', 'yes']
def get_nextcloud_access_token(refresh_token):
client_id = os.environ['NEXTCLOUD_CLIENT_ID']
client_secret = os.environ['NEXTCLOUD_CLIENT_SECRET']
code = refresh_token
data = {
'grant_type': 'refresh_token',
'code': code,
'refresh_token': refresh_token,
'client_id': client_id,
'client_secret': client_secret
}
response = requests.post(token_url, data=data)
if debug:
print(response.text)
return response.json()
def post_auth_hook(authenticator, handler, authentication):
user = authentication['auth_state']['oauth_user']['ocs']['data']['id']
auth_state = authentication['auth_state']
auth_state['token_expires'] = time.time() + auth_state['token_response']['expires_in']
authentication['auth_state'] = auth_state
return authentication
class NextcloudOAuthenticator(GenericOAuthenticator):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.user_dict = {}
async def pre_spawn_start(self, user, spawner):
super().pre_spawn_start(user, spawner)
auth_state = await user.get_auth_state()
if not auth_state:
return
access_token = auth_state['access_token']
spawner.environment['NEXTCLOUD_ACCESS_TOKEN'] = access_token
async def refresh_user(self, user, handler=None):
auth_state = await user.get_auth_state()
if not auth_state:
if debug:
print(f'auth_state missing for {user}')
return False
access_token = auth_state['access_token']
refresh_token = auth_state['refresh_token']
token_response = auth_state['token_response']
now = time.time()
now_hr = datetime.fromtimestamp(now)
expires = auth_state['token_expires']
expires_hr = datetime.fromtimestamp(expires)
expires = 0
if debug:
print(f'auth_state for {user}: {auth_state}')
if now >= expires:
if debug:
print(f'Time is: {now_hr}, token expired: {expires_hr}')
print(f'Refreshing token for {user}')
try:
token_response = get_nextcloud_access_token(refresh_token)
auth_state['access_token'] = token_response['access_token']
auth_state['refresh_token'] = token_response['refresh_token']
auth_state['token_expires'] = now + token_response['expires_in']
auth_state['token_response'] = token_response
if debug:
print(f'Successfully refreshed token for {user.name}')
print(f'auth_state for {user.name}: {auth_state}')
return {'name': user.name, 'auth_state': auth_state}
except Exception as e:
if debug:
print(f'Failed to refresh token for {user}')
return False
return False
if debug:
print(f'Time is: {now_hr}, token expires: {expires_hr}')
return True
c.JupyterHub.authenticator_class = NextcloudOAuthenticator
c.NextcloudOAuthenticator.client_id = os.environ['NEXTCLOUD_CLIENT_ID']
c.NextcloudOAuthenticator.client_secret = os.environ['NEXTCLOUD_CLIENT_SECRET']
c.NextcloudOAuthenticator.login_service = 'Sunet Drive'
c.NextcloudOAuthenticator.username_claim = lambda r: r.get('ocs', {}).get('data', {}).get('id')
c.NextcloudOAuthenticator.userdata_url = 'https://' + os.environ['NEXTCLOUD_HOST'] + '/ocs/v2.php/cloud/user?format=json'
c.NextcloudOAuthenticator.authorize_url = 'https://' + os.environ['NEXTCLOUD_HOST'] + '/index.php/apps/oauth2/authorize'
c.NextcloudOAuthenticator.token_url = token_url
c.NextcloudOAuthenticator.oauth_callback_url = 'https://' + os.environ['JUPYTER_HOST'] + '/hub/oauth_callback'
c.NextcloudOAuthenticator.allow_all = True
c.NextcloudOAuthenticator.refresh_pre_spawn = True
c.NextcloudOAuthenticator.enable_auth_state = True
c.NextcloudOAuthenticator.auth_refresh_age = 3600
c.NextcloudOAuthenticator.post_auth_hook = post_auth_hook
serviceCode: |
import sys
c.JupyterHub.load_roles = [
{
"name": "refresh-token",
"services": [
"refresh-token"
],
"scopes": [
"read:users",
"admin:auth_state"
]
},
{
"name": "user",
"scopes": [
"access:services!service=refresh-token",
"read:services!service=refresh-token",
"self",
],
},
{
"name": "server",
"scopes": [
"access:services!service=refresh-token",
"read:services!service=refresh-token",
"inherit",
],
}
]
c.JupyterHub.services = [
{
'name': 'refresh-token',
'url': 'http://' + os.environ.get('HUB_SERVICE_HOST', 'hub') + ':' + os.environ.get('HUB_SERVICE_PORT_REFRESH_TOKEN', '8082'),
'display': False,
'oauth_no_confirm': True,
'api_token': os.environ['JUPYTERHUB_API_KEY'],
'command': [sys.executable, '/usr/local/etc/jupyterhub/refresh-token.py']
}
]
c.JupyterHub.admin_users = {"refresh-token"}
c.JupyterHub.api_tokens = {
os.environ['JUPYTERHUB_API_KEY']: "refresh-token",
}
extraFiles:
refresh-token.py:
mountPath: /usr/local/etc/jupyterhub/refresh-token.py
stringData: |
"""A token refresh service authenticating with the Hub.
This service serves `/services/refresh-token/`,
authenticated with the Hub,
showing the user their own info.
"""
import json
import os
import requests
import socket
from jupyterhub.services.auth import HubAuthenticated
from jupyterhub.utils import url_path_join
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.web import Application, HTTPError, RequestHandler, authenticated
from urllib.parse import urlparse
debug = os.environ.get('NEXTCLOUD_DEBUG_OAUTH', 'false').lower() in ['true', '1', 'yes']
def my_debug(s):
if debug:
with open("/proc/1/fd/1", "a") as stdout:
print(s, file=stdout)
class RefreshHandler(HubAuthenticated, RequestHandler):
def api_request(self, method, url, **kwargs):
my_debug(f'{self.hub_auth}')
url = url_path_join(self.hub_auth.api_url, url)
allow_404 = kwargs.pop('allow_404', False)
headers = kwargs.setdefault('headers', {})
headers.setdefault('Authorization', f'token {self.hub_auth.api_token}')
try:
r = requests.request(method, url, **kwargs)
except requests.ConnectionError as e:
my_debug(f'Error connecting to {url}: {e}')
msg = f'Failed to connect to Hub API at {url}.'
msg += f' Is the Hub accessible at this URL (from host: {socket.gethostname()})?'
if '127.0.0.1' in url:
msg += ' Make sure to set c.JupyterHub.hub_ip to an IP accessible to' + \
' single-user servers if the servers are not on the same host as the Hub.'
raise HTTPError(500, msg)
data = None
if r.status_code == 404 and allow_404:
pass
elif r.status_code == 403:
my_debug(
'Lacking permission to check authorization with JupyterHub,' +
f' my auth token may have expired: [{r.status_code}] {r.reason}'
)
my_debug(r.text)
raise HTTPError(
500,
'Permission failure checking authorization, I may need a new token'
)
elif r.status_code >= 500:
my_debug(f'Upstream failure verifying auth token: [{r.status_code}] {r.reason}')
my_debug(r.text)
raise HTTPError(
502, 'Failed to check authorization (upstream problem)')
elif r.status_code >= 400:
my_debug(f'Failed to check authorization: [{r.status_code}] {r.reason}')
my_debug(r.text)
raise HTTPError(500, 'Failed to check authorization')
else:
data = r.json()
return data
@authenticated
def get(self):
user_model = self.get_current_user()
# Fetch current auth state
user_data = self.api_request('GET', url_path_join('users', user_model['name']))
auth_state = user_data['auth_state']
access_token = auth_state['access_token']
token_expires = auth_state['token_expires']
self.set_header('content-type', 'application/json')
self.write(json.dumps({'access_token': access_token, 'token_expires': token_expires}, indent=1, sort_keys=True))
class PingHandler(RequestHandler):
def get(self):
my_debug("DEBUG: In ping get")
self.set_header('content-type', 'application/json')
self.write(json.dumps({'ping': 1}))
def main():
app = Application([
(os.environ['JUPYTERHUB_SERVICE_PREFIX'] + 'tokens', RefreshHandler),
(os.environ['JUPYTERHUB_SERVICE_PREFIX'] + '/?', PingHandler),
])
http_server = HTTPServer(app)
url = urlparse(os.environ['JUPYTERHUB_SERVICE_URL'])
http_server.listen(url.port)
IOLoop.current().start()
if __name__ == '__main__':
main()
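# Example client call (a sketch, not part of the service): from inside a
# single-user server, the current token could be fetched with the server's
# own API token, assuming the default '/services/refresh-token/' prefix:
#
#     import os, requests
#     r = requests.get(
#         'https://' + os.environ['JUPYTER_HOST'] + '/services/refresh-token/tokens',
#         headers={'Authorization': 'token ' + os.environ['JUPYTERHUB_API_TOKEN']},
#     )
#     access_token = r.json()['access_token']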
networkPolicy:
ingress:
- ports:
- port: 8082
from:
- podSelector:
matchLabels:
hub.jupyter.org/network-access-hub: "true"
service:
extraPorts:
- port: 8082
targetPort: 8082
name: refresh-token
extraEnv:
NEXTCLOUD_DEBUG_OAUTH: "no"
NEXTCLOUD_HOST: vr.drive.sunet.se
JUPYTER_HOST: vr-jupyter.drive.sunet.se
JUPYTERHUB_API_KEY:
valueFrom:
secretKeyRef:
name: jupyterhub-secrets
key: api-key
JUPYTERHUB_CRYPT_KEY:
valueFrom:
secretKeyRef:
name: jupyterhub-secrets
key: crypt-key
NEXTCLOUD_CLIENT_ID:
valueFrom:
secretKeyRef:
name: nextcloud-oauth-secrets
key: client-id
NEXTCLOUD_CLIENT_SECRET:
valueFrom:
secretKeyRef:
name: nextcloud-oauth-secrets
key: client-secret
networkPolicy:
enabled: false
proxy:
chp:
networkPolicy:
egress:
- to:
- podSelector:
matchLabels:
app: jupyterhub
component: hub
ports:
- port: 8082
singleuser:
image:
name: docker.sunet.se/drive/jupyter-custom
tag: lab-4.0.10-sunet4
storage:
dynamic:
storageClass: csi-sc-cinderplugin
extraEnv:
JUPYTER_ENABLE_LAB: "yes"
JUPYTER_HOST: vr-jupyter.drive.sunet.se
NEXTCLOUD_HOST: vr.drive.sunet.se
extraFiles:
jupyter_notebook_config:
mountPath: /home/jovyan/.jupyter/jupyter_server_config.py
stringData: |
import os
c = get_config()
c.NotebookApp.allow_origin = '*'
c.NotebookApp.tornado_settings = {
'headers': { 'Content-Security-Policy': "frame-ancestors *;" }
}
os.system('/usr/local/bin/nc-sync')
mode: 0644
View file
@ -5,8 +5,6 @@ metadata:
   name: jupyterhub-ingress
   annotations:
     kubernetes.io/ingress.class: nginx
-    cert-manager.io/issuer: "letsencrypt"
-    acme.cert-manager.io/http01-edit-in-place: "true"
 spec:
   ingressClassName: nginx
   defaultBackend:
View file
@ -7,7 +7,7 @@ helmCharts:
   name: jupyterhub
   releaseName: sunet-jupyterhub
   valuesFile: ./values/values.yaml
-  version: 4.0.0
+  version: 3.2.1
   namespace: sunet-jupyterhub
 helmGlobals:
   chartHome: ../../../base/charts/
View file
@ -1,30 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: jupyterhub-ingress
annotations:
cert-manager.io/issuer: "letsencrypt"
acme.cert-manager.io/http01-edit-in-place: "true"
spec:
defaultBackend:
service:
name: proxy-public
port:
number: 8443
tls:
- hosts:
- vr-jupyter.drive.test.sunet.se
secretName: tls-secret
ingressClassName: nginx
rules:
- host: vr-jupyter.drive.test.sunet.se
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: proxy-public
port:
number: 80
View file
@ -1,24 +0,0 @@
---
apiVersion: v1
items:
- apiVersion: v1
kind: Service
metadata:
labels:
app: jupyterhub-node
name: jupyterhub-node
spec:
ports:
- port: 8080
protocol: TCP
targetPort: 8080
selector:
app: jupyterhub-node
sessionAffinity: None
type: ClusterIP
status:
loadBalancer: {}
kind: List
metadata:
resourceVersion: ""
selfLink: ""
View file
@ -1,16 +0,0 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources: [../../../base/]
helmCharts:
- includeCRDs: true
name: jupyterhub
releaseName: vr-jupyterhub
valuesFile: ./values/values.yaml
version: 4.0.0
namespace: vr-jupyterhub
helmGlobals:
chartHome: ../../../base/charts/
patches:
- path: jupyterhub-ingress.yml
- path: jupyterhub-service.yml
View file
@ -1,337 +0,0 @@
debug:
enabled: true
hub:
config:
Authenticator:
auto_login: true
enable_auth_state: true
JupyterHub:
tornado_settings:
headers: { 'Content-Security-Policy': "frame-ancestors *;" }
db:
pvc:
storageClassName: csi-sc-cinderplugin
extraConfig:
oauthCode: |
import time
import requests
from datetime import datetime
from oauthenticator.generic import GenericOAuthenticator
token_url = 'https://' + os.environ['NEXTCLOUD_HOST'] + '/index.php/apps/oauth2/api/v1/token'
debug = os.environ.get('NEXTCLOUD_DEBUG_OAUTH', 'false').lower() in ['true', '1', 'yes']
def get_nextcloud_access_token(refresh_token):
client_id = os.environ['NEXTCLOUD_CLIENT_ID']
client_secret = os.environ['NEXTCLOUD_CLIENT_SECRET']
code = refresh_token
data = {
'grant_type': 'refresh_token',
'code': code,
'refresh_token': refresh_token,
'client_id': client_id,
'client_secret': client_secret
}
response = requests.post(token_url, data=data)
if debug:
print(response.text)
return response.json()
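# Note: the token endpoint is expected to answer with JSON containing at
# least 'access_token', 'refresh_token' and 'expires_in'; refresh_user()
# below reads exactly those fields from this response.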
def post_auth_hook(authenticator, handler, authentication):
user = authentication['auth_state']['oauth_user']['ocs']['data']['id']
auth_state = authentication['auth_state']
auth_state['token_expires'] = time.time() + auth_state['token_response']['expires_in']
authentication['auth_state'] = auth_state
return authentication
class NextcloudOAuthenticator(GenericOAuthenticator):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.user_dict = {}
async def pre_spawn_start(self, user, spawner):
super().pre_spawn_start(user, spawner)
auth_state = await user.get_auth_state()
if not auth_state:
return
access_token = auth_state['access_token']
spawner.environment['NEXTCLOUD_ACCESS_TOKEN'] = access_token
async def refresh_user(self, user, handler=None):
auth_state = await user.get_auth_state()
if not auth_state:
if debug:
print(f'auth_state missing for {user}')
return False
access_token = auth_state['access_token']
refresh_token = auth_state['refresh_token']
token_response = auth_state['token_response']
now = time.time()
now_hr = datetime.fromtimestamp(now)
expires = auth_state['token_expires']
expires_hr = datetime.fromtimestamp(expires)
expires = 0  # forces a refresh on every check; the stored expiry above is ignored
if debug:
print(f'auth_state for {user}: {auth_state}')
if now >= expires:
if debug:
print(f'Time is: {now_hr}, token expired: {expires_hr}')
print(f'Refreshing token for {user}')
try:
token_response = get_nextcloud_access_token(refresh_token)
auth_state['access_token'] = token_response['access_token']
auth_state['refresh_token'] = token_response['refresh_token']
auth_state['token_expires'] = now + token_response['expires_in']
auth_state['token_response'] = token_response
if debug:
print(f'Successfully refreshed token for {user.name}')
print(f'auth_state for {user.name}: {auth_state}')
return {'name': user.name, 'auth_state': auth_state}
except Exception as e:
if debug:
print(f'Failed to refresh token for {user}: {e}')
return False
return False
if debug:
print(f'Time is: {now_hr}, token expires: {expires_hr}')
return True
c.JupyterHub.authenticator_class = NextcloudOAuthenticator
c.NextcloudOAuthenticator.client_id = os.environ['NEXTCLOUD_CLIENT_ID']
c.NextcloudOAuthenticator.client_secret = os.environ['NEXTCLOUD_CLIENT_SECRET']
c.NextcloudOAuthenticator.login_service = 'Sunet Drive'
c.NextcloudOAuthenticator.username_claim = lambda r: r.get('ocs', {}).get('data', {}).get('id')
c.NextcloudOAuthenticator.userdata_url = 'https://' + os.environ['NEXTCLOUD_HOST'] + '/ocs/v2.php/cloud/user?format=json'
c.NextcloudOAuthenticator.authorize_url = 'https://' + os.environ['NEXTCLOUD_HOST'] + '/index.php/apps/oauth2/authorize'
c.NextcloudOAuthenticator.token_url = token_url
c.NextcloudOAuthenticator.oauth_callback_url = 'https://' + os.environ['JUPYTER_HOST'] + '/hub/oauth_callback'
c.NextcloudOAuthenticator.allow_all = True
c.NextcloudOAuthenticator.refresh_pre_spawn = True
c.NextcloudOAuthenticator.enable_auth_state = True
c.NextcloudOAuthenticator.auth_refresh_age = 3600
c.NextcloudOAuthenticator.post_auth_hook = post_auth_hook
serviceCode: |
import sys
c.JupyterHub.load_roles = [
{
"name": "refresh-token",
"services": [
"refresh-token"
],
"scopes": [
"read:users",
"admin:auth_state"
]
},
{
"name": "user",
"scopes": [
"access:services!service=refresh-token",
"read:services!service=refresh-token",
"self",
],
},
{
"name": "server",
"scopes": [
"access:services!service=refresh-token",
"read:services!service=refresh-token",
"inherit",
],
}
]
c.JupyterHub.services = [
{
'name': 'refresh-token',
'url': 'http://' + os.environ.get('HUB_SERVICE_HOST', 'hub') + ':' + os.environ.get('HUB_SERVICE_PORT_REFRESH_TOKEN', '8082'),
'display': False,
'oauth_no_confirm': True,
'api_token': os.environ['JUPYTERHUB_API_KEY'],
'command': [sys.executable, '/usr/local/etc/jupyterhub/refresh-token.py']
}
]
c.JupyterHub.admin_users = {"refresh-token"}
c.JupyterHub.api_tokens = {
os.environ['JUPYTERHUB_API_KEY']: "refresh-token",
}
extraFiles:
refresh-token.py:
mountPath: /usr/local/etc/jupyterhub/refresh-token.py
stringData: |
"""A token refresh service authenticating with the Hub.
This service serves `/services/refresh-token/`,
authenticated with the Hub,
showing the user their own info.
"""
import json
import os
import requests
import socket
from jupyterhub.services.auth import HubAuthenticated
from jupyterhub.utils import url_path_join
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.web import Application, HTTPError, RequestHandler, authenticated
from urllib.parse import urlparse
debug = os.environ.get('NEXTCLOUD_DEBUG_OAUTH', 'false').lower() in ['true', '1', 'yes']
def my_debug(s):
if debug:
with open("/proc/1/fd/1", "a") as stdout:
print(s, file=stdout)
class RefreshHandler(HubAuthenticated, RequestHandler):
def api_request(self, method, url, **kwargs):
my_debug(f'{self.hub_auth}')
url = url_path_join(self.hub_auth.api_url, url)
allow_404 = kwargs.pop('allow_404', False)
headers = kwargs.setdefault('headers', {})
headers.setdefault('Authorization', f'token {self.hub_auth.api_token}')
try:
r = requests.request(method, url, **kwargs)
except requests.ConnectionError as e:
my_debug(f'Error connecting to {url}: {e}')
msg = f'Failed to connect to Hub API at {url}.'
msg += f' Is the Hub accessible at this URL (from host: {socket.gethostname()})?'
if '127.0.0.1' in url:
msg += ' Make sure to set c.JupyterHub.hub_ip to an IP accessible to' + \
' single-user servers if the servers are not on the same host as the Hub.'
raise HTTPError(500, msg)
data = None
if r.status_code == 404 and allow_404:
pass
elif r.status_code == 403:
my_debug(
'Lacking permission to check authorization with JupyterHub,' +
f' my auth token may have expired: [{r.status_code}] {r.reason}'
)
my_debug(r.text)
raise HTTPError(
500,
'Permission failure checking authorization, I may need a new token'
)
elif r.status_code >= 500:
my_debug(f'Upstream failure verifying auth token: [{r.status_code}] {r.reason}')
my_debug(r.text)
raise HTTPError(
502, 'Failed to check authorization (upstream problem)')
elif r.status_code >= 400:
my_debug(f'Failed to check authorization: [{r.status_code}] {r.reason}')
my_debug(r.text)
raise HTTPError(500, 'Failed to check authorization')
else:
data = r.json()
return data
@authenticated
def get(self):
user_model = self.get_current_user()
# Fetch current auth state
user_data = self.api_request('GET', url_path_join('users', user_model['name']))
auth_state = user_data['auth_state']
access_token = auth_state['access_token']
token_expires = auth_state['token_expires']
self.set_header('content-type', 'application/json')
self.write(json.dumps({'access_token': access_token, 'token_expires': token_expires}, indent=1, sort_keys=True))
class PingHandler(RequestHandler):
def get(self):
my_debug("DEBUG: In ping get")
self.set_header('content-type', 'application/json')
self.write(json.dumps({'ping': 1}))
def main():
app = Application([
(os.environ['JUPYTERHUB_SERVICE_PREFIX'] + 'tokens', RefreshHandler),
(os.environ['JUPYTERHUB_SERVICE_PREFIX'] + '/?', PingHandler),
])
http_server = HTTPServer(app)
url = urlparse(os.environ['JUPYTERHUB_SERVICE_URL'])
http_server.listen(url.port)
IOLoop.current().start()
if __name__ == '__main__':
main()
networkPolicy:
ingress:
- ports:
- port: 8082
from:
- podSelector:
matchLabels:
hub.jupyter.org/network-access-hub: "true"
service:
extraPorts:
- port: 8082
targetPort: 8082
name: refresh-token
extraEnv:
NEXTCLOUD_DEBUG_OAUTH: "no"
NEXTCLOUD_HOST: vr.drive.test.sunet.se
JUPYTER_HOST: vr-jupyter.drive.test.sunet.se
JUPYTERHUB_API_KEY:
valueFrom:
secretKeyRef:
name: jupyterhub-secrets
key: api-key
JUPYTERHUB_CRYPT_KEY:
valueFrom:
secretKeyRef:
name: jupyterhub-secrets
key: crypt-key
NEXTCLOUD_CLIENT_ID:
valueFrom:
secretKeyRef:
name: nextcloud-oauth-secrets
key: client-id
NEXTCLOUD_CLIENT_SECRET:
valueFrom:
secretKeyRef:
name: nextcloud-oauth-secrets
key: client-secret
networkPolicy:
enabled: false
proxy:
chp:
networkPolicy:
egress:
- to:
- podSelector:
matchLabels:
app: jupyterhub
component: hub
ports:
- port: 8082
singleuser:
image:
name: docker.sunet.se/drive/jupyter-custom
tag: lab-4.0.10-sunet4
storage:
dynamic:
storageClass: csi-sc-cinderplugin
extraEnv:
JUPYTER_ENABLE_LAB: "yes"
JUPYTER_HOST: vr-jupyter.drive.test.sunet.se
NEXTCLOUD_HOST: vr.drive.test.sunet.se
extraFiles:
jupyter_notebook_config:
mountPath: /home/jovyan/.jupyter/jupyter_server_config.py
stringData: |
import os
c = get_config()
c.NotebookApp.allow_origin = '*'
c.NotebookApp.tornado_settings = {
'headers': { 'Content-Security-Policy': "frame-ancestors *;" }
}
os.system('/usr/local/bin/nc-sync')
mode: 0644
View file
@ -1,8 +0,0 @@
resources:
- portal-cert-manager.yml
- portal-deployment.yml
- portal-ingress.yml
- portal-namespace.yml
- portal-service.yml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
View file
@ -1,15 +0,0 @@
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: letsencrypt
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: drive@sunet.se
privateKeySecretRef:
name: letsencrypt
solvers:
- http01:
ingress:
class: nginx
View file
@ -1,30 +0,0 @@
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: portal-node
namespace: portal
creationTimestamp:
labels:
app: portal-node
spec:
replicas: 3
selector:
matchLabels:
app: portal-node
template:
metadata:
creationTimestamp:
labels:
app: portal-node
spec:
containers:
- name: portal
image: docker.sunet.se/drive/portal:0.1.2-1
imagePullPolicy: Always
resources: {}
env:
- name: DRIVE_DOMAIN
value: "drive.test.sunet.se"
strategy: {}
status: {}
View file
@ -1,30 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: portal-ingress
namespace: portal
annotations:
kubernetes.io/ingress.class: nginx
spec:
defaultBackend:
service:
name: portal-node
port:
number: 8080
tls:
- hosts:
- portal.drive.test.sunet.se
secretName: tls-secret
ingressClassName: nginx
rules:
- host: portal.drive.test.sunet.se
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: portal-node
port:
number: 8080
View file
@ -1,8 +0,0 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: portal
spec:
finalizers:
- kubernetes
View file
@ -1,25 +0,0 @@
---
apiVersion: v1
items:
- apiVersion: v1
kind: Service
metadata:
labels:
app: portal-node
name: portal-node
namespace: portal
spec:
ports:
- port: 8080
protocol: TCP
targetPort: 8080
selector:
app: portal-node
sessionAffinity: None
type: ClusterIP
status:
loadBalancer: {}
kind: List
metadata:
resourceVersion: ""
selfLink: ""
View file
@ -1,6 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../base
patches:
- path: portal-ingress.yml
View file
@ -1,30 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: portal-ingress
namespace: portal
annotations:
kubernetes.io/ingress.class: nginx
spec:
defaultBackend:
service:
name: portal-node
port:
number: 8080
ingressClassName: nginx
tls:
- hosts:
- portal.drive.sunet.se
secretName: tls-secret
rules:
- host: portal.drive.sunet.se
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: portal-node
port:
number: 8080
View file
@ -1,7 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../base
patches:
- path: portal-ingress.yml
- path: portal-deployment.yml
View file
@ -1,27 +0,0 @@
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: portal-node
namespace: portal
creationTimestamp:
labels:
app: portal-node
spec:
replicas: 3
selector:
matchLabels:
app: portal-node
template:
metadata:
creationTimestamp:
labels:
app: portal-node
spec:
containers:
- name: portal
env:
- name: DRIVE_DOMAIN
value: "drive.test.sunet.se"
strategy: {}
status: {}
View file
@ -1,32 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: portal-ingress
namespace: portal
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/issuer: "letsencrypt"
acme.cert-manager.io/http01-edit-in-place: "true"
spec:
defaultBackend:
service:
name: portal-node
port:
number: 8080
ingressClassName: nginx
tls:
- hosts:
- drive.test.sunet.se
secretName: tls-secret
rules:
- host: drive.test.sunet.se
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: portal-node
port:
number: 8080
View file
@ -19,34 +19,35 @@ spec:
     spec:
       containers:
       - name: doris
-        image: docker.sunet.se/rds/doris-rds:git-15de3c5b9
+        image: docker.sunet.se/rds/doris-rds:ci-RDS-Connectors-13
         env:
-        - name: Logging__LogLevel__Default
-          value: Debug
-        - name: ScieboRds__ConnectorServiceName
-          value: layer1-port-doris
+        - name: ASPNETCORE_ENVIRONMENT
+          value: Development
+        - name: ASPNETCORE_URLS
+          value: http://+:80
+        - name: Domain
+          value: sunet.se
         - name: ScieboRds__TokenStorageUrl
           value: http://layer3-token-storage
-        - name: Doris__ApiKey
-          value: dummy
-        - name: ManifestIndex__Url
-          value: https://snd-storage-metadata-index-test-snd-dev.apps.k8s.gu.se
-        - name: ManifestIndex__ApiKey
+        - name: S3__Url
           valueFrom:
             secretKeyRef:
-              name: doris-gu-secrets
+              name: doris-api-key
               key: "api-key"
-        - name: Doris__DorisApiEnabled
-          value: 'true'
-        - name: Doris__PrincipalDomain
-          value: gu.se
-        - name: Doris__ApiUrl
-          value: https://dev.snd.se/doris/api/rocrate
-        - name: NextCloud__BaseUrl
-          value: https://gu.drive.test.sunet.se
-        - name: NextCloud__User
-          value: _doris_datasets
-        - name: NextCloud__Password
+        - name: S3__AccessKey
           valueFrom:
             secretKeyRef:
-              name: doris-gu-secret
-              key: "nextcloudpw"
+              name: doris-s3-key
+              key: "s3-key"
+        - name: S3__SecretKey
+          valueFrom:
+            secretKeyRef:
+              name: doris-s3-secret
+              key: "s3-secret"
         resources: {}
       strategy: {}
     status: {}
View file
@ -0,0 +1,49 @@
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: layer1-gu-port-doris
namespace: helmrds
labels:
app: layer1-gu-port-doris
spec:
replicas: 1
selector:
matchLabels:
app: layer1-gu-port-doris
template:
metadata:
creationTimestamp:
labels:
app: layer1-gu-port-doris
spec:
containers:
- name: doris
image: docker.sunet.se/rds/doris-rds:git-15de3c5b9
env:
- name: ScieboRds__TokenStorageUrl
value: http://layer3-token-storage
- name: Doris__ApiKey
valueFrom:
secretKeyRef:
name: doris-gu-secrets
key: "api-key"
- name: Doris__DorisApiEnabled
value: "true"
- name: Doris__PrincipalDomain
value: gu.se
- name: Doris__ApiUrl
value: https://dev.snd.se/doris/api/rocrate
- name: NextCloud__BaseUrl
value: https://gu.drive.test.sunet.se
- name: NextCloud__User
value: _doris_datasets
- name: NextCloud__Password
valueFrom:
secretKeyRef:
name: doris-gu-secret
key: "nextcloudpw"
resources: {}
strategy: {}
status: {}
View file
@ -5,15 +5,16 @@ items:
   kind: Service
   metadata:
     labels:
-      app: jupyterhub-node
-    name: jupyterhub-node
+      app: layer1-gu-port-doris
+    name: layer1-gu-port-doris
+    namespace: helmrds
   spec:
     ports:
-    - port: 8080
+    - port: 80
       protocol: TCP
       targetPort: 8080
     selector:
-      app: jupyterhub-node
+      app: layer1-gu-port-doris
     sessionAffinity: None
     type: ClusterIP
   status:
View file
@ -12,7 +12,7 @@ items:
     ports:
     - port: 80
       protocol: TCP
-      targetPort: 8080
+      targetPort: 80
     selector:
       app: layer1-port-doris
     sessionAffinity: None
View file
@ -1,11 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
name: gu-drive
namespace: helmrds
spec:
type: ExternalName
externalName: gu.drive.test.sunet.se
ports:
- port: 443
View file
@ -3,7 +3,6 @@ resources:
 - doris-deployment.yml
 - rds-ingress.yml
 - sunet-service.yml
-- gu-service.yml
 apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization
View file
@ -64,11 +64,11 @@ layer0-web:
   environment:
     SECRET_KEY: IAMLAYER0WEBSECRET
     EMBED_MODE: true
-# layer1-port-doris:
-#   enabled: false
-#   environment:
-#     DISPLAYNAME: Doris Connector
-#     METADATA_PROFILE: './metadata_profile.json'
+layer1-port-doris:
+  enabled: false
+  environment:
+    DISPLAYNAME: Doris
+    METADATA_PROFILE: './metadata_profile.json'
 layer1-port-owncloud:
   enabled: true
 layer1-port-zenodo:
View file
@ -5,7 +5,9 @@ metadata:
   name: describo-ingress
   namespace: helmrds
   annotations:
-    kubernetes.io/ingress.class: nginx
+    kubernetes.io/ingress.class: traefik
+    traefik.ingress.kubernetes.io/router.entrypoints: websecure
+    traefik.ingress.kubernetes.io/router.tls: "true"
 spec:
   defaultBackend:
     service:
@ -16,7 +18,7 @@ spec:
   - hosts:
     - describo.drive.test.sunet.se
     secretName: tls-secret
-  ingressClassName: nginx
   rules:
   - host: describo.drive.test.sunet.se
     http:
View file
@ -1,11 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
name: gu-drive
namespace: helmrds
spec:
type: ExternalName
externalName: gu.drive.test.sunet.se
ports:
- port: 443
View file
@ -6,4 +6,3 @@ patches:
 - path: describo-ingress.yml
 - path: rds-ingress.yml
 - path: sunet-service.yml
-- path: gu-service.yml
View file
@ -5,7 +5,9 @@ metadata:
   name: rds-ingress
   namespace: helmrds
   annotations:
-    kubernetes.io/ingress.class: nginx
+    kubernetes.io/ingress.class: traefik
+    traefik.ingress.kubernetes.io/router.entrypoints: websecure
+    traefik.ingress.kubernetes.io/router.tls: "true"
 spec:
   defaultBackend:
     service:
@ -16,7 +18,7 @@ spec:
   - hosts:
     - rds.drive.test.sunet.se
     secretName: tls-secret
-  ingressClassName: nginx
   rules:
   - host: rds.drive.test.sunet.se
     http:
View file
@ -1,38 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
ci/
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
# MacOS
.DS_Store
# helm-unittest
./tests
.debug
__snapshot__
# helm-docs
README.md.gotmpl
View file
@ -1,11 +0,0 @@
apiVersion: v2
appVersion: v1beta2-1.6.1-3.5.0
description: A Helm chart for Spark on Kubernetes operator
home: https://github.com/kubeflow/spark-operator
keywords:
- spark
maintainers:
- email: yuchaoran2011@gmail.com
name: yuchaoran2011
name: spark-operator
version: 1.4.2
View file
@ -1,146 +0,0 @@
# spark-operator
![Version: 1.4.2](https://img.shields.io/badge/Version-1.4.2-informational?style=flat-square) ![AppVersion: v1beta2-1.6.1-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.6.1--3.5.0-informational?style=flat-square)
A Helm chart for Spark on Kubernetes operator
**Homepage:** <https://github.com/kubeflow/spark-operator>
## Introduction
This chart bootstraps a [Kubernetes Operator for Apache Spark](https://github.com/kubeflow/spark-operator) deployment using the [Helm](https://helm.sh) package manager.
## Prerequisites
- Helm >= 3
- Kubernetes >= 1.16
## Previous Helm Chart
The previous `spark-operator` Helm chart hosted at [helm/charts](https://github.com/helm/charts) has been moved to this repository in accordance with the [Deprecation timeline](https://github.com/helm/charts#deprecation-timeline). Note that a few things have changed between this version and the old version:
- This repository **only** supports Helm chart installations using Helm 3+ since the `apiVersion` on the chart has been marked as `v2`.
- Previous versions of the Helm chart have not been migrated, and the version has been set to `1.0.0` at the onset. If you are looking for old versions of the chart, it's best to run `helm pull incubator/sparkoperator --version <your-version>` until you are ready to move to this repository's version.
- Several configuration properties have changed; carefully review the [values](#values) section below to make sure you're aligned with the new values.
## Usage
### Add Helm Repo
```shell
helm repo add spark-operator https://kubeflow.github.io/spark-operator
helm repo update
```
See [helm repo](https://helm.sh/docs/helm/helm_repo) for command documentation.
### Install the chart
```shell
helm install [RELEASE_NAME] spark-operator/spark-operator
```
For example, if you want to create a release with name `spark-operator` in the `default` namespace:
```shell
helm install spark-operator spark-operator/spark-operator
```
Note that `helm` will fail to install if the namespace doesn't exist. Either create the namespace beforehand or pass the `--create-namespace` flag to the `helm install` command.
```shell
helm install spark-operator spark-operator/spark-operator \
--namespace spark-operator \
--create-namespace
```
See [helm install](https://helm.sh/docs/helm/helm_install) for command documentation.
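Chart values can also be overridden at install time with `--set`. As a sketch, enabling the mutating admission webhook (using the `webhook.enable` key from the [values](#values) table below):
```shell
helm install spark-operator spark-operator/spark-operator \
    --namespace spark-operator \
    --create-namespace \
    --set webhook.enable=true
```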
### Upgrade the chart
```shell
helm upgrade [RELEASE_NAME] spark-operator/spark-operator [flags]
```
See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade) for command documentation.
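For example, to move an existing release to this chart version while keeping previously set values (a sketch; `--reuse-values` assumes you want to carry your earlier overrides forward):
```shell
helm upgrade spark-operator spark-operator/spark-operator \
    --version 1.4.2 \
    --reuse-values
```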
### Uninstall the chart
```shell
helm uninstall [RELEASE_NAME]
```
This removes all the Kubernetes resources associated with the chart and deletes the release, except for the CRDs; those have to be removed manually (see below).
See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall) for command documentation.
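The leftover CRDs can then be removed manually; as a sketch (the names below assume the CRDs shipped with this operator):
```shell
kubectl delete crd sparkapplications.sparkoperator.k8s.io
kubectl delete crd scheduledsparkapplications.sparkoperator.k8s.io
```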
## Values
| Key | Type | Default | Description |
|-----|------|---------|-------------|
| affinity | object | `{}` | Affinity for pod assignment |
| batchScheduler.enable | bool | `false` | Enable batch scheduler for spark jobs scheduling. If enabled, users can specify batch scheduler name in spark application |
| commonLabels | object | `{}` | Common labels to add to the resources |
| controllerThreads | int | `10` | Operator concurrency, higher values might increase memory usage |
| envFrom | list | `[]` | Pod environment variable sources |
| fullnameOverride | string | `""` | String to override release name |
| image.pullPolicy | string | `"IfNotPresent"` | Image pull policy |
| image.repository | string | `"docker.io/kubeflow/spark-operator"` | Image repository |
| image.tag | string | `""` | if set, override the image tag whose default is the chart appVersion. |
| imagePullSecrets | list | `[]` | Image pull secrets |
| ingressUrlFormat | string | `""` | Ingress URL format. Requires the UI service to be enabled by setting `uiService.enable` to true. |
| istio.enabled | bool | `false` | When using `istio`, spark jobs need to run without a sidecar to properly terminate |
| labelSelectorFilter | string | `""` | A comma-separated list of key=value, or key labels to filter resources during watch and list based on the specified labels. |
| leaderElection.lockName | string | `"spark-operator-lock"` | Leader election lock name. Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-leader-election-for-high-availability. |
| leaderElection.lockNamespace | string | `""` | Optionally store the lock in another namespace. Defaults to operator's namespace |
| logLevel | int | `2` | Set higher levels for more verbose logging |
| metrics.enable | bool | `true` | Enable prometheus metric scraping |
| metrics.endpoint | string | `"/metrics"` | Metrics serving endpoint |
| metrics.port | int | `10254` | Metrics port |
| metrics.portName | string | `"metrics"` | Metrics port name |
| metrics.prefix | string | `""` | Metric prefix, will be added to all exported metrics |
| nameOverride | string | `""` | String to partially override `spark-operator.fullname` template (will maintain the release name) |
| nodeSelector | object | `{}` | Node labels for pod assignment |
| podAnnotations | object | `{}` | Additional annotations to add to the pod |
| podLabels | object | `{}` | Additional labels to add to the pod |
| podMonitor | object | `{"enable":false,"jobLabel":"spark-operator-podmonitor","labels":{},"podMetricsEndpoint":{"interval":"5s","scheme":"http"}}` | Prometheus pod monitor for operator's pod. |
| podMonitor.enable | bool | `false` | If enabled, a pod monitor for operator's pod will be submitted. Note that prometheus metrics should be enabled as well. |
| podMonitor.jobLabel | string | `"spark-operator-podmonitor"` | The label to use to retrieve the job name from |
| podMonitor.labels | object | `{}` | Pod monitor labels |
| podMonitor.podMetricsEndpoint | object | `{"interval":"5s","scheme":"http"}` | Prometheus metrics endpoint properties. `metrics.portName` will be used as a port |
| podSecurityContext | object | `{}` | Pod security context |
| priorityClassName | string | `""` | A priority class to be used for running spark-operator pod. |
| rbac.annotations | object | `{}` | Optional annotations for rbac |
| rbac.create | bool | `false` | **DEPRECATED** use `createRole` and `createClusterRole` |
| rbac.createClusterRole | bool | `true` | Create and use RBAC `ClusterRole` resources |
| rbac.createRole | bool | `true` | Create and use RBAC `Role` resources |
| replicaCount | int | `1` | Desired number of pods, leaderElection will be enabled if this is greater than 1 |
| resourceQuotaEnforcement.enable | bool | `false` | Whether to enable the ResourceQuota enforcement for SparkApplication resources. Requires the webhook to be enabled by setting `webhook.enable` to true. Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-resource-quota-enforcement. |
| resources | object | `{}` | Pod resource requests and limits. Note that each job submission will spawn a JVM within the Spark Operator Pod using "/usr/local/openjdk-11/bin/java -Xmx128m". Kubernetes may kill these Java processes at will to enforce resource limits; when that happens, you will see the error 'failed to run spark-submit for SparkApplication [...]: signal: killed' and may want to increase the memory limits. |
| resyncInterval | int | `30` | Operator resync interval. Note that the operator will respond to events (e.g. create, update) unrelated to this setting |
| securityContext | object | `{}` | Operator container security context |
| serviceAccounts.spark.annotations | object | `{}` | Optional annotations for the spark service account |
| serviceAccounts.spark.create | bool | `true` | Create a service account for spark apps |
| serviceAccounts.spark.name | string | `""` | Optional name for the spark service account |
| serviceAccounts.sparkoperator.annotations | object | `{}` | Optional annotations for the operator service account |
| serviceAccounts.sparkoperator.create | bool | `true` | Create a service account for the operator |
| serviceAccounts.sparkoperator.name | string | `""` | Optional name for the operator service account |
| sidecars | list | `[]` | Sidecar containers |
| sparkJobNamespaces | list | `[""]` | List of namespaces where to run spark jobs |
| tolerations | list | `[]` | List of node taints to tolerate |
| uiService.enable | bool | `true` | Enable UI service creation for Spark application |
| volumeMounts | list | `[]` | |
| volumes | list | `[]` | |
| webhook.enable | bool | `false` | Enable webhook server |
| webhook.namespaceSelector | string | `""` | The webhook server will only operate on namespaces with this label, specified in the form key1=value1,key2=value2. Empty string (default) will operate on all namespaces |
| webhook.objectSelector | string | `""` | The webhook will only operate on resources with this label/s, specified in the form key1=value1,key2=value2, OR key in (value1,value2). Empty string (default) will operate on all objects |
| webhook.port | int | `8080` | Webhook service port |
| webhook.portName | string | `"webhook"` | Webhook container port name and service target port name |
| webhook.timeout | int | `30` | The annotations applied to init job, required to restore certs deleted by the cleanup job during upgrade |
## Maintainers
| Name | Email | Url |
| ---- | ------ | --- |
| yuchaoran2011 | <yuchaoran2011@gmail.com> | |
View file
@ -1,79 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "spark-operator.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "spark-operator.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "spark-operator.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "spark-operator.labels" -}}
helm.sh/chart: {{ include "spark-operator.chart" . }}
{{ include "spark-operator.selectorLabels" . }}
{{- if .Values.commonLabels }}
{{ toYaml .Values.commonLabels }}
{{- end }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "spark-operator.selectorLabels" -}}
app.kubernetes.io/name: {{ include "spark-operator.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to be used by the operator
*/}}
{{- define "spark-operator.serviceAccountName" -}}
{{- if .Values.serviceAccounts.sparkoperator.create -}}
{{ default (include "spark-operator.fullname" .) .Values.serviceAccounts.sparkoperator.name }}
{{- else -}}
{{ default "default" .Values.serviceAccounts.sparkoperator.name }}
{{- end -}}
{{- end -}}
{{/*
Create the name of the service account to be used by spark apps
*/}}
{{- define "spark.serviceAccountName" -}}
{{- if .Values.serviceAccounts.spark.create -}}
{{- $sparkServiceaccount := printf "%s-%s" .Release.Name "spark" -}}
{{ default $sparkServiceaccount .Values.serviceAccounts.spark.name }}
{{- else -}}
{{ default "default" .Values.serviceAccounts.spark.name }}
{{- end -}}
{{- end -}}
View file
@ -1,140 +0,0 @@
# If the admission webhook is enabled, then a post-install step is required
# to generate and install the secret in the operator namespace.
# In the post-install hook, the token corresponding to the operator service account
# is used to authenticate with the Kubernetes API server to install the secret bundle.
{{- $jobNamespaces := .Values.sparkJobNamespaces | default list }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "spark-operator.fullname" . }}
labels:
{{- include "spark-operator.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "spark-operator.selectorLabels" . | nindent 6 }}
strategy:
type: Recreate
template:
metadata:
{{- if or .Values.podAnnotations .Values.metrics.enable }}
annotations:
{{- if .Values.metrics.enable }}
prometheus.io/scrape: "true"
prometheus.io/port: "{{ .Values.metrics.port }}"
prometheus.io/path: {{ .Values.metrics.endpoint }}
{{- end }}
{{- if .Values.podAnnotations }}
{{- toYaml .Values.podAnnotations | trim | nindent 8 }}
{{- end }}
{{- end }}
labels:
{{- include "spark-operator.selectorLabels" . | nindent 8 }}
{{- with .Values.podLabels }}
{{- toYaml . | trim | nindent 8 }}
{{- end }}
spec:
serviceAccountName: {{ include "spark-operator.serviceAccountName" . }}
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
image: {{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- if gt (int .Values.replicaCount) 1 }}
env:
- name: POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
{{- end }}
envFrom:
{{- toYaml .Values.envFrom | nindent 10 }}
securityContext:
{{- toYaml .Values.securityContext | nindent 10 }}
{{- if or .Values.metrics.enable .Values.webhook.enable }}
ports:
{{ if .Values.metrics.enable -}}
- name: {{ .Values.metrics.portName | quote }}
containerPort: {{ .Values.metrics.port }}
{{- end }}
{{ if .Values.webhook.enable -}}
- name: {{ .Values.webhook.portName | quote }}
containerPort: {{ .Values.webhook.port }}
{{- end }}
{{ end -}}
args:
- -v={{ .Values.logLevel }}
- -logtostderr
{{- if eq (len $jobNamespaces) 1 }}
- -namespace={{ index $jobNamespaces 0 }}
{{- end }}
- -enable-ui-service={{ .Values.uiService.enable}}
- -ingress-url-format={{ .Values.ingressUrlFormat }}
- -controller-threads={{ .Values.controllerThreads }}
- -resync-interval={{ .Values.resyncInterval }}
- -enable-batch-scheduler={{ .Values.batchScheduler.enable }}
- -label-selector-filter={{ .Values.labelSelectorFilter }}
{{- if .Values.metrics.enable }}
- -enable-metrics=true
- -metrics-labels=app_type
- -metrics-port={{ .Values.metrics.port }}
- -metrics-endpoint={{ .Values.metrics.endpoint }}
- -metrics-prefix={{ .Values.metrics.prefix }}
{{- end }}
{{- if .Values.webhook.enable }}
- -enable-webhook=true
- -webhook-secret-name={{ include "spark-operator.webhookSecretName" . }}
- -webhook-secret-namespace={{ .Release.Namespace }}
- -webhook-svc-name={{ include "spark-operator.webhookServiceName" . }}
- -webhook-svc-namespace={{ .Release.Namespace }}
- -webhook-config-name={{ include "spark-operator.fullname" . }}-webhook-config
- -webhook-port={{ .Values.webhook.port }}
- -webhook-timeout={{ .Values.webhook.timeout }}
- -webhook-namespace-selector={{ .Values.webhook.namespaceSelector }}
- -webhook-object-selector={{ .Values.webhook.objectSelector }}
{{- end }}
- -enable-resource-quota-enforcement={{ .Values.resourceQuotaEnforcement.enable }}
{{- if gt (int .Values.replicaCount) 1 }}
- -leader-election=true
- -leader-election-lock-namespace={{ default .Release.Namespace .Values.leaderElection.lockNamespace }}
- -leader-election-lock-name={{ .Values.leaderElection.lockName }}
{{- end }}
{{- with .Values.resources }}
resources:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with .Values.volumeMounts }}
volumeMounts:
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with .Values.sidecars }}
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.volumes }}
volumes:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
View file
@ -1,19 +0,0 @@
{{ if and .Values.metrics.enable .Values.podMonitor.enable }}
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: {{ include "spark-operator.name" . -}}-podmonitor
labels: {{ toYaml .Values.podMonitor.labels | nindent 4 }}
spec:
podMetricsEndpoints:
- interval: {{ .Values.podMonitor.podMetricsEndpoint.interval }}
port: {{ .Values.metrics.portName | quote }}
scheme: {{ .Values.podMonitor.podMetricsEndpoint.scheme }}
jobLabel: {{ .Values.podMonitor.jobLabel }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
selector:
matchLabels:
{{- include "spark-operator.selectorLabels" . | nindent 6 }}
{{ end }}
View file
@ -1,148 +0,0 @@
{{- if or .Values.rbac.create .Values.rbac.createClusterRole -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "spark-operator.fullname" . }}
labels:
{{- include "spark-operator.labels" . | nindent 4 }}
{{- with .Values.rbac.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
rules:
- apiGroups:
- ""
resources:
- pods
- persistentvolumeclaims
verbs:
- "*"
- apiGroups:
- ""
resources:
- services
- configmaps
- secrets
verbs:
- create
- get
- delete
- update
- patch
- apiGroups:
- extensions
- networking.k8s.io
resources:
- ingresses
verbs:
- create
- get
- delete
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- apiGroups:
- ""
resources:
- events
verbs:
- create
- update
- patch
- apiGroups:
- ""
resources:
- resourcequotas
verbs:
- get
- list
- watch
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- get
- apiGroups:
- admissionregistration.k8s.io
resources:
- mutatingwebhookconfigurations
- validatingwebhookconfigurations
verbs:
- create
- get
- update
- delete
- apiGroups:
- sparkoperator.k8s.io
resources:
- sparkapplications
- sparkapplications/status
- sparkapplications/finalizers
- scheduledsparkapplications
- scheduledsparkapplications/status
- scheduledsparkapplications/finalizers
verbs:
- "*"
{{- if .Values.batchScheduler.enable }}
# required for the `volcano` batch scheduler
- apiGroups:
- scheduling.incubator.k8s.io
- scheduling.sigs.dev
- scheduling.volcano.sh
resources:
- podgroups
verbs:
- "*"
{{- end }}
{{ if .Values.webhook.enable }}
- apiGroups:
- batch
resources:
- jobs
verbs:
- delete
{{- end }}
{{- if gt (int .Values.replicaCount) 1 }}
- apiGroups:
- coordination.k8s.io
resources:
- leases
resourceNames:
- {{ .Values.leaderElection.lockName }}
verbs:
- get
- update
- patch
- delete
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
{{- end }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "spark-operator.fullname" . }}
labels:
{{- include "spark-operator.labels" . | nindent 4 }}
{{- with .Values.rbac.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
subjects:
- kind: ServiceAccount
name: {{ include "spark-operator.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: {{ include "spark-operator.fullname" . }}
apiGroup: rbac.authorization.k8s.io
{{- end }}
View file
@ -1,12 +0,0 @@
{{- if .Values.serviceAccounts.sparkoperator.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "spark-operator.serviceAccountName" . }}
labels:
{{- include "spark-operator.labels" . | nindent 4 }}
{{- with .Values.serviceAccounts.sparkoperator.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
View file
@ -1,39 +0,0 @@
{{- if or .Values.rbac.create .Values.rbac.createRole }}
{{- $jobNamespaces := .Values.sparkJobNamespaces | default list }}
{{- range $jobNamespace := $jobNamespaces }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: spark-role
namespace: {{ $jobNamespace }}
labels:
{{- include "spark-operator.labels" $ | nindent 4 }}
rules:
- apiGroups:
- ""
resources:
- pods
- services
- configmaps
- persistentvolumeclaims
verbs:
- "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: spark
namespace: {{ $jobNamespace }}
labels:
{{- include "spark-operator.labels" $ | nindent 4 }}
subjects:
- kind: ServiceAccount
name: {{ include "spark.serviceAccountName" $ }}
namespace: {{ $jobNamespace }}
roleRef:
kind: Role
name: spark-role
apiGroup: rbac.authorization.k8s.io
{{- end }}
{{- end }}
View file
@ -1,14 +0,0 @@
{{- if .Values.serviceAccounts.spark.create }}
{{- range $sparkJobNamespace := .Values.sparkJobNamespaces | default (list .Release.Namespace) }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "spark.serviceAccountName" $ }}
namespace: {{ $sparkJobNamespace }}
{{- with $.Values.serviceAccounts.spark.annotations }}
annotations: {{ toYaml . | nindent 4 }}
{{- end }}
labels: {{ include "spark-operator.labels" $ | nindent 4 }}
{{- end }}
{{- end }}
View file
@ -1,14 +0,0 @@
{{/*
Create the name of the secret to be used by webhook
*/}}
{{- define "spark-operator.webhookSecretName" -}}
{{ include "spark-operator.fullname" . }}-webhook-certs
{{- end -}}
{{/*
Create the name of the service to be used by webhook
*/}}
{{- define "spark-operator.webhookServiceName" -}}
{{ include "spark-operator.fullname" . }}-webhook-svc
{{- end -}}
View file
@ -1,13 +0,0 @@
{{- if .Values.webhook.enable -}}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "spark-operator.webhookSecretName" . }}
labels:
{{- include "spark-operator.labels" . | nindent 4 }}
data:
ca-key.pem: ""
ca-cert.pem: ""
server-key.pem: ""
server-cert.pem: ""
{{- end }}
View file
@ -1,15 +0,0 @@
{{- if .Values.webhook.enable -}}
apiVersion: v1
kind: Service
metadata:
name: {{ include "spark-operator.webhookServiceName" . }}
labels:
{{- include "spark-operator.labels" . | nindent 4 }}
spec:
selector:
{{- include "spark-operator.selectorLabels" . | nindent 4 }}
ports:
- port: 443
targetPort: {{ .Values.webhook.portName | quote }}
name: {{ .Values.webhook.portName }}
{{- end }}
View file
@ -1,301 +0,0 @@
suite: Test spark operator deployment
templates:
- deployment.yaml
release:
name: spark-operator
tests:
- it: Should contain namespace arg when sparkJobNamespaces is equal to 1
set:
sparkJobNamespaces:
- ns1
asserts:
- contains:
path: spec.template.spec.containers[0].args
content: -namespace=ns1
- it: Should add pod annotations if podAnnotations is set
set:
podAnnotations:
key1: value1
key2: value2
asserts:
- equal:
path: spec.template.metadata.annotations.key1
value: value1
- equal:
path: spec.template.metadata.annotations.key2
value: value2
- it: Should add prometheus annotations if metrics.enable is true
set:
metrics:
enable: true
port: 10254
endpoint: /metrics
asserts:
- equal:
path: spec.template.metadata.annotations["prometheus.io/scrape"]
value: "true"
- equal:
path: spec.template.metadata.annotations["prometheus.io/port"]
value: "10254"
- equal:
path: spec.template.metadata.annotations["prometheus.io/path"]
value: /metrics
- it: Should add secrets if imagePullSecrets is set
set:
imagePullSecrets:
- name: test-secret1
- name: test-secret2
asserts:
- equal:
path: spec.template.spec.imagePullSecrets[0].name
value: test-secret1
- equal:
path: spec.template.spec.imagePullSecrets[1].name
value: test-secret2
- it: Should add pod securityContext if podSecurityContext is set
set:
podSecurityContext:
runAsUser: 1000
runAsGroup: 2000
fsGroup: 3000
asserts:
- equal:
path: spec.template.spec.securityContext.runAsUser
value: 1000
- equal:
path: spec.template.spec.securityContext.runAsGroup
value: 2000
- equal:
path: spec.template.spec.securityContext.fsGroup
value: 3000
- it: Should use the specified image repository if image.repository and image.tag is set
set:
image:
repository: test-repository
tag: test-tag
asserts:
- equal:
path: spec.template.spec.containers[0].image
value: test-repository:test-tag
- it: Should use the specified image pull policy if image.pullPolicy is set
set:
image:
pullPolicy: Always
asserts:
- equal:
path: spec.template.spec.containers[0].imagePullPolicy
value: Always
- it: Should add container securityContext if securityContext is set
set:
securityContext:
runAsUser: 1000
runAsGroup: 2000
fsGroup: 3000
asserts:
- equal:
path: spec.template.spec.containers[0].securityContext.runAsUser
value: 1000
- equal:
path: spec.template.spec.containers[0].securityContext.runAsGroup
value: 2000
- equal:
path: spec.template.spec.containers[0].securityContext.fsGroup
value: 3000
- it: Should add metric ports if metrics.enable is true
set:
metrics:
enable: true
port: 10254
portName: metrics
asserts:
- contains:
path: spec.template.spec.containers[0].ports
content:
name: metrics
containerPort: 10254
count: 1
- it: Should add webhook ports if webhook.enable is true
set:
webhook:
enable: true
port: 8080
portName: webhook
asserts:
- contains:
path: spec.template.spec.containers[0].ports
content:
name: webhook
containerPort: 8080
count: 1
- it: Should add resources if resources is set
set:
resources:
requests:
memory: "64Mi"
cpu: "250m"
limits:
memory: "128Mi"
cpu: "500m"
asserts:
- equal:
path: spec.template.spec.containers[0].resources
value:
requests:
memory: "64Mi"
cpu: "250m"
limits:
memory: "128Mi"
cpu: "500m"
- it: Should add sidecars if sidecars is set
set:
sidecars:
- name: sidecar1
image: sidecar-image1
- name: sidecar2
image: sidecar-image2
asserts:
- contains:
path: spec.template.spec.containers
content:
name: sidecar1
image: sidecar-image1
count: 1
- contains:
path: spec.template.spec.containers
content:
name: sidecar2
image: sidecar-image2
count: 1
- it: Should add volumes if volumes is set
set:
volumes:
- name: volume1
emptyDir: {}
- name: volume2
emptyDir: {}
asserts:
- contains:
path: spec.template.spec.volumes
content:
name: volume1
emptyDir: {}
count: 1
- contains:
path: spec.template.spec.volumes
content:
name: volume2
emptyDir: {}
count: 1
- it: Should add volume mounts if volumeMounts is set
set:
volumeMounts:
- name: volume1
mountPath: /volume1
- name: volume2
mountPath: /volume2
asserts:
- contains:
path: spec.template.spec.containers[0].volumeMounts
content:
name: volume1
mountPath: /volume1
count: 1
- contains:
path: spec.template.spec.containers[0].volumeMounts
content:
name: volume2
mountPath: /volume2
count: 1
- it: Should add nodeSelector if nodeSelector is set
set:
nodeSelector:
key1: value1
key2: value2
asserts:
- equal:
path: spec.template.spec.nodeSelector.key1
value: value1
- equal:
path: spec.template.spec.nodeSelector.key2
value: value2
- it: Should add affinity if affinity is set
set:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: topology.kubernetes.io/zone
operator: In
values:
- antarctica-east1
- antarctica-west1
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 1
preference:
matchExpressions:
- key: another-node-label-key
operator: In
values:
- another-node-label-value
asserts:
- equal:
path: spec.template.spec.affinity
value:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: topology.kubernetes.io/zone
operator: In
values:
- antarctica-east1
- antarctica-west1
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 1
preference:
matchExpressions:
- key: another-node-label-key
operator: In
values:
- another-node-label-value
- it: Should add tolerations if tolerations is set
set:
tolerations:
- key: key1
operator: Equal
value: value1
effect: NoSchedule
- key: key2
operator: Exists
effect: NoSchedule
asserts:
- equal:
path: spec.template.spec.tolerations
value:
- key: key1
operator: Equal
value: value1
effect: NoSchedule
- key: key2
operator: Exists
effect: NoSchedule
View file
@ -1,90 +0,0 @@
suite: Test spark operator rbac
templates:
- rbac.yaml
release:
name: spark-operator
tests:
- it: Should not render spark operator rbac resources if rbac.create is false and rbac.createClusterRole is false
set:
rbac:
create: false
createClusterRole: false
asserts:
- hasDocuments:
count: 0
- it: Should render spark operator cluster role if rbac.create is true
set:
rbac:
create: true
documentIndex: 0
asserts:
- containsDocument:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
name: spark-operator
- it: Should render spark operator cluster role if rbac.createClusterRole is true
set:
rbac:
createClusterRole: true
documentIndex: 0
asserts:
- containsDocument:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
name: spark-operator
- it: Should render spark operator cluster role binding if rbac.create is true
set:
rbac:
create: true
documentIndex: 1
asserts:
- containsDocument:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
name: spark-operator
- it: Should render spark operator cluster role binding correctly if rbac.createClusterRole is true
set:
rbac:
createClusterRole: true
release:
documentIndex: 1
asserts:
- containsDocument:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
name: spark-operator
- contains:
path: subjects
content:
kind: ServiceAccount
name: spark-operator
namespace: NAMESPACE
count: 1
- equal:
path: roleRef
value:
kind: ClusterRole
name: spark-operator
apiGroup: rbac.authorization.k8s.io
- it: Should add extra annotations to spark operator cluster role if rbac.annotations is set
set:
rbac:
annotations:
key1: value1
key2: value2
documentIndex: 0
asserts:
- equal:
path: metadata.annotations.key1
value: value1
- equal:
path: metadata.annotations.key2
value: value2
View file
@ -1,54 +0,0 @@
suite: Test spark operator service account
templates:
- serviceaccount.yaml
release:
name: spark-operator
tests:
- it: Should not render service account if serviceAccounts.sparkoperator.create is false
set:
serviceAccounts:
sparkoperator:
create: false
asserts:
- hasDocuments:
count: 0
- it: Should render service account if serviceAccounts.sparkoperator.create is true
set:
serviceAccounts:
sparkoperator:
create: true
asserts:
- containsDocument:
apiVersion: v1
kind: ServiceAccount
name: spark-operator
- it: Should use the specified service account name if serviceAccounts.sparkoperator.name is set
set:
serviceAccounts:
sparkoperator:
name: custom-service-account
asserts:
- containsDocument:
apiVersion: v1
kind: ServiceAccount
name: custom-service-account
- it: Should add extra annotations if serviceAccounts.sparkoperator.annotations is set
set:
serviceAccounts:
sparkoperator:
annotations:
key1: value1
key2: value2
asserts:
- equal:
path: metadata.annotations.key1
value: value1
- equal:
path: metadata.annotations.key2
value: value2
View file
@ -1,133 +0,0 @@
suite: Test spark rbac
templates:
- spark-rbac.yaml
release:
name: spark-operator
tests:
- it: Should not render spark rbac resources if rbac.create is false and rbac.createRole is false
set:
rbac:
create: false
createRole: false
asserts:
- hasDocuments:
count: 0
- it: Should render spark role if rbac.create is true
set:
rbac:
create: true
documentIndex: 0
asserts:
- containsDocument:
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
name: spark-role
- it: Should render spark role if rbac.createRole is true
set:
rbac:
createRole: true
documentIndex: 0
asserts:
- containsDocument:
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
name: spark-role
- it: Should render spark role binding if rbac.create is true
set:
rbac:
create: true
documentIndex: 1
asserts:
- containsDocument:
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
name: spark
- it: Should render spark role binding if rbac.createRole is true
set:
rbac:
createRole: true
documentIndex: 1
asserts:
- containsDocument:
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
name: spark
- it: Should create a single spark role with namespace "" by default
documentIndex: 0
asserts:
- containsDocument:
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
name: spark-role
namespace: ""
- it: Should create a single spark role binding with namespace "" by default
values:
- ../values.yaml
documentIndex: 1
asserts:
- containsDocument:
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
name: spark
namespace: ""
- it: Should render multiple spark roles if sparkJobNamespaces is set with multiple values
set:
sparkJobNamespaces:
- ns1
- ns2
documentIndex: 0
asserts:
- containsDocument:
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
name: spark-role
namespace: ns1
- it: Should render multiple spark role bindings if sparkJobNamespaces is set with multiple values
set:
sparkJobNamespaces:
- ns1
- ns2
documentIndex: 1
asserts:
- containsDocument:
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
name: spark
namespace: ns1
- it: Should render multiple spark roles if sparkJobNamespaces is set with multiple values
set:
sparkJobNamespaces:
- ns1
- ns2
documentIndex: 2
asserts:
- containsDocument:
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
name: spark-role
namespace: ns2
- it: Should render multiple spark role bindings if sparkJobNamespaces is set with multiple values
set:
sparkJobNamespaces:
- ns1
- ns2
documentIndex: 3
asserts:
- containsDocument:
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
name: spark
namespace: ns2
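The documentIndex values above imply a per-namespace rendering order; a sketch for the two-namespace case:

# hypothetical overrides for the multi-namespace cases above
sparkJobNamespaces:
  - ns1
  - ns2
# expected document order in spark-rbac.yaml:
#   0: Role        spark-role (namespace: ns1)
#   1: RoleBinding spark      (namespace: ns1)
#   2: Role        spark-role (namespace: ns2)
#   3: RoleBinding spark      (namespace: ns2)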

View file

@ -1,112 +0,0 @@
suite: Test spark service account
templates:
- spark-serviceaccount.yaml
release:
name: spark-operator
tests:
- it: Should not render service account if serviceAccounts.spark.create is false
set:
serviceAccounts:
spark:
create: false
asserts:
- hasDocuments:
count: 0
- it: Should render service account if serviceAccounts.spark.create is true
set:
serviceAccounts:
spark:
create: true
asserts:
- containsDocument:
apiVersion: v1
kind: ServiceAccount
name: spark-operator-spark
- it: Should use the specified service account name if serviceAccounts.spark.name is set
set:
serviceAccounts:
spark:
name: spark
asserts:
- containsDocument:
apiVersion: v1
kind: ServiceAccount
name: spark
- it: Should add extra annotations if serviceAccounts.spark.annotations is set
set:
serviceAccounts:
spark:
annotations:
key1: value1
key2: value2
asserts:
- equal:
path: metadata.annotations.key1
value: value1
- equal:
path: metadata.annotations.key2
value: value2
- it: Should create multiple service accounts if sparkJobNamespaces is set
set:
serviceAccounts:
spark:
name: spark
sparkJobNamespaces:
- ns1
- ns2
- ns3
documentIndex: 0
asserts:
- hasDocuments:
count: 3
- containsDocument:
apiVersion: v1
kind: ServiceAccount
name: spark
namespace: ns1
- it: Should create multiple service accounts if sparkJobNamespaces is set
set:
serviceAccounts:
spark:
name: spark
sparkJobNamespaces:
- ns1
- ns2
- ns3
documentIndex: 1
asserts:
- hasDocuments:
count: 3
- containsDocument:
apiVersion: v1
kind: ServiceAccount
name: spark
namespace: ns2
- it: Should create multiple service accounts if sparkJobNamespaces is set
set:
serviceAccounts:
spark:
name: spark
sparkJobNamespaces:
- ns1
- ns2
- ns3
documentIndex: 2
asserts:
- hasDocuments:
count: 3
- containsDocument:
apiVersion: v1
kind: ServiceAccount
name: spark
namespace: ns3
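Each of the three documents asserted above would look roughly like this ServiceAccount, one per listed namespace (sketch for ns1; chart-added metadata omitted):

apiVersion: v1
kind: ServiceAccount
metadata:
  name: spark
  namespace: ns1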

View file

@ -1,31 +0,0 @@
suite: Test spark operator webhook secret
templates:
- webhook/secret.yaml
release:
name: spark-operator
namespace: spark-operator
tests:
- it: Should not render the webhook secret if webhook.enable is false
asserts:
- hasDocuments:
count: 0
- it: Should render the webhook secret with empty data fields
set:
webhook:
enable: true
asserts:
- containsDocument:
apiVersion: v1
kind: Secret
name: spark-operator-webhook-certs
- equal:
path: data
value:
ca-key.pem: ""
ca-cert.pem: ""
server-key.pem: ""
server-cert.pem: ""
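The asserts above describe roughly this Secret; the empty data fields are presumably populated with generated certificates by the operator at runtime (sketch):

apiVersion: v1
kind: Secret
metadata:
  name: spark-operator-webhook-certs
  namespace: spark-operator
data:
  ca-key.pem: ""
  ca-cert.pem: ""
  server-key.pem: ""
  server-cert.pem: ""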

View file

@ -1,33 +0,0 @@
suite: Test spark operator webhook service
templates:
- webhook/service.yaml
release:
name: spark-operator
tests:
- it: Should not render the webhook service if webhook.enable is false
set:
webhook:
enable: false
asserts:
- hasDocuments:
count: 0
- it: Should render the webhook service correctly if webhook.enable is true
set:
webhook:
enable: true
portName: webhook
asserts:
- containsDocument:
apiVersion: v1
kind: Service
name: spark-operator-webhook-svc
- equal:
path: spec.ports[0]
value:
port: 443
targetPort: webhook
name: webhook
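Correspondingly, a sketch of the Service those asserts describe, fronting the webhook container port named by `webhook.portName`:

apiVersion: v1
kind: Service
metadata:
  name: spark-operator-webhook-svc
spec:
  ports:
    - port: 443
      targetPort: webhook
      name: webhook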

View file

@ -1,189 +0,0 @@
# Default values for spark-operator.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# -- Common labels to add to the resources
commonLabels: {}
# replicaCount -- Desired number of pods; leaderElection will be enabled
# if this is greater than 1
replicaCount: 1
image:
# -- Image repository
repository: docker.io/kubeflow/spark-operator
# -- Image pull policy
pullPolicy: IfNotPresent
# -- If set, override the image tag whose default is the chart appVersion.
tag: ""
# -- Image pull secrets
imagePullSecrets: []
# -- String to partially override `spark-operator.fullname` template (will maintain the release name)
nameOverride: ""
# -- String to override release name
fullnameOverride: ""
rbac:
# -- **DEPRECATED** use `createRole` and `createClusterRole`
create: false
# -- Create and use RBAC `Role` resources
createRole: true
# -- Create and use RBAC `ClusterRole` resources
createClusterRole: true
# -- Optional annotations for rbac
annotations: {}
serviceAccounts:
spark:
# -- Create a service account for spark apps
create: true
# -- Optional name for the spark service account
name: ""
# -- Optional annotations for the spark service account
annotations: {}
sparkoperator:
# -- Create a service account for the operator
create: true
# -- Optional name for the operator service account
name: ""
# -- Optional annotations for the operator service account
annotations: {}
# -- List of namespaces in which to run spark jobs
sparkJobNamespaces:
- ""
# - ns1
# -- Operator concurrency, higher values might increase memory usage
controllerThreads: 10
# -- Operator resync interval. Note that the operator will respond to events (e.g. create, update)
# unrelated to this setting
resyncInterval: 30
uiService:
# -- Enable UI service creation for Spark application
enable: true
# -- Ingress URL format.
# Requires the UI service to be enabled by setting `uiService.enable` to true.
ingressUrlFormat: ""
# -- Set higher levels for more verbose logging
logLevel: 2
# -- Pod environment variable sources
envFrom: []
# podSecurityContext -- Pod security context
podSecurityContext: {}
# securityContext -- Operator container security context
securityContext: {}
# sidecars -- Sidecar containers
sidecars: []
# volumes -- Operator volumes
volumes: []
# volumeMounts -- Operator volumeMounts
volumeMounts: []
webhook:
# -- Enable webhook server
enable: false
# -- Webhook service port
port: 8080
# -- Webhook container port name and service target port name
portName: webhook
# -- The webhook server will only operate on namespaces with this label, specified in the form key1=value1,key2=value2.
# Empty string (default) will operate on all namespaces
namespaceSelector: ""
  # -- The webhook will only operate on resources with these labels, specified in the form key1=value1,key2=value2, OR key in (value1,value2).
# Empty string (default) will operate on all objects
objectSelector: ""
  # -- Webhook timeout in seconds
  timeout: 30
metrics:
# -- Enable prometheus metric scraping
enable: true
# -- Metrics port
port: 10254
# -- Metrics port name
portName: metrics
# -- Metrics serving endpoint
endpoint: /metrics
# -- Metric prefix, will be added to all exported metrics
prefix: ""
  # -- Prometheus pod monitor for the operator's pod.
  podMonitor:
    # -- If enabled, a pod monitor for the operator's pod will be submitted. Note that prometheus metrics should be enabled as well.
enable: false
# -- Pod monitor labels
labels: {}
# -- The label to use to retrieve the job name from
jobLabel: spark-operator-podmonitor
# -- Prometheus metrics endpoint properties. `metrics.portName` will be used as a port
podMetricsEndpoint:
scheme: http
interval: 5s
# nodeSelector -- Node labels for pod assignment
nodeSelector: {}
# tolerations -- List of node taints to tolerate
tolerations: []
# affinity -- Affinity for pod assignment
affinity: {}
# podAnnotations -- Additional annotations to add to the pod
podAnnotations: {}
# podLabels -- Additional labels to add to the pod
podLabels: {}
# resources -- Pod resource requests and limits
# Note that each job submission will spawn a JVM within the Spark Operator Pod using "/usr/local/openjdk-11/bin/java -Xmx128m".
# Kubernetes may kill these Java processes at will to enforce resource limits. When that happens, you will see the following error:
# 'failed to run spark-submit for SparkApplication [...]: signal: killed' - when this happens, you may want to increase memory limits.
resources: {}
# limits:
# cpu: 100m
# memory: 300Mi
# requests:
# cpu: 100m
# memory: 300Mi
batchScheduler:
  # -- Enable batch scheduler for spark job scheduling. If enabled, users can specify the batch scheduler name in their spark applications
enable: false
resourceQuotaEnforcement:
# -- Whether to enable the ResourceQuota enforcement for SparkApplication resources.
# Requires the webhook to be enabled by setting `webhook.enable` to true.
# Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-resource-quota-enforcement.
enable: false
leaderElection:
# -- Leader election lock name.
# Ref: https://github.com/kubeflow/spark-operator/blob/master/docs/user-guide.md#enabling-leader-election-for-high-availability.
lockName: "spark-operator-lock"
# -- Optionally store the lock in another namespace. Defaults to operator's namespace
lockNamespace: ""
istio:
# -- When using `istio`, spark jobs need to run without a sidecar to properly terminate
enabled: false
# labelSelectorFilter -- A comma-separated list of key=value, or key labels to filter resources during watch and list based on the specified labels.
labelSelectorFilter: ""
# priorityClassName -- A priority class to be used for running the spark-operator pod.
priorityClassName: ""
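As a usage note, a small override file layering on these defaults might look like this (hypothetical example; every key shown exists in the file above):

# overrides.yaml -- hypothetical values layered on the defaults above
replicaCount: 2      # values greater than 1 enable leaderElection
webhook:
  enable: true       # also causes the webhook secret and service to render
metrics:
  podMonitor:
    enable: true     # requires metrics.enable: true (the default)
resources:
  limits:
    cpu: 100m
    memory: 300Mi
  requests:
    cpu: 100m
    memory: 300Mi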

View file

@ -1,11 +0,0 @@
---
resources:
- spark-master-controller.yml
- spark-master-service.yml
- spark-ui-proxy-controller.yml
- spark-ui-proxy-ingress.yml
- spark-ui-proxy-service.yml
- spark-worker-controller.yml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
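A parent overlay could pull this standalone Spark stack in as a base; a minimal sketch (the relative path is an assumption):

---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - ../spark   # hypothetical path to the directory containing the kustomization above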

Some files were not shown because too many files have changed in this diff.