Compare commits

main..mandersson-kubedev

No commits in common. "main" and "mandersson-kubedev" have entirely different histories.

143 changed files with 213 additions and 13470 deletions


@@ -11,7 +11,7 @@ spec:
     service:
       name: argocd-server
       port:
-        number: 80
+        number: 8443
   ingressClassName: traefik
   tls:
     - hosts:


@@ -1,15 +0,0 @@
-apiVersion: cert-manager.io/v1
-kind: Issuer
-metadata:
-  name: letsencrypt
-spec:
-  acme:
-    server: https://acme-v02.api.letsencrypt.org/directory
-    email: drive@sunet.se
-    privateKeySecretRef:
-      name: letsencrypt
-    solvers:
-      - http01:
-          ingress:
-            class: nginx


@@ -1,22 +0,0 @@
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  name: argocd-ingress
-  namespace: argocd
-spec:
-  ingressClassName: nginx
-  tls:
-    - hosts:
-        - argocd.drive.test.sunet.se
-      secretName: tls-secret
-  rules:
-    - host: argocd.drive.test.sunet.se
-      http:
-        paths:
-          - path: /
-            pathType: Prefix
-            backend:
-              service:
-                name: argocd-server
-                port:
-                  name: https


@@ -1,3 +0,0 @@
-resources:
-  - argocd-ingress.yaml
-  - argocd-cert-issuer.yaml


@@ -1,28 +0,0 @@
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  name: argocd-ingress
-  namespace: argocd
-spec:
-  defaultBackend:
-    service:
-      name: argocd-server
-      port:
-        number: 80
-  ingressClassName: nginx
-  tls:
-    - hosts:
-        - argocd.drive.test.sunet.dev
-      secretName: tls-secret
-  rules:
-    - host: argocd.drive.test.sunet.dev
-      http:
-        paths:
-          - path: /
-            pathType: Prefix
-            backend:
-              service:
-                name: argocd-server
-                port:
-                  number: 80


@@ -1,6 +0,0 @@
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-resources:
-  - ../../base
-patches:
-  - path: argocd-ingress.yaml


@@ -1,30 +0,0 @@
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  name: argocd-ingress
-  annotations:
-    cert-manager.io/issuer: "letsencrypt"
-    acme.cert-manager.io/http01-edit-in-place: "true"
-  namespace: argocd
-spec:
-  defaultBackend:
-    service:
-      name: argocd-server
-      port:
-        number: 80
-  ingressClassName: nginx
-  tls:
-    - hosts:
-        - sunet-argocd.drive.sunet.se
-      secretName: tls-secret
-  rules:
-    - host: sunet-argocd.drive.sunet.se
-      http:
-        paths:
-          - path: /
-            pathType: Prefix
-            backend:
-              service:
-                name: argocd-server
-                port:
-                  number: 80


@@ -1,6 +0,0 @@
----
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-resources: [../../../base]
-patches:
-  - path: argocd-ingress.yaml


@@ -1,30 +0,0 @@
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  name: argocd-ingress
-  namespace: argocd
-  annotations:
-    cert-manager.io/issuer: "letsencrypt"
-    acme.cert-manager.io/http01-edit-in-place: "true"
-spec:
-  defaultBackend:
-    service:
-      name: argocd-server
-      port:
-        number: 80
-  ingressClassName: nginx
-  tls:
-    - hosts:
-        - vr-argocd.drive.sunet.se
-      secretName: tls-secret
-  rules:
-    - host: vr-argocd.drive.sunet.se
-      http:
-        paths:
-          - path: /
-            pathType: Prefix
-            backend:
-              service:
-                name: argocd-server
-                port:
-                  number: 80


@@ -1,6 +0,0 @@
----
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-resources: [../../../base]
-patches:
-  - path: argocd-ingress.yaml


@@ -1,27 +0,0 @@
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  name: argocd-ingress
-  namespace: argocd
-spec:
-  defaultBackend:
-    service:
-      name: argocd-server
-      port:
-        number: 80
-  ingressClassName: nginx
-  tls:
-    - hosts:
-        - argocd.drive.test.sunet.se
-      secretName: tls-secret
-  rules:
-    - host: argocd.drive.test.sunet.se
-      http:
-        paths:
-          - path: /
-            pathType: Prefix
-            backend:
-              service:
-                name: argocd-server
-                port:
-                  number: 80


@@ -1,6 +0,0 @@
----
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-resources: [../../../base]
-patches:
-  - path: argocd-ingress.yaml


@@ -1,30 +0,0 @@
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  name: argocd-ingress
-  namespace: argocd
-  annotations:
-    cert-manager.io/issuer: "letsencrypt"
-    acme.cert-manager.io/http01-edit-in-place: "true"
-spec:
-  defaultBackend:
-    service:
-      name: argocd-server
-      port:
-        number: 80
-  ingressClassName: nginx
-  tls:
-    - hosts:
-        - sunet-argocd.drive.test.sunet.se
-      secretName: tls-secret
-  rules:
-    - host: sunet-argocd.drive.test.sunet.se
-      http:
-        paths:
-          - path: /
-            pathType: Prefix
-            backend:
-              service:
-                name: argocd-server
-                port:
-                  number: 80


@@ -1,6 +0,0 @@
----
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-resources: [../../../base]
-patches:
-  - path: argocd-ingress.yaml


@@ -1,30 +0,0 @@
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  name: argocd-ingress
-  namespace: argocd
-  annotations:
-    cert-manager.io/issuer: "letsencrypt"
-    acme.cert-manager.io/http01-edit-in-place: "true"
-spec:
-  defaultBackend:
-    service:
-      name: argocd-server
-      port:
-        number: 80
-  ingressClassName: nginx
-  tls:
-    - hosts:
-        - vr-argocd.drive.test.sunet.se
-      secretName: tls-secret
-  rules:
-    - host: vr-argocd.drive.test.sunet.se
-      http:
-        paths:
-          - path: /
-            pathType: Prefix
-            backend:
-              service:
-                name: argocd-server
-                port:
-                  number: 80


@@ -1,6 +0,0 @@
----
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-resources: [../../../base]
-patches:
-  - path: argocd-ingress.yaml


@@ -3,4 +3,4 @@ kind: Kustomization
 namespace: argocd
 resources:
-  - https://raw.githubusercontent.com/argoproj/argo-cd/v2.10.4/manifests/ha/install.yaml
+  - https://raw.githubusercontent.com/argoproj/argo-cd/v2.10.0/manifests/ha/install.yaml


@@ -69,7 +69,7 @@ spec:
           - mountPath: /var/lib/csi/sockets/pluginproxy/
             name: socket-dir
         - name: csi-resizer
-          image: registry.k8s.io/sig-storage/csi-resizer:v1.8.0
+          image: registry.k8s.io/sig-storage/csi-resizer:v1.7.0
           args:
             - "--csi-address=$(ADDRESS)"
             - "--timeout=3m"
@@ -93,7 +93,7 @@ spec:
           - mountPath: /var/lib/csi/sockets/pluginproxy/
             name: socket-dir
         - name: cinder-csi-plugin
-          image: registry.k8s.io/provider-os/cinder-csi-plugin:v1.28.2
+          image: registry.k8s.io/provider-os/cinder-csi-plugin:v1.27.1
           args:
             - /bin/cinder-csi-plugin
            - "--endpoint=$(CSI_ENDPOINT)"


@@ -30,7 +30,7 @@ spec:
       restartPolicy: Always
       containers:
         - name: customer
-          image: docker.sunet.se/drive/nextcloud-custom:29.0.9.3-1
+          image: docker.sunet.se/drive/nextcloud-custom:27.1.6.3-5
           volumeMounts:
             - name: nextcloud-data
               mountPath: /var/www/html/config/
@@ -127,7 +127,7 @@ spec:
             - name: NEXTCLOUD_ADMIN_USER
               value: admin
             - name: NEXTCLOUD_VERSION_STRING
-              value: "28.0.3.3"
+              value: "26.0.1.2"
             - name: NEXTCLOUD_ADMIN_PASSWORD
               valueFrom:
                 secretKeyRef:


@@ -4,13 +4,15 @@ kind: Ingress
 metadata:
   name: customer-ingress
   annotations:
-    kubernetes.io/ingress.class: nginx
+    kubernetes.io/ingress.class: traefik
+    traefik.ingress.kubernetes.io/router.entrypoints: websecure
+    traefik.ingress.kubernetes.io/router.tls: "true"
 spec:
   tls:
     - hosts:
        - nordunet.drive.test.sunet.se
      secretName: tls-secret
-  ingressClassName: nginx
  rules:
    - host: nordunet.drive.test.sunet.se
      http:


@@ -4,13 +4,15 @@ kind: Ingress
 metadata:
   name: customer-ingress
   annotations:
-    kubernetes.io/ingress.class: nginx
+    kubernetes.io/ingress.class: traefik
+    traefik.ingress.kubernetes.io/router.entrypoints: websecure
+    traefik.ingress.kubernetes.io/router.tls: "true"
 spec:
   tls:
     - hosts:
        - vinnova.drive.test.sunet.se
      secretName: tls-secret
-  ingressClassName: nginx
  rules:
    - host: vinnova.drive.test.sunet.se
      http:


@@ -1,6 +1,7 @@
 apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization
 resources:
-  - ../../base
+  - ../../../base
 patches:
-  - path: argocd-ingress.yaml
+  - path: nextcloud-deployment.yml
+  - path: nextcloud-ingress.yml
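A quick way to sanity-check an overlay change like this is to render it locally and confirm that both patches apply, e.g. with kubectl kustomize pointed at the overlay directory (the directory name itself is not shown in this diff); the output should contain the patched Nextcloud image and the patched Ingress host.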


@@ -0,0 +1,35 @@
+kind: StatefulSet
+apiVersion: apps/v1
+metadata:
+  name: customer-node
+  labels:
+    app: customer-node
+spec:
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: customer-node
+    spec:
+      initContainers:
+        - image: docker.sunet.se/sunet/docker-jinja:latest
+          name: init-config
+          env:
+            - name: MYSQL_DATABASE
+              value: "nextcloud_vr"
+            - name: MYSQL_USER
+              value: "nextcloud_vr"
+            - name: GSS_MASTER_URL
+              value: "https://drive.test.sunet.se"
+            - name: LOOKUP_SERVER
+              value: "https://lookup.drive.test.sunet.se"
+            - name: MAIL_DOMAIN
+              value: "drive.test.sunet.se"
+            - name: MAIL_SMTPNAME
+              value: "noreply@drive.test.sunet.se"
+            - name: NEXTCLOUD_TRUSTED_DOMAINS
+              value: "vr.drive.test.sunet.se"
+            - name: OBJECTSTORE_S3_BUCKET
+              value: "primary-vr-drive-test.sunet.se"
+            - name: SITE_NAME
+              value: "vr.drive.test.sunet.se"


@@ -1,31 +1,26 @@
----
 apiVersion: networking.k8s.io/v1
 kind: Ingress
 metadata:
-  name: argocd-ingress
-  namespace: argocd
+  name: customer-ingress
   annotations:
     kubernetes.io/ingress.class: traefik
     traefik.ingress.kubernetes.io/router.entrypoints: websecure
     traefik.ingress.kubernetes.io/router.tls: "true"
 spec:
-  defaultBackend:
-    service:
-      name: argocd-server
-      port:
-        number: 8443
   tls:
     - hosts:
-        - argocd.drive.sunet.se
+        - vr.drive.test.sunet.se
       secretName: tls-secret
   rules:
-    - host: argocd.drive.sunet.se
+    - host: vr.drive.test.sunet.se
       http:
         paths:
           - path: /
             pathType: Prefix
             backend:
               service:
-                name: argocd-server
+                name: customer-node
                 port:
                   number: 80


@@ -1,30 +0,0 @@
----
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  name: health-ingress
-  namespace: health
-  annotations:
-    kubernetes.io/ingress.class: nginx
-spec:
-  defaultBackend:
-    service:
-      name: health-node
-      port:
-        number: 8443
-  ingressClassName: nginx
-  tls:
-    - hosts:
-        - sunet-kube.drive.sunet.se
-      secretName: tls-secret
-  rules:
-    - host: sunet-kube.drive.sunet.se
-      http:
-        paths:
-          - path: /
-            pathType: Prefix
-            backend:
-              service:
-                name: health-node
-                port:
-                  number: 8080


@@ -1,6 +0,0 @@
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-resources:
-  - ../../base
-patches:
-  - path: health-ingress.yml


@@ -1,30 +0,0 @@
----
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  name: health-ingress
-  namespace: health
-  annotations:
-    kubernetes.io/ingress.class: nginx
-spec:
-  defaultBackend:
-    service:
-      name: health-node
-      port:
-        number: 8443
-  ingressClassName: nginx
-  tls:
-    - hosts:
-        - sunet-kube.drive.test.sunet.se
-      secretName: tls-secret
-  rules:
-    - host: sunet-kube.drive.test.sunet.se
-      http:
-        paths:
-          - path: /
-            pathType: Prefix
-            backend:
-              service:
-                name: health-node
-                port:
-                  number: 8080


@@ -1,6 +0,0 @@
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-resources:
-  - ../../base
-patches:
-  - path: health-ingress.yml


@@ -5,18 +5,20 @@ metadata:
   name: health-ingress
   namespace: health
   annotations:
-    kubernetes.io/ingress.class: nginx
+    traefik.ingress.kubernetes.io/router.entrypoints: websecure
+    traefik.ingress.kubernetes.io/router.tls: "true"
 spec:
   defaultBackend:
     service:
       name: health-node
       port:
         number: 8443
-  ingressClassName: nginx
+  ingressClassName: traefik
   tls:
     - hosts:
        - kube.drive.test.sunet.se
      secretName: tls-secret
  rules:
    - host: kube.drive.test.sunet.se
      http:


@@ -1,27 +1,27 @@
 annotations:
   artifacthub.io/images: |
-    - image: quay.io/jupyterhub/configurable-http-proxy:4.6.2
+    - image: quay.io/jupyterhub/configurable-http-proxy:4.6.1
       name: configurable-http-proxy
-    - image: quay.io/jupyterhub/k8s-hub:4.0.0
+    - image: quay.io/jupyterhub/k8s-hub:3.2.1
       name: k8s-hub
-    - image: quay.io/jupyterhub/k8s-image-awaiter:4.0.0
+    - image: quay.io/jupyterhub/k8s-image-awaiter:3.2.1
       name: k8s-image-awaiter
-    - image: quay.io/jupyterhub/k8s-network-tools:4.0.0
+    - image: quay.io/jupyterhub/k8s-network-tools:3.2.1
       name: k8s-network-tools
-    - image: quay.io/jupyterhub/k8s-secret-sync:4.0.0
+    - image: quay.io/jupyterhub/k8s-secret-sync:3.2.1
       name: k8s-secret-sync
-    - image: quay.io/jupyterhub/k8s-singleuser-sample:4.0.0
+    - image: quay.io/jupyterhub/k8s-singleuser-sample:3.2.1
       name: k8s-singleuser-sample
-    - image: registry.k8s.io/kube-scheduler:v1.30.6
+    - image: registry.k8s.io/kube-scheduler:v1.26.11
       name: kube-scheduler
-    - image: registry.k8s.io/pause:3.10
+    - image: registry.k8s.io/pause:3.9
       name: pause
-    - image: registry.k8s.io/pause:3.10
+    - image: registry.k8s.io/pause:3.9
       name: pause
-    - image: traefik:v3.2.0
+    - image: traefik:v2.10.5
       name: traefik
 apiVersion: v2
-appVersion: 5.2.1
+appVersion: 4.0.2
 description: Multi-user Jupyter installation
 home: https://z2jh.jupyter.org
 icon: https://hub.jupyter.org/helm-chart/images/hublogo.svg
@@ -29,7 +29,7 @@ keywords:
   - jupyter
   - jupyterhub
   - z2jh
-kubeVersion: '>=1.28.0-0'
+kubeVersion: '>=1.23.0-0'
 maintainers:
   - email: erik@sundellopensource.se
     name: Erik Sundell
@@ -38,4 +38,4 @@ maintainers:
     name: jupyterhub
 sources:
   - https://github.com/jupyterhub/zero-to-jupyterhub-k8s
-version: 4.0.0
+version: 3.2.1


@@ -107,35 +107,28 @@ c.JupyterHub.hub_connect_url = (
 )
 # implement common labels
-# This mimics the jupyterhub.commonLabels helper, but declares managed-by to
-# kubespawner instead of helm.
-#
-# The labels app and release are old labels enabled to be deleted in z2jh 5, but
-# for now retained to avoid a breaking change in z2jh 4 that would force user
-# server restarts. Restarts would be required because NetworkPolicy resources
-# must select old/new pods with labels that then needs to be seen on both
-# old/new pods, and we want these resources to keep functioning for old/new user
-# server pods during an upgrade.
-#
+# this duplicates the jupyterhub.commonLabels helper
 common_labels = c.KubeSpawner.common_labels = {}
-common_labels["app.kubernetes.io/name"] = common_labels["app"] = get_config(
+common_labels["app"] = get_config(
     "nameOverride",
     default=get_config("Chart.Name", "jupyterhub"),
 )
-release = get_config("Release.Name")
-if release:
-    common_labels["app.kubernetes.io/instance"] = common_labels["release"] = release
+common_labels["heritage"] = "jupyterhub"
 chart_name = get_config("Chart.Name")
 chart_version = get_config("Chart.Version")
 if chart_name and chart_version:
-    common_labels["helm.sh/chart"] = common_labels["chart"] = (
-        f"{chart_name}-{chart_version.replace('+', '_')}"
+    common_labels["chart"] = "{}-{}".format(
+        chart_name,
+        chart_version.replace("+", "_"),
     )
-common_labels["app.kubernetes.io/managed-by"] = "kubespawner"
+release = get_config("Release.Name")
+if release:
+    common_labels["release"] = release
 c.KubeSpawner.namespace = os.environ.get("POD_NAMESPACE", "default")
 # Max number of consecutive failures before the Hub restarts itself
+# requires jupyterhub 0.9.2
 set_config_if_not_none(
     c.Spawner,
     "consecutive_failure_limit",
@@ -256,8 +249,7 @@ if tolerations:
 storage_type = get_config("singleuser.storage.type")
 if storage_type == "dynamic":
     pvc_name_template = get_config("singleuser.storage.dynamic.pvcNameTemplate")
-    if pvc_name_template:
-        c.KubeSpawner.pvc_name_template = pvc_name_template
+    c.KubeSpawner.pvc_name_template = pvc_name_template
     volume_name_template = get_config("singleuser.storage.dynamic.volumeNameTemplate")
     c.KubeSpawner.storage_pvc_ensure = True
     set_config_if_not_none(
@@ -276,14 +268,13 @@ if storage_type == "dynamic":
     c.KubeSpawner.volumes = [
         {
             "name": volume_name_template,
-            "persistentVolumeClaim": {"claimName": "{pvc_name}"},
+            "persistentVolumeClaim": {"claimName": pvc_name_template},
         }
     ]
     c.KubeSpawner.volume_mounts = [
         {
             "mountPath": get_config("singleuser.storage.homeMountPath"),
             "name": volume_name_template,
-            "subPath": get_config("singleuser.storage.dynamic.subPath"),
         }
     ]
 elif storage_type == "static":
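For reference, the two sides of the first hunk produce differently named label sets. A minimal sketch of the resulting dict on the right-hand (3.2.1) side, assuming the default chart name and a hypothetical release named vr-jupyterhub:

    # Hypothetical result of the 3.2.1 code path above:
    common_labels = {
        "app": "jupyterhub",
        "heritage": "jupyterhub",
        "chart": "jupyterhub-3.2.1",
        "release": "vr-jupyterhub",
    }

The left-hand (4.0.0) side would instead emit each value twice, under the old key and its app.kubernetes.io/* equivalent, with managed-by set to kubespawner.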


@@ -3,7 +3,6 @@ Utility methods for use in jupyterhub_config.py and dynamic subconfigs.
 Methods here can be imported by extraConfig in values.yaml
 """
 import os
-
 from collections.abc import Mapping
 from functools import lru_cache


@@ -48,6 +48,7 @@
   - commonLabels | uses appLabel
   - labels | uses commonLabels
   - matchLabels | uses labels
+  - podCullerSelector | uses matchLabels
 
 ## Example usage
@@ -111,62 +112,31 @@
 {{- /*
   jupyterhub.commonLabels:
     Foundation for "jupyterhub.labels".
-    Provides old labels:
-      app
-      release
-      chart (omitted for matchLabels)
-      heritage (omitted for matchLabels)
-    Provides modern labels (omitted for matchLabels):
-      app.kubernetes.io/name ("app")
-      app.kubernetes.io/instance ("release")
-      helm.sh/chart ("chart")
-      app.kubernetes.io/managed-by ("heritage")
+    Provides labels: app, release, (chart and heritage).
 */}}
 {{- define "jupyterhub.commonLabels" -}}
-app: {{ .appLabel | default (include "jupyterhub.appLabel" .) | quote }}
-release: {{ .Release.Name | quote }}
+app: {{ .appLabel | default (include "jupyterhub.appLabel" .) }}
+release: {{ .Release.Name }}
 {{- if not .matchLabels }}
 chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
-heritage: {{ .Release.Service }}
-app.kubernetes.io/name: {{ .appLabel | default (include "jupyterhub.appLabel" .) | quote }}
-app.kubernetes.io/instance: {{ .Release.Name | quote }}
-helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
-app.kubernetes.io/managed-by: {{ .Release.Service }}
+heritage: {{ .heritageLabel | default .Release.Service }}
 {{- end }}
 {{- end }}
 
 {{- /*
   jupyterhub.labels:
-    Provides old labels:
-      component
-      app
-      release
-      chart (omitted for matchLabels)
-      heritage (omitted for matchLabels)
-    Provides modern labels (omitted for matchLabels):
-      app.kubernetes.io/component ("component")
-      app.kubernetes.io/name ("app")
-      app.kubernetes.io/instance release ("release")
-      helm.sh/chart ("chart")
-      app.kubernetes.io/managed-by ("heritage")
+    Provides labels: component, app, release, (chart and heritage).
 */}}
 {{- define "jupyterhub.labels" -}}
 component: {{ include "jupyterhub.componentLabel" . }}
-{{- if not .matchLabels }}
-app.kubernetes.io/component: {{ include "jupyterhub.componentLabel" . }}
-{{- end }}
 {{ include "jupyterhub.commonLabels" . }}
 {{- end }}
 
 {{- /*
   jupyterhub.matchLabels:
-    Provides old labels:
-      component
-      app
-      release
+    Used to provide pod selection labels: component, app, release.
 */}}
 {{- define "jupyterhub.matchLabels" -}}
 {{- $_ := merge (dict "matchLabels" true) . -}}


@@ -5,7 +5,7 @@ metadata:
   labels:
     {{- include "jupyterhub.labels" . | nindent 4 }}
 spec:
-  {{- if not (typeIs "<nil>" .Values.hub.revisionHistoryLimit) }}
+  {{- if typeIs "int" .Values.hub.revisionHistoryLimit }}
   revisionHistoryLimit: {{ .Values.hub.revisionHistoryLimit }}
   {{- end }}
   replicas: 1
@@ -97,6 +97,9 @@ spec:
         {{- . | toYaml | nindent 8 }}
         {{- end }}
       containers:
+        {{- with .Values.hub.extraContainers }}
+        {{- . | toYaml | nindent 8 }}
+        {{- end }}
         - name: hub
           image: {{ .Values.hub.image.name }}:{{ .Values.hub.image.tag }}
           {{- with .Values.hub.command }}
@@ -241,9 +244,6 @@ spec:
               path: {{ .Values.hub.baseUrl | trimSuffix "/" }}/hub/health
               port: http
           {{- end }}
-        {{- with .Values.hub.extraContainers }}
-        {{- . | toYaml | nindent 8 }}
-        {{- end }}
       {{- with .Values.hub.extraPodSpec }}
       {{- . | toYaml | nindent 6 }}
      {{- end }}


@@ -8,7 +8,7 @@ type: Opaque
 data:
   {{- $values := merge dict .Values }}
   {{- /* also passthrough subset of Chart / Release */}}
-  {{- $_ := set $values "Chart" (dict "Name" .Chart.Name "Version" .Chart.Version "AppVersion" .Chart.AppVersion) }}
+  {{- $_ := set $values "Chart" (dict "Name" .Chart.Name "Version" .Chart.Version) }}
   {{- $_ := set $values "Release" (pick .Release "Name" "Namespace" "Service") }}
   values.yaml: {{ $values | toYaml | b64enc | quote }}


@@ -31,9 +31,6 @@ spec:
       {{- with .Values.hub.service.ports.nodePort }}
       nodePort: {{ . }}
       {{- end }}
-      {{- with .Values.hub.service.ports.appProtocol }}
-      appProtocol: {{ . }}
-      {{- end }}
   {{- with .Values.hub.service.extraPorts }}
   {{- . | toYaml | nindent 4 }}


@@ -34,7 +34,7 @@ spec:
     type: RollingUpdate
     rollingUpdate:
       maxUnavailable: 100%
-  {{- if not (typeIs "<nil>" .Values.prePuller.revisionHistoryLimit) }}
+  {{- if typeIs "int" .Values.prePuller.revisionHistoryLimit }}
   revisionHistoryLimit: {{ .Values.prePuller.revisionHistoryLimit }}
   {{- end }}
   template:


@@ -24,9 +24,6 @@ spec:
                 name: {{ include "jupyterhub.proxy-public.fullname" $ }}
                 port:
                   name: http
-          {{- with $.Values.ingress.extraPaths }}
-          {{- . | toYaml | nindent 10 }}
-          {{- end }}
     {{- if $host }}
       host: {{ $host | quote }}
     {{- end }}


@@ -8,7 +8,7 @@ metadata:
   labels:
     {{- include "jupyterhub.labels" . | nindent 4 }}
 spec:
-  {{- if not (typeIs "<nil>" .Values.proxy.traefik.revisionHistoryLimit) }}
+  {{- if typeIs "int" .Values.proxy.traefik.revisionHistoryLimit }}
   revisionHistoryLimit: {{ .Values.proxy.traefik.revisionHistoryLimit }}
   {{- end }}
   replicas: 1
@@ -130,10 +130,10 @@ spec:
           {{- end }}
           args:
             - watch-save
-            - --label=app.kubernetes.io/name={{ include "jupyterhub.appLabel" . }}
-            - --label=app.kubernetes.io/instance={{ .Release.Name }}
-            - --label=helm.sh/chart={{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
-            - --label=app.kubernetes.io/managed-by=secret-sync
+            - --label=app={{ include "jupyterhub.appLabel" . }}
+            - --label=release={{ .Release.Name }}
+            - --label=chart={{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
+            - --label=heritage=secret-sync
             - {{ include "jupyterhub.proxy-public-tls.fullname" . }}
             - acme.json
            - /etc/acme/acme.json


@@ -7,7 +7,7 @@ metadata:
   labels:
     {{- include "jupyterhub.labels" . | nindent 4 }}
 spec:
-  {{- if not (typeIs "<nil>" .Values.proxy.chp.revisionHistoryLimit) }}
+  {{- if typeIs "int" .Values.proxy.chp.revisionHistoryLimit }}
   revisionHistoryLimit: {{ .Values.proxy.chp.revisionHistoryLimit }}
   {{- end }}
   replicas: 1
@@ -100,9 +100,6 @@ spec:
             {{- if .Values.debug.enabled }}
             - --log-level=debug
             {{- end }}
-            {{- if .Values.hub.config.JupyterHub.subdomain_host }}
-            - --host-routing
-            {{- end }}
             {{- range .Values.proxy.chp.extraCommandLineFlags }}
             - {{ tpl . $ }}
            {{- end }}


@@ -16,7 +16,7 @@ metadata:
     {{- include "jupyterhub.labels" . | nindent 4 }}
 spec:
   podManagementPolicy: Parallel
-  {{- if not (typeIs "<nil>" .Values.scheduling.userPlaceholder.revisionHistoryLimit) }}
+  {{- if typeIs "int" .Values.scheduling.userPlaceholder.revisionHistoryLimit }}
   revisionHistoryLimit: {{ .Values.scheduling.userPlaceholder.revisionHistoryLimit }}
   {{- end }}
   replicas: {{ .Values.scheduling.userPlaceholder.replicas }}


@@ -10,14 +10,26 @@ data:
     This is configuration of a k8s official kube-scheduler binary running in the
     user-scheduler.
 
+    The config version and kube-scheduler binary version has a fallback for k8s
+    clusters versioned v1.23 or lower because:
+
+    - v1 / v1beta3 config requires kube-scheduler binary >=1.25 / >=1.23
+    - kube-scheduler binary >=1.25 requires storage.k8s.io/v1/CSIStorageCapacity
+      available first in k8s >=1.24
+
     ref: https://kubernetes.io/docs/reference/scheduling/config/
     ref: https://kubernetes.io/docs/reference/config-api/kube-scheduler-config.v1/
+    ref: https://kubernetes.io/docs/reference/config-api/kube-scheduler-config.v1beta3/
   */}}
   config.yaml: |
+    {{- if semverCompare ">=1.24.0-0" .Capabilities.KubeVersion.Version }}
     apiVersion: kubescheduler.config.k8s.io/v1
+    {{- else }}
+    apiVersion: kubescheduler.config.k8s.io/v1beta3
+    {{- end }}
     kind: KubeSchedulerConfiguration
     leaderElection:
-      resourceLock: leases
+      resourceLock: endpointsleases
       resourceName: {{ include "jupyterhub.user-scheduler-lock.fullname" . }}
       resourceNamespace: "{{ .Release.Namespace }}"
     profiles:
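The resourceLock change belongs to the same version fallback: endpointsleases is the transitional lock resource accepted by older kube-scheduler binaries, while the plain leases lock kept on the main side assumes the newer scheduler binary pinned there.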


@@ -6,7 +6,7 @@ metadata:
   labels:
     {{- include "jupyterhub.labels" . | nindent 4 }}
 spec:
-  {{- if not (typeIs "<nil>" .Values.scheduling.userScheduler.revisionHistoryLimit) }}
+  {{- if typeIs "int" .Values.scheduling.userScheduler.revisionHistoryLimit }}
   revisionHistoryLimit: {{ .Values.scheduling.userScheduler.revisionHistoryLimit }}
   {{- end }}
   replicas: {{ .Values.scheduling.userScheduler.replicas }}
@@ -50,7 +50,17 @@ spec:
       {{- end }}
       containers:
         - name: kube-scheduler
+          {{- if semverCompare ">=1.24.0-0" .Capabilities.KubeVersion.Version }}
           image: {{ .Values.scheduling.userScheduler.image.name }}:{{ .Values.scheduling.userScheduler.image.tag }}
+          {{- else }}
+          # WARNING: The tag of this image is hardcoded, and the
+          #          "scheduling.userScheduler.image.tag" configuration of the
+          #          Helm chart that generated this resource manifest isn't
+          #          respected. If you install the Helm chart in a k8s cluster
+          #          versioned 1.24 or higher, your configuration will be
+          #          respected.
+          image: {{ .Values.scheduling.userScheduler.image.name }}:v1.23.14
+          {{- end }}
           {{- with .Values.scheduling.userScheduler.image.pullPolicy }}
           imagePullPolicy: {{ . }}
          {{- end }}


@@ -20,12 +20,8 @@ rules:
   #   - changed in 1.21: get/list/watch permission for namespace,
   #     csidrivers, csistoragecapacities was added.
   #   - unchanged between 1.22 and 1.27
-  #   - changed in 1.28: permissions to get/update lock endpoint resource
-  #     removed
-  #   - unchanged between 1.28 and 1.30
-  #   - (1.31 is known to bring some changes below)
   #
-  # ref: https://github.com/kubernetes/kubernetes/blob/v1.30.0/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml#L721-L862
+  # ref: https://github.com/kubernetes/kubernetes/blob/v1.27.0/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml#L736-L892
   - apiGroups:
       - ""
       - events.k8s.io
@@ -50,6 +46,21 @@ rules:
     verbs:
       - get
      - update
+  - apiGroups:
+      - ""
+    resources:
+      - endpoints
+    verbs:
+      - create
+  - apiGroups:
+      - ""
+    resourceNames:
+      - {{ include "jupyterhub.user-scheduler-lock.fullname" . }}
+    resources:
+      - endpoints
+    verbs:
+      - get
+      - update
  - apiGroups:
      - ""
    resources:
@@ -172,9 +183,9 @@ rules:
  # Copied from the system:volume-scheduler ClusterRole of the k8s version
  # matching the kube-scheduler binary we use.
  #
-  # NOTE: These rules have not changed between 1.12 and 1.29.
+  # NOTE: These rules have not changed between 1.12 and 1.27.
  #
-  # ref: https://github.com/kubernetes/kubernetes/blob/v1.29.0/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml#L1283-L1310
+  # ref: https://github.com/kubernetes/kubernetes/blob/v1.27.0/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml#L1311-L1338
  - apiGroups:
      - ""
    resources:

File diff suppressed because one or more lines are too long


@@ -41,7 +41,6 @@ hub:
     annotations: {}
     ports:
       nodePort:
-      appProtocol:
     extraPorts: []
     loadBalancerIP:
   baseUrl: /
@@ -85,21 +84,16 @@ hub:
   extraVolumeMounts: []
   image:
     name: quay.io/jupyterhub/k8s-hub
-    tag: "4.0.0"
+    tag: "3.2.1"
     pullPolicy:
     pullSecrets: []
   resources: {}
   podSecurityContext:
-    runAsNonRoot: true
     fsGroup: 1000
-    seccompProfile:
-      type: "RuntimeDefault"
   containerSecurityContext:
     runAsUser: 1000
     runAsGroup: 1000
     allowPrivilegeEscalation: false
-    capabilities:
-      drop: ["ALL"]
   lifecycle: {}
   loadRoles: {}
   services: {}
@@ -203,20 +197,15 @@ proxy:
   chp:
     revisionHistoryLimit:
     containerSecurityContext:
-      runAsNonRoot: true
       runAsUser: 65534 # nobody user
       runAsGroup: 65534 # nobody group
       allowPrivilegeEscalation: false
-      capabilities:
-        drop: ["ALL"]
-      seccompProfile:
-        type: "RuntimeDefault"
     image:
       name: quay.io/jupyterhub/configurable-http-proxy
       # tag is automatically bumped to new patch versions by the
       # watch-dependencies.yaml workflow.
       #
-      tag: "4.6.2" # https://github.com/jupyterhub/configurable-http-proxy/tags
+      tag: "4.6.1" # https://github.com/jupyterhub/configurable-http-proxy/tags
       pullPolicy:
       pullSecrets: []
     extraCommandLineFlags: []
@@ -261,20 +250,15 @@ proxy:
   traefik:
     revisionHistoryLimit:
     containerSecurityContext:
-      runAsNonRoot: true
       runAsUser: 65534 # nobody user
       runAsGroup: 65534 # nobody group
       allowPrivilegeEscalation: false
-      capabilities:
-        drop: ["ALL"]
-      seccompProfile:
-        type: "RuntimeDefault"
     image:
       name: traefik
       # tag is automatically bumped to new patch versions by the
       # watch-dependencies.yaml workflow.
       #
-      tag: "v3.2.0" # ref: https://hub.docker.com/_/traefik?tab=tags
+      tag: "v2.10.5" # ref: https://hub.docker.com/_/traefik?tab=tags
       pullPolicy:
       pullSecrets: []
     hsts:
@@ -316,17 +300,12 @@ proxy:
     extraPodSpec: {}
   secretSync:
     containerSecurityContext:
-      runAsNonRoot: true
       runAsUser: 65534 # nobody user
       runAsGroup: 65534 # nobody group
       allowPrivilegeEscalation: false
-      capabilities:
-        drop: ["ALL"]
-      seccompProfile:
-        type: "RuntimeDefault"
     image:
       name: quay.io/jupyterhub/k8s-secret-sync
-      tag: "4.0.0"
+      tag: "3.2.1"
       pullPolicy:
       pullSecrets: []
     resources: {}
@@ -366,7 +345,7 @@ singleuser:
   networkTools:
     image:
       name: quay.io/jupyterhub/k8s-network-tools
-      tag: "4.0.0"
+      tag: "3.2.1"
       pullPolicy:
       pullSecrets: []
     resources: {}
@@ -413,13 +392,12 @@ singleuser:
     homeMountPath: /home/jovyan
     dynamic:
       storageClass:
-      pvcNameTemplate:
-      volumeNameTemplate: volume-{user_server}
+      pvcNameTemplate: claim-{username}{servername}
+      volumeNameTemplate: volume-{username}{servername}
       storageAccessModes: [ReadWriteOnce]
-      subPath:
   image:
     name: quay.io/jupyterhub/k8s-singleuser-sample
-    tag: "4.0.0"
+    tag: "3.2.1"
     pullPolicy:
     pullSecrets: []
   startTimeout: 300
@@ -454,24 +432,19 @@ scheduling:
     #
     plugins:
       score:
-        # We make use of the default scoring plugins, but we re-enable some with
-        # a new priority, leave some enabled with their lower default priority,
-        # and disable some.
+        # These scoring plugins are enabled by default according to
+        # https://kubernetes.io/docs/reference/scheduling/config/#scheduling-plugins
+        # 2022-02-22.
         #
-        # Below are the default scoring plugins as of 2024-09-23 according to
-        # https://kubernetes.io/docs/reference/scheduling/config/#scheduling-plugins.
-        #
-        # Re-enabled with high priority:
+        # Enabled with high priority:
         # - NodeAffinity
        # - InterPodAffinity
        # - NodeResourcesFit
        # - ImageLocality
-        #
        # Remains enabled with low default priority:
        # - TaintToleration
        # - PodTopologySpread
        # - VolumeBinding
-        #
        # Disabled for scoring:
        # - NodeResourcesBalancedAllocation
        #
@@ -500,25 +473,20 @@ scheduling:
        - name: NodeResourcesFit
          args:
            scoringStrategy:
-              type: MostAllocated
              resources:
                - name: cpu
                  weight: 1
                - name: memory
                  weight: 1
+              type: MostAllocated
    containerSecurityContext:
-      runAsNonRoot: true
      runAsUser: 65534 # nobody user
      runAsGroup: 65534 # nobody group
      allowPrivilegeEscalation: false
-      capabilities:
-        drop: ["ALL"]
-      seccompProfile:
-        type: "RuntimeDefault"
    image:
-      # IMPORTANT: Bumping the minor version of this binary should go hand in
-      #            hand with an inspection of the user-scheduelr's RBAC
-      #            resources that we have forked in
+      # IMPORTANT: Bumping the minor version of this binary should go hand in
+      #            hand with an inspection of the user-scheduelrs RBAC resources
+      #            that we have forked in
      #            templates/scheduling/user-scheduler/rbac.yaml.
      #
      #            Debugging advice:
@@ -551,7 +519,7 @@ scheduling:
      #            here. We aim to stay around 1 minor version behind the latest k8s
      #            version.
      #
-      tag: "v1.30.6" # ref: https://github.com/kubernetes/kubernetes/tree/master/CHANGELOG
+      tag: "v1.26.11" # ref: https://github.com/kubernetes/kubernetes/tree/master/CHANGELOG
      pullPolicy:
      pullSecrets: []
    nodeSelector: {}
@@ -583,7 +551,7 @@ scheduling:
      #
      # If you update this, also update prePuller.pause.image.tag
      #
-      tag: "3.10"
+      tag: "3.9"
      pullPolicy:
      pullSecrets: []
    revisionHistoryLimit:
@@ -591,14 +559,9 @@ scheduling:
    labels: {}
    annotations: {}
    containerSecurityContext:
-      runAsNonRoot: true
      runAsUser: 65534 # nobody user
      runAsGroup: 65534 # nobody group
      allowPrivilegeEscalation: false
-      capabilities:
-        drop: ["ALL"]
-      seccompProfile:
-        type: "RuntimeDefault"
    resources: {}
  corePods:
    tolerations:
@@ -632,14 +595,9 @@ prePuller:
    annotations: {}
    resources: {}
    containerSecurityContext:
-      runAsNonRoot: true
      runAsUser: 65534 # nobody user
      runAsGroup: 65534 # nobody group
      allowPrivilegeEscalation: false
-      capabilities:
-        drop: ["ALL"]
-      seccompProfile:
-        type: "RuntimeDefault"
    extraTolerations: []
  # hook relates to the hook-image-awaiter Job and hook-image-puller DaemonSet
  hook:
@@ -648,18 +606,13 @@ prePuller:
    # image and the configuration below relates to the hook-image-awaiter Job
    image:
      name: quay.io/jupyterhub/k8s-image-awaiter
-      tag: "4.0.0"
+      tag: "3.2.1"
      pullPolicy:
      pullSecrets: []
    containerSecurityContext:
-      runAsNonRoot: true
      runAsUser: 65534 # nobody user
      runAsGroup: 65534 # nobody group
      allowPrivilegeEscalation: false
-      capabilities:
-        drop: ["ALL"]
-      seccompProfile:
-        type: "RuntimeDefault"
    podSchedulingWaitDuration: 10
    nodeSelector: {}
    tolerations: []
@@ -674,14 +627,9 @@ prePuller:
  extraImages: {}
  pause:
    containerSecurityContext:
-      runAsNonRoot: true
      runAsUser: 65534 # nobody user
      runAsGroup: 65534 # nobody group
      allowPrivilegeEscalation: false
-      capabilities:
-        drop: ["ALL"]
-      seccompProfile:
-        type: "RuntimeDefault"
    image:
      name: registry.k8s.io/pause
      # tag is automatically bumped to new patch versions by the
@@ -689,7 +637,7 @@ prePuller:
      #
      # If you update this, also update scheduling.userPlaceholder.image.tag
      #
-      tag: "3.10"
+      tag: "3.9"
      pullPolicy:
      pullSecrets: []
@@ -701,7 +649,6 @@ ingress:
  pathSuffix:
  pathType: Prefix
  tls: []
-  extraPaths: []

# cull relates to the jupyterhub-idle-culler service, responsible for evicting
# inactive singleuser pods.


@@ -1,15 +0,0 @@
-apiVersion: cert-manager.io/v1
-kind: Issuer
-metadata:
-  name: letsencrypt
-spec:
-  acme:
-    server: https://acme-v02.api.letsencrypt.org/directory
-    email: drive@sunet.se
-    privateKeySecretRef:
-      name: letsencrypt
-    solvers:
-      - http01:
-          ingress:
-            class: nginx


@@ -4,6 +4,7 @@ kind: Ingress
 metadata:
   name: jupyterhub-ingress
   annotations:
+    kubernetes.io/ingress.class: traefik
     traefik.ingress.kubernetes.io/router.entrypoints: websecure
     traefik.ingress.kubernetes.io/router.tls: "true"
 spec:


@@ -1,4 +1,4 @@
 ---
-resources: [jupyterhub-ingress.yml, jupyterhub-service.yml, jupyterhub-cert-issuer.yaml]
+resources: [jupyterhub-ingress.yml, jupyterhub-service.yml]
 apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization


@@ -1,28 +0,0 @@
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  name: jupyterhub-ingress
-  annotations:
-    kubernetes.io/ingress.class: nginx
-spec:
-  defaultBackend:
-    service:
-      name: proxy-public
-      port:
-        number: 80
-  ingressClassName: nginx
-  tls:
-    - hosts:
-        - vr-jupyter.drive.sunet.se
-      secretName: prod-tls-secret
-  rules:
-    - host: vr-jupyter.drive.sunet.se
-      http:
-        paths:
-          - path: /
-            pathType: Prefix
-            backend:
-              service:
-                name: proxy-public
-                port:
-                  number: 80


@@ -1,24 +0,0 @@
----
-apiVersion: v1
-items:
-  - apiVersion: v1
-    kind: Service
-    metadata:
-      labels:
-        app: jupyterhub-node
-      name: jupyterhub-node
-    spec:
-      ports:
-        - port: 8080
-          protocol: TCP
-          targetPort: 8080
-      selector:
-        app: jupyterhub-node
-      sessionAffinity: None
-      type: ClusterIP
-    status:
-      loadBalancer: {}
-kind: List
-metadata:
-  resourceVersion: ""
-  selfLink: ""


@@ -1,16 +0,0 @@
----
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-resources: [../../../base/]
-helmCharts:
-  - includeCRDs: true
-    name: jupyterhub
-    releaseName: vr-jupyterhub
-    valuesFile: ./values/values.yaml
-    version: 3.2.1
-    namespace: vr-jupyterhub
-helmGlobals:
-  chartHome: ../../../base/charts/
-patches:
-  - path: jupyterhub-ingress.yml
-  - path: jupyterhub-service.yml
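Overlays like this one inflate the vendored chart at render time, so building them requires kustomize's Helm support to be enabled (kustomize build --enable-helm, or the equivalent flag on recent kubectl); without it the helmCharts field is typically rejected.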


@@ -1,335 +0,0 @@
-debug:
-  enabled: true
-hub:
-  config:
-    Authenticator:
-      auto_login: true
-      enable_auth_state: true
-    JupyterHub:
-      tornado_settings:
-        headers: { 'Content-Security-Policy': "frame-ancestors *;" }
-  db:
-    pvc:
-      storageClassName: csi-sc-cinderplugin
-  extraConfig:
-    oauthCode: |
-      import time
-      import requests
-      from datetime import datetime
-      from oauthenticator.generic import GenericOAuthenticator
-      token_url = 'https://' + os.environ['NEXTCLOUD_HOST'] + '/index.php/apps/oauth2/api/v1/token'
-      debug = os.environ.get('NEXTCLOUD_DEBUG_OAUTH', 'false').lower() in ['true', '1', 'yes']
-      def get_nextcloud_access_token(refresh_token):
-          client_id = os.environ['NEXTCLOUD_CLIENT_ID']
-          client_secret = os.environ['NEXTCLOUD_CLIENT_SECRET']
-          code = refresh_token
-          data = {
-              'grant_type': 'refresh_token',
-              'code': code,
-              'refresh_token': refresh_token,
-              'client_id': client_id,
-              'client_secret': client_secret
-          }
-          response = requests.post(token_url, data=data)
-          if debug:
-              print(response.text)
-          return response.json()
-      def post_auth_hook(authenticator, handler, authentication):
-          user = authentication['auth_state']['oauth_user']['ocs']['data']['id']
-          auth_state = authentication['auth_state']
-          auth_state['token_expires'] = time.time() + auth_state['token_response']['expires_in']
-          authentication['auth_state'] = auth_state
-          return authentication
-      class NextcloudOAuthenticator(GenericOAuthenticator):
-          def __init__(self, *args, **kwargs):
-              super().__init__(*args, **kwargs)
-              self.user_dict = {}
-          async def pre_spawn_start(self, user, spawner):
-              super().pre_spawn_start(user, spawner)
-              auth_state = await user.get_auth_state()
-              if not auth_state:
-                  return
-              access_token = auth_state['access_token']
-              spawner.environment['NEXTCLOUD_ACCESS_TOKEN'] = access_token
-          async def refresh_user(self, user, handler=None):
-              auth_state = await user.get_auth_state()
-              if not auth_state:
-                  if debug:
-                      print(f'auth_state missing for {user}')
-                  return False
-              access_token = auth_state['access_token']
-              refresh_token = auth_state['refresh_token']
-              token_response = auth_state['token_response']
-              now = time.time()
-              now_hr = datetime.fromtimestamp(now)
-              expires = auth_state['token_expires']
-              expires_hr = datetime.fromtimestamp(expires)
-              expires = 0
-              if debug:
-                  print(f'auth_state for {user}: {auth_state}')
-              if now >= expires:
-                  if debug:
-                      print(f'Time is: {now_hr}, token expired: {expires_hr}')
-                      print(f'Refreshing token for {user}')
-                  try:
-                      token_response = get_nextcloud_access_token(refresh_token)
-                      auth_state['access_token'] = token_response['access_token']
-                      auth_state['refresh_token'] = token_response['refresh_token']
-                      auth_state['token_expires'] = now + token_response['expires_in']
-                      auth_state['token_response'] = token_response
-                      if debug:
-                          print(f'Successfully refreshed token for {user.name}')
-                          print(f'auth_state for {user.name}: {auth_state}')
-                      return {'name': user.name, 'auth_state': auth_state}
-                  except Exception as e:
-                      if debug:
-                          print(f'Failed to refresh token for {user}')
-                      return False
-                  return False
-              if debug:
-                  print(f'Time is: {now_hr}, token expires: {expires_hr}')
-              return True
-      c.JupyterHub.authenticator_class = NextcloudOAuthenticator
-      c.NextcloudOAuthenticator.client_id = os.environ['NEXTCLOUD_CLIENT_ID']
-      c.NextcloudOAuthenticator.client_secret = os.environ['NEXTCLOUD_CLIENT_SECRET']
-      c.NextcloudOAuthenticator.login_service = 'Sunet Drive'
-      c.NextcloudOAuthenticator.username_claim = lambda r: r.get('ocs', {}).get('data', {}).get('id')
-      c.NextcloudOAuthenticator.userdata_url = 'https://' + os.environ['NEXTCLOUD_HOST'] + '/ocs/v2.php/cloud/user?format=json'
-      c.NextcloudOAuthenticator.authorize_url = 'https://' + os.environ['NEXTCLOUD_HOST'] + '/index.php/apps/oauth2/authorize'
-      c.NextcloudOAuthenticator.token_url = token_url
-      c.NextcloudOAuthenticator.oauth_callback_url = 'https://' + os.environ['JUPYTER_HOST'] + '/hub/oauth_callback'
-      c.NextcloudOAuthenticator.allow_all = True
-      c.NextcloudOAuthenticator.refresh_pre_spawn = True
-      c.NextcloudOAuthenticator.enable_auth_state = True
-      c.NextcloudOAuthenticator.auth_refresh_age = 3600
-      c.NextcloudOAuthenticator.post_auth_hook = post_auth_hook
-    serviceCode: |
-      import sys
-      c.JupyterHub.load_roles = [
-          {
-              "name": "refresh-token",
-              "services": [
-                  "refresh-token"
-              ],
-              "scopes": [
-                  "read:users",
-                  "admin:auth_state"
-              ]
-          },
-          {
-              "name": "user",
-              "scopes": [
-                  "access:services!service=refresh-token",
-                  "read:services!service=refresh-token",
-                  "self",
-              ],
-          },
-          {
-              "name": "server",
-              "scopes": [
-                  "access:services!service=refresh-token",
-                  "read:services!service=refresh-token",
-                  "inherit",
-              ],
-          }
-      ]
-      c.JupyterHub.services = [
-          {
-              'name': 'refresh-token',
-              'url': 'http://' + os.environ.get('HUB_SERVICE_HOST', 'hub') + ':' + os.environ.get('HUB_SERVICE_PORT_REFRESH_TOKEN', '8082'),
-              'display': False,
-              'oauth_no_confirm': True,
-              'api_token': os.environ['JUPYTERHUB_API_KEY'],
-              'command': [sys.executable, '/usr/local/etc/jupyterhub/refresh-token.py']
-          }
-      ]
-      c.JupyterHub.admin_users = {"refresh-token"}
-      c.JupyterHub.api_tokens = {
-          os.environ['JUPYTERHUB_API_KEY']: "refresh-token",
-      }
-  extraFiles:
-    refresh-token.py:
-      mountPath: /usr/local/etc/jupyterhub/refresh-token.py
-      stringData: |
-        """A token refresh service authenticating with the Hub.
-        This service serves `/services/refresh-token/`,
-        authenticated with the Hub,
-        showing the user their own info.
-        """
-        import json
-        import os
-        import requests
-        import socket
-        from jupyterhub.services.auth import HubAuthenticated
-        from jupyterhub.utils import url_path_join
-        from tornado.httpserver import HTTPServer
-        from tornado.ioloop import IOLoop
-        from tornado.web import Application, HTTPError, RequestHandler, authenticated
-        from urllib.parse import urlparse
-        debug = os.environ.get('NEXTCLOUD_DEBUG_OAUTH', 'false').lower() in ['true', '1', 'yes']
-        def my_debug(s):
-            if debug:
-                with open("/proc/1/fd/1", "a") as stdout:
-                    print(s, file=stdout)
-        class RefreshHandler(HubAuthenticated, RequestHandler):
-            def api_request(self, method, url, **kwargs):
-                my_debug(f'{self.hub_auth}')
-                url = url_path_join(self.hub_auth.api_url, url)
-                allow_404 = kwargs.pop('allow_404', False)
-                headers = kwargs.setdefault('headers', {})
-                headers.setdefault('Authorization', f'token {self.hub_auth.api_token}')
-                try:
-                    r = requests.request(method, url, **kwargs)
-                except requests.ConnectionError as e:
-                    my_debug(f'Error connecting to {url}: {e}')
-                    msg = f'Failed to connect to Hub API at {url}.'
-                    msg += f' Is the Hub accessible at this URL (from host: {socket.gethostname()})?'
-                    if '127.0.0.1' in url:
-                        msg += ' Make sure to set c.JupyterHub.hub_ip to an IP accessible to' + \
-                               ' single-user servers if the servers are not on the same host as the Hub.'
-                    raise HTTPError(500, msg)
-                data = None
-                if r.status_code == 404 and allow_404:
-                    pass
-                elif r.status_code == 403:
-                    my_debug(
-                        'Lacking permission to check authorization with JupyterHub,' +
-                        f' my auth token may have expired: [{r.status_code}] {r.reason}'
-                    )
-                    my_debug(r.text)
-                    raise HTTPError(
-                        500,
-                        'Permission failure checking authorization, I may need a new token'
-                    )
-                elif r.status_code >= 500:
-                    my_debug(f'Upstream failure verifying auth token: [{r.status_code}] {r.reason}')
-                    my_debug(r.text)
-                    raise HTTPError(
-                        502, 'Failed to check authorization (upstream problem)')
-                elif r.status_code >= 400:
-                    my_debug(f'Failed to check authorization: [{r.status_code}] {r.reason}')
-                    my_debug(r.text)
-                    raise HTTPError(500, 'Failed to check authorization')
-                else:
-                    data = r.json()
-                return data
-            @authenticated
-            def get(self):
-                user_model = self.get_current_user()
-                # Fetch current auth state
-                user_data = self.api_request('GET', url_path_join('users', user_model['name']))
-                auth_state = user_data['auth_state']
-                access_token = auth_state['access_token']
-                token_expires = auth_state['token_expires']
-                self.set_header('content-type', 'application/json')
-                self.write(json.dumps({'access_token': access_token, 'token_expires': token_expires}, indent=1, sort_keys=True))
-        class PingHandler(RequestHandler):
-            def get(self):
-                my_debug(f"DEBUG: In ping get")
-                self.set_header('content-type', 'application/json')
-                self.write(json.dumps({'ping': 1}))
-        def main():
-            app = Application([
-                (os.environ['JUPYTERHUB_SERVICE_PREFIX'] + 'tokens', RefreshHandler),
-                (os.environ['JUPYTERHUB_SERVICE_PREFIX'] + '/?', PingHandler),
-            ])
-            http_server = HTTPServer(app)
-            url = urlparse(os.environ['JUPYTERHUB_SERVICE_URL'])
-            http_server.listen(url.port)
-            IOLoop.current().start()
-        if __name__ == '__main__':
-            main()
-  networkPolicy:
-    ingress:
-      - ports:
-          - port: 8082
-        from:
-          - podSelector:
-              matchLabels:
-                hub.jupyter.org/network-access-hub: "true"
-  service:
-    extraPorts:
-      - port: 8082
-        targetPort: 8082
-        name: refresh-token
-  extraEnv:
-    NEXTCLOUD_DEBUG_OAUTH: "no"
-    NEXTCLOUD_HOST: vr.drive.sunet.se
-    JUPYTER_HOST: vr-jupyter.drive.sunet.se
-    JUPYTERHUB_API_KEY:
-      valueFrom:
-        secretKeyRef:
-          name: jupyterhub-secrets
-          key: api-key
-    JUPYTERHUB_CRYPT_KEY:
-      valueFrom:
-        secretKeyRef:
-          name: jupyterhub-secrets
-          key: crypt-key
-    NEXTCLOUD_CLIENT_ID:
-      valueFrom:
-        secretKeyRef:
-          name: nextcloud-oauth-secrets
-          key: client-id
-    NEXTCLOUD_CLIENT_SECRET:
-      valueFrom:
-        secretKeyRef:
-          name: nextcloud-oauth-secrets
-          key: client-secret
-proxy:
-  chp:
-    networkPolicy:
-      egress:
-        - to:
-            - podSelector:
-                matchLabels:
-                  app: jupyterhub
-                  component: hub
-          ports:
-            - port: 8082
-singleuser:
-  image:
-    name: docker.sunet.se/drive/jupyter-custom
-    tag: lab-4.0.10-sunet4
-  storage:
-    dynamic:
-      storageClass: csi-sc-cinderplugin
-  extraEnv:
-    JUPYTER_ENABLE_LAB: "yes"
-    JUPYTER_HOST: vr-jupyter.drive.sunet.se
-    NEXTCLOUD_HOST: vr.drive.sunet.se
-  extraFiles:
-    jupyter_notebook_config:
-      mountPath: /home/jovyan/.jupyter/jupyter_server_config.py
-      stringData: |
-        import os
-        c = get_config()
-        c.NotebookApp.allow_origin = '*'
-        c.NotebookApp.tornado_settings = {
-            'headers': { 'Content-Security-Policy': "frame-ancestors *;" }
-        }
-        os.system('/usr/local/bin/nc-sync')
-      mode: 0644
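The refresh-token service wired up in this (deleted) values file is what notebook servers call to obtain a fresh Nextcloud token. A minimal client sketch, assuming the vr-jupyter.drive.sunet.se host configured above and the API token JupyterHub injects into every single-user server:

    import os
    import requests

    # JUPYTERHUB_API_TOKEN is set by JupyterHub in each spawned server; the
    # service authenticates the request against the Hub before answering.
    resp = requests.get(
        'https://vr-jupyter.drive.sunet.se/services/refresh-token/tokens',
        headers={'Authorization': 'token ' + os.environ['JUPYTERHUB_API_TOKEN']},
    )
    resp.raise_for_status()
    print(resp.json())  # {'access_token': '...', 'token_expires': ...}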


@@ -1,32 +0,0 @@
----
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  name: jupyterhub-ingress
-  annotations:
-    cert-manager.io/issuer: "letsencrypt"
-    acme.cert-manager.io/http01-edit-in-place: "true"
-    kubernetes.io/ingress.class: nginx
-spec:
-  ingressClassName: nginx
-  defaultBackend:
-    service:
-      name: proxy-public
-      port:
-        number: 80
-  tls:
-    - hosts:
-        - sunet-jupyter.drive.sunet.se
-      secretName: tls-secret
-  rules:
-    - host: sunet-jupyter.drive.sunet.se
-      http:
-        paths:
-          - path: /
-            pathType: Prefix
-            backend:
-              service:
-                name: proxy-public
-                port:
-                  number: 80


@@ -1,24 +0,0 @@
----
-apiVersion: v1
-items:
-  - apiVersion: v1
-    kind: Service
-    metadata:
-      labels:
-        app: jupyterhub-node
-      name: jupyterhub-node
-    spec:
-      ports:
-        - port: 8080
-          protocol: TCP
-          targetPort: 8080
-      selector:
-        app: jupyterhub-node
-      sessionAffinity: None
-      type: ClusterIP
-    status:
-      loadBalancer: {}
-kind: List
-metadata:
-  resourceVersion: ""
-  selfLink: ""


@@ -1,16 +0,0 @@
----
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-resources: [../../../base/]
-helmCharts:
-  - includeCRDs: true
-    name: jupyterhub
-    releaseName: sunet-jupyterhub
-    valuesFile: ./values/values.yaml
-    version: 3.2.1
-    namespace: sunet-jupyterhub
-helmGlobals:
-  chartHome: ../../../base/charts/
-patches:
-  - path: jupyterhub-ingress.yml
-  - path: jupyterhub-service.yml


@@ -1,337 +0,0 @@
-debug:
-  enabled: true
-hub:
-  config:
-    Authenticator:
-      auto_login: true
-      enable_auth_state: true
-    JupyterHub:
-      tornado_settings:
-        headers: { 'Content-Security-Policy': "frame-ancestors *;" }
-  db:
-    pvc:
-      storageClassName: csi-sc-cinderplugin
-  extraConfig:
-    oauthCode: |
-      import time
-      import requests
-      from datetime import datetime
-      from oauthenticator.generic import GenericOAuthenticator
-      token_url = 'https://' + os.environ['NEXTCLOUD_HOST'] + '/index.php/apps/oauth2/api/v1/token'
-      debug = os.environ.get('NEXTCLOUD_DEBUG_OAUTH', 'false').lower() in ['true', '1', 'yes']
-      def get_nextcloud_access_token(refresh_token):
-          client_id = os.environ['NEXTCLOUD_CLIENT_ID']
-          client_secret = os.environ['NEXTCLOUD_CLIENT_SECRET']
-          code = refresh_token
-          data = {
-              'grant_type': 'refresh_token',
-              'code': code,
-              'refresh_token': refresh_token,
-              'client_id': client_id,
-              'client_secret': client_secret
-          }
-          response = requests.post(token_url, data=data)
-          if debug:
-              print(response.text)
-          return response.json()
-      def post_auth_hook(authenticator, handler, authentication):
-          user = authentication['auth_state']['oauth_user']['ocs']['data']['id']
-          auth_state = authentication['auth_state']
-          auth_state['token_expires'] = time.time() + auth_state['token_response']['expires_in']
-          authentication['auth_state'] = auth_state
-          return authentication
-      class NextcloudOAuthenticator(GenericOAuthenticator):
-          def __init__(self, *args, **kwargs):
-              super().__init__(*args, **kwargs)
-              self.user_dict = {}
-          async def pre_spawn_start(self, user, spawner):
-              super().pre_spawn_start(user, spawner)
-              auth_state = await user.get_auth_state()
-              if not auth_state:
-                  return
-              access_token = auth_state['access_token']
-              spawner.environment['NEXTCLOUD_ACCESS_TOKEN'] = access_token
-          async def refresh_user(self, user, handler=None):
-              auth_state = await user.get_auth_state()
-              if not auth_state:
-                  if debug:
-                      print(f'auth_state missing for {user}')
-                  return False
-              access_token = auth_state['access_token']
-              refresh_token = auth_state['refresh_token']
-              token_response = auth_state['token_response']
-              now = time.time()
-              now_hr = datetime.fromtimestamp(now)
-              expires = auth_state['token_expires']
-              expires_hr = datetime.fromtimestamp(expires)
-              expires = 0
-              if debug:
-                  print(f'auth_state for {user}: {auth_state}')
-              if now >= expires:
-                  if debug:
-                      print(f'Time is: {now_hr}, token expired: {expires_hr}')
-                      print(f'Refreshing token for {user}')
-                  try:
-                      token_response = get_nextcloud_access_token(refresh_token)
-                      auth_state['access_token'] = token_response['access_token']
-                      auth_state['refresh_token'] = token_response['refresh_token']
-                      auth_state['token_expires'] = now + token_response['expires_in']
-                      auth_state['token_response'] = token_response
-                      if debug:
-                          print(f'Successfully refreshed token for {user.name}')
-                          print(f'auth_state for {user.name}: {auth_state}')
-                      return {'name': user.name, 'auth_state': auth_state}
-                  except Exception as e:
-                      if debug:
-                          print(f'Failed to refresh token for {user}')
-                      return False
-                  return False
-              if debug:
-                  print(f'Time is: {now_hr}, token expires: {expires_hr}')
-              return True
-      c.JupyterHub.authenticator_class = NextcloudOAuthenticator
-      c.NextcloudOAuthenticator.client_id = os.environ['NEXTCLOUD_CLIENT_ID']
-      c.NextcloudOAuthenticator.client_secret = os.environ['NEXTCLOUD_CLIENT_SECRET']
-      c.NextcloudOAuthenticator.login_service = 'Sunet Drive'
-      c.NextcloudOAuthenticator.username_claim = lambda r: r.get('ocs', {}).get('data', {}).get('id')
-      c.NextcloudOAuthenticator.userdata_url = 'https://' + os.environ['NEXTCLOUD_HOST'] + '/ocs/v2.php/cloud/user?format=json'
-      c.NextcloudOAuthenticator.authorize_url = 'https://' + os.environ['NEXTCLOUD_HOST'] + '/index.php/apps/oauth2/authorize'
-      c.NextcloudOAuthenticator.token_url = token_url
-      c.NextcloudOAuthenticator.oauth_callback_url = 'https://' + os.environ['JUPYTER_HOST'] + '/hub/oauth_callback'
-      c.NextcloudOAuthenticator.allow_all = True
-      c.NextcloudOAuthenticator.refresh_pre_spawn = True
-      c.NextcloudOAuthenticator.enable_auth_state = True
-      c.NextcloudOAuthenticator.auth_refresh_age = 3600
-      c.NextcloudOAuthenticator.post_auth_hook = post_auth_hook
-    serviceCode: |
-      import sys
-      c.JupyterHub.load_roles = [
-          {
-              "name": "refresh-token",
-              "services": [
-                  "refresh-token"
-              ],
-              "scopes": [
-                  "read:users",
-                  "admin:auth_state"
-              ]
-          },
-          {
-              "name": "user",
-              "scopes": [
-                  "access:services!service=refresh-token",
-                  "read:services!service=refresh-token",
-                  "self",
-              ],
-          },
-          {
-              "name": "server",
-              "scopes": [
-                  "access:services!service=refresh-token",
-                  "read:services!service=refresh-token",
-                  "inherit",
-              ],
-          }
-      ]
-      c.JupyterHub.services = [
-          {
-              'name': 'refresh-token',
-              'url': 'http://' + os.environ.get('HUB_SERVICE_HOST', 'hub') + ':' + os.environ.get('HUB_SERVICE_PORT_REFRESH_TOKEN', '8082'),
-              'display': False,
-              'oauth_no_confirm': True,
-              'api_token': os.environ['JUPYTERHUB_API_KEY'],
-              'command': [sys.executable, '/usr/local/etc/jupyterhub/refresh-token.py']
-          }
-      ]
-      c.JupyterHub.admin_users = {"refresh-token"}
c.JupyterHub.api_tokens = {
os.environ['JUPYTERHUB_API_KEY']: "refresh-token",
}
extraFiles:
refresh-token.py:
mountPath: /usr/local/etc/jupyterhub/refresh-token.py
stringData: |
"""A token refresh service authenticating with the Hub.
This service serves `/services/refresh-token/`,
authenticated with the Hub,
showing the user their own info.
"""
import json
import os
import requests
import socket
from jupyterhub.services.auth import HubAuthenticated
from jupyterhub.utils import url_path_join
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.web import Application, HTTPError, RequestHandler, authenticated
from urllib.parse import urlparse
debug = os.environ.get('NEXTCLOUD_DEBUG_OAUTH', 'false').lower() in ['true', '1', 'yes']
def my_debug(s):
if debug:
with open("/proc/1/fd/1", "a") as stdout:
print(s, file=stdout)
class RefreshHandler(HubAuthenticated, RequestHandler):
def api_request(self, method, url, **kwargs):
my_debug(f'{self.hub_auth}')
url = url_path_join(self.hub_auth.api_url, url)
allow_404 = kwargs.pop('allow_404', False)
headers = kwargs.setdefault('headers', {})
headers.setdefault('Authorization', f'token {self.hub_auth.api_token}')
try:
r = requests.request(method, url, **kwargs)
except requests.ConnectionError as e:
my_debug(f'Error connecting to {url}: {e}')
msg = f'Failed to connect to Hub API at {url}.'
msg += f' Is the Hub accessible at this URL (from host: {socket.gethostname()})?'
if '127.0.0.1' in url:
msg += ' Make sure to set c.JupyterHub.hub_ip to an IP accessible to' + \
' single-user servers if the servers are not on the same host as the Hub.'
raise HTTPError(500, msg)
data = None
if r.status_code == 404 and allow_404:
pass
elif r.status_code == 403:
my_debug(
'Lacking permission to check authorization with JupyterHub,' +
f' my auth token may have expired: [{r.status_code}] {r.reason}'
)
my_debug(r.text)
raise HTTPError(
500,
'Permission failure checking authorization, I may need a new token'
)
elif r.status_code >= 500:
my_debug(f'Upstream failure verifying auth token: [{r.status_code}] {r.reason}')
my_debug(r.text)
raise HTTPError(
502, 'Failed to check authorization (upstream problem)')
elif r.status_code >= 400:
my_debug(f'Failed to check authorization: [{r.status_code}] {r.reason}')
my_debug(r.text)
raise HTTPError(500, 'Failed to check authorization')
else:
data = r.json()
return data
@authenticated
def get(self):
user_model = self.get_current_user()
# Fetch current auth state
user_data = self.api_request('GET', url_path_join('users', user_model['name']))
auth_state = user_data['auth_state']
access_token = auth_state['access_token']
token_expires = auth_state['token_expires']
self.set_header('content-type', 'application/json')
self.write(json.dumps({'access_token': access_token, 'token_expires': token_expires}, indent=1, sort_keys=True))
class PingHandler(RequestHandler):
def get(self):
my_debug(f"DEBUG: In ping get")
self.set_header('content-type', 'application/json')
self.write(json.dumps({'ping': 1}))
def main():
app = Application([
(os.environ['JUPYTERHUB_SERVICE_PREFIX'] + 'tokens', RefreshHandler),
(os.environ['JUPYTERHUB_SERVICE_PREFIX'] + '/?', PingHandler),
])
http_server = HTTPServer(app)
url = urlparse(os.environ['JUPYTERHUB_SERVICE_URL'])
http_server.listen(url.port)
IOLoop.current().start()
if __name__ == '__main__':
main()
networkPolicy:
ingress:
- ports:
- port: 8082
from:
- podSelector:
matchLabels:
hub.jupyter.org/network-access-hub: "true"
service:
extraPorts:
- port: 8082
targetPort: 8082
name: refresh-token
extraEnv:
NEXTCLOUD_DEBUG_OAUTH: "no"
NEXTCLOUD_HOST: sunet.drive.sunet.se
JUPYTER_HOST: sunet-jupyter.drive.sunet.se
JUPYTERHUB_API_KEY:
valueFrom:
secretKeyRef:
name: jupyterhub-secrets
key: api-key
JUPYTERHUB_CRYPT_KEY:
valueFrom:
secretKeyRef:
name: jupyterhub-secrets
key: crypt-key
NEXTCLOUD_CLIENT_ID:
valueFrom:
secretKeyRef:
name: nextcloud-oauth-secrets
key: client-id
NEXTCLOUD_CLIENT_SECRET:
valueFrom:
secretKeyRef:
name: nextcloud-oauth-secrets
key: client-secret
networkPolicy:
enabled: false
proxy:
chp:
networkPolicy:
egress:
- to:
- podSelector:
matchLabels:
app: jupyterhub
component: hub
ports:
- port: 8082
singleuser:
image:
name: docker.sunet.se/drive/jupyter-custom
tag: lab-4.0.10-sunet5
storage:
dynamic:
storageClass: csi-sc-cinderplugin
extraEnv:
JUPYTER_ENABLE_LAB: "yes"
JUPYTER_HOST: sunet-jupyter.drive.sunet.se
NEXTCLOUD_HOST: sunet.drive.sunet.se
extraFiles:
jupyter_notebook_config:
mountPath: /home/jovyan/.jupyter/jupyter_server_config.py
stringData: |
import os
c = get_config()
c.NotebookApp.allow_origin = '*'
c.NotebookApp.tornado_settings = {
'headers': { 'Content-Security-Policy': "frame-ancestors *;" }
}
os.system('/usr/local/bin/nc-sync')
mode: 0644
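Note: the refresh-token service wired up above listens on port 8082 in the hub pod and serves two routes under its service prefix: the root as a ping, and tokens, which returns the caller's current access_token and token_expires from auth_state. Because the user role grants access:services!service=refresh-token, a single-user server's own API token is sufficient. A minimal client sketch, assuming it runs inside a spawned notebook pod where JUPYTERHUB_API_TOKEN is set automatically:

# Sketch: query the refresh-token service for the caller's Nextcloud token.
# JUPYTER_HOST falls back to this overlay's hostname; inside a single-user pod
# JUPYTERHUB_API_TOKEN is provided by JupyterHub.
import os
import requests

hub_host = os.environ.get("JUPYTER_HOST", "sunet-jupyter.drive.sunet.se")
api_token = os.environ["JUPYTERHUB_API_TOKEN"]

resp = requests.get(
    f"https://{hub_host}/services/refresh-token/tokens",
    headers={"Authorization": f"token {api_token}"},
    timeout=10,
)
resp.raise_for_status()
data = resp.json()
print(data["access_token"], data["token_expires"])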

View file

@ -1,30 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: jupyterhub-ingress
annotations:
cert-manager.io/issuer: "letsencrypt"
acme.cert-manager.io/http01-edit-in-place: "true"
spec:
defaultBackend:
service:
name: proxy-public
port:
number: 8443
tls:
- hosts:
- vr-jupyter.drive.sunet.se
secretName: tls-secret
ingressClassName: nginx
rules:
- host: vr-jupyter.drive.sunet.se
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: proxy-public
port:
number: 80

View file

@ -1,25 +0,0 @@
---
apiVersion: v1
items:
- apiVersion: v1
kind: Service
metadata:
labels:
app: jupyterhub-node
name: jupyterhub-node
spec:
ports:
- port: 8080
protocol: TCP
targetPort: 8080
selector:
app: jupyterhub-node
sessionAffinity: None
type: ClusterIP
status:
loadBalancer: {}
kind: List
metadata:
resourceVersion: ""
selfLink: ""

View file

@ -1,16 +0,0 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources: [../../../base/]
helmCharts:
- includeCRDs: true
name: jupyterhub
releaseName: vr-jupyterhub
valuesFile: ./values/values.yaml
version: 4.0.0
namespace: vr-jupyterhub
helmGlobals:
chartHome: ../../../base/charts/
patches:
- path: jupyterhub-ingress.yml
- path: jupyterhub-service.yml

View file

@ -1,337 +0,0 @@
debug:
enabled: true
hub:
config:
Authenticator:
auto_login: true
enable_auth_state: true
JupyterHub:
tornado_settings:
headers: { 'Content-Security-Policy': "frame-ancestors *;" }
db:
pvc:
storageClassName: csi-sc-cinderplugin
extraConfig:
oauthCode: |
import time
import requests
from datetime import datetime
from oauthenticator.generic import GenericOAuthenticator
token_url = 'https://' + os.environ['NEXTCLOUD_HOST'] + '/index.php/apps/oauth2/api/v1/token'
debug = os.environ.get('NEXTCLOUD_DEBUG_OAUTH', 'false').lower() in ['true', '1', 'yes']
def get_nextcloud_access_token(refresh_token):
client_id = os.environ['NEXTCLOUD_CLIENT_ID']
client_secret = os.environ['NEXTCLOUD_CLIENT_SECRET']
code = refresh_token
data = {
'grant_type': 'refresh_token',
'code': code,
'refresh_token': refresh_token,
'client_id': client_id,
'client_secret': client_secret
}
response = requests.post(token_url, data=data)
if debug:
print(response.text)
return response.json()
def post_auth_hook(authenticator, handler, authentication):
user = authentication['auth_state']['oauth_user']['ocs']['data']['id']
auth_state = authentication['auth_state']
auth_state['token_expires'] = time.time() + auth_state['token_response']['expires_in']
authentication['auth_state'] = auth_state
return authentication
class NextcloudOAuthenticator(GenericOAuthenticator):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.user_dict = {}
async def pre_spawn_start(self, user, spawner):
super().pre_spawn_start(user, spawner)
auth_state = await user.get_auth_state()
if not auth_state:
return
access_token = auth_state['access_token']
spawner.environment['NEXTCLOUD_ACCESS_TOKEN'] = access_token
async def refresh_user(self, user, handler=None):
auth_state = await user.get_auth_state()
if not auth_state:
if debug:
print(f'auth_state missing for {user}')
return False
access_token = auth_state['access_token']
refresh_token = auth_state['refresh_token']
token_response = auth_state['token_response']
now = time.time()
now_hr = datetime.fromtimestamp(now)
expires = auth_state['token_expires']
expires_hr = datetime.fromtimestamp(expires)
expires = 0
if debug:
print(f'auth_state for {user}: {auth_state}')
if now >= expires:
if debug:
print(f'Time is: {now_hr}, token expired: {expires_hr}')
print(f'Refreshing token for {user}')
try:
token_response = get_nextcloud_access_token(refresh_token)
auth_state['access_token'] = token_response['access_token']
auth_state['refresh_token'] = token_response['refresh_token']
auth_state['token_expires'] = now + token_response['expires_in']
auth_state['token_response'] = token_response
if debug:
print(f'Successfully refreshed token for {user.name}')
print(f'auth_state for {user.name}: {auth_state}')
return {'name': user.name, 'auth_state': auth_state}
except Exception as e:
if debug:
print(f'Failed to refresh token for {user}')
return False
return False
if debug:
print(f'Time is: {now_hr}, token expires: {expires_hr}')
return True
c.JupyterHub.authenticator_class = NextcloudOAuthenticator
c.NextcloudOAuthenticator.client_id = os.environ['NEXTCLOUD_CLIENT_ID']
c.NextcloudOAuthenticator.client_secret = os.environ['NEXTCLOUD_CLIENT_SECRET']
c.NextcloudOAuthenticator.login_service = 'Sunet Drive'
c.NextcloudOAuthenticator.username_claim = lambda r: r.get('ocs', {}).get('data', {}).get('id')
c.NextcloudOAuthenticator.userdata_url = 'https://' + os.environ['NEXTCLOUD_HOST'] + '/ocs/v2.php/cloud/user?format=json'
c.NextcloudOAuthenticator.authorize_url = 'https://' + os.environ['NEXTCLOUD_HOST'] + '/index.php/apps/oauth2/authorize'
c.NextcloudOAuthenticator.token_url = token_url
c.NextcloudOAuthenticator.oauth_callback_url = 'https://' + os.environ['JUPYTER_HOST'] + '/hub/oauth_callback'
c.NextcloudOAuthenticator.allow_all = True
c.NextcloudOAuthenticator.refresh_pre_spawn = True
c.NextcloudOAuthenticator.enable_auth_state = True
c.NextcloudOAuthenticator.auth_refresh_age = 3600
c.NextcloudOAuthenticator.post_auth_hook = post_auth_hook
serviceCode: |
import sys
c.JupyterHub.load_roles = [
{
"name": "refresh-token",
"services": [
"refresh-token"
],
"scopes": [
"read:users",
"admin:auth_state"
]
},
{
"name": "user",
"scopes": [
"access:services!service=refresh-token",
"read:services!service=refresh-token",
"self",
],
},
{
"name": "server",
"scopes": [
"access:services!service=refresh-token",
"read:services!service=refresh-token",
"inherit",
],
}
]
c.JupyterHub.services = [
{
'name': 'refresh-token',
'url': 'http://' + os.environ.get('HUB_SERVICE_HOST', 'hub') + ':' + os.environ.get('HUB_SERVICE_PORT_REFRESH_TOKEN', '8082'),
'display': False,
'oauth_no_confirm': True,
'api_token': os.environ['JUPYTERHUB_API_KEY'],
'command': [sys.executable, '/usr/local/etc/jupyterhub/refresh-token.py']
}
]
c.JupyterHub.admin_users = {"refresh-token"}
c.JupyterHub.api_tokens = {
os.environ['JUPYTERHUB_API_KEY']: "refresh-token",
}
extraFiles:
refresh-token.py:
mountPath: /usr/local/etc/jupyterhub/refresh-token.py
stringData: |
"""A token refresh service authenticating with the Hub.
This service serves `/services/refresh-token/`,
authenticated with the Hub,
showing the user their own info.
"""
import json
import os
import requests
import socket
from jupyterhub.services.auth import HubAuthenticated
from jupyterhub.utils import url_path_join
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.web import Application, HTTPError, RequestHandler, authenticated
from urllib.parse import urlparse
debug = os.environ.get('NEXTCLOUD_DEBUG_OAUTH', 'false').lower() in ['true', '1', 'yes']
def my_debug(s):
if debug:
with open("/proc/1/fd/1", "a") as stdout:
print(s, file=stdout)
class RefreshHandler(HubAuthenticated, RequestHandler):
def api_request(self, method, url, **kwargs):
my_debug(f'{self.hub_auth}')
url = url_path_join(self.hub_auth.api_url, url)
allow_404 = kwargs.pop('allow_404', False)
headers = kwargs.setdefault('headers', {})
headers.setdefault('Authorization', f'token {self.hub_auth.api_token}')
try:
r = requests.request(method, url, **kwargs)
except requests.ConnectionError as e:
my_debug(f'Error connecting to {url}: {e}')
msg = f'Failed to connect to Hub API at {url}.'
msg += f' Is the Hub accessible at this URL (from host: {socket.gethostname()})?'
if '127.0.0.1' in url:
msg += ' Make sure to set c.JupyterHub.hub_ip to an IP accessible to' + \
' single-user servers if the servers are not on the same host as the Hub.'
raise HTTPError(500, msg)
data = None
if r.status_code == 404 and allow_404:
pass
elif r.status_code == 403:
my_debug(
'Lacking permission to check authorization with JupyterHub,' +
f' my auth token may have expired: [{r.status_code}] {r.reason}'
)
my_debug(r.text)
raise HTTPError(
500,
'Permission failure checking authorization, I may need a new token'
)
elif r.status_code >= 500:
my_debug(f'Upstream failure verifying auth token: [{r.status_code}] {r.reason}')
my_debug(r.text)
raise HTTPError(
502, 'Failed to check authorization (upstream problem)')
elif r.status_code >= 400:
my_debug(f'Failed to check authorization: [{r.status_code}] {r.reason}')
my_debug(r.text)
raise HTTPError(500, 'Failed to check authorization')
else:
data = r.json()
return data
@authenticated
def get(self):
user_model = self.get_current_user()
# Fetch current auth state
user_data = self.api_request('GET', url_path_join('users', user_model['name']))
auth_state = user_data['auth_state']
access_token = auth_state['access_token']
token_expires = auth_state['token_expires']
self.set_header('content-type', 'application/json')
self.write(json.dumps({'access_token': access_token, 'token_expires': token_expires}, indent=1, sort_keys=True))
class PingHandler(RequestHandler):
def get(self):
my_debug(f"DEBUG: In ping get")
self.set_header('content-type', 'application/json')
self.write(json.dumps({'ping': 1}))
def main():
app = Application([
(os.environ['JUPYTERHUB_SERVICE_PREFIX'] + 'tokens', RefreshHandler),
(os.environ['JUPYTERHUB_SERVICE_PREFIX'] + '/?', PingHandler),
])
http_server = HTTPServer(app)
url = urlparse(os.environ['JUPYTERHUB_SERVICE_URL'])
http_server.listen(url.port)
IOLoop.current().start()
if __name__ == '__main__':
main()
networkPolicy:
ingress:
- ports:
- port: 8082
from:
- podSelector:
matchLabels:
hub.jupyter.org/network-access-hub: "true"
service:
extraPorts:
- port: 8082
targetPort: 8082
name: refresh-token
extraEnv:
NEXTCLOUD_DEBUG_OAUTH: "no"
NEXTCLOUD_HOST: vr.drive.sunet.se
JUPYTER_HOST: vr-jupyter.drive.sunet.se
JUPYTERHUB_API_KEY:
valueFrom:
secretKeyRef:
name: jupyterhub-secrets
key: api-key
JUPYTERHUB_CRYPT_KEY:
valueFrom:
secretKeyRef:
name: jupyterhub-secrets
key: crypt-key
NEXTCLOUD_CLIENT_ID:
valueFrom:
secretKeyRef:
name: nextcloud-oauth-secrets
key: client-id
NEXTCLOUD_CLIENT_SECRET:
valueFrom:
secretKeyRef:
name: nextcloud-oauth-secrets
key: client-secret
networkPolicy:
enabled: false
proxy:
chp:
networkPolicy:
egress:
- to:
- podSelector:
matchLabels:
app: jupyterhub
component: hub
ports:
- port: 8082
singleuser:
image:
name: docker.sunet.se/drive/jupyter-custom
tag: lab-4.0.10-sunet4
storage:
dynamic:
storageClass: csi-sc-cinderplugin
extraEnv:
JUPYTER_ENABLE_LAB: "yes"
JUPYTER_HOST: vr-jupyter.drive.sunet.se
NEXTCLOUD_HOST: vr.drive.sunet.se
extraFiles:
jupyter_notebook_config:
mountPath: /home/jovyan/.jupyter/jupyter_server_config.py
stringData: |
import os
c = get_config()
c.NotebookApp.allow_origin = '*'
c.NotebookApp.tornado_settings = {
'headers': { 'Content-Security-Policy': "frame-ancestors *;" }
}
os.system('/usr/local/bin/nc-sync')
mode: 0644
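Note: get_nextcloud_access_token in the values above is a plain OAuth2 refresh_token grant against Nextcloud's token endpoint (the extra code field simply mirrors the config as written). A standalone sketch of the same exchange, with the host taken from this overlay's NEXTCLOUD_HOST and the credentials left as parameters:

# Sketch of the refresh_token grant performed by get_nextcloud_access_token.
# Real credentials come from the nextcloud-oauth-secrets Secret in extraEnv.
import requests

token_url = "https://vr.drive.sunet.se/index.php/apps/oauth2/api/v1/token"

def refresh(refresh_token: str, client_id: str, client_secret: str) -> dict:
    resp = requests.post(
        token_url,
        data={
            "grant_type": "refresh_token",
            "refresh_token": refresh_token,
            "client_id": client_id,
            "client_secret": client_secret,
        },
        timeout=10,
    )
    resp.raise_for_status()
    # The response carries access_token, refresh_token and expires_in (seconds),
    # which the authenticator stores back into auth_state.
    return resp.json()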

View file

@ -4,16 +4,15 @@ kind: Ingress
 metadata:
   name: jupyterhub-ingress
   annotations:
-    kubernetes.io/ingress.class: nginx
-    cert-manager.io/issuer: "letsencrypt"
-    acme.cert-manager.io/http01-edit-in-place: "true"
+    kubernetes.io/ingress.class: traefik
+    traefik.ingress.kubernetes.io/router.entrypoints: websecure
+    traefik.ingress.kubernetes.io/router.tls: "true"
 spec:
-  ingressClassName: nginx
   defaultBackend:
     service:
       name: proxy-public
       port:
-        number: 80
+        number: 8443
   tls:
   - hosts:
     - sunet-jupyter.drive.test.sunet.se

View file

@ -7,7 +7,7 @@ helmCharts:
   name: jupyterhub
   releaseName: sunet-jupyterhub
   valuesFile: ./values/values.yaml
-  version: 4.0.0
+  version: 3.2.1
   namespace: sunet-jupyterhub
 helmGlobals:
   chartHome: ../../../base/charts/

View file

@ -315,7 +315,7 @@ proxy:
 singleuser:
   image:
     name: docker.sunet.se/drive/jupyter-custom
-    tag: lab-4.0.10-sunet5
+    tag: lab-4.0.10-sunet4
   storage:
     dynamic:
       storageClass: csi-sc-cinderplugin

View file

@ -1,30 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: jupyterhub-ingress
annotations:
cert-manager.io/issuer: "letsencrypt"
acme.cert-manager.io/http01-edit-in-place: "true"
spec:
defaultBackend:
service:
name: proxy-public
port:
number: 8443
tls:
- hosts:
- vr-jupyter.drive.test.sunet.se
secretName: tls-secret
ingressClassName: nginx
rules:
- host: vr-jupyter.drive.test.sunet.se
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: proxy-public
port:
number: 80

View file

@ -1,24 +0,0 @@
---
apiVersion: v1
items:
- apiVersion: v1
kind: Service
metadata:
labels:
app: jupyterhub-node
name: jupyterhub-node
spec:
ports:
- port: 8080
protocol: TCP
targetPort: 8080
selector:
app: jupyterhub-node
sessionAffinity: None
type: ClusterIP
status:
loadBalancer: {}
kind: List
metadata:
resourceVersion: ""
selfLink: ""

View file

@ -1,16 +0,0 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources: [../../../base/]
helmCharts:
- includeCRDs: true
name: jupyterhub
releaseName: vr-jupyterhub
valuesFile: ./values/values.yaml
version: 4.0.0
namespace: vr-jupyterhub
helmGlobals:
chartHome: ../../../base/charts/
patches:
- path: jupyterhub-ingress.yml
- path: jupyterhub-service.yml

View file

@ -1,337 +0,0 @@
debug:
enabled: true
hub:
config:
Authenticator:
auto_login: true
enable_auth_state: true
JupyterHub:
tornado_settings:
headers: { 'Content-Security-Policy': "frame-ancestors *;" }
db:
pvc:
storageClassName: csi-sc-cinderplugin
extraConfig:
oauthCode: |
import time
import requests
from datetime import datetime
from oauthenticator.generic import GenericOAuthenticator
token_url = 'https://' + os.environ['NEXTCLOUD_HOST'] + '/index.php/apps/oauth2/api/v1/token'
debug = os.environ.get('NEXTCLOUD_DEBUG_OAUTH', 'false').lower() in ['true', '1', 'yes']
def get_nextcloud_access_token(refresh_token):
client_id = os.environ['NEXTCLOUD_CLIENT_ID']
client_secret = os.environ['NEXTCLOUD_CLIENT_SECRET']
code = refresh_token
data = {
'grant_type': 'refresh_token',
'code': code,
'refresh_token': refresh_token,
'client_id': client_id,
'client_secret': client_secret
}
response = requests.post(token_url, data=data)
if debug:
print(response.text)
return response.json()
def post_auth_hook(authenticator, handler, authentication):
user = authentication['auth_state']['oauth_user']['ocs']['data']['id']
auth_state = authentication['auth_state']
auth_state['token_expires'] = time.time() + auth_state['token_response']['expires_in']
authentication['auth_state'] = auth_state
return authentication
class NextcloudOAuthenticator(GenericOAuthenticator):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.user_dict = {}
async def pre_spawn_start(self, user, spawner):
super().pre_spawn_start(user, spawner)
auth_state = await user.get_auth_state()
if not auth_state:
return
access_token = auth_state['access_token']
spawner.environment['NEXTCLOUD_ACCESS_TOKEN'] = access_token
async def refresh_user(self, user, handler=None):
auth_state = await user.get_auth_state()
if not auth_state:
if debug:
print(f'auth_state missing for {user}')
return False
access_token = auth_state['access_token']
refresh_token = auth_state['refresh_token']
token_response = auth_state['token_response']
now = time.time()
now_hr = datetime.fromtimestamp(now)
expires = auth_state['token_expires']
expires_hr = datetime.fromtimestamp(expires)
expires = 0
if debug:
print(f'auth_state for {user}: {auth_state}')
if now >= expires:
if debug:
print(f'Time is: {now_hr}, token expired: {expires_hr}')
print(f'Refreshing token for {user}')
try:
token_response = get_nextcloud_access_token(refresh_token)
auth_state['access_token'] = token_response['access_token']
auth_state['refresh_token'] = token_response['refresh_token']
auth_state['token_expires'] = now + token_response['expires_in']
auth_state['token_response'] = token_response
if debug:
print(f'Successfully refreshed token for {user.name}')
print(f'auth_state for {user.name}: {auth_state}')
return {'name': user.name, 'auth_state': auth_state}
except Exception as e:
if debug:
print(f'Failed to refresh token for {user}')
return False
return False
if debug:
print(f'Time is: {now_hr}, token expires: {expires_hr}')
return True
c.JupyterHub.authenticator_class = NextcloudOAuthenticator
c.NextcloudOAuthenticator.client_id = os.environ['NEXTCLOUD_CLIENT_ID']
c.NextcloudOAuthenticator.client_secret = os.environ['NEXTCLOUD_CLIENT_SECRET']
c.NextcloudOAuthenticator.login_service = 'Sunet Drive'
c.NextcloudOAuthenticator.username_claim = lambda r: r.get('ocs', {}).get('data', {}).get('id')
c.NextcloudOAuthenticator.userdata_url = 'https://' + os.environ['NEXTCLOUD_HOST'] + '/ocs/v2.php/cloud/user?format=json'
c.NextcloudOAuthenticator.authorize_url = 'https://' + os.environ['NEXTCLOUD_HOST'] + '/index.php/apps/oauth2/authorize'
c.NextcloudOAuthenticator.token_url = token_url
c.NextcloudOAuthenticator.oauth_callback_url = 'https://' + os.environ['JUPYTER_HOST'] + '/hub/oauth_callback'
c.NextcloudOAuthenticator.allow_all = True
c.NextcloudOAuthenticator.refresh_pre_spawn = True
c.NextcloudOAuthenticator.enable_auth_state = True
c.NextcloudOAuthenticator.auth_refresh_age = 3600
c.NextcloudOAuthenticator.post_auth_hook = post_auth_hook
serviceCode: |
import sys
c.JupyterHub.load_roles = [
{
"name": "refresh-token",
"services": [
"refresh-token"
],
"scopes": [
"read:users",
"admin:auth_state"
]
},
{
"name": "user",
"scopes": [
"access:services!service=refresh-token",
"read:services!service=refresh-token",
"self",
],
},
{
"name": "server",
"scopes": [
"access:services!service=refresh-token",
"read:services!service=refresh-token",
"inherit",
],
}
]
c.JupyterHub.services = [
{
'name': 'refresh-token',
'url': 'http://' + os.environ.get('HUB_SERVICE_HOST', 'hub') + ':' + os.environ.get('HUB_SERVICE_PORT_REFRESH_TOKEN', '8082'),
'display': False,
'oauth_no_confirm': True,
'api_token': os.environ['JUPYTERHUB_API_KEY'],
'command': [sys.executable, '/usr/local/etc/jupyterhub/refresh-token.py']
}
]
c.JupyterHub.admin_users = {"refresh-token"}
c.JupyterHub.api_tokens = {
os.environ['JUPYTERHUB_API_KEY']: "refresh-token",
}
extraFiles:
refresh-token.py:
mountPath: /usr/local/etc/jupyterhub/refresh-token.py
stringData: |
"""A token refresh service authenticating with the Hub.
This service serves `/services/refresh-token/`,
authenticated with the Hub,
showing the user their own info.
"""
import json
import os
import requests
import socket
from jupyterhub.services.auth import HubAuthenticated
from jupyterhub.utils import url_path_join
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.web import Application, HTTPError, RequestHandler, authenticated
from urllib.parse import urlparse
debug = os.environ.get('NEXTCLOUD_DEBUG_OAUTH', 'false').lower() in ['true', '1', 'yes']
def my_debug(s):
if debug:
with open("/proc/1/fd/1", "a") as stdout:
print(s, file=stdout)
class RefreshHandler(HubAuthenticated, RequestHandler):
def api_request(self, method, url, **kwargs):
my_debug(f'{self.hub_auth}')
url = url_path_join(self.hub_auth.api_url, url)
allow_404 = kwargs.pop('allow_404', False)
headers = kwargs.setdefault('headers', {})
headers.setdefault('Authorization', f'token {self.hub_auth.api_token}')
try:
r = requests.request(method, url, **kwargs)
except requests.ConnectionError as e:
my_debug(f'Error connecting to {url}: {e}')
msg = f'Failed to connect to Hub API at {url}.'
msg += f' Is the Hub accessible at this URL (from host: {socket.gethostname()})?'
if '127.0.0.1' in url:
msg += ' Make sure to set c.JupyterHub.hub_ip to an IP accessible to' + \
' single-user servers if the servers are not on the same host as the Hub.'
raise HTTPError(500, msg)
data = None
if r.status_code == 404 and allow_404:
pass
elif r.status_code == 403:
my_debug(
'Lacking permission to check authorization with JupyterHub,' +
f' my auth token may have expired: [{r.status_code}] {r.reason}'
)
my_debug(r.text)
raise HTTPError(
500,
'Permission failure checking authorization, I may need a new token'
)
elif r.status_code >= 500:
my_debug(f'Upstream failure verifying auth token: [{r.status_code}] {r.reason}')
my_debug(r.text)
raise HTTPError(
502, 'Failed to check authorization (upstream problem)')
elif r.status_code >= 400:
my_debug(f'Failed to check authorization: [{r.status_code}] {r.reason}')
my_debug(r.text)
raise HTTPError(500, 'Failed to check authorization')
else:
data = r.json()
return data
@authenticated
def get(self):
user_model = self.get_current_user()
# Fetch current auth state
user_data = self.api_request('GET', url_path_join('users', user_model['name']))
auth_state = user_data['auth_state']
access_token = auth_state['access_token']
token_expires = auth_state['token_expires']
self.set_header('content-type', 'application/json')
self.write(json.dumps({'access_token': access_token, 'token_expires': token_expires}, indent=1, sort_keys=True))
class PingHandler(RequestHandler):
def get(self):
my_debug(f"DEBUG: In ping get")
self.set_header('content-type', 'application/json')
self.write(json.dumps({'ping': 1}))
def main():
app = Application([
(os.environ['JUPYTERHUB_SERVICE_PREFIX'] + 'tokens', RefreshHandler),
(os.environ['JUPYTERHUB_SERVICE_PREFIX'] + '/?', PingHandler),
])
http_server = HTTPServer(app)
url = urlparse(os.environ['JUPYTERHUB_SERVICE_URL'])
http_server.listen(url.port)
IOLoop.current().start()
if __name__ == '__main__':
main()
networkPolicy:
ingress:
- ports:
- port: 8082
from:
- podSelector:
matchLabels:
hub.jupyter.org/network-access-hub: "true"
service:
extraPorts:
- port: 8082
targetPort: 8082
name: refresh-token
extraEnv:
NEXTCLOUD_DEBUG_OAUTH: "no"
NEXTCLOUD_HOST: vr.drive.test.sunet.se
JUPYTER_HOST: vr-jupyter.drive.test.sunet.se
JUPYTERHUB_API_KEY:
valueFrom:
secretKeyRef:
name: jupyterhub-secrets
key: api-key
JUPYTERHUB_CRYPT_KEY:
valueFrom:
secretKeyRef:
name: jupyterhub-secrets
key: crypt-key
NEXTCLOUD_CLIENT_ID:
valueFrom:
secretKeyRef:
name: nextcloud-oauth-secrets
key: client-id
NEXTCLOUD_CLIENT_SECRET:
valueFrom:
secretKeyRef:
name: nextcloud-oauth-secrets
key: client-secret
networkPolicy:
enabled: false
proxy:
chp:
networkPolicy:
egress:
- to:
- podSelector:
matchLabels:
app: jupyterhub
component: hub
ports:
- port: 8082
singleuser:
image:
name: docker.sunet.se/drive/jupyter-custom
tag: lab-4.0.10-sunet4
storage:
dynamic:
storageClass: csi-sc-cinderplugin
extraEnv:
JUPYTER_ENABLE_LAB: "yes"
JUPYTER_HOST: vr-jupyter.drive.test.sunet.se
NEXTCLOUD_HOST: vr.drive.test.sunet.se
extraFiles:
jupyter_notebook_config:
mountPath: /home/jovyan/.jupyter/jupyter_server_config.py
stringData: |
import os
c = get_config()
c.NotebookApp.allow_origin = '*'
c.NotebookApp.tornado_settings = {
'headers': { 'Content-Security-Policy': "frame-ancestors *;" }
}
os.system('/usr/local/bin/nc-sync')
mode: 0644
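Note: both hub.config.JupyterHub.tornado_settings and the mounted jupyter_server_config.py set Content-Security-Policy: frame-ancestors *; so the hub and single-user servers can be embedded in an iframe (e.g. from Nextcloud). A quick sketch to confirm the header is actually served, assuming this overlay's JUPYTER_HOST is reachable from where it runs:

# Sketch: verify the frame-ancestors CSP header configured in the values above.
# The host is this overlay's JUPYTER_HOST; reachability is an assumption.
import requests

resp = requests.get("https://vr-jupyter.drive.test.sunet.se/hub/login", timeout=10)
csp = resp.headers.get("Content-Security-Policy", "")
print("frame-ancestors *" in csp)  # True if iframe embedding is allowed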

View file

@ -1,8 +0,0 @@
resources:
- portal-cert-manager.yml
- portal-deployment.yml
- portal-ingress.yml
- portal-namespace.yml
- portal-service.yml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

View file

@ -1,15 +0,0 @@
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: letsencrypt
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: drive@sunet.se
privateKeySecretRef:
name: letsencrypt
solvers:
- http01:
ingress:
class: nginx

View file

@ -1,30 +0,0 @@
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: portal-node
namespace: portal
creationTimestamp:
labels:
app: portal-node
spec:
replicas: 3
selector:
matchLabels:
app: portal-node
template:
metadata:
creationTimestamp:
labels:
app: portal-node
spec:
containers:
- name: portal
image: docker.sunet.se/drive/portal:0.1.2-1
imagePullPolicy: Always
resources: {}
env:
- name: DRIVE_DOMAIN
value: "drive.test.sunet.se"
strategy: {}
status: {}

View file

@ -1,30 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: portal-ingress
namespace: portal
annotations:
kubernetes.io/ingress.class: nginx
spec:
defaultBackend:
service:
name: portal-node
port:
number: 8080
tls:
- hosts:
- portal.drive.test.sunet.se
secretName: tls-secret
ingressClassName: nginx
rules:
- host: portal.drive.test.sunet.se
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: portal-node
port:
number: 8080

View file

@ -1,8 +0,0 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: portal
spec:
finalizers:
- kubernetes

View file

@ -1,25 +0,0 @@
---
apiVersion: v1
items:
- apiVersion: v1
kind: Service
metadata:
labels:
app: portal-node
name: portal-node
namespace: portal
spec:
ports:
- port: 8080
protocol: TCP
targetPort: 8080
selector:
app: portal-node
sessionAffinity: None
type: ClusterIP
status:
loadBalancer: {}
kind: List
metadata:
resourceVersion: ""
selfLink: ""

View file

@ -1,6 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../base
patches:
- path: portal-ingress.yml

View file

@ -1,30 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: portal-ingress
namespace: portal
annotations:
kubernetes.io/ingress.class: nginx
spec:
defaultBackend:
service:
name: portal-node
port:
number: 8080
ingressClassName: nginx
tls:
- hosts:
- portal.drive.sunet.se
secretName: tls-secret
rules:
- host: portal.drive.sunet.se
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: portal-node
port:
number: 8080

View file

@ -1,7 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../base
patches:
- path: portal-ingress.yml
- path: portal-deployment.yml

View file

@ -1,27 +0,0 @@
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: portal-node
namespace: portal
creationTimestamp:
labels:
app: portal-node
spec:
replicas: 3
selector:
matchLabels:
app: portal-node
template:
metadata:
creationTimestamp:
labels:
app: portal-node
spec:
containers:
- name: portal
env:
- name: DRIVE_DOMAIN
value: "drive.test.sunet.se"
strategy: {}
status: {}

View file

@ -1,32 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: portal-ingress
namespace: portal
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/issuer: "letsencrypt"
acme.cert-manager.io/http01-edit-in-place: "true"
spec:
defaultBackend:
service:
name: portal-node
port:
number: 8080
ingressClassName: nginx
tls:
- hosts:
- drive.test.sunet.se
secretName: tls-secret
rules:
- host: drive.test.sunet.se
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: portal-node
port:
number: 8080

Binary file not shown.

View file

@ -19,34 +19,35 @@ spec:
     spec:
       containers:
       - name: doris
-        image: docker.sunet.se/rds/doris-rds:git-15de3c5b9
+        image: docker.sunet.se/rds/doris-rds:ci-RDS-Connectors-13
         env:
-        - name: Logging__LogLevel__Default
-          value: Debug
-        - name: ScieboRds__ConnectorServiceName
-          value: layer1-port-doris
+        - name: ASPNETCORE_ENVIRONMENT
+          value: Development
+        - name: ASPNETCORE_URLS
+          value: http://+:80
+        - name: Domain
+          value: sunet.se
         - name: ScieboRds__TokenStorageUrl
           value: http://layer3-token-storage
-        - name: Doris__ApiKey
-          value: dummy
-        - name: ManifestIndex__Url
-          value: https://snd-storage-metadata-index-test-snd-dev.apps.k8s.gu.se
-        - name: ManifestIndex__ApiKey
+        - name: S3__Url
           valueFrom:
             secretKeyRef:
-              name: doris-gu-secrets
+              name: doris-api-key
               key: "api-key"
-        - name: Doris__DorisApiEnabled
-          value: 'true'
-        - name: Doris__PrincipalDomain
-          value: gu.se
-        - name: Doris__ApiUrl
-          value: https://dev.snd.se/doris/api/rocrate
-        - name: NextCloud__BaseUrl
-          value: https://gu.drive.test.sunet.se
-        - name: NextCloud__User
-          value: _doris_datasets
-        - name: NextCloud__Password
+        - name: S3__AccessKey
           valueFrom:
             secretKeyRef:
-              name: doris-gu-secret
-              key: "nextcloudpw"
+              name: doris-s3-key
+              key: "s3-key"
+        - name: S3__SecretKey
+          valueFrom:
+            secretKeyRef:
+              name: doris-s3-secret
+              key: "s3-secret"
         resources: {}
   strategy: {}
 status: {}
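Note: the rewritten Doris connector configuration above replaces the Doris/Nextcloud settings with an S3 endpoint and key pair (S3__Url, S3__AccessKey, S3__SecretKey) pulled from Secrets. The connector itself is .NET (note ASPNETCORE_URLS), so purely as an illustration of what those three variables parameterize, here is a sketch with boto3; the bucket name is a placeholder:

# Sketch: the S3__* environment variables above describe an S3 endpoint and
# key pair. The connector is .NET, so this boto3 client is only illustrative;
# "example-bucket" is a made-up placeholder.
import os
import boto3

s3 = boto3.client(
    "s3",
    endpoint_url=os.environ["S3__Url"],
    aws_access_key_id=os.environ["S3__AccessKey"],
    aws_secret_access_key=os.environ["S3__SecretKey"],
)
for obj in s3.list_objects_v2(Bucket="example-bucket").get("Contents", []):
    print(obj["Key"])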

View file

@ -12,7 +12,7 @@ items:
     ports:
     - port: 80
       protocol: TCP
-      targetPort: 8080
+      targetPort: 80
     selector:
       app: layer1-port-doris
     sessionAffinity: None

View file

@ -1,11 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
name: gu-drive
namespace: helmrds
spec:
type: ExternalName
externalName: gu.drive.test.sunet.se
ports:
- port: 443
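Note: the deleted gu-drive Service is of type ExternalName, which publishes a DNS CNAME rather than proxying traffic: in-cluster lookups of gu-drive.helmrds resolve to gu.drive.test.sunet.se, and TLS clients must still validate against the external hostname. A sketch that only works from a pod in the cluster, assuming the default cluster.local domain:

# Sketch: an ExternalName Service is just a DNS CNAME. From a pod in the same
# cluster, the service name resolves to the external host. Runnable only
# in-cluster; the names come from the manifest above.
import socket

addrs = socket.getaddrinfo("gu-drive.helmrds.svc.cluster.local", 443)
print(sorted({a[4][0] for a in addrs}))  # the IPs behind gu.drive.test.sunet.se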

View file

@ -3,7 +3,6 @@ resources:
 - doris-deployment.yml
 - rds-ingress.yml
 - sunet-service.yml
-- gu-service.yml
 apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization

Some files were not shown because too many files have changed in this diff.