Compare commits

main..mandersson-kubedev

No commits in common. "main" and "mandersson-kubedev" have entirely different histories.

143 changed files with 213 additions and 13470 deletions

@ -11,7 +11,7 @@ spec:
service:
name: argocd-server
port:
number: 80
number: 8443
ingressClassName: traefik
tls:
- hosts:
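
Note for readers: the rendered compare lost the usual +/- diff markers, so within each hunk the pre-change line of a pair comes first, followed by its replacement. In this first hunk the ArgoCD Ingress backend moves from service port 80 to 8443 behind Traefik; a minimal sketch of the patched block, assuming the standard removed-then-added ordering:

service:
  name: argocd-server
  port:
    number: 8443   # previously 80; assumed to be argocd-server's TLS service port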

@ -1,15 +0,0 @@
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: letsencrypt
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: drive@sunet.se
privateKeySecretRef:
name: letsencrypt
solvers:
- http01:
ingress:
class: nginx

@ -1,22 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: argocd-ingress
namespace: argocd
spec:
ingressClassName: nginx
tls:
- hosts:
- argocd.drive.test.sunet.se
secretName: tls-secret
rules:
- host: argocd.drive.test.sunet.se
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: argocd-server
port:
name: https

@ -1,3 +0,0 @@
resources:
- argocd-ingress.yaml
- argocd-cert-issuer.yaml

@ -1,28 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: argocd-ingress
namespace: argocd
spec:
defaultBackend:
service:
name: argocd-server
port:
number: 80
ingressClassName: nginx
tls:
- hosts:
- argocd.drive.test.sunet.dev
secretName: tls-secret
rules:
- host: argocd.drive.test.sunet.dev
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: argocd-server
port:
number: 80

@ -1,6 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../base
patches:
- path: argocd-ingress.yaml

@ -1,30 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: argocd-ingress
annotations:
cert-manager.io/issuer: "letsencrypt"
acme.cert-manager.io/http01-edit-in-place: "true"
namespace: argocd
spec:
defaultBackend:
service:
name: argocd-server
port:
number: 80
ingressClassName: nginx
tls:
- hosts:
- sunet-argocd.drive.sunet.se
secretName: tls-secret
rules:
- host: sunet-argocd.drive.sunet.se
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: argocd-server
port:
number: 80
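
This Ingress pairs `cert-manager.io/issuer` with `acme.cert-manager.io/http01-edit-in-place: "true"`, so the ACME solver patches this same Ingress in place rather than creating a temporary one. Since `kind: Issuer` is namespace-scoped, the `letsencrypt` Issuer removed earlier in this compare must exist alongside the Ingress for the reference to resolve; a minimal sketch with the namespace made explicit (an assumption, since the original manifest omits it):

apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
  name: letsencrypt
  namespace: argocd   # assumption: must match the namespace of the Ingress that references it
spec:
  acme:
    server: https://acme-v02.api.letsencrypt.org/directory
    email: drive@sunet.se
    privateKeySecretRef:
      name: letsencrypt
    solvers:
      - http01:
          ingress:
            class: nginx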

@ -1,6 +0,0 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources: [../../../base]
patches:
- path: argocd-ingress.yaml

@ -1,30 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: argocd-ingress
namespace: argocd
annotations:
cert-manager.io/issuer: "letsencrypt"
acme.cert-manager.io/http01-edit-in-place: "true"
spec:
defaultBackend:
service:
name: argocd-server
port:
number: 80
ingressClassName: nginx
tls:
- hosts:
- vr-argocd.drive.sunet.se
secretName: tls-secret
rules:
- host: vr-argocd.drive.sunet.se
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: argocd-server
port:
number: 80

@ -1,6 +0,0 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources: [../../../base]
patches:
- path: argocd-ingress.yaml

@ -1,27 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: argocd-ingress
namespace: argocd
spec:
defaultBackend:
service:
name: argocd-server
port:
number: 80
ingressClassName: nginx
tls:
- hosts:
- argocd.drive.test.sunet.se
secretName: tls-secret
rules:
- host: argocd.drive.test.sunet.se
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: argocd-server
port:
number: 80

@ -1,6 +0,0 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources: [../../../base]
patches:
- path: argocd-ingress.yaml

@ -1,30 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: argocd-ingress
namespace: argocd
annotations:
cert-manager.io/issuer: "letsencrypt"
acme.cert-manager.io/http01-edit-in-place: "true"
spec:
defaultBackend:
service:
name: argocd-server
port:
number: 80
ingressClassName: nginx
tls:
- hosts:
- sunet-argocd.drive.test.sunet.se
secretName: tls-secret
rules:
- host: sunet-argocd.drive.test.sunet.se
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: argocd-server
port:
number: 80

@ -1,6 +0,0 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources: [../../../base]
patches:
- path: argocd-ingress.yaml

@ -1,30 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: argocd-ingress
namespace: argocd
annotations:
cert-manager.io/issuer: "letsencrypt"
acme.cert-manager.io/http01-edit-in-place: "true"
spec:
defaultBackend:
service:
name: argocd-server
port:
number: 80
ingressClassName: nginx
tls:
- hosts:
- vr-argocd.drive.test.sunet.se
secretName: tls-secret
rules:
- host: vr-argocd.drive.test.sunet.se
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: argocd-server
port:
number: 80

@ -1,6 +0,0 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources: [../../../base]
patches:
- path: argocd-ingress.yaml

@ -3,4 +3,4 @@ kind: Kustomization
namespace: argocd
resources:
- https://raw.githubusercontent.com/argoproj/argo-cd/v2.10.4/manifests/ha/install.yaml
- https://raw.githubusercontent.com/argoproj/argo-cd/v2.10.0/manifests/ha/install.yaml

@ -69,7 +69,7 @@ spec:
- mountPath: /var/lib/csi/sockets/pluginproxy/
name: socket-dir
- name: csi-resizer
image: registry.k8s.io/sig-storage/csi-resizer:v1.8.0
image: registry.k8s.io/sig-storage/csi-resizer:v1.7.0
args:
- "--csi-address=$(ADDRESS)"
- "--timeout=3m"
@ -93,7 +93,7 @@ spec:
- mountPath: /var/lib/csi/sockets/pluginproxy/
name: socket-dir
- name: cinder-csi-plugin
image: registry.k8s.io/provider-os/cinder-csi-plugin:v1.28.2
image: registry.k8s.io/provider-os/cinder-csi-plugin:v1.27.1
args:
- /bin/cinder-csi-plugin
- "--endpoint=$(CSI_ENDPOINT)"

@ -30,7 +30,7 @@ spec:
restartPolicy: Always
containers:
- name: customer
image: docker.sunet.se/drive/nextcloud-custom:29.0.9.3-1
image: docker.sunet.se/drive/nextcloud-custom:27.1.6.3-5
volumeMounts:
- name: nextcloud-data
mountPath: /var/www/html/config/
@ -127,7 +127,7 @@ spec:
- name: NEXTCLOUD_ADMIN_USER
value: admin
- name: NEXTCLOUD_VERSION_STRING
value: "28.0.3.3"
value: "26.0.1.2"
- name: NEXTCLOUD_ADMIN_PASSWORD
valueFrom:
secretKeyRef:

@ -4,13 +4,15 @@ kind: Ingress
metadata:
name: customer-ingress
annotations:
kubernetes.io/ingress.class: nginx
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
spec:
tls:
- hosts:
- nordunet.drive.test.sunet.se
secretName: tls-secret
ingressClassName: nginx
rules:
- host: nordunet.drive.test.sunet.se
http:
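
This hunk (and the identical vinnova and health-ingress hunks below) swaps the nginx ingress annotation for its Traefik equivalents; the `ingressClassName: nginx` line appears to survive as unchanged context. A sketch of the resulting annotation block, grounded in the added lines:

metadata:
  annotations:
    kubernetes.io/ingress.class: traefik
    traefik.ingress.kubernetes.io/router.entrypoints: websecure   # route via the HTTPS entrypoint
    traefik.ingress.kubernetes.io/router.tls: "true"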

@ -4,13 +4,15 @@ kind: Ingress
metadata:
name: customer-ingress
annotations:
kubernetes.io/ingress.class: nginx
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
spec:
tls:
- hosts:
- vinnova.drive.test.sunet.se
secretName: tls-secret
ingressClassName: nginx
rules:
- host: vinnova.drive.test.sunet.se
http:

@ -1,6 +1,7 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../base
- ../../../base
patches:
- path: argocd-ingress.yaml
- path: nextcloud-deployment.yml
- path: nextcloud-ingress.yml

@ -0,0 +1,35 @@
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: customer-node
labels:
app: customer-node
spec:
replicas: 1
template:
metadata:
labels:
app: customer-node
spec:
initContainers:
- image: docker.sunet.se/sunet/docker-jinja:latest
name: init-config
env:
- name: MYSQL_DATABASE
value: "nextcloud_vr"
- name: MYSQL_USER
value: "nextcloud_vr"
- name: GSS_MASTER_URL
value: "https://drive.test.sunet.se"
- name: LOOKUP_SERVER
value: "https://lookup.drive.test.sunet.se"
- name: MAIL_DOMAIN
value: "drive.test.sunet.se"
- name: MAIL_SMTPNAME
value: "noreply@drive.test.sunet.se"
- name: NEXTCLOUD_TRUSTED_DOMAINS
value: "vr.drive.test.sunet.se"
- name: OBJECTSTORE_S3_BUCKET
value: "primary-vr-drive-test.sunet.se"
- name: SITE_NAME
value: "vr.drive.test.sunet.se"

@ -1,31 +1,26 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: argocd-ingress
namespace: argocd
name: customer-ingress
annotations:
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
spec:
defaultBackend:
service:
name: argocd-server
port:
number: 8443
tls:
- hosts:
- argocd.drive.sunet.se
- vr.drive.test.sunet.se
secretName: tls-secret
rules:
- host: argocd.drive.sunet.se
- host: vr.drive.test.sunet.se
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: argocd-server
name: customer-node
port:
number: 80

@ -1,30 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: health-ingress
namespace: health
annotations:
kubernetes.io/ingress.class: nginx
spec:
defaultBackend:
service:
name: health-node
port:
number: 8443
ingressClassName: nginx
tls:
- hosts:
- sunet-kube.drive.sunet.se
secretName: tls-secret
rules:
- host: sunet-kube.drive.sunet.se
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: health-node
port:
number: 8080

@ -1,6 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../base
patches:
- path: health-ingress.yml

@ -1,30 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: health-ingress
namespace: health
annotations:
kubernetes.io/ingress.class: nginx
spec:
defaultBackend:
service:
name: health-node
port:
number: 8443
ingressClassName: nginx
tls:
- hosts:
- sunet-kube.drive.test.sunet.se
secretName: tls-secret
rules:
- host: sunet-kube.drive.test.sunet.se
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: health-node
port:
number: 8080

@ -1,6 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../base
patches:
- path: health-ingress.yml

@ -5,18 +5,20 @@ metadata:
name: health-ingress
namespace: health
annotations:
kubernetes.io/ingress.class: nginx
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
spec:
defaultBackend:
service:
name: health-node
port:
number: 8443
ingressClassName: nginx
ingressClassName: traefik
tls:
- hosts:
- kube.drive.test.sunet.se
secretName: tls-secret
rules:
- host: kube.drive.test.sunet.se
http:

@ -1,27 +1,27 @@
annotations:
artifacthub.io/images: |
- image: quay.io/jupyterhub/configurable-http-proxy:4.6.2
- image: quay.io/jupyterhub/configurable-http-proxy:4.6.1
name: configurable-http-proxy
- image: quay.io/jupyterhub/k8s-hub:4.0.0
- image: quay.io/jupyterhub/k8s-hub:3.2.1
name: k8s-hub
- image: quay.io/jupyterhub/k8s-image-awaiter:4.0.0
- image: quay.io/jupyterhub/k8s-image-awaiter:3.2.1
name: k8s-image-awaiter
- image: quay.io/jupyterhub/k8s-network-tools:4.0.0
- image: quay.io/jupyterhub/k8s-network-tools:3.2.1
name: k8s-network-tools
- image: quay.io/jupyterhub/k8s-secret-sync:4.0.0
- image: quay.io/jupyterhub/k8s-secret-sync:3.2.1
name: k8s-secret-sync
- image: quay.io/jupyterhub/k8s-singleuser-sample:4.0.0
- image: quay.io/jupyterhub/k8s-singleuser-sample:3.2.1
name: k8s-singleuser-sample
- image: registry.k8s.io/kube-scheduler:v1.30.6
- image: registry.k8s.io/kube-scheduler:v1.26.11
name: kube-scheduler
- image: registry.k8s.io/pause:3.10
- image: registry.k8s.io/pause:3.9
name: pause
- image: registry.k8s.io/pause:3.10
- image: registry.k8s.io/pause:3.9
name: pause
- image: traefik:v3.2.0
- image: traefik:v2.10.5
name: traefik
apiVersion: v2
appVersion: 5.2.1
appVersion: 4.0.2
description: Multi-user Jupyter installation
home: https://z2jh.jupyter.org
icon: https://hub.jupyter.org/helm-chart/images/hublogo.svg
@ -29,7 +29,7 @@ keywords:
- jupyter
- jupyterhub
- z2jh
kubeVersion: '>=1.28.0-0'
kubeVersion: '>=1.23.0-0'
maintainers:
- email: erik@sundellopensource.se
name: Erik Sundell
@ -38,4 +38,4 @@ maintainers:
name: jupyterhub
sources:
- https://github.com/jupyterhub/zero-to-jupyterhub-k8s
version: 4.0.0
version: 3.2.1

@ -107,35 +107,28 @@ c.JupyterHub.hub_connect_url = (
)
# implement common labels
# This mimics the jupyterhub.commonLabels helper, but declares managed-by to
# kubespawner instead of helm.
#
# The labels app and release are old labels enabled to be deleted in z2jh 5, but
# for now retained to avoid a breaking change in z2jh 4 that would force user
# server restarts. Restarts would be required because NetworkPolicy resources
# must select old/new pods with labels that then needs to be seen on both
# old/new pods, and we want these resources to keep functioning for old/new user
# server pods during an upgrade.
#
# this duplicates the jupyterhub.commonLabels helper
common_labels = c.KubeSpawner.common_labels = {}
common_labels["app.kubernetes.io/name"] = common_labels["app"] = get_config(
common_labels["app"] = get_config(
"nameOverride",
default=get_config("Chart.Name", "jupyterhub"),
)
release = get_config("Release.Name")
if release:
common_labels["app.kubernetes.io/instance"] = common_labels["release"] = release
common_labels["heritage"] = "jupyterhub"
chart_name = get_config("Chart.Name")
chart_version = get_config("Chart.Version")
if chart_name and chart_version:
common_labels["helm.sh/chart"] = common_labels["chart"] = (
f"{chart_name}-{chart_version.replace('+', '_')}"
common_labels["chart"] = "{}-{}".format(
chart_name,
chart_version.replace("+", "_"),
)
common_labels["app.kubernetes.io/managed-by"] = "kubespawner"
release = get_config("Release.Name")
if release:
common_labels["release"] = release
c.KubeSpawner.namespace = os.environ.get("POD_NAMESPACE", "default")
# Max number of consecutive failures before the Hub restarts itself
# requires jupyterhub 0.9.2
set_config_if_not_none(
c.Spawner,
"consecutive_failure_limit",
@ -256,8 +249,7 @@ if tolerations:
storage_type = get_config("singleuser.storage.type")
if storage_type == "dynamic":
pvc_name_template = get_config("singleuser.storage.dynamic.pvcNameTemplate")
if pvc_name_template:
c.KubeSpawner.pvc_name_template = pvc_name_template
c.KubeSpawner.pvc_name_template = pvc_name_template
volume_name_template = get_config("singleuser.storage.dynamic.volumeNameTemplate")
c.KubeSpawner.storage_pvc_ensure = True
set_config_if_not_none(
@ -276,14 +268,13 @@ if storage_type == "dynamic":
c.KubeSpawner.volumes = [
{
"name": volume_name_template,
"persistentVolumeClaim": {"claimName": "{pvc_name}"},
"persistentVolumeClaim": {"claimName": pvc_name_template},
}
]
c.KubeSpawner.volume_mounts = [
{
"mountPath": get_config("singleuser.storage.homeMountPath"),
"name": volume_name_template,
"subPath": get_config("singleuser.storage.dynamic.subPath"),
}
]
elif storage_type == "static":
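
The hunk above reverts to the 3.2.1 behaviour, where the rendered `pvcNameTemplate` string is assigned unconditionally and used directly as the volume's `claimName`; the 4.0.0 code it replaces passed the literal `{pvc_name}` placeholder for KubeSpawner to expand per user. For context, a sketch of the values.yaml knobs this code reads, combining defaults visible elsewhere in this compare (the storageClass value is the one the site configs further down use):

singleuser:
  storage:
    type: dynamic
    homeMountPath: /home/jovyan
    dynamic:
      storageClass: csi-sc-cinderplugin                   # from the site values in this compare
      pvcNameTemplate: claim-{username}{servername}       # 3.2.1 default; left empty in 4.0.0
      volumeNameTemplate: volume-{username}{servername}   # 4.0.0 default is volume-{user_server}
      storageAccessModes: [ReadWriteOnce]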

@ -3,7 +3,6 @@ Utility methods for use in jupyterhub_config.py and dynamic subconfigs.
Methods here can be imported by extraConfig in values.yaml
"""
import os
from collections.abc import Mapping
from functools import lru_cache

@ -48,6 +48,7 @@
- commonLabels | uses appLabel
- labels | uses commonLabels
- matchLabels | uses labels
- podCullerSelector | uses matchLabels
## Example usage
@ -111,62 +112,31 @@
{{- /*
jupyterhub.commonLabels:
Foundation for "jupyterhub.labels".
Provides old labels:
app
release
chart (omitted for matchLabels)
heritage (omitted for matchLabels)
Provides modern labels (omitted for matchLabels):
app.kubernetes.io/name ("app")
app.kubernetes.io/instance ("release")
helm.sh/chart ("chart")
app.kubernetes.io/managed-by ("heritage")
Provides labels: app, release, (chart and heritage).
*/}}
{{- define "jupyterhub.commonLabels" -}}
app: {{ .appLabel | default (include "jupyterhub.appLabel" .) | quote }}
release: {{ .Release.Name | quote }}
app: {{ .appLabel | default (include "jupyterhub.appLabel" .) }}
release: {{ .Release.Name }}
{{- if not .matchLabels }}
chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
heritage: {{ .Release.Service }}
app.kubernetes.io/name: {{ .appLabel | default (include "jupyterhub.appLabel" .) | quote }}
app.kubernetes.io/instance: {{ .Release.Name | quote }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
heritage: {{ .heritageLabel | default .Release.Service }}
{{- end }}
{{- end }}
{{- /*
jupyterhub.labels:
Provides old labels:
component
app
release
chart (omitted for matchLabels)
heritage (omitted for matchLabels)
Provides modern labels (omitted for matchLabels):
app.kubernetes.io/component ("component")
app.kubernetes.io/name ("app")
app.kubernetes.io/instance release ("release")
helm.sh/chart ("chart")
app.kubernetes.io/managed-by ("heritage")
Provides labels: component, app, release, (chart and heritage).
*/}}
{{- define "jupyterhub.labels" -}}
component: {{ include "jupyterhub.componentLabel" . }}
{{- if not .matchLabels }}
app.kubernetes.io/component: {{ include "jupyterhub.componentLabel" . }}
{{- end }}
{{ include "jupyterhub.commonLabels" . }}
{{- end }}
{{- /*
jupyterhub.matchLabels:
Provides old labels:
component
app
release
Used to provide pod selection labels: component, app, release.
*/}}
{{- define "jupyterhub.matchLabels" -}}
{{- $_ := merge (dict "matchLabels" true) . -}}

@ -5,7 +5,7 @@ metadata:
labels:
{{- include "jupyterhub.labels" . | nindent 4 }}
spec:
{{- if not (typeIs "<nil>" .Values.hub.revisionHistoryLimit) }}
{{- if typeIs "int" .Values.hub.revisionHistoryLimit }}
revisionHistoryLimit: {{ .Values.hub.revisionHistoryLimit }}
{{- end }}
replicas: 1
@ -97,6 +97,9 @@ spec:
{{- . | toYaml | nindent 8 }}
{{- end }}
containers:
{{- with .Values.hub.extraContainers }}
{{- . | toYaml | nindent 8 }}
{{- end }}
- name: hub
image: {{ .Values.hub.image.name }}:{{ .Values.hub.image.tag }}
{{- with .Values.hub.command }}
@ -241,9 +244,6 @@ spec:
path: {{ .Values.hub.baseUrl | trimSuffix "/" }}/hub/health
port: http
{{- end }}
{{- with .Values.hub.extraContainers }}
{{- . | toYaml | nindent 8 }}
{{- end }}
{{- with .Values.hub.extraPodSpec }}
{{- . | toYaml | nindent 6 }}
{{- end }}

@ -8,7 +8,7 @@ type: Opaque
data:
{{- $values := merge dict .Values }}
{{- /* also passthrough subset of Chart / Release */}}
{{- $_ := set $values "Chart" (dict "Name" .Chart.Name "Version" .Chart.Version "AppVersion" .Chart.AppVersion) }}
{{- $_ := set $values "Chart" (dict "Name" .Chart.Name "Version" .Chart.Version) }}
{{- $_ := set $values "Release" (pick .Release "Name" "Namespace" "Service") }}
values.yaml: {{ $values | toYaml | b64enc | quote }}

@ -31,9 +31,6 @@ spec:
{{- with .Values.hub.service.ports.nodePort }}
nodePort: {{ . }}
{{- end }}
{{- with .Values.hub.service.ports.appProtocol }}
appProtocol: {{ . }}
{{- end }}
{{- with .Values.hub.service.extraPorts }}
{{- . | toYaml | nindent 4 }}

@ -34,7 +34,7 @@ spec:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 100%
{{- if not (typeIs "<nil>" .Values.prePuller.revisionHistoryLimit) }}
{{- if typeIs "int" .Values.prePuller.revisionHistoryLimit }}
revisionHistoryLimit: {{ .Values.prePuller.revisionHistoryLimit }}
{{- end }}
template:

@ -24,9 +24,6 @@ spec:
name: {{ include "jupyterhub.proxy-public.fullname" $ }}
port:
name: http
{{- with $.Values.ingress.extraPaths }}
{{- . | toYaml | nindent 10 }}
{{- end }}
{{- if $host }}
host: {{ $host | quote }}
{{- end }}

@ -8,7 +8,7 @@ metadata:
labels:
{{- include "jupyterhub.labels" . | nindent 4 }}
spec:
{{- if not (typeIs "<nil>" .Values.proxy.traefik.revisionHistoryLimit) }}
{{- if typeIs "int" .Values.proxy.traefik.revisionHistoryLimit }}
revisionHistoryLimit: {{ .Values.proxy.traefik.revisionHistoryLimit }}
{{- end }}
replicas: 1
@ -130,10 +130,10 @@ spec:
{{- end }}
args:
- watch-save
- --label=app.kubernetes.io/name={{ include "jupyterhub.appLabel" . }}
- --label=app.kubernetes.io/instance={{ .Release.Name }}
- --label=helm.sh/chart={{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
- --label=app.kubernetes.io/managed-by=secret-sync
- --label=app={{ include "jupyterhub.appLabel" . }}
- --label=release={{ .Release.Name }}
- --label=chart={{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
- --label=heritage=secret-sync
- {{ include "jupyterhub.proxy-public-tls.fullname" . }}
- acme.json
- /etc/acme/acme.json

@ -7,7 +7,7 @@ metadata:
labels:
{{- include "jupyterhub.labels" . | nindent 4 }}
spec:
{{- if not (typeIs "<nil>" .Values.proxy.chp.revisionHistoryLimit) }}
{{- if typeIs "int" .Values.proxy.chp.revisionHistoryLimit }}
revisionHistoryLimit: {{ .Values.proxy.chp.revisionHistoryLimit }}
{{- end }}
replicas: 1
@ -100,9 +100,6 @@ spec:
{{- if .Values.debug.enabled }}
- --log-level=debug
{{- end }}
{{- if .Values.hub.config.JupyterHub.subdomain_host }}
- --host-routing
{{- end }}
{{- range .Values.proxy.chp.extraCommandLineFlags }}
- {{ tpl . $ }}
{{- end }}

@ -16,7 +16,7 @@ metadata:
{{- include "jupyterhub.labels" . | nindent 4 }}
spec:
podManagementPolicy: Parallel
{{- if not (typeIs "<nil>" .Values.scheduling.userPlaceholder.revisionHistoryLimit) }}
{{- if typeIs "int" .Values.scheduling.userPlaceholder.revisionHistoryLimit }}
revisionHistoryLimit: {{ .Values.scheduling.userPlaceholder.revisionHistoryLimit }}
{{- end }}
replicas: {{ .Values.scheduling.userPlaceholder.replicas }}

@ -10,14 +10,26 @@ data:
This is configuration of a k8s official kube-scheduler binary running in the
user-scheduler.
The config version and kube-scheduler binary version has a fallback for k8s
clusters versioned v1.23 or lower because:
- v1 / v1beta3 config requires kube-scheduler binary >=1.25 / >=1.23
- kube-scheduler binary >=1.25 requires storage.k8s.io/v1/CSIStorageCapacity
available first in k8s >=1.24
ref: https://kubernetes.io/docs/reference/scheduling/config/
ref: https://kubernetes.io/docs/reference/config-api/kube-scheduler-config.v1/
ref: https://kubernetes.io/docs/reference/config-api/kube-scheduler-config.v1beta3/
*/}}
config.yaml: |
{{- if semverCompare ">=1.24.0-0" .Capabilities.KubeVersion.Version }}
apiVersion: kubescheduler.config.k8s.io/v1
{{- else }}
apiVersion: kubescheduler.config.k8s.io/v1beta3
{{- end }}
kind: KubeSchedulerConfiguration
leaderElection:
resourceLock: leases
resourceLock: endpointsleases
resourceName: {{ include "jupyterhub.user-scheduler-lock.fullname" . }}
resourceNamespace: "{{ .Release.Namespace }}"
profiles:
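
Rendered on a cluster at v1.24 or newer, the 3.2.1 template restored above would produce roughly the following (the resource names here are hypothetical renderings of the fullname helper and `.Release.Namespace`):

config.yaml: |
  apiVersion: kubescheduler.config.k8s.io/v1
  kind: KubeSchedulerConfiguration
  leaderElection:
    resourceLock: endpointsleases
    resourceName: user-scheduler-lock   # hypothetical helper output
    resourceNamespace: "jupyterhub"     # hypothetical release namespace
  profiles:
    # scoring-plugin profiles continue as configured in values.yaml

On clusters older than v1.24, the apiVersion falls back to kubescheduler.config.k8s.io/v1beta3, per the comment in the template.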

@ -6,7 +6,7 @@ metadata:
labels:
{{- include "jupyterhub.labels" . | nindent 4 }}
spec:
{{- if not (typeIs "<nil>" .Values.scheduling.userScheduler.revisionHistoryLimit) }}
{{- if typeIs "int" .Values.scheduling.userScheduler.revisionHistoryLimit }}
revisionHistoryLimit: {{ .Values.scheduling.userScheduler.revisionHistoryLimit }}
{{- end }}
replicas: {{ .Values.scheduling.userScheduler.replicas }}
@ -50,7 +50,17 @@ spec:
{{- end }}
containers:
- name: kube-scheduler
{{- if semverCompare ">=1.24.0-0" .Capabilities.KubeVersion.Version }}
image: {{ .Values.scheduling.userScheduler.image.name }}:{{ .Values.scheduling.userScheduler.image.tag }}
{{- else }}
# WARNING: The tag of this image is hardcoded, and the
# "scheduling.userScheduler.image.tag" configuration of the
# Helm chart that generated this resource manifest isn't
# respected. If you install the Helm chart in a k8s cluster
# versioned 1.24 or higher, your configuration will be
# respected.
image: {{ .Values.scheduling.userScheduler.image.name }}:v1.23.14
{{- end }}
{{- with .Values.scheduling.userScheduler.image.pullPolicy }}
imagePullPolicy: {{ . }}
{{- end }}

@ -20,12 +20,8 @@ rules:
# - changed in 1.21: get/list/watch permission for namespace,
# csidrivers, csistoragecapacities was added.
# - unchanged between 1.22 and 1.27
# - changed in 1.28: permissions to get/update lock endpoint resource
# removed
# - unchanged between 1.28 and 1.30
# - (1.31 is known to bring some changes below)
#
# ref: https://github.com/kubernetes/kubernetes/blob/v1.30.0/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml#L721-L862
# ref: https://github.com/kubernetes/kubernetes/blob/v1.27.0/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml#L736-L892
- apiGroups:
- ""
- events.k8s.io
@ -50,6 +46,21 @@ rules:
verbs:
- get
- update
- apiGroups:
- ""
resources:
- endpoints
verbs:
- create
- apiGroups:
- ""
resourceNames:
- {{ include "jupyterhub.user-scheduler-lock.fullname" . }}
resources:
- endpoints
verbs:
- get
- update
- apiGroups:
- ""
resources:
@ -172,9 +183,9 @@ rules:
# Copied from the system:volume-scheduler ClusterRole of the k8s version
# matching the kube-scheduler binary we use.
#
# NOTE: These rules have not changed between 1.12 and 1.29.
# NOTE: These rules have not changed between 1.12 and 1.27.
#
# ref: https://github.com/kubernetes/kubernetes/blob/v1.29.0/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml#L1283-L1310
# ref: https://github.com/kubernetes/kubernetes/blob/v1.27.0/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml#L1311-L1338
- apiGroups:
- ""
resources:

File diff suppressed because one or more lines are too long

@ -41,7 +41,6 @@ hub:
annotations: {}
ports:
nodePort:
appProtocol:
extraPorts: []
loadBalancerIP:
baseUrl: /
@ -85,21 +84,16 @@ hub:
extraVolumeMounts: []
image:
name: quay.io/jupyterhub/k8s-hub
tag: "4.0.0"
tag: "3.2.1"
pullPolicy:
pullSecrets: []
resources: {}
podSecurityContext:
runAsNonRoot: true
fsGroup: 1000
seccompProfile:
type: "RuntimeDefault"
containerSecurityContext:
runAsUser: 1000
runAsGroup: 1000
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
lifecycle: {}
loadRoles: {}
services: {}
@ -203,20 +197,15 @@ proxy:
chp:
revisionHistoryLimit:
containerSecurityContext:
runAsNonRoot: true
runAsUser: 65534 # nobody user
runAsGroup: 65534 # nobody group
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
seccompProfile:
type: "RuntimeDefault"
image:
name: quay.io/jupyterhub/configurable-http-proxy
# tag is automatically bumped to new patch versions by the
# watch-dependencies.yaml workflow.
#
tag: "4.6.2" # https://github.com/jupyterhub/configurable-http-proxy/tags
tag: "4.6.1" # https://github.com/jupyterhub/configurable-http-proxy/tags
pullPolicy:
pullSecrets: []
extraCommandLineFlags: []
@ -261,20 +250,15 @@ proxy:
traefik:
revisionHistoryLimit:
containerSecurityContext:
runAsNonRoot: true
runAsUser: 65534 # nobody user
runAsGroup: 65534 # nobody group
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
seccompProfile:
type: "RuntimeDefault"
image:
name: traefik
# tag is automatically bumped to new patch versions by the
# watch-dependencies.yaml workflow.
#
tag: "v3.2.0" # ref: https://hub.docker.com/_/traefik?tab=tags
tag: "v2.10.5" # ref: https://hub.docker.com/_/traefik?tab=tags
pullPolicy:
pullSecrets: []
hsts:
@ -316,17 +300,12 @@ proxy:
extraPodSpec: {}
secretSync:
containerSecurityContext:
runAsNonRoot: true
runAsUser: 65534 # nobody user
runAsGroup: 65534 # nobody group
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
seccompProfile:
type: "RuntimeDefault"
image:
name: quay.io/jupyterhub/k8s-secret-sync
tag: "4.0.0"
tag: "3.2.1"
pullPolicy:
pullSecrets: []
resources: {}
@ -366,7 +345,7 @@ singleuser:
networkTools:
image:
name: quay.io/jupyterhub/k8s-network-tools
tag: "4.0.0"
tag: "3.2.1"
pullPolicy:
pullSecrets: []
resources: {}
@ -413,13 +392,12 @@ singleuser:
homeMountPath: /home/jovyan
dynamic:
storageClass:
pvcNameTemplate:
volumeNameTemplate: volume-{user_server}
pvcNameTemplate: claim-{username}{servername}
volumeNameTemplate: volume-{username}{servername}
storageAccessModes: [ReadWriteOnce]
subPath:
image:
name: quay.io/jupyterhub/k8s-singleuser-sample
tag: "4.0.0"
tag: "3.2.1"
pullPolicy:
pullSecrets: []
startTimeout: 300
@ -454,24 +432,19 @@ scheduling:
#
plugins:
score:
# We make use of the default scoring plugins, but we re-enable some with
# a new priority, leave some enabled with their lower default priority,
# and disable some.
# These scoring plugins are enabled by default according to
# https://kubernetes.io/docs/reference/scheduling/config/#scheduling-plugins
# 2022-02-22.
#
# Below are the default scoring plugins as of 2024-09-23 according to
# https://kubernetes.io/docs/reference/scheduling/config/#scheduling-plugins.
#
# Re-enabled with high priority:
# Enabled with high priority:
# - NodeAffinity
# - InterPodAffinity
# - NodeResourcesFit
# - ImageLocality
#
# Remains enabled with low default priority:
# - TaintToleration
# - PodTopologySpread
# - VolumeBinding
#
# Disabled for scoring:
# - NodeResourcesBalancedAllocation
#
@ -500,25 +473,20 @@ scheduling:
- name: NodeResourcesFit
args:
scoringStrategy:
type: MostAllocated
resources:
- name: cpu
weight: 1
- name: memory
weight: 1
type: MostAllocated
containerSecurityContext:
runAsNonRoot: true
runAsUser: 65534 # nobody user
runAsGroup: 65534 # nobody group
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
seccompProfile:
type: "RuntimeDefault"
image:
# IMPORTANT: Bumping the minor version of this binary should go hand in
# hand with an inspection of the user-scheduler's RBAC
# resources that we have forked in
# hand with an inspection of the user-scheduler's RBAC resources
# that we have forked in
# templates/scheduling/user-scheduler/rbac.yaml.
#
# Debugging advice:
@ -551,7 +519,7 @@ scheduling:
# here. We aim to stay around 1 minor version behind the latest k8s
# version.
#
tag: "v1.30.6" # ref: https://github.com/kubernetes/kubernetes/tree/master/CHANGELOG
tag: "v1.26.11" # ref: https://github.com/kubernetes/kubernetes/tree/master/CHANGELOG
pullPolicy:
pullSecrets: []
nodeSelector: {}
@ -583,7 +551,7 @@ scheduling:
#
# If you update this, also update prePuller.pause.image.tag
#
tag: "3.10"
tag: "3.9"
pullPolicy:
pullSecrets: []
revisionHistoryLimit:
@ -591,14 +559,9 @@ scheduling:
labels: {}
annotations: {}
containerSecurityContext:
runAsNonRoot: true
runAsUser: 65534 # nobody user
runAsGroup: 65534 # nobody group
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
seccompProfile:
type: "RuntimeDefault"
resources: {}
corePods:
tolerations:
@ -632,14 +595,9 @@ prePuller:
annotations: {}
resources: {}
containerSecurityContext:
runAsNonRoot: true
runAsUser: 65534 # nobody user
runAsGroup: 65534 # nobody group
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
seccompProfile:
type: "RuntimeDefault"
extraTolerations: []
# hook relates to the hook-image-awaiter Job and hook-image-puller DaemonSet
hook:
@ -648,18 +606,13 @@ prePuller:
# image and the configuration below relates to the hook-image-awaiter Job
image:
name: quay.io/jupyterhub/k8s-image-awaiter
tag: "4.0.0"
tag: "3.2.1"
pullPolicy:
pullSecrets: []
containerSecurityContext:
runAsNonRoot: true
runAsUser: 65534 # nobody user
runAsGroup: 65534 # nobody group
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
seccompProfile:
type: "RuntimeDefault"
podSchedulingWaitDuration: 10
nodeSelector: {}
tolerations: []
@ -674,14 +627,9 @@ prePuller:
extraImages: {}
pause:
containerSecurityContext:
runAsNonRoot: true
runAsUser: 65534 # nobody user
runAsGroup: 65534 # nobody group
allowPrivilegeEscalation: false
capabilities:
drop: ["ALL"]
seccompProfile:
type: "RuntimeDefault"
image:
name: registry.k8s.io/pause
# tag is automatically bumped to new patch versions by the
@ -689,7 +637,7 @@ prePuller:
#
# If you update this, also update scheduling.userPlaceholder.image.tag
#
tag: "3.10"
tag: "3.9"
pullPolicy:
pullSecrets: []
@ -701,7 +649,6 @@ ingress:
pathSuffix:
pathType: Prefix
tls: []
extraPaths: []
# cull relates to the jupyterhub-idle-culler service, responsible for evicting
# inactive singleuser pods.

@ -1,15 +0,0 @@
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: letsencrypt
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: drive@sunet.se
privateKeySecretRef:
name: letsencrypt
solvers:
- http01:
ingress:
class: nginx

@ -4,6 +4,7 @@ kind: Ingress
metadata:
name: jupyterhub-ingress
annotations:
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
spec:

@ -1,4 +1,4 @@
---
resources: [jupyterhub-ingress.yml, jupyterhub-service.yml, jupyterhub-cert-issuer.yaml]
resources: [jupyterhub-ingress.yml, jupyterhub-service.yml]
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

@ -1,28 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: jupyterhub-ingress
annotations:
kubernetes.io/ingress.class: nginx
spec:
defaultBackend:
service:
name: proxy-public
port:
number: 80
ingressClassName: nginx
tls:
- hosts:
- vr-jupyter.drive.sunet.se
secretName: prod-tls-secret
rules:
- host: vr-jupyter.drive.sunet.se
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: proxy-public
port:
number: 80

@ -1,24 +0,0 @@
---
apiVersion: v1
items:
- apiVersion: v1
kind: Service
metadata:
labels:
app: jupyterhub-node
name: jupyterhub-node
spec:
ports:
- port: 8080
protocol: TCP
targetPort: 8080
selector:
app: jupyterhub-node
sessionAffinity: None
type: ClusterIP
status:
loadBalancer: {}
kind: List
metadata:
resourceVersion: ""
selfLink: ""

@ -1,16 +0,0 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources: [../../../base/]
helmCharts:
- includeCRDs: true
name: jupyterhub
releaseName: vr-jupyterhub
valuesFile: ./values/values.yaml
version: 3.2.1
namespace: vr-jupyterhub
helmGlobals:
chartHome: ../../../base/charts/
patches:
- path: jupyterhub-ingress.yml
- path: jupyterhub-service.yml

@ -1,335 +0,0 @@
debug:
enabled: true
hub:
config:
Authenticator:
auto_login: true
enable_auth_state: true
JupyterHub:
tornado_settings:
headers: { 'Content-Security-Policy': "frame-ancestors *;" }
db:
pvc:
storageClassName: csi-sc-cinderplugin
extraConfig:
oauthCode: |
import time
import requests
from datetime import datetime
from oauthenticator.generic import GenericOAuthenticator
token_url = 'https://' + os.environ['NEXTCLOUD_HOST'] + '/index.php/apps/oauth2/api/v1/token'
debug = os.environ.get('NEXTCLOUD_DEBUG_OAUTH', 'false').lower() in ['true', '1', 'yes']
def get_nextcloud_access_token(refresh_token):
client_id = os.environ['NEXTCLOUD_CLIENT_ID']
client_secret = os.environ['NEXTCLOUD_CLIENT_SECRET']
code = refresh_token
data = {
'grant_type': 'refresh_token',
'code': code,
'refresh_token': refresh_token,
'client_id': client_id,
'client_secret': client_secret
}
response = requests.post(token_url, data=data)
if debug:
print(response.text)
return response.json()
def post_auth_hook(authenticator, handler, authentication):
user = authentication['auth_state']['oauth_user']['ocs']['data']['id']
auth_state = authentication['auth_state']
auth_state['token_expires'] = time.time() + auth_state['token_response']['expires_in']
authentication['auth_state'] = auth_state
return authentication
class NextcloudOAuthenticator(GenericOAuthenticator):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.user_dict = {}
async def pre_spawn_start(self, user, spawner):
super().pre_spawn_start(user, spawner)
auth_state = await user.get_auth_state()
if not auth_state:
return
access_token = auth_state['access_token']
spawner.environment['NEXTCLOUD_ACCESS_TOKEN'] = access_token
async def refresh_user(self, user, handler=None):
auth_state = await user.get_auth_state()
if not auth_state:
if debug:
print(f'auth_state missing for {user}')
return False
access_token = auth_state['access_token']
refresh_token = auth_state['refresh_token']
token_response = auth_state['token_response']
now = time.time()
now_hr = datetime.fromtimestamp(now)
expires = auth_state['token_expires']
expires_hr = datetime.fromtimestamp(expires)
expires = 0
if debug:
print(f'auth_state for {user}: {auth_state}')
if now >= expires:
if debug:
print(f'Time is: {now_hr}, token expired: {expires_hr}')
print(f'Refreshing token for {user}')
try:
token_response = get_nextcloud_access_token(refresh_token)
auth_state['access_token'] = token_response['access_token']
auth_state['refresh_token'] = token_response['refresh_token']
auth_state['token_expires'] = now + token_response['expires_in']
auth_state['token_response'] = token_response
if debug:
print(f'Successfully refreshed token for {user.name}')
print(f'auth_state for {user.name}: {auth_state}')
return {'name': user.name, 'auth_state': auth_state}
except Exception as e:
if debug:
print(f'Failed to refresh token for {user}')
return False
return False
if debug:
print(f'Time is: {now_hr}, token expires: {expires_hr}')
return True
c.JupyterHub.authenticator_class = NextcloudOAuthenticator
c.NextcloudOAuthenticator.client_id = os.environ['NEXTCLOUD_CLIENT_ID']
c.NextcloudOAuthenticator.client_secret = os.environ['NEXTCLOUD_CLIENT_SECRET']
c.NextcloudOAuthenticator.login_service = 'Sunet Drive'
c.NextcloudOAuthenticator.username_claim = lambda r: r.get('ocs', {}).get('data', {}).get('id')
c.NextcloudOAuthenticator.userdata_url = 'https://' + os.environ['NEXTCLOUD_HOST'] + '/ocs/v2.php/cloud/user?format=json'
c.NextcloudOAuthenticator.authorize_url = 'https://' + os.environ['NEXTCLOUD_HOST'] + '/index.php/apps/oauth2/authorize'
c.NextcloudOAuthenticator.token_url = token_url
c.NextcloudOAuthenticator.oauth_callback_url = 'https://' + os.environ['JUPYTER_HOST'] + '/hub/oauth_callback'
c.NextcloudOAuthenticator.allow_all = True
c.NextcloudOAuthenticator.refresh_pre_spawn = True
c.NextcloudOAuthenticator.enable_auth_state = True
c.NextcloudOAuthenticator.auth_refresh_age = 3600
c.NextcloudOAuthenticator.post_auth_hook = post_auth_hook
serviceCode: |
import sys
c.JupyterHub.load_roles = [
{
"name": "refresh-token",
"services": [
"refresh-token"
],
"scopes": [
"read:users",
"admin:auth_state"
]
},
{
"name": "user",
"scopes": [
"access:services!service=refresh-token",
"read:services!service=refresh-token",
"self",
],
},
{
"name": "server",
"scopes": [
"access:services!service=refresh-token",
"read:services!service=refresh-token",
"inherit",
],
}
]
c.JupyterHub.services = [
{
'name': 'refresh-token',
'url': 'http://' + os.environ.get('HUB_SERVICE_HOST', 'hub') + ':' + os.environ.get('HUB_SERVICE_PORT_REFRESH_TOKEN', '8082'),
'display': False,
'oauth_no_confirm': True,
'api_token': os.environ['JUPYTERHUB_API_KEY'],
'command': [sys.executable, '/usr/local/etc/jupyterhub/refresh-token.py']
}
]
c.JupyterHub.admin_users = {"refresh-token"}
c.JupyterHub.api_tokens = {
os.environ['JUPYTERHUB_API_KEY']: "refresh-token",
}
extraFiles:
refresh-token.py:
mountPath: /usr/local/etc/jupyterhub/refresh-token.py
stringData: |
"""A token refresh service authenticating with the Hub.
This service serves `/services/refresh-token/`,
authenticated with the Hub,
showing the user their own info.
"""
import json
import os
import requests
import socket
from jupyterhub.services.auth import HubAuthenticated
from jupyterhub.utils import url_path_join
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.web import Application, HTTPError, RequestHandler, authenticated
from urllib.parse import urlparse
debug = os.environ.get('NEXTCLOUD_DEBUG_OAUTH', 'false').lower() in ['true', '1', 'yes']
def my_debug(s):
if debug:
with open("/proc/1/fd/1", "a") as stdout:
print(s, file=stdout)
class RefreshHandler(HubAuthenticated, RequestHandler):
def api_request(self, method, url, **kwargs):
my_debug(f'{self.hub_auth}')
url = url_path_join(self.hub_auth.api_url, url)
allow_404 = kwargs.pop('allow_404', False)
headers = kwargs.setdefault('headers', {})
headers.setdefault('Authorization', f'token {self.hub_auth.api_token}')
try:
r = requests.request(method, url, **kwargs)
except requests.ConnectionError as e:
my_debug(f'Error connecting to {url}: {e}')
msg = f'Failed to connect to Hub API at {url}.'
msg += f' Is the Hub accessible at this URL (from host: {socket.gethostname()})?'
if '127.0.0.1' in url:
msg += ' Make sure to set c.JupyterHub.hub_ip to an IP accessible to' + \
' single-user servers if the servers are not on the same host as the Hub.'
raise HTTPError(500, msg)
data = None
if r.status_code == 404 and allow_404:
pass
elif r.status_code == 403:
my_debug(
'Lacking permission to check authorization with JupyterHub,' +
f' my auth token may have expired: [{r.status_code}] {r.reason}'
)
my_debug(r.text)
raise HTTPError(
500,
'Permission failure checking authorization, I may need a new token'
)
elif r.status_code >= 500:
my_debug(f'Upstream failure verifying auth token: [{r.status_code}] {r.reason}')
my_debug(r.text)
raise HTTPError(
502, 'Failed to check authorization (upstream problem)')
elif r.status_code >= 400:
my_debug(f'Failed to check authorization: [{r.status_code}] {r.reason}')
my_debug(r.text)
raise HTTPError(500, 'Failed to check authorization')
else:
data = r.json()
return data
@authenticated
def get(self):
user_model = self.get_current_user()
# Fetch current auth state
user_data = self.api_request('GET', url_path_join('users', user_model['name']))
auth_state = user_data['auth_state']
access_token = auth_state['access_token']
token_expires = auth_state['token_expires']
self.set_header('content-type', 'application/json')
self.write(json.dumps({'access_token': access_token, 'token_expires': token_expires}, indent=1, sort_keys=True))
class PingHandler(RequestHandler):
def get(self):
my_debug(f"DEBUG: In ping get")
self.set_header('content-type', 'application/json')
self.write(json.dumps({'ping': 1}))
def main():
app = Application([
(os.environ['JUPYTERHUB_SERVICE_PREFIX'] + 'tokens', RefreshHandler),
(os.environ['JUPYTERHUB_SERVICE_PREFIX'] + '/?', PingHandler),
])
http_server = HTTPServer(app)
url = urlparse(os.environ['JUPYTERHUB_SERVICE_URL'])
http_server.listen(url.port)
IOLoop.current().start()
if __name__ == '__main__':
main()
networkPolicy:
ingress:
- ports:
- port: 8082
from:
- podSelector:
matchLabels:
hub.jupyter.org/network-access-hub: "true"
service:
extraPorts:
- port: 8082
targetPort: 8082
name: refresh-token
extraEnv:
NEXTCLOUD_DEBUG_OAUTH: "no"
NEXTCLOUD_HOST: vr.drive.sunet.se
JUPYTER_HOST: vr-jupyter.drive.sunet.se
JUPYTERHUB_API_KEY:
valueFrom:
secretKeyRef:
name: jupyterhub-secrets
key: api-key
JUPYTERHUB_CRYPT_KEY:
valueFrom:
secretKeyRef:
name: jupyterhub-secrets
key: crypt-key
NEXTCLOUD_CLIENT_ID:
valueFrom:
secretKeyRef:
name: nextcloud-oauth-secrets
key: client-id
NEXTCLOUD_CLIENT_SECRET:
valueFrom:
secretKeyRef:
name: nextcloud-oauth-secrets
key: client-secret
proxy:
chp:
networkPolicy:
egress:
- to:
- podSelector:
matchLabels:
app: jupyterhub
component: hub
ports:
- port: 8082
singleuser:
image:
name: docker.sunet.se/drive/jupyter-custom
tag: lab-4.0.10-sunet4
storage:
dynamic:
storageClass: csi-sc-cinderplugin
extraEnv:
JUPYTER_ENABLE_LAB: "yes"
JUPYTER_HOST: vr-jupyter.drive.sunet.se
NEXTCLOUD_HOST: vr.drive.sunet.se
extraFiles:
jupyter_notebook_config:
mountPath: /home/jovyan/.jupyter/jupyter_server_config.py
stringData: |
import os
c = get_config()
c.NotebookApp.allow_origin = '*'
c.NotebookApp.tornado_settings = {
'headers': { 'Content-Security-Policy': "frame-ancestors *;" }
}
os.system('/usr/local/bin/nc-sync')
mode: 0644

@ -1,32 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: jupyterhub-ingress
annotations:
cert-manager.io/issuer: "letsencrypt"
acme.cert-manager.io/http01-edit-in-place: "true"
kubernetes.io/ingress.class: nginx
spec:
ingressClassName: nginx
defaultBackend:
service:
name: proxy-public
port:
number: 80
tls:
- hosts:
- sunet-jupyter.drive.sunet.se
secretName: tls-secret
rules:
- host: sunet-jupyter.drive.sunet.se
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: proxy-public
port:
number: 80

@ -1,24 +0,0 @@
---
apiVersion: v1
items:
- apiVersion: v1
kind: Service
metadata:
labels:
app: jupyterhub-node
name: jupyterhub-node
spec:
ports:
- port: 8080
protocol: TCP
targetPort: 8080
selector:
app: jupyterhub-node
sessionAffinity: None
type: ClusterIP
status:
loadBalancer: {}
kind: List
metadata:
resourceVersion: ""
selfLink: ""

@ -1,16 +0,0 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources: [../../../base/]
helmCharts:
- includeCRDs: true
name: jupyterhub
releaseName: sunet-jupyterhub
valuesFile: ./values/values.yaml
version: 3.2.1
namespace: sunet-jupyterhub
helmGlobals:
chartHome: ../../../base/charts/
patches:
- path: jupyterhub-ingress.yml
- path: jupyterhub-service.yml

@ -1,337 +0,0 @@
debug:
enabled: true
hub:
config:
Authenticator:
auto_login: true
enable_auth_state: true
JupyterHub:
tornado_settings:
headers: { 'Content-Security-Policy': "frame-ancestors *;" }
db:
pvc:
storageClassName: csi-sc-cinderplugin
extraConfig:
oauthCode: |
import time
import requests
from datetime import datetime
from oauthenticator.generic import GenericOAuthenticator
token_url = 'https://' + os.environ['NEXTCLOUD_HOST'] + '/index.php/apps/oauth2/api/v1/token'
debug = os.environ.get('NEXTCLOUD_DEBUG_OAUTH', 'false').lower() in ['true', '1', 'yes']
def get_nextcloud_access_token(refresh_token):
client_id = os.environ['NEXTCLOUD_CLIENT_ID']
client_secret = os.environ['NEXTCLOUD_CLIENT_SECRET']
code = refresh_token
data = {
'grant_type': 'refresh_token',
'code': code,
'refresh_token': refresh_token,
'client_id': client_id,
'client_secret': client_secret
}
response = requests.post(token_url, data=data)
if debug:
print(response.text)
return response.json()
def post_auth_hook(authenticator, handler, authentication):
user = authentication['auth_state']['oauth_user']['ocs']['data']['id']
auth_state = authentication['auth_state']
auth_state['token_expires'] = time.time() + auth_state['token_response']['expires_in']
authentication['auth_state'] = auth_state
return authentication
class NextcloudOAuthenticator(GenericOAuthenticator):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.user_dict = {}
async def pre_spawn_start(self, user, spawner):
super().pre_spawn_start(user, spawner)
auth_state = await user.get_auth_state()
if not auth_state:
return
access_token = auth_state['access_token']
spawner.environment['NEXTCLOUD_ACCESS_TOKEN'] = access_token
async def refresh_user(self, user, handler=None):
auth_state = await user.get_auth_state()
if not auth_state:
if debug:
print(f'auth_state missing for {user}')
return False
access_token = auth_state['access_token']
refresh_token = auth_state['refresh_token']
token_response = auth_state['token_response']
now = time.time()
now_hr = datetime.fromtimestamp(now)
expires = auth_state['token_expires']
expires_hr = datetime.fromtimestamp(expires)
expires = 0
if debug:
print(f'auth_state for {user}: {auth_state}')
if now >= expires:
if debug:
print(f'Time is: {now_hr}, token expired: {expires_hr}')
print(f'Refreshing token for {user}')
try:
token_response = get_nextcloud_access_token(refresh_token)
auth_state['access_token'] = token_response['access_token']
auth_state['refresh_token'] = token_response['refresh_token']
auth_state['token_expires'] = now + token_response['expires_in']
auth_state['token_response'] = token_response
if debug:
print(f'Successfully refreshed token for {user.name}')
print(f'auth_state for {user.name}: {auth_state}')
return {'name': user.name, 'auth_state': auth_state}
except Exception as e:
if debug:
print(f'Failed to refresh token for {user}')
return False
return False
if debug:
print(f'Time is: {now_hr}, token expires: {expires_hr}')
return True
c.JupyterHub.authenticator_class = NextcloudOAuthenticator
c.NextcloudOAuthenticator.client_id = os.environ['NEXTCLOUD_CLIENT_ID']
c.NextcloudOAuthenticator.client_secret = os.environ['NEXTCLOUD_CLIENT_SECRET']
c.NextcloudOAuthenticator.login_service = 'Sunet Drive'
c.NextcloudOAuthenticator.username_claim = lambda r: r.get('ocs', {}).get('data', {}).get('id')
c.NextcloudOAuthenticator.userdata_url = 'https://' + os.environ['NEXTCLOUD_HOST'] + '/ocs/v2.php/cloud/user?format=json'
c.NextcloudOAuthenticator.authorize_url = 'https://' + os.environ['NEXTCLOUD_HOST'] + '/index.php/apps/oauth2/authorize'
c.NextcloudOAuthenticator.token_url = token_url
c.NextcloudOAuthenticator.oauth_callback_url = 'https://' + os.environ['JUPYTER_HOST'] + '/hub/oauth_callback'
c.NextcloudOAuthenticator.allow_all = True
c.NextcloudOAuthenticator.refresh_pre_spawn = True
c.NextcloudOAuthenticator.enable_auth_state = True
c.NextcloudOAuthenticator.auth_refresh_age = 3600
c.NextcloudOAuthenticator.post_auth_hook = post_auth_hook
serviceCode: |
import sys
c.JupyterHub.load_roles = [
{
"name": "refresh-token",
"services": [
"refresh-token"
],
"scopes": [
"read:users",
"admin:auth_state"
]
},
{
"name": "user",
"scopes": [
"access:services!service=refresh-token",
"read:services!service=refresh-token",
"self",
],
},
{
"name": "server",
"scopes": [
"access:services!service=refresh-token",
"read:services!service=refresh-token",
"inherit",
],
}
]
c.JupyterHub.services = [
{
'name': 'refresh-token',
'url': 'http://' + os.environ.get('HUB_SERVICE_HOST', 'hub') + ':' + os.environ.get('HUB_SERVICE_PORT_REFRESH_TOKEN', '8082'),
'display': False,
'oauth_no_confirm': True,
'api_token': os.environ['JUPYTERHUB_API_KEY'],
'command': [sys.executable, '/usr/local/etc/jupyterhub/refresh-token.py']
}
]
c.JupyterHub.admin_users = {"refresh-token"}
c.JupyterHub.api_tokens = {
os.environ['JUPYTERHUB_API_KEY']: "refresh-token",
}
extraFiles:
refresh-token.py:
mountPath: /usr/local/etc/jupyterhub/refresh-token.py
stringData: |
"""A token refresh service authenticating with the Hub.
This service serves `/services/refresh-token/`,
authenticated with the Hub,
showing the user their own info.
"""
import json
import os
import requests
import socket
from jupyterhub.services.auth import HubAuthenticated
from jupyterhub.utils import url_path_join
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.web import Application, HTTPError, RequestHandler, authenticated
from urllib.parse import urlparse
debug = os.environ.get('NEXTCLOUD_DEBUG_OAUTH', 'false').lower() in ['true', '1', 'yes']
def my_debug(s):
if debug:
with open("/proc/1/fd/1", "a") as stdout:
print(s, file=stdout)
class RefreshHandler(HubAuthenticated, RequestHandler):
def api_request(self, method, url, **kwargs):
my_debug(f'{self.hub_auth}')
url = url_path_join(self.hub_auth.api_url, url)
allow_404 = kwargs.pop('allow_404', False)
headers = kwargs.setdefault('headers', {})
headers.setdefault('Authorization', f'token {self.hub_auth.api_token}')
try:
r = requests.request(method, url, **kwargs)
except requests.ConnectionError as e:
my_debug(f'Error connecting to {url}: {e}')
msg = f'Failed to connect to Hub API at {url}.'
msg += f' Is the Hub accessible at this URL (from host: {socket.gethostname()})?'
if '127.0.0.1' in url:
msg += ' Make sure to set c.JupyterHub.hub_ip to an IP accessible to' + \
' single-user servers if the servers are not on the same host as the Hub.'
raise HTTPError(500, msg)
data = None
if r.status_code == 404 and allow_404:
pass
elif r.status_code == 403:
my_debug(
'Lacking permission to check authorization with JupyterHub,' +
f' my auth token may have expired: [{r.status_code}] {r.reason}'
)
my_debug(r.text)
raise HTTPError(
500,
'Permission failure checking authorization, I may need a new token'
)
elif r.status_code >= 500:
my_debug(f'Upstream failure verifying auth token: [{r.status_code}] {r.reason}')
my_debug(r.text)
raise HTTPError(
502, 'Failed to check authorization (upstream problem)')
elif r.status_code >= 400:
my_debug(f'Failed to check authorization: [{r.status_code}] {r.reason}')
my_debug(r.text)
raise HTTPError(500, 'Failed to check authorization')
else:
data = r.json()
return data
@authenticated
def get(self):
user_model = self.get_current_user()
# Fetch current auth state
user_data = self.api_request('GET', url_path_join('users', user_model['name']))
auth_state = user_data['auth_state']
access_token = auth_state['access_token']
token_expires = auth_state['token_expires']
self.set_header('content-type', 'application/json')
self.write(json.dumps({'access_token': access_token, 'token_expires': token_expires}, indent=1, sort_keys=True))
class PingHandler(RequestHandler):
def get(self):
my_debug(f"DEBUG: In ping get")
self.set_header('content-type', 'application/json')
self.write(json.dumps({'ping': 1}))
def main():
app = Application([
(os.environ['JUPYTERHUB_SERVICE_PREFIX'] + 'tokens', RefreshHandler),
(os.environ['JUPYTERHUB_SERVICE_PREFIX'] + '/?', PingHandler),
])
http_server = HTTPServer(app)
url = urlparse(os.environ['JUPYTERHUB_SERVICE_URL'])
http_server.listen(url.port)
IOLoop.current().start()
if __name__ == '__main__':
main()
networkPolicy:
ingress:
- ports:
- port: 8082
from:
- podSelector:
matchLabels:
hub.jupyter.org/network-access-hub: "true"
service:
extraPorts:
- port: 8082
targetPort: 8082
name: refresh-token
extraEnv:
NEXTCLOUD_DEBUG_OAUTH: "no"
NEXTCLOUD_HOST: sunet.drive.sunet.se
JUPYTER_HOST: sunet-jupyter.drive.sunet.se
JUPYTERHUB_API_KEY:
valueFrom:
secretKeyRef:
name: jupyterhub-secrets
key: api-key
JUPYTERHUB_CRYPT_KEY:
valueFrom:
secretKeyRef:
name: jupyterhub-secrets
key: crypt-key
NEXTCLOUD_CLIENT_ID:
valueFrom:
secretKeyRef:
name: nextcloud-oauth-secrets
key: client-id
NEXTCLOUD_CLIENT_SECRET:
valueFrom:
secretKeyRef:
name: nextcloud-oauth-secrets
key: client-secret
networkPolicy:
enabled: false
proxy:
chp:
networkPolicy:
egress:
- to:
- podSelector:
matchLabels:
app: jupyterhub
component: hub
ports:
- port: 8082
singleuser:
image:
name: docker.sunet.se/drive/jupyter-custom
tag: lab-4.0.10-sunet5
storage:
dynamic:
storageClass: csi-sc-cinderplugin
extraEnv:
JUPYTER_ENABLE_LAB: "yes"
JUPYTER_HOST: sunet-jupyter.drive.sunet.se
NEXTCLOUD_HOST: sunet.drive.sunet.se
extraFiles:
jupyter_notebook_config:
mountPath: /home/jovyan/.jupyter/jupyter_server_config.py
stringData: |
import os
c = get_config()
c.NotebookApp.allow_origin = '*'
c.NotebookApp.tornado_settings = {
'headers': { 'Content-Security-Policy': "frame-ancestors *;" }
}
os.system('/usr/local/bin/nc-sync')
mode: 0644

@ -1,30 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: jupyterhub-ingress
annotations:
cert-manager.io/issuer: "letsencrypt"
acme.cert-manager.io/http01-edit-in-place: "true"
spec:
defaultBackend:
service:
name: proxy-public
port:
number: 8443
tls:
- hosts:
- vr-jupyter.drive.sunet.se
secretName: tls-secret
ingressClassName: nginx
rules:
- host: vr-jupyter.drive.sunet.se
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: proxy-public
port:
number: 80

@ -1,25 +0,0 @@
---
apiVersion: v1
items:
- apiVersion: v1
kind: Service
metadata:
labels:
app: jupyterhub-node
name: jupyterhub-node
spec:
ports:
- port: 8080
protocol: TCP
targetPort: 8080
selector:
app: jupyterhub-node
sessionAffinity: None
type: ClusterIP
status:
loadBalancer: {}
kind: List
metadata:
resourceVersion: ""
selfLink: ""

@ -1,16 +0,0 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources: [../../../base/]
helmCharts:
- includeCRDs: true
name: jupyterhub
releaseName: vr-jupyterhub
valuesFile: ./values/values.yaml
version: 4.0.0
namespace: vr-jupyterhub
helmGlobals:
chartHome: ../../../base/charts/
patches:
- path: jupyterhub-ingress.yml
- path: jupyterhub-service.yml

@ -1,337 +0,0 @@
debug:
enabled: true
hub:
config:
Authenticator:
auto_login: true
enable_auth_state: true
JupyterHub:
tornado_settings:
headers: { 'Content-Security-Policy': "frame-ancestors *;" }
db:
pvc:
storageClassName: csi-sc-cinderplugin
extraConfig:
oauthCode: |
import time
import requests
from datetime import datetime
from oauthenticator.generic import GenericOAuthenticator
token_url = 'https://' + os.environ['NEXTCLOUD_HOST'] + '/index.php/apps/oauth2/api/v1/token'
debug = os.environ.get('NEXTCLOUD_DEBUG_OAUTH', 'false').lower() in ['true', '1', 'yes']
def get_nextcloud_access_token(refresh_token):
client_id = os.environ['NEXTCLOUD_CLIENT_ID']
client_secret = os.environ['NEXTCLOUD_CLIENT_SECRET']
code = refresh_token
data = {
'grant_type': 'refresh_token',
'code': code,
'refresh_token': refresh_token,
'client_id': client_id,
'client_secret': client_secret
}
response = requests.post(token_url, data=data)
if debug:
print(response.text)
return response.json()
def post_auth_hook(authenticator, handler, authentication):
user = authentication['auth_state']['oauth_user']['ocs']['data']['id']
auth_state = authentication['auth_state']
auth_state['token_expires'] = time.time() + auth_state['token_response']['expires_in']
authentication['auth_state'] = auth_state
return authentication
class NextcloudOAuthenticator(GenericOAuthenticator):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.user_dict = {}
async def pre_spawn_start(self, user, spawner):
super().pre_spawn_start(user, spawner)
auth_state = await user.get_auth_state()
if not auth_state:
return
access_token = auth_state['access_token']
spawner.environment['NEXTCLOUD_ACCESS_TOKEN'] = access_token
async def refresh_user(self, user, handler=None):
auth_state = await user.get_auth_state()
if not auth_state:
if debug:
print(f'auth_state missing for {user}')
return False
access_token = auth_state['access_token']
refresh_token = auth_state['refresh_token']
token_response = auth_state['token_response']
now = time.time()
now_hr = datetime.fromtimestamp(now)
expires = auth_state['token_expires']
expires_hr = datetime.fromtimestamp(expires)
          expires = 0  # forces 'now >= expires' below, i.e. a refresh attempt on every call
if debug:
print(f'auth_state for {user}: {auth_state}')
if now >= expires:
if debug:
print(f'Time is: {now_hr}, token expired: {expires_hr}')
print(f'Refreshing token for {user}')
try:
token_response = get_nextcloud_access_token(refresh_token)
auth_state['access_token'] = token_response['access_token']
auth_state['refresh_token'] = token_response['refresh_token']
auth_state['token_expires'] = now + token_response['expires_in']
auth_state['token_response'] = token_response
if debug:
print(f'Successfully refreshed token for {user.name}')
print(f'auth_state for {user.name}: {auth_state}')
return {'name': user.name, 'auth_state': auth_state}
except Exception as e:
if debug:
                print(f'Failed to refresh token for {user}: {e}')
return False
return False
if debug:
print(f'Time is: {now_hr}, token expires: {expires_hr}')
return True
c.JupyterHub.authenticator_class = NextcloudOAuthenticator
c.NextcloudOAuthenticator.client_id = os.environ['NEXTCLOUD_CLIENT_ID']
c.NextcloudOAuthenticator.client_secret = os.environ['NEXTCLOUD_CLIENT_SECRET']
c.NextcloudOAuthenticator.login_service = 'Sunet Drive'
c.NextcloudOAuthenticator.username_claim = lambda r: r.get('ocs', {}).get('data', {}).get('id')
c.NextcloudOAuthenticator.userdata_url = 'https://' + os.environ['NEXTCLOUD_HOST'] + '/ocs/v2.php/cloud/user?format=json'
c.NextcloudOAuthenticator.authorize_url = 'https://' + os.environ['NEXTCLOUD_HOST'] + '/index.php/apps/oauth2/authorize'
c.NextcloudOAuthenticator.token_url = token_url
c.NextcloudOAuthenticator.oauth_callback_url = 'https://' + os.environ['JUPYTER_HOST'] + '/hub/oauth_callback'
c.NextcloudOAuthenticator.allow_all = True
c.NextcloudOAuthenticator.refresh_pre_spawn = True
c.NextcloudOAuthenticator.enable_auth_state = True
c.NextcloudOAuthenticator.auth_refresh_age = 3600
c.NextcloudOAuthenticator.post_auth_hook = post_auth_hook
serviceCode: |
      import os
      import sys
c.JupyterHub.load_roles = [
{
"name": "refresh-token",
"services": [
"refresh-token"
],
"scopes": [
"read:users",
"admin:auth_state"
]
},
{
"name": "user",
"scopes": [
"access:services!service=refresh-token",
"read:services!service=refresh-token",
"self",
],
},
{
"name": "server",
"scopes": [
"access:services!service=refresh-token",
"read:services!service=refresh-token",
"inherit",
],
}
]
c.JupyterHub.services = [
{
'name': 'refresh-token',
'url': 'http://' + os.environ.get('HUB_SERVICE_HOST', 'hub') + ':' + os.environ.get('HUB_SERVICE_PORT_REFRESH_TOKEN', '8082'),
'display': False,
'oauth_no_confirm': True,
'api_token': os.environ['JUPYTERHUB_API_KEY'],
'command': [sys.executable, '/usr/local/etc/jupyterhub/refresh-token.py']
}
]
c.JupyterHub.admin_users = {"refresh-token"}
c.JupyterHub.api_tokens = {
os.environ['JUPYTERHUB_API_KEY']: "refresh-token",
}
extraFiles:
refresh-token.py:
mountPath: /usr/local/etc/jupyterhub/refresh-token.py
stringData: |
"""A token refresh service authenticating with the Hub.
This service serves `/services/refresh-token/`,
authenticated with the Hub,
showing the user their own info.
"""
import json
import os
import requests
import socket
from jupyterhub.services.auth import HubAuthenticated
from jupyterhub.utils import url_path_join
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.web import Application, HTTPError, RequestHandler, authenticated
from urllib.parse import urlparse
debug = os.environ.get('NEXTCLOUD_DEBUG_OAUTH', 'false').lower() in ['true', '1', 'yes']
def my_debug(s):
if debug:
with open("/proc/1/fd/1", "a") as stdout:
print(s, file=stdout)
class RefreshHandler(HubAuthenticated, RequestHandler):
def api_request(self, method, url, **kwargs):
my_debug(f'{self.hub_auth}')
url = url_path_join(self.hub_auth.api_url, url)
allow_404 = kwargs.pop('allow_404', False)
headers = kwargs.setdefault('headers', {})
headers.setdefault('Authorization', f'token {self.hub_auth.api_token}')
try:
r = requests.request(method, url, **kwargs)
except requests.ConnectionError as e:
my_debug(f'Error connecting to {url}: {e}')
msg = f'Failed to connect to Hub API at {url}.'
msg += f' Is the Hub accessible at this URL (from host: {socket.gethostname()})?'
if '127.0.0.1' in url:
msg += ' Make sure to set c.JupyterHub.hub_ip to an IP accessible to' + \
' single-user servers if the servers are not on the same host as the Hub.'
raise HTTPError(500, msg)
data = None
if r.status_code == 404 and allow_404:
pass
elif r.status_code == 403:
my_debug(
'Lacking permission to check authorization with JupyterHub,' +
f' my auth token may have expired: [{r.status_code}] {r.reason}'
)
my_debug(r.text)
raise HTTPError(
500,
'Permission failure checking authorization, I may need a new token'
)
elif r.status_code >= 500:
my_debug(f'Upstream failure verifying auth token: [{r.status_code}] {r.reason}')
my_debug(r.text)
raise HTTPError(
502, 'Failed to check authorization (upstream problem)')
elif r.status_code >= 400:
my_debug(f'Failed to check authorization: [{r.status_code}] {r.reason}')
my_debug(r.text)
raise HTTPError(500, 'Failed to check authorization')
else:
data = r.json()
return data
@authenticated
def get(self):
user_model = self.get_current_user()
# Fetch current auth state
user_data = self.api_request('GET', url_path_join('users', user_model['name']))
auth_state = user_data['auth_state']
access_token = auth_state['access_token']
token_expires = auth_state['token_expires']
self.set_header('content-type', 'application/json')
self.write(json.dumps({'access_token': access_token, 'token_expires': token_expires}, indent=1, sort_keys=True))
class PingHandler(RequestHandler):
def get(self):
my_debug(f"DEBUG: In ping get")
self.set_header('content-type', 'application/json')
self.write(json.dumps({'ping': 1}))
def main():
app = Application([
(os.environ['JUPYTERHUB_SERVICE_PREFIX'] + 'tokens', RefreshHandler),
(os.environ['JUPYTERHUB_SERVICE_PREFIX'] + '/?', PingHandler),
])
http_server = HTTPServer(app)
url = urlparse(os.environ['JUPYTERHUB_SERVICE_URL'])
http_server.listen(url.port)
IOLoop.current().start()
if __name__ == '__main__':
main()
networkPolicy:
ingress:
- ports:
- port: 8082
from:
- podSelector:
matchLabels:
hub.jupyter.org/network-access-hub: "true"
service:
extraPorts:
- port: 8082
targetPort: 8082
name: refresh-token
extraEnv:
NEXTCLOUD_DEBUG_OAUTH: "no"
NEXTCLOUD_HOST: vr.drive.sunet.se
JUPYTER_HOST: vr-jupyter.drive.sunet.se
JUPYTERHUB_API_KEY:
valueFrom:
secretKeyRef:
name: jupyterhub-secrets
key: api-key
JUPYTERHUB_CRYPT_KEY:
valueFrom:
secretKeyRef:
name: jupyterhub-secrets
key: crypt-key
NEXTCLOUD_CLIENT_ID:
valueFrom:
secretKeyRef:
name: nextcloud-oauth-secrets
key: client-id
NEXTCLOUD_CLIENT_SECRET:
valueFrom:
secretKeyRef:
name: nextcloud-oauth-secrets
key: client-secret
networkPolicy:
enabled: false
proxy:
chp:
networkPolicy:
egress:
- to:
- podSelector:
matchLabels:
app: jupyterhub
component: hub
ports:
- port: 8082
singleuser:
image:
name: docker.sunet.se/drive/jupyter-custom
tag: lab-4.0.10-sunet4
storage:
dynamic:
storageClass: csi-sc-cinderplugin
extraEnv:
JUPYTER_ENABLE_LAB: "yes"
JUPYTER_HOST: vr-jupyter.drive.sunet.se
NEXTCLOUD_HOST: vr.drive.sunet.se
extraFiles:
jupyter_notebook_config:
mountPath: /home/jovyan/.jupyter/jupyter_server_config.py
stringData: |
import os
c = get_config()
c.NotebookApp.allow_origin = '*'
c.NotebookApp.tornado_settings = {
'headers': { 'Content-Security-Policy': "frame-ancestors *;" }
}
os.system('/usr/local/bin/nc-sync')
mode: 0644
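For reference, a standalone sketch of the refresh_token grant that get_nextcloud_access_token() in the values above performs against Nextcloud's oauth2 app (all credential values are hypothetical placeholders):

import requests

token_url = 'https://vr.drive.sunet.se/index.php/apps/oauth2/api/v1/token'
data = {
    'grant_type': 'refresh_token',
    'refresh_token': '<refresh-token>',  # placeholder
    'client_id': '<client-id>',          # placeholder
    'client_secret': '<client-secret>',  # placeholder
}
# Nextcloud responds with a fresh access_token, a rotated refresh_token and expires_in.
print(requests.post(token_url, data=data).json())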

View file

@@ -4,16 +4,15 @@ kind: Ingress
metadata:
name: jupyterhub-ingress
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/issuer: "letsencrypt"
acme.cert-manager.io/http01-edit-in-place: "true"
kubernetes.io/ingress.class: traefik
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
spec:
ingressClassName: nginx
defaultBackend:
service:
name: proxy-public
port:
number: 80
number: 8443
tls:
- hosts:
- sunet-jupyter.drive.test.sunet.se

View file

@@ -7,7 +7,7 @@ helmCharts:
name: jupyterhub
releaseName: sunet-jupyterhub
valuesFile: ./values/values.yaml
version: 4.0.0
version: 3.2.1
namespace: sunet-jupyterhub
helmGlobals:
chartHome: ../../../base/charts/

View file

@@ -315,7 +315,7 @@ proxy:
singleuser:
image:
name: docker.sunet.se/drive/jupyter-custom
tag: lab-4.0.10-sunet5
tag: lab-4.0.10-sunet4
storage:
dynamic:
storageClass: csi-sc-cinderplugin

View file

@@ -1,30 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: jupyterhub-ingress
annotations:
cert-manager.io/issuer: "letsencrypt"
acme.cert-manager.io/http01-edit-in-place: "true"
spec:
defaultBackend:
service:
name: proxy-public
port:
number: 8443
tls:
- hosts:
- vr-jupyter.drive.test.sunet.se
secretName: tls-secret
ingressClassName: nginx
rules:
- host: vr-jupyter.drive.test.sunet.se
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: proxy-public
port:
number: 80

View file

@@ -1,24 +0,0 @@
---
apiVersion: v1
items:
- apiVersion: v1
kind: Service
metadata:
labels:
app: jupyterhub-node
name: jupyterhub-node
spec:
ports:
- port: 8080
protocol: TCP
targetPort: 8080
selector:
app: jupyterhub-node
sessionAffinity: None
type: ClusterIP
status:
loadBalancer: {}
kind: List
metadata:
resourceVersion: ""
selfLink: ""

View file

@@ -1,16 +0,0 @@
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources: [../../../base/]
helmCharts:
- includeCRDs: true
name: jupyterhub
releaseName: vr-jupyterhub
valuesFile: ./values/values.yaml
version: 4.0.0
namespace: vr-jupyterhub
helmGlobals:
chartHome: ../../../base/charts/
patches:
- path: jupyterhub-ingress.yml
- path: jupyterhub-service.yml

View file

@@ -1,337 +0,0 @@
debug:
enabled: true
hub:
config:
Authenticator:
auto_login: true
enable_auth_state: true
JupyterHub:
tornado_settings:
headers: { 'Content-Security-Policy': "frame-ancestors *;" }
db:
pvc:
storageClassName: csi-sc-cinderplugin
extraConfig:
oauthCode: |
      import os
      import time
import requests
from datetime import datetime
from oauthenticator.generic import GenericOAuthenticator
token_url = 'https://' + os.environ['NEXTCLOUD_HOST'] + '/index.php/apps/oauth2/api/v1/token'
debug = os.environ.get('NEXTCLOUD_DEBUG_OAUTH', 'false').lower() in ['true', '1', 'yes']
def get_nextcloud_access_token(refresh_token):
client_id = os.environ['NEXTCLOUD_CLIENT_ID']
client_secret = os.environ['NEXTCLOUD_CLIENT_SECRET']
code = refresh_token
data = {
'grant_type': 'refresh_token',
'code': code,
'refresh_token': refresh_token,
'client_id': client_id,
'client_secret': client_secret
}
response = requests.post(token_url, data=data)
if debug:
print(response.text)
return response.json()
def post_auth_hook(authenticator, handler, authentication):
user = authentication['auth_state']['oauth_user']['ocs']['data']['id']
auth_state = authentication['auth_state']
auth_state['token_expires'] = time.time() + auth_state['token_response']['expires_in']
authentication['auth_state'] = auth_state
return authentication
class NextcloudOAuthenticator(GenericOAuthenticator):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.user_dict = {}
async def pre_spawn_start(self, user, spawner):
          await super().pre_spawn_start(user, spawner)
auth_state = await user.get_auth_state()
if not auth_state:
return
access_token = auth_state['access_token']
spawner.environment['NEXTCLOUD_ACCESS_TOKEN'] = access_token
async def refresh_user(self, user, handler=None):
auth_state = await user.get_auth_state()
if not auth_state:
if debug:
print(f'auth_state missing for {user}')
return False
access_token = auth_state['access_token']
refresh_token = auth_state['refresh_token']
token_response = auth_state['token_response']
now = time.time()
now_hr = datetime.fromtimestamp(now)
expires = auth_state['token_expires']
expires_hr = datetime.fromtimestamp(expires)
          expires = 0  # forces 'now >= expires' below, i.e. a refresh attempt on every call
if debug:
print(f'auth_state for {user}: {auth_state}')
if now >= expires:
if debug:
print(f'Time is: {now_hr}, token expired: {expires_hr}')
print(f'Refreshing token for {user}')
try:
token_response = get_nextcloud_access_token(refresh_token)
auth_state['access_token'] = token_response['access_token']
auth_state['refresh_token'] = token_response['refresh_token']
auth_state['token_expires'] = now + token_response['expires_in']
auth_state['token_response'] = token_response
if debug:
print(f'Successfully refreshed token for {user.name}')
print(f'auth_state for {user.name}: {auth_state}')
return {'name': user.name, 'auth_state': auth_state}
except Exception as e:
if debug:
                print(f'Failed to refresh token for {user}: {e}')
return False
return False
if debug:
print(f'Time is: {now_hr}, token expires: {expires_hr}')
return True
c.JupyterHub.authenticator_class = NextcloudOAuthenticator
c.NextcloudOAuthenticator.client_id = os.environ['NEXTCLOUD_CLIENT_ID']
c.NextcloudOAuthenticator.client_secret = os.environ['NEXTCLOUD_CLIENT_SECRET']
c.NextcloudOAuthenticator.login_service = 'Sunet Drive'
c.NextcloudOAuthenticator.username_claim = lambda r: r.get('ocs', {}).get('data', {}).get('id')
c.NextcloudOAuthenticator.userdata_url = 'https://' + os.environ['NEXTCLOUD_HOST'] + '/ocs/v2.php/cloud/user?format=json'
c.NextcloudOAuthenticator.authorize_url = 'https://' + os.environ['NEXTCLOUD_HOST'] + '/index.php/apps/oauth2/authorize'
c.NextcloudOAuthenticator.token_url = token_url
c.NextcloudOAuthenticator.oauth_callback_url = 'https://' + os.environ['JUPYTER_HOST'] + '/hub/oauth_callback'
c.NextcloudOAuthenticator.allow_all = True
c.NextcloudOAuthenticator.refresh_pre_spawn = True
c.NextcloudOAuthenticator.enable_auth_state = True
c.NextcloudOAuthenticator.auth_refresh_age = 3600
c.NextcloudOAuthenticator.post_auth_hook = post_auth_hook
serviceCode: |
      import os
      import sys
c.JupyterHub.load_roles = [
{
"name": "refresh-token",
"services": [
"refresh-token"
],
"scopes": [
"read:users",
"admin:auth_state"
]
},
{
"name": "user",
"scopes": [
"access:services!service=refresh-token",
"read:services!service=refresh-token",
"self",
],
},
{
"name": "server",
"scopes": [
"access:services!service=refresh-token",
"read:services!service=refresh-token",
"inherit",
],
}
]
c.JupyterHub.services = [
{
'name': 'refresh-token',
'url': 'http://' + os.environ.get('HUB_SERVICE_HOST', 'hub') + ':' + os.environ.get('HUB_SERVICE_PORT_REFRESH_TOKEN', '8082'),
'display': False,
'oauth_no_confirm': True,
'api_token': os.environ['JUPYTERHUB_API_KEY'],
'command': [sys.executable, '/usr/local/etc/jupyterhub/refresh-token.py']
}
]
c.JupyterHub.admin_users = {"refresh-token"}
c.JupyterHub.api_tokens = {
os.environ['JUPYTERHUB_API_KEY']: "refresh-token",
}
extraFiles:
refresh-token.py:
mountPath: /usr/local/etc/jupyterhub/refresh-token.py
stringData: |
"""A token refresh service authenticating with the Hub.
This service serves `/services/refresh-token/`,
authenticated with the Hub,
showing the user their own info.
"""
import json
import os
import requests
import socket
from jupyterhub.services.auth import HubAuthenticated
from jupyterhub.utils import url_path_join
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.web import Application, HTTPError, RequestHandler, authenticated
from urllib.parse import urlparse
debug = os.environ.get('NEXTCLOUD_DEBUG_OAUTH', 'false').lower() in ['true', '1', 'yes']
def my_debug(s):
if debug:
with open("/proc/1/fd/1", "a") as stdout:
print(s, file=stdout)
class RefreshHandler(HubAuthenticated, RequestHandler):
def api_request(self, method, url, **kwargs):
my_debug(f'{self.hub_auth}')
url = url_path_join(self.hub_auth.api_url, url)
allow_404 = kwargs.pop('allow_404', False)
headers = kwargs.setdefault('headers', {})
headers.setdefault('Authorization', f'token {self.hub_auth.api_token}')
try:
r = requests.request(method, url, **kwargs)
except requests.ConnectionError as e:
my_debug(f'Error connecting to {url}: {e}')
msg = f'Failed to connect to Hub API at {url}.'
msg += f' Is the Hub accessible at this URL (from host: {socket.gethostname()})?'
if '127.0.0.1' in url:
msg += ' Make sure to set c.JupyterHub.hub_ip to an IP accessible to' + \
' single-user servers if the servers are not on the same host as the Hub.'
raise HTTPError(500, msg)
data = None
if r.status_code == 404 and allow_404:
pass
elif r.status_code == 403:
my_debug(
'Lacking permission to check authorization with JupyterHub,' +
f' my auth token may have expired: [{r.status_code}] {r.reason}'
)
my_debug(r.text)
raise HTTPError(
500,
'Permission failure checking authorization, I may need a new token'
)
elif r.status_code >= 500:
my_debug(f'Upstream failure verifying auth token: [{r.status_code}] {r.reason}')
my_debug(r.text)
raise HTTPError(
502, 'Failed to check authorization (upstream problem)')
elif r.status_code >= 400:
my_debug(f'Failed to check authorization: [{r.status_code}] {r.reason}')
my_debug(r.text)
raise HTTPError(500, 'Failed to check authorization')
else:
data = r.json()
return data
@authenticated
def get(self):
user_model = self.get_current_user()
# Fetch current auth state
user_data = self.api_request('GET', url_path_join('users', user_model['name']))
auth_state = user_data['auth_state']
access_token = auth_state['access_token']
token_expires = auth_state['token_expires']
self.set_header('content-type', 'application/json')
self.write(json.dumps({'access_token': access_token, 'token_expires': token_expires}, indent=1, sort_keys=True))
class PingHandler(RequestHandler):
def get(self):
my_debug(f"DEBUG: In ping get")
self.set_header('content-type', 'application/json')
self.write(json.dumps({'ping': 1}))
def main():
app = Application([
(os.environ['JUPYTERHUB_SERVICE_PREFIX'] + 'tokens', RefreshHandler),
(os.environ['JUPYTERHUB_SERVICE_PREFIX'] + '/?', PingHandler),
])
http_server = HTTPServer(app)
url = urlparse(os.environ['JUPYTERHUB_SERVICE_URL'])
http_server.listen(url.port)
IOLoop.current().start()
if __name__ == '__main__':
main()
networkPolicy:
ingress:
- ports:
- port: 8082
from:
- podSelector:
matchLabels:
hub.jupyter.org/network-access-hub: "true"
service:
extraPorts:
- port: 8082
targetPort: 8082
name: refresh-token
extraEnv:
NEXTCLOUD_DEBUG_OAUTH: "no"
NEXTCLOUD_HOST: vr.drive.test.sunet.se
JUPYTER_HOST: vr-jupyter.drive.test.sunet.se
JUPYTERHUB_API_KEY:
valueFrom:
secretKeyRef:
name: jupyterhub-secrets
key: api-key
JUPYTERHUB_CRYPT_KEY:
valueFrom:
secretKeyRef:
name: jupyterhub-secrets
key: crypt-key
NEXTCLOUD_CLIENT_ID:
valueFrom:
secretKeyRef:
name: nextcloud-oauth-secrets
key: client-id
NEXTCLOUD_CLIENT_SECRET:
valueFrom:
secretKeyRef:
name: nextcloud-oauth-secrets
key: client-secret
networkPolicy:
enabled: false
proxy:
chp:
networkPolicy:
egress:
- to:
- podSelector:
matchLabels:
app: jupyterhub
component: hub
ports:
- port: 8082
singleuser:
image:
name: docker.sunet.se/drive/jupyter-custom
tag: lab-4.0.10-sunet4
storage:
dynamic:
storageClass: csi-sc-cinderplugin
extraEnv:
JUPYTER_ENABLE_LAB: "yes"
JUPYTER_HOST: vr-jupyter.drive.test.sunet.se
NEXTCLOUD_HOST: vr.drive.test.sunet.se
extraFiles:
jupyter_notebook_config:
mountPath: /home/jovyan/.jupyter/jupyter_server_config.py
stringData: |
import os
c = get_config()
c.NotebookApp.allow_origin = '*'
c.NotebookApp.tornado_settings = {
'headers': { 'Content-Security-Policy': "frame-ancestors *;" }
}
os.system('/usr/local/bin/nc-sync')
mode: 0644
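The JUPYTERHUB_CRYPT_KEY consumed from the jupyterhub-secrets secret in these values must be a hex-encoded 32-byte key; JupyterHub uses it to encrypt the persisted auth_state. A sketch for generating a suitable value for the secret's crypt-key entry:

import secrets

# Hex-encoded 32 bytes, as expected by JupyterHub's auth_state encryption.
print(secrets.token_hex(32))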

View file

@@ -1,8 +0,0 @@
resources:
- portal-cert-manager.yml
- portal-deployment.yml
- portal-ingress.yml
- portal-namespace.yml
- portal-service.yml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

View file

@@ -1,15 +0,0 @@
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: letsencrypt
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: drive@sunet.se
privateKeySecretRef:
name: letsencrypt
solvers:
- http01:
ingress:
class: nginx

View file

@@ -1,30 +0,0 @@
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: portal-node
namespace: portal
creationTimestamp:
labels:
app: portal-node
spec:
replicas: 3
selector:
matchLabels:
app: portal-node
template:
metadata:
creationTimestamp:
labels:
app: portal-node
spec:
containers:
- name: portal
image: docker.sunet.se/drive/portal:0.1.2-1
imagePullPolicy: Always
resources: {}
env:
- name: DRIVE_DOMAIN
value: "drive.test.sunet.se"
strategy: {}
status: {}

View file

@@ -1,30 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: portal-ingress
namespace: portal
annotations:
kubernetes.io/ingress.class: nginx
spec:
defaultBackend:
service:
name: portal-node
port:
number: 8080
tls:
- hosts:
- portal.drive.test.sunet.se
secretName: tls-secret
ingressClassName: nginx
rules:
- host: portal.drive.test.sunet.se
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: portal-node
port:
number: 8080

View file

@@ -1,8 +0,0 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: portal
spec:
finalizers:
- kubernetes

View file

@@ -1,25 +0,0 @@
---
apiVersion: v1
items:
- apiVersion: v1
kind: Service
metadata:
labels:
app: portal-node
name: portal-node
namespace: portal
spec:
ports:
- port: 8080
protocol: TCP
targetPort: 8080
selector:
app: portal-node
sessionAffinity: None
type: ClusterIP
status:
loadBalancer: {}
kind: List
metadata:
resourceVersion: ""
selfLink: ""

View file

@@ -1,6 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../base
patches:
- path: portal-ingress.yml

View file

@@ -1,30 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: portal-ingress
namespace: portal
annotations:
kubernetes.io/ingress.class: nginx
spec:
defaultBackend:
service:
name: portal-node
port:
number: 8080
ingressClassName: nginx
tls:
- hosts:
- portal.drive.sunet.se
secretName: tls-secret
rules:
- host: portal.drive.sunet.se
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: portal-node
port:
number: 8080

View file

@@ -1,7 +0,0 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../base
patches:
- path: portal-ingress.yml
- path: portal-deployment.yml

View file

@@ -1,27 +0,0 @@
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: portal-node
namespace: portal
creationTimestamp:
labels:
app: portal-node
spec:
replicas: 3
selector:
matchLabels:
app: portal-node
template:
metadata:
creationTimestamp:
labels:
app: portal-node
spec:
containers:
- name: portal
env:
- name: DRIVE_DOMAIN
value: "drive.test.sunet.se"
strategy: {}
status: {}

View file

@@ -1,32 +0,0 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: portal-ingress
namespace: portal
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/issuer: "letsencrypt"
acme.cert-manager.io/http01-edit-in-place: "true"
spec:
defaultBackend:
service:
name: portal-node
port:
number: 8080
ingressClassName: nginx
tls:
- hosts:
- drive.test.sunet.se
secretName: tls-secret
rules:
- host: drive.test.sunet.se
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: portal-node
port:
number: 8080

Binary file not shown.

View file

@@ -19,34 +19,35 @@ spec:
spec:
containers:
- name: doris
image: docker.sunet.se/rds/doris-rds:git-15de3c5b9
image: docker.sunet.se/rds/doris-rds:ci-RDS-Connectors-13
env:
- name: Logging__LogLevel__Default
value: Debug
- name: ScieboRds__ConnectorServiceName
value: layer1-port-doris
- name: ASPNETCORE_ENVIRONMENT
value: Development
- name: ASPNETCORE_URLS
value: http://+:80
- name: Domain
value: sunet.se
- name: ScieboRds__TokenStorageUrl
value: http://layer3-token-storage
- name: Doris__ApiKey
- name: S3__Url
value: dummy
- name: ManifestIndex__Url
value: https://snd-storage-metadata-index-test-snd-dev.apps.k8s.gu.se
- name: ManifestIndex__ApiKey
valueFrom:
secretKeyRef:
name: doris-gu-secrets
name: doris-api-key
key: "api-key"
- name: Doris__DorisApiEnabled
value: 'true'
- name: Doris__PrincipalDomain
value: gu.se
- name: Doris__ApiUrl
value: https://dev.snd.se/doris/api/rocrate
- name: NextCloud__BaseUrl
value: https://gu.drive.test.sunet.se
- name: NextCloud__User
value: _doris_datasets
- name: NextCloud__Password
- name: S3__AccessKey
valueFrom:
secretKeyRef:
name: doris-gu-secret
key: "nextcloudpw"
name: doris-s3-key
key: "s3-key"
- name: S3__SecretKey
valueFrom:
secretKeyRef:
name: doris-s3-secret
key: "s3-secret"
resources: {}
strategy: {}
status: {}

View file

@@ -12,7 +12,7 @@ items:
ports:
- port: 80
protocol: TCP
targetPort: 8080
targetPort: 80
selector:
app: layer1-port-doris
sessionAffinity: None

View file

@@ -1,11 +0,0 @@
---
apiVersion: v1
kind: Service
metadata:
name: gu-drive
namespace: helmrds
spec:
type: ExternalName
externalName: gu.drive.test.sunet.se
ports:
- port: 443

View file

@@ -3,7 +3,6 @@ resources:
- doris-deployment.yml
- rds-ingress.yml
- sunet-service.yml
- gu-service.yml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

Some files were not shown because too many files have changed in this diff.