Jupyterhub:

Export chart version 3.0.3 like so:

```
helm repo add jupyterhub https://hub.jupyter.org/helm-chart/
helm fetch jupyterhub/jupyterhub --version 3.0.3 --untar --untardir .
```
parent 88928fc93f
commit 324724c3b1
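As a quick sanity check on the vendored copy, the chart's version can be compared against the version requested above. A minimal sketch, assuming PyYAML is installed and the chart was untarred into jupyter/base/charts/jupyterhub:

```python
# Sanity check: confirm the vendored chart is the version we fetched.
# Assumes PyYAML is installed and the chart sits at the path below.
import yaml

with open("jupyter/base/charts/jupyterhub/Chart.yaml") as f:
    chart = yaml.safe_load(f)

assert chart["name"] == "jupyterhub"
assert chart["version"] == "3.0.3", f"unexpected chart version: {chart['version']}"
print(f"OK: {chart['name']} {chart['version']} (JupyterHub {chart['appVersion']})")
```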
jupyter/base/charts/jupyterhub/.helmignore (new file, 31 lines)

```
# Anything within the root folder of the Helm chart, where Chart.yaml resides,
# will be embedded into the packaged Helm chart. This is reasonable since only
# when the templates render after the chart has been packaged and distributed,
# will the template logic evaluate whether other files were
# referenced, such as our files/hub/jupyterhub_config.py.
#
# Here are files that we intentionally ignore to avoid them being packaged,
# because we don't want to reference them from our templates anyhow.
values.schema.yaml

# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
```
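For intuition about what the glob patterns above exclude, here is a rough illustration using Python's fnmatch. Note that this only approximates Helm's actual .helmignore matching (directory patterns such as `.git/` are handled specially by Helm):

```python
# Rough illustration of glob-style ignore patterns, approximating (not
# reproducing) Helm's .helmignore semantics.
from fnmatch import fnmatch

patterns = ["values.schema.yaml", ".DS_Store", "*.swp", "*.bak", "*.tmp", "*~"]

for candidate in ["values.schema.yaml", "values.yaml", "notes.swp", "backup~"]:
    ignored = any(fnmatch(candidate, p) for p in patterns)
    print(f"{candidate}: {'ignored' if ignored else 'packaged'}")
```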
jupyter/base/charts/jupyterhub/Chart.yaml (new file, 41 lines)

```yaml
annotations:
  artifacthub.io/images: |
    - image: jupyterhub/configurable-http-proxy:4.5.6
      name: configurable-http-proxy
    - image: jupyterhub/k8s-hub:3.0.3
      name: k8s-hub
    - image: jupyterhub/k8s-image-awaiter:3.0.3
      name: k8s-image-awaiter
    - image: jupyterhub/k8s-network-tools:3.0.3
      name: k8s-network-tools
    - image: jupyterhub/k8s-secret-sync:3.0.3
      name: k8s-secret-sync
    - image: jupyterhub/k8s-singleuser-sample:3.0.3
      name: k8s-singleuser-sample
    - image: registry.k8s.io/kube-scheduler:v1.26.8
      name: kube-scheduler
    - image: registry.k8s.io/pause:3.9
      name: pause
    - image: registry.k8s.io/pause:3.9
      name: pause
    - image: traefik:v2.10.4
      name: traefik
apiVersion: v2
appVersion: 4.0.2
description: Multi-user Jupyter installation
home: https://z2jh.jupyter.org
icon: https://hub.jupyter.org/helm-chart/images/hublogo.svg
keywords:
  - jupyter
  - jupyterhub
  - z2jh
kubeVersion: '>=1.23.0-0'
maintainers:
  - email: erik@sundellopensource.se
    name: Erik Sundell
  - name: Simon Li
    url: https://github.com/manics/
name: jupyterhub
sources:
  - https://github.com/jupyterhub/zero-to-jupyterhub-k8s
version: 3.0.3
```
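The artifacthub.io/images annotation is itself a YAML list embedded as a string, so the pinned images can be listed programmatically. A small sketch, assuming PyYAML and the vendored path used above:

```python
# List the container images pinned by this chart's Artifact Hub annotation.
# The annotation value is a YAML list embedded as a string.
import yaml

with open("jupyter/base/charts/jupyterhub/Chart.yaml") as f:
    chart = yaml.safe_load(f)

for entry in yaml.safe_load(chart["annotations"]["artifacthub.io/images"]):
    print(f'{entry["name"]}: {entry["image"]}')
```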
jupyter/base/charts/jupyterhub/README.md (new file, 18 lines)

```markdown
# JupyterHub Helm chart

[![Documentation](https://img.shields.io/badge/Documentation-z2jh.jupyter.org-blue?logo=read-the-docs&logoColor=white)](https://z2jh.jupyter.org)
[![GitHub](https://img.shields.io/badge/Source_code-github-blue?logo=github&logoColor=white)](https://github.com/jupyterhub/zero-to-jupyterhub-k8s)
[![Discourse](https://img.shields.io/badge/Help_forum-discourse-blue?logo=discourse&logoColor=white)](https://discourse.jupyter.org/c/jupyterhub/z2jh-k8s)
[![Gitter](https://img.shields.io/badge/Social_chat-gitter-blue?logo=gitter&logoColor=white)](https://gitter.im/jupyterhub/jupyterhub)
<br>
[![Latest stable release of the Helm chart](https://img.shields.io/badge/dynamic/json.svg?label=Latest%20stable%20release&url=https://hub.jupyter.org/helm-chart/info.json&query=$.jupyterhub.stable&logo=helm&logoColor=white)](https://jupyterhub.github.io/helm-chart#jupyterhub)
[![Latest pre-release of the Helm chart](https://img.shields.io/badge/dynamic/json.svg?label=Latest%20pre-release&url=https://hub.jupyter.org/helm-chart/info.json&query=$.jupyterhub.pre&logo=helm&logoColor=white)](https://jupyterhub.github.io/helm-chart#development-releases-jupyterhub)
[![Latest development release of the Helm chart](https://img.shields.io/badge/dynamic/json.svg?label=Latest%20dev%20release&url=https://hub.jupyter.org/helm-chart/info.json&query=$.jupyterhub.latest&logo=helm&logoColor=white)](https://jupyterhub.github.io/helm-chart#development-releases-jupyterhub)

The JupyterHub Helm chart is accompanied by an installation guide at [z2jh.jupyter.org](https://z2jh.jupyter.org). Together they enable you to deploy [JupyterHub](https://jupyterhub.readthedocs.io) in a Kubernetes cluster that can make Jupyter environments available to several thousand simultaneous users.

## History

Much of the initial groundwork for this documentation is information learned from the successful use of JupyterHub and Kubernetes at UC Berkeley in their [Data 8](http://data8.org/) program.

![](https://raw.githubusercontent.com/jupyterhub/zero-to-jupyterhub-k8s/HEAD/docs/source/_static/images/data8_massive_audience.jpg)
```
jupyter/base/charts/jupyterhub/files/hub/jupyterhub_config.py (new file, 497 lines)

```python
# load the config object (satisfies linters)
c = get_config()  # noqa

import glob
import os
import re
import sys

from jupyterhub.utils import url_path_join
from kubernetes_asyncio import client
from tornado.httpclient import AsyncHTTPClient

# Make sure that modules placed in the same directory as the jupyterhub config are added to the pythonpath
configuration_directory = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, configuration_directory)

from z2jh import (
    get_config,
    get_name,
    get_name_env,
    get_secret_value,
    set_config_if_not_none,
)


def camelCaseify(s):
    """convert snake_case to camelCase

    For the common case where some_value is set from someValue
    so we don't have to specify the name twice.
    """
    return re.sub(r"_([a-z])", lambda m: m.group(1).upper(), s)


# Configure JupyterHub to use the curl backend for making HTTP requests,
# rather than the pure-python implementations. The default one starts
# being too slow to make a large number of requests to the proxy API
# at the rate required.
AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")

c.JupyterHub.spawner_class = "kubespawner.KubeSpawner"

# Connect to a proxy running in a different pod. Note that *_SERVICE_*
# environment variables are set by Kubernetes for Services
c.ConfigurableHTTPProxy.api_url = (
    f'http://{get_name("proxy-api")}:{get_name_env("proxy-api", "_SERVICE_PORT")}'
)
c.ConfigurableHTTPProxy.should_start = False

# Do not shut down user pods when hub is restarted
c.JupyterHub.cleanup_servers = False

# Check that the proxy has routes appropriately set up
c.JupyterHub.last_activity_interval = 60

# Don't wait at all before redirecting a spawning user to the progress page
c.JupyterHub.tornado_settings = {
    "slow_spawn_timeout": 0,
}


# configure the hub db connection
db_type = get_config("hub.db.type")
if db_type == "sqlite-pvc":
    c.JupyterHub.db_url = "sqlite:///jupyterhub.sqlite"
elif db_type == "sqlite-memory":
    c.JupyterHub.db_url = "sqlite://"
else:
    set_config_if_not_none(c.JupyterHub, "db_url", "hub.db.url")
db_password = get_secret_value("hub.db.password", None)
if db_password is not None:
    if db_type == "mysql":
        os.environ["MYSQL_PWD"] = db_password
    elif db_type == "postgres":
        os.environ["PGPASSWORD"] = db_password
    else:
        print(f"Warning: hub.db.password is ignored for hub.db.type={db_type}")


# c.JupyterHub configuration from Helm chart's configmap
for trait, cfg_key in (
    ("concurrent_spawn_limit", None),
    ("active_server_limit", None),
    ("base_url", None),
    ("allow_named_servers", None),
    ("named_server_limit_per_user", None),
    ("authenticate_prometheus", None),
    ("redirect_to_server", None),
    ("shutdown_on_logout", None),
    ("template_paths", None),
    ("template_vars", None),
):
    if cfg_key is None:
        cfg_key = camelCaseify(trait)
    set_config_if_not_none(c.JupyterHub, trait, "hub." + cfg_key)

# hub_bind_url configures what the JupyterHub process within the hub pod's
# container should listen to.
hub_container_port = 8081
c.JupyterHub.hub_bind_url = f"http://:{hub_container_port}"

# hub_connect_url is the URL for connecting to the hub for use by external
# JupyterHub services such as the proxy. Note that *_SERVICE_* environment
# variables are set by Kubernetes for Services.
c.JupyterHub.hub_connect_url = (
    f'http://{get_name("hub")}:{get_name_env("hub", "_SERVICE_PORT")}'
)

# implement common labels
# this duplicates the jupyterhub.commonLabels helper
common_labels = c.KubeSpawner.common_labels = {}
common_labels["app"] = get_config(
    "nameOverride",
    default=get_config("Chart.Name", "jupyterhub"),
)
common_labels["heritage"] = "jupyterhub"
chart_name = get_config("Chart.Name")
chart_version = get_config("Chart.Version")
if chart_name and chart_version:
    common_labels["chart"] = "{}-{}".format(
        chart_name,
        chart_version.replace("+", "_"),
    )
release = get_config("Release.Name")
if release:
    common_labels["release"] = release

c.KubeSpawner.namespace = os.environ.get("POD_NAMESPACE", "default")

# Max number of consecutive failures before the Hub restarts itself
# requires jupyterhub 0.9.2
set_config_if_not_none(
    c.Spawner,
    "consecutive_failure_limit",
    "hub.consecutiveFailureLimit",
)

for trait, cfg_key in (
    ("pod_name_template", None),
    ("start_timeout", None),
    ("image_pull_policy", "image.pullPolicy"),
    # ('image_pull_secrets', 'image.pullSecrets'), # Managed manually below
    ("events_enabled", "events"),
    ("extra_labels", None),
    ("extra_annotations", None),
    # ("allow_privilege_escalation", None), # Managed manually below
    ("uid", None),
    ("fs_gid", None),
    ("service_account", "serviceAccountName"),
    ("storage_extra_labels", "storage.extraLabels"),
    # ("tolerations", "extraTolerations"), # Managed manually below
    ("node_selector", None),
    ("node_affinity_required", "extraNodeAffinity.required"),
    ("node_affinity_preferred", "extraNodeAffinity.preferred"),
    ("pod_affinity_required", "extraPodAffinity.required"),
    ("pod_affinity_preferred", "extraPodAffinity.preferred"),
    ("pod_anti_affinity_required", "extraPodAntiAffinity.required"),
    ("pod_anti_affinity_preferred", "extraPodAntiAffinity.preferred"),
    ("lifecycle_hooks", None),
    ("init_containers", None),
    ("extra_containers", None),
    ("mem_limit", "memory.limit"),
    ("mem_guarantee", "memory.guarantee"),
    ("cpu_limit", "cpu.limit"),
    ("cpu_guarantee", "cpu.guarantee"),
    ("extra_resource_limits", "extraResource.limits"),
    ("extra_resource_guarantees", "extraResource.guarantees"),
    ("environment", "extraEnv"),
    ("profile_list", None),
    ("extra_pod_config", None),
):
    if cfg_key is None:
        cfg_key = camelCaseify(trait)
    set_config_if_not_none(c.KubeSpawner, trait, "singleuser." + cfg_key)

image = get_config("singleuser.image.name")
if image:
    tag = get_config("singleuser.image.tag")
    if tag:
        image = f"{image}:{tag}"

    c.KubeSpawner.image = image

# allow_privilege_escalation defaults to False in KubeSpawner 2+. Since it's a
# property where None, False, and True all are valid values that users of the
# Helm chart may want to set, we can't use the set_config_if_not_none helper
# function as someone may want to override the default False value to None.
#
c.KubeSpawner.allow_privilege_escalation = get_config(
    "singleuser.allowPrivilegeEscalation"
)

# Combine imagePullSecret.create (single), imagePullSecrets (list), and
# singleuser.image.pullSecrets (list).
image_pull_secrets = []
if get_config("imagePullSecret.automaticReferenceInjection") and get_config(
    "imagePullSecret.create"
):
    image_pull_secrets.append(get_name("image-pull-secret"))
if get_config("imagePullSecrets"):
    image_pull_secrets.extend(get_config("imagePullSecrets"))
if get_config("singleuser.image.pullSecrets"):
    image_pull_secrets.extend(get_config("singleuser.image.pullSecrets"))
if image_pull_secrets:
    c.KubeSpawner.image_pull_secrets = image_pull_secrets

# scheduling:
if get_config("scheduling.userScheduler.enabled"):
    c.KubeSpawner.scheduler_name = get_name("user-scheduler")
if get_config("scheduling.podPriority.enabled"):
    c.KubeSpawner.priority_class_name = get_name("priority")

# add node-purpose affinity
match_node_purpose = get_config("scheduling.userPods.nodeAffinity.matchNodePurpose")
if match_node_purpose:
    node_selector = dict(
        matchExpressions=[
            dict(
                key="hub.jupyter.org/node-purpose",
                operator="In",
                values=["user"],
            )
        ],
    )
    if match_node_purpose == "prefer":
        c.KubeSpawner.node_affinity_preferred.append(
            dict(
                weight=100,
                preference=node_selector,
            ),
        )
    elif match_node_purpose == "require":
        c.KubeSpawner.node_affinity_required.append(node_selector)
    elif match_node_purpose == "ignore":
        pass
    else:
        raise ValueError(
            f"Unrecognized value for matchNodePurpose: {match_node_purpose}"
        )

# Combine the common tolerations for user pods with singleuser tolerations
scheduling_user_pods_tolerations = get_config("scheduling.userPods.tolerations", [])
singleuser_extra_tolerations = get_config("singleuser.extraTolerations", [])
tolerations = scheduling_user_pods_tolerations + singleuser_extra_tolerations
if tolerations:
    c.KubeSpawner.tolerations = tolerations

# Configure dynamically provisioning pvc
storage_type = get_config("singleuser.storage.type")
if storage_type == "dynamic":
    pvc_name_template = get_config("singleuser.storage.dynamic.pvcNameTemplate")
    c.KubeSpawner.pvc_name_template = pvc_name_template
    volume_name_template = get_config("singleuser.storage.dynamic.volumeNameTemplate")
    c.KubeSpawner.storage_pvc_ensure = True
    set_config_if_not_none(
        c.KubeSpawner, "storage_class", "singleuser.storage.dynamic.storageClass"
    )
    set_config_if_not_none(
        c.KubeSpawner,
        "storage_access_modes",
        "singleuser.storage.dynamic.storageAccessModes",
    )
    set_config_if_not_none(
        c.KubeSpawner, "storage_capacity", "singleuser.storage.capacity"
    )

    # Add volumes to singleuser pods
    c.KubeSpawner.volumes = [
        {
            "name": volume_name_template,
            "persistentVolumeClaim": {"claimName": pvc_name_template},
        }
    ]
    c.KubeSpawner.volume_mounts = [
        {
            "mountPath": get_config("singleuser.storage.homeMountPath"),
            "name": volume_name_template,
        }
    ]
elif storage_type == "static":
    pvc_claim_name = get_config("singleuser.storage.static.pvcName")
    c.KubeSpawner.volumes = [
        {"name": "home", "persistentVolumeClaim": {"claimName": pvc_claim_name}}
    ]

    c.KubeSpawner.volume_mounts = [
        {
            "mountPath": get_config("singleuser.storage.homeMountPath"),
            "name": "home",
            "subPath": get_config("singleuser.storage.static.subPath"),
        }
    ]

# Inject singleuser.extraFiles as volumes and volumeMounts with data loaded from
# the dedicated k8s Secret prepared to hold the extraFiles actual content.
extra_files = get_config("singleuser.extraFiles", {})
if extra_files:
    volume = {
        "name": "files",
    }
    items = []
    for file_key, file_details in extra_files.items():
        # Each item is a mapping of a key in the k8s Secret to a path in this
        # abstract volume, the goal is to enable us to set the mode /
        # permissions only though so we don't change the mapping.
        item = {
            "key": file_key,
            "path": file_key,
        }
        if "mode" in file_details:
            item["mode"] = file_details["mode"]
        items.append(item)
    volume["secret"] = {
        "secretName": get_name("singleuser"),
        "items": items,
    }
    c.KubeSpawner.volumes.append(volume)

    volume_mounts = []
    for file_key, file_details in extra_files.items():
        volume_mounts.append(
            {
                "mountPath": file_details["mountPath"],
                "subPath": file_key,
                "name": "files",
            }
        )
    c.KubeSpawner.volume_mounts.extend(volume_mounts)

# Inject extraVolumes / extraVolumeMounts
c.KubeSpawner.volumes.extend(get_config("singleuser.storage.extraVolumes", []))
c.KubeSpawner.volume_mounts.extend(
    get_config("singleuser.storage.extraVolumeMounts", [])
)

c.JupyterHub.services = []
c.JupyterHub.load_roles = []

# jupyterhub-idle-culler's permissions are scoped to what it needs only, see
# https://github.com/jupyterhub/jupyterhub-idle-culler#permissions.
#
if get_config("cull.enabled", False):
    jupyterhub_idle_culler_role = {
        "name": "jupyterhub-idle-culler",
        "scopes": [
            "list:users",
            "read:users:activity",
            "read:servers",
            "delete:servers",
            # "admin:users", # dynamically added if --cull-users is passed
        ],
        # assign the role to a jupyterhub service, so it gains these permissions
        "services": ["jupyterhub-idle-culler"],
    }

    cull_cmd = ["python3", "-m", "jupyterhub_idle_culler"]
    base_url = c.JupyterHub.get("base_url", "/")
    cull_cmd.append("--url=http://localhost:8081" + url_path_join(base_url, "hub/api"))

    cull_timeout = get_config("cull.timeout")
    if cull_timeout:
        cull_cmd.append(f"--timeout={cull_timeout}")

    cull_every = get_config("cull.every")
    if cull_every:
        cull_cmd.append(f"--cull-every={cull_every}")

    cull_concurrency = get_config("cull.concurrency")
    if cull_concurrency:
        cull_cmd.append(f"--concurrency={cull_concurrency}")

    if get_config("cull.users"):
        cull_cmd.append("--cull-users")
        jupyterhub_idle_culler_role["scopes"].append("admin:users")

    if not get_config("cull.adminUsers"):
        cull_cmd.append("--cull-admin-users=false")

    if get_config("cull.removeNamedServers"):
        cull_cmd.append("--remove-named-servers")

    cull_max_age = get_config("cull.maxAge")
    if cull_max_age:
        cull_cmd.append(f"--max-age={cull_max_age}")

    c.JupyterHub.services.append(
        {
            "name": "jupyterhub-idle-culler",
            "command": cull_cmd,
        }
    )
    c.JupyterHub.load_roles.append(jupyterhub_idle_culler_role)

for key, service in get_config("hub.services", {}).items():
    # c.JupyterHub.services is a list of dicts, but
    # hub.services is a dict of dicts to make the config mergable
    service.setdefault("name", key)

    # As the api_token could be exposed in hub.existingSecret, we need to read
    # it from there or fall back to the chart managed k8s Secret's value.
    service.pop("apiToken", None)
    service["api_token"] = get_secret_value(f"hub.services.{key}.apiToken")

    c.JupyterHub.services.append(service)

for key, role in get_config("hub.loadRoles", {}).items():
    # c.JupyterHub.load_roles is a list of dicts, but
    # hub.loadRoles is a dict of dicts to make the config mergable
    role.setdefault("name", key)

    c.JupyterHub.load_roles.append(role)

# respect explicit null command (distinct from unspecified)
# this avoids relying on KubeSpawner.cmd's default being None
_unspecified = object()
specified_cmd = get_config("singleuser.cmd", _unspecified)
if specified_cmd is not _unspecified:
    c.Spawner.cmd = specified_cmd

set_config_if_not_none(c.Spawner, "default_url", "singleuser.defaultUrl")

cloud_metadata = get_config("singleuser.cloudMetadata")

if cloud_metadata.get("blockWithIptables") == True:
    # Use iptables to block access to cloud metadata by default
    network_tools_image_name = get_config("singleuser.networkTools.image.name")
    network_tools_image_tag = get_config("singleuser.networkTools.image.tag")
    network_tools_resources = get_config("singleuser.networkTools.resources")
    ip = cloud_metadata["ip"]
    ip_block_container = client.V1Container(
        name="block-cloud-metadata",
        image=f"{network_tools_image_name}:{network_tools_image_tag}",
        command=[
            "iptables",
            "--append",
            "OUTPUT",
            "--protocol",
            "tcp",
            "--destination",
            ip,
            "--destination-port",
            "80",
            "--jump",
            "DROP",
        ],
        security_context=client.V1SecurityContext(
            privileged=True,
            run_as_user=0,
            capabilities=client.V1Capabilities(add=["NET_ADMIN"]),
        ),
        resources=network_tools_resources,
    )

    c.KubeSpawner.init_containers.append(ip_block_container)


if get_config("debug.enabled", False):
    c.JupyterHub.log_level = "DEBUG"
    c.Spawner.debug = True

# load potentially seeded secrets
#
# NOTE: ConfigurableHTTPProxy.auth_token is set through an environment variable
#       that is set using the chart managed secret.
c.JupyterHub.cookie_secret = get_secret_value("hub.config.JupyterHub.cookie_secret")
# NOTE: CryptKeeper.keys should be a list of strings, but we have encoded them as a
#       single string joined with ; in the k8s Secret.
#
c.CryptKeeper.keys = get_secret_value("hub.config.CryptKeeper.keys").split(";")

# load hub.config values, except potentially seeded secrets already loaded
for app, cfg in get_config("hub.config", {}).items():
    if app == "JupyterHub":
        cfg.pop("proxy_auth_token", None)
        cfg.pop("cookie_secret", None)
        cfg.pop("services", None)
    elif app == "ConfigurableHTTPProxy":
        cfg.pop("auth_token", None)
    elif app == "CryptKeeper":
        cfg.pop("keys", None)
    c[app].update(cfg)

# load /usr/local/etc/jupyterhub/jupyterhub_config.d config files
config_dir = "/usr/local/etc/jupyterhub/jupyterhub_config.d"
if os.path.isdir(config_dir):
    for file_path in sorted(glob.glob(f"{config_dir}/*.py")):
        file_name = os.path.basename(file_path)
        print(f"Loading {config_dir} config: {file_name}")
        with open(file_path) as f:
            file_content = f.read()
        # compiling makes debugging easier: https://stackoverflow.com/a/437857
        exec(compile(source=file_content, filename=file_name, mode="exec"))

# execute hub.extraConfig entries
for key, config_py in sorted(get_config("hub.extraConfig", {}).items()):
    print(f"Loading extra config: {key}")
    exec(config_py)
```
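The trait loops above rely on camelCaseify to map snake_case traitlet names onto the camelCase keys used in values.yaml. A standalone sketch of that mapping:

```python
# Standalone demo of the snake_case -> camelCase mapping used by the
# trait loops in jupyterhub_config.py above.
import re

def camelCaseify(s):
    return re.sub(r"_([a-z])", lambda m: m.group(1).upper(), s)

assert camelCaseify("concurrent_spawn_limit") == "concurrentSpawnLimit"
assert camelCaseify("pod_name_template") == "podNameTemplate"
# so trait "start_timeout" is read from the "singleuser.startTimeout" key
print(camelCaseify("start_timeout"))
```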
jupyter/base/charts/jupyterhub/files/hub/z2jh.py (new file, 121 lines)

```python
"""
Utility methods for use in jupyterhub_config.py and dynamic subconfigs.

Methods here can be imported by extraConfig in values.yaml
"""
import os
from collections.abc import Mapping
from functools import lru_cache

import yaml


# memoize so we only load config once
@lru_cache
def _load_config():
    """Load the Helm chart configuration used to render the Helm templates of
    the chart from a mounted k8s Secret, and merge in values from an optionally
    mounted secret (hub.existingSecret)."""

    cfg = {}
    for source in ("secret/values.yaml", "existing-secret/values.yaml"):
        path = f"/usr/local/etc/jupyterhub/{source}"
        if os.path.exists(path):
            print(f"Loading {path}")
            with open(path) as f:
                values = yaml.safe_load(f)
            cfg = _merge_dictionaries(cfg, values)
        else:
            print(f"No config at {path}")
    return cfg


@lru_cache
def _get_config_value(key):
    """Load value from the k8s ConfigMap given a key."""

    path = f"/usr/local/etc/jupyterhub/config/{key}"
    if os.path.exists(path):
        with open(path) as f:
            return f.read()
    else:
        raise Exception(f"{path} not found!")


@lru_cache
def get_secret_value(key, default="never-explicitly-set"):
    """Load value from the user managed k8s Secret or the default k8s Secret
    given a key."""

    for source in ("existing-secret", "secret"):
        path = f"/usr/local/etc/jupyterhub/{source}/{key}"
        if os.path.exists(path):
            with open(path) as f:
                return f.read()
    if default != "never-explicitly-set":
        return default
    raise Exception(f"{key} not found in either k8s Secret!")


def get_name(name):
    """Returns the fullname of a resource given its short name"""
    return _get_config_value(name)


def get_name_env(name, suffix=""):
    """Returns the fullname of a resource given its short name along with a
    suffix, converted to uppercase with dashes replaced with underscores. This
    is useful to reference named services' associated environment variables, such
    as PROXY_PUBLIC_SERVICE_PORT."""
    env_key = _get_config_value(name) + suffix
    env_key = env_key.upper().replace("-", "_")
    return os.environ[env_key]


def _merge_dictionaries(a, b):
    """Merge two dictionaries recursively.

    Simplified from https://stackoverflow.com/a/7205107
    """
    merged = a.copy()
    for key in b:
        if key in a:
            if isinstance(a[key], Mapping) and isinstance(b[key], Mapping):
                merged[key] = _merge_dictionaries(a[key], b[key])
            else:
                merged[key] = b[key]
        else:
            merged[key] = b[key]
    return merged


def get_config(key, default=None):
    """
    Find a config item of a given name & return it

    Parses everything as YAML, so lists and dicts are available too

    get_config("a.b.c") returns config['a']['b']['c']
    """
    value = _load_config()
    # resolve path in yaml
    for level in key.split("."):
        if not isinstance(value, dict):
            # a parent is a scalar or null,
            # can't resolve full path
            return default
        if level not in value:
            return default
        else:
            value = value[level]
    return value


def set_config_if_not_none(cparent, name, key):
    """
    Find a config item of a given name, set the corresponding Jupyter
    configuration item if not None
    """
    data = get_config(key)
    if data is not None:
        setattr(cparent, name, data)
```
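To make the merge-and-traverse behavior concrete, here is a self-contained sketch that mirrors the two functions above on in-memory data instead of mounted Secrets (the values are hypothetical, not the chart defaults):

```python
# Self-contained demo of _merge_dictionaries plus dotted-path lookup,
# mirroring z2jh.get_config but on in-memory dicts (hypothetical values).
from collections.abc import Mapping

def merge(a, b):
    merged = a.copy()
    for key in b:
        if key in a and isinstance(a[key], Mapping) and isinstance(b[key], Mapping):
            merged[key] = merge(a[key], b[key])
        else:
            merged[key] = b[key]
    return merged

chart_values = {"hub": {"db": {"type": "sqlite-pvc"}, "baseUrl": "/"}}
existing_secret = {"hub": {"db": {"type": "postgres"}}}
cfg = merge(chart_values, existing_secret)

def get_config(key, default=None):
    value = cfg
    for level in key.split("."):
        if not isinstance(value, dict) or level not in value:
            return default
        value = value[level]
    return value

assert get_config("hub.db.type") == "postgres"   # existing secret wins
assert get_config("hub.baseUrl") == "/"          # untouched keys survive
assert get_config("hub.missing", "fallback") == "fallback"
```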
jupyter/base/charts/jupyterhub/templates/NOTES.txt (new file, 158 lines)

```
{{- $proxy_service := include "jupyterhub.proxy-public.fullname" . -}}

{{- /* Generated with https://patorjk.com/software/taag/#p=display&h=0&f=Slant&t=JupyterHub */}}
.      __                          __                  __  __          __
      / /  __  __  ____   __  __  / /_  ___   _____   / / / /  __  __  / /_
 __   / / / / / / / __ \ / / / /  / __/ / _ \ / ___/  / /_/ /  / / / / / __ \
/ /_/ /  / /_/ / / /_/ / / /_/ / / /_  /  __/ / /    / __  /  / /_/ / / /_/ /
\____/   \__,_/ / .___/  \__, /  \__/  \___/ /_/    /_/ /_/   \__,_/ /_.___/
               /_/      /____/

You have successfully installed the official JupyterHub Helm chart!

### Installation info

- Kubernetes namespace: {{ .Release.Namespace }}
- Helm release name: {{ .Release.Name }}
- Helm chart version: {{ .Chart.Version }}
- JupyterHub version: {{ .Chart.AppVersion }}
- Hub pod packages: See https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/{{ include "jupyterhub.chart-version-to-git-ref" .Chart.Version }}/images/hub/requirements.txt

### Followup links

- Documentation: https://z2jh.jupyter.org
- Help forum: https://discourse.jupyter.org
- Social chat: https://gitter.im/jupyterhub/jupyterhub
- Issue tracking: https://github.com/jupyterhub/zero-to-jupyterhub-k8s/issues

### Post-installation checklist

- Verify that created Pods enter a Running state:

      kubectl --namespace={{ .Release.Namespace }} get pod

  If a pod is stuck with a Pending or ContainerCreating status, diagnose with:

      kubectl --namespace={{ .Release.Namespace }} describe pod <name of pod>

  If a pod keeps restarting, diagnose with:

      kubectl --namespace={{ .Release.Namespace }} logs --previous <name of pod>
  {{- println }}

{{- if eq .Values.proxy.service.type "LoadBalancer" }}
- Verify an external IP is provided for the k8s Service {{ $proxy_service }}.

      kubectl --namespace={{ .Release.Namespace }} get service {{ $proxy_service }}

  If the external ip remains <pending>, diagnose with:

      kubectl --namespace={{ .Release.Namespace }} describe service {{ $proxy_service }}
{{- end }}

- Verify web based access:
  {{- println }}
  {{- if .Values.ingress.enabled }}
  {{- range $host := .Values.ingress.hosts }}
  Try insecure HTTP access: http://{{ $host }}{{ $.Values.hub.baseUrl | trimSuffix "/" }}/
  {{- end }}

  {{- range $tls := .Values.ingress.tls }}
  {{- range $host := $tls.hosts }}
  Try secure HTTPS access: https://{{ $host }}{{ $.Values.hub.baseUrl | trimSuffix "/" }}/
  {{- end }}
  {{- end }}
  {{- else }}
  You have not configured a k8s Ingress resource so you need to access the k8s
  Service {{ $proxy_service }} directly.
  {{- println }}

  {{- if eq .Values.proxy.service.type "NodePort" }}
  The k8s Service {{ $proxy_service }} is exposed via NodePorts. That means
  that all the k8s cluster's nodes are exposing the k8s Service via those
  ports.

  Try insecure HTTP access: http://<any k8s nodes ip>:{{ .Values.proxy.service.nodePorts.http | default "no-http-nodeport-set"}}
  Try secure HTTPS access: https://<any k8s nodes address>:{{ .Values.proxy.service.nodePorts.https | default "no-https-nodeport-set" }}

  {{- else }}
  If your computer is outside the k8s cluster, you can port-forward traffic to
  the k8s Service {{ $proxy_service }} with kubectl to access it from your
  computer.

      kubectl --namespace={{ .Release.Namespace }} port-forward service/{{ $proxy_service }} 8080:http

  Try insecure HTTP access: http://localhost:8080
  {{- end }}
  {{- end }}
  {{- println }}




{{- /*
  Warnings for likely misconfigurations
*/}}

{{- if and (not .Values.scheduling.podPriority.enabled) (and .Values.scheduling.userPlaceholder.enabled .Values.scheduling.userPlaceholder.replicas) }}
#################################################################################
######   WARNING: You are using user placeholders without pod priority      #####
######            enabled*, either enable pod priority or stop using the    #####
######            user placeholders** to avoid having placeholders that     #####
######            refuse to make room for a real user.                      #####
######                                                                      #####
######   *scheduling.podPriority.enabled                                    #####
######   **scheduling.userPlaceholder.enabled                               #####
######   **scheduling.userPlaceholder.replicas                              #####
#################################################################################
{{- println }}
{{- end }}




{{- /*
  Breaking changes and failures for likely misconfigurations.
*/}}

{{- $breaking := "" }}
{{- $breaking_title := "\n" }}
{{- $breaking_title = print $breaking_title "\n#################################################################################" }}
{{- $breaking_title = print $breaking_title "\n######   BREAKING: The config values passed contained no longer accepted    #####" }}
{{- $breaking_title = print $breaking_title "\n######             options. See the messages below for more details.       #####" }}
{{- $breaking_title = print $breaking_title "\n######                                                                      #####" }}
{{- $breaking_title = print $breaking_title "\n######             To verify your updated config is accepted, you can use  #####" }}
{{- $breaking_title = print $breaking_title "\n######             the `helm template` command.                             #####" }}
{{- $breaking_title = print $breaking_title "\n#################################################################################" }}

{{- /*
  This is an example (in a helm template comment) on how to detect and
  communicate with regards to a breaking chart config change.

  {{- if hasKey .Values.singleuser.cloudMetadata "enabled" }}
  {{- $breaking = print $breaking "\n\nCHANGED: singleuser.cloudMetadata.enabled must as of 1.0.0 be configured using singleuser.cloudMetadata.blockWithIptables with the opposite value." }}
  {{- end }}
*/}}

{{- if hasKey .Values.rbac "enabled" }}
{{- $breaking = print $breaking "\n\nCHANGED: rbac.enabled must as of version 2.0.0 be configured via rbac.create and <hub|proxy.traefik|scheduling.userScheduler|prePuller.hook>.serviceAccount.create." }}
{{- end }}

{{- if hasKey .Values.hub "fsGid" }}
{{- $breaking = print $breaking "\n\nCHANGED: hub.fsGid must as of version 2.0.0 be configured via hub.podSecurityContext.fsGroup." }}
{{- end }}

{{- if and .Values.singleuser.cloudMetadata.blockWithIptables (and .Values.singleuser.networkPolicy.enabled .Values.singleuser.networkPolicy.egressAllowRules.cloudMetadataServer) }}
{{- $breaking = print $breaking "\n\nCHANGED: singleuser.cloudMetadata.blockWithIptables must as of version 3.0.0 not be configured together with singleuser.networkPolicy.egressAllowRules.cloudMetadataServer as it leads to an ambiguous configuration." }}
{{- end }}

{{- if $breaking }}
{{- fail (print $breaking_title $breaking "\n\n") }}
{{- end }}
```
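The breaking-change checks above accumulate every message into $breaking and only call fail once, so a user sees all problems in one pass instead of one per run. The same collect-then-fail pattern, sketched in Python with hypothetical checks:

```python
# The collect-then-fail validation pattern used by NOTES.txt, sketched in
# Python with hypothetical config checks.
def validate(values: dict) -> None:
    problems = []
    if "enabled" in values.get("rbac", {}):
        problems.append("rbac.enabled was replaced by rbac.create in 2.0.0")
    if "fsGid" in values.get("hub", {}):
        problems.append("hub.fsGid moved to hub.podSecurityContext.fsGroup in 2.0.0")
    if problems:
        # fail once with everything, instead of stopping at the first issue
        raise ValueError("breaking config options found:\n- " + "\n- ".join(problems))

validate({"rbac": {"create": True}, "hub": {}})  # passes silently
```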
jupyter/base/charts/jupyterhub/templates/_helpers-names.tpl (new file, 308 lines)

```
{{- /*
  These helpers encapsulate logic on how we name resources. They also enable
  parent charts to reference these dynamic resource names.

  To avoid duplicating documentation, for more information, please see the
  fullnameOverride entry in values.schema.yaml or the configuration reference
  that values.schema.yaml renders to.

  https://z2jh.jupyter.org/en/latest/resources/reference.html#fullnameOverride
*/}}



{{- /*
  Utility templates
*/}}

{{- /*
  Renders to a prefix for the chart's resource names. This prefix is assumed to
  make the resource name cluster unique.
*/}}
{{- define "jupyterhub.fullname" -}}
    {{- /*
        We have implemented a trick to allow a parent chart depending on this
        chart to call these named templates.

        Caveats and notes:

        1. While parent charts can reference these, grandparent charts can't.
        2. Parent charts must not use an alias for this chart.
        3. There is no failsafe workaround to the above due to
           https://github.com/helm/helm/issues/9214.
        4. .Chart is of its own type (*chart.Metadata) and needs to be cast
           using "toYaml | fromYaml" in order to be able to use normal helm
           template functions on it.
    */}}
    {{- $fullname_override := .Values.fullnameOverride }}
    {{- $name_override := .Values.nameOverride }}
    {{- if ne .Chart.Name "jupyterhub" }}
        {{- if .Values.jupyterhub }}
            {{- $fullname_override = .Values.jupyterhub.fullnameOverride }}
            {{- $name_override = .Values.jupyterhub.nameOverride }}
        {{- end }}
    {{- end }}

    {{- if eq (typeOf $fullname_override) "string" }}
        {{- $fullname_override }}
    {{- else }}
        {{- $name := $name_override | default .Chart.Name }}
        {{- if contains $name .Release.Name }}
            {{- .Release.Name }}
        {{- else }}
            {{- .Release.Name }}-{{ $name }}
        {{- end }}
    {{- end }}
{{- end }}

{{- /*
  Renders to a blank string or if the fullname template is truthy renders to it
  with an appended dash.
*/}}
{{- define "jupyterhub.fullname.dash" -}}
    {{- if (include "jupyterhub.fullname" .) }}
        {{- include "jupyterhub.fullname" . }}-
    {{- end }}
{{- end }}



{{- /*
  Namespaced resources
*/}}

{{- /* hub Deployment */}}
{{- define "jupyterhub.hub.fullname" -}}
    {{- include "jupyterhub.fullname.dash" . }}hub
{{- end }}

{{- /* hub-serviceaccount ServiceAccount */}}
{{- define "jupyterhub.hub-serviceaccount.fullname" -}}
    {{- if .Values.hub.serviceAccount.create }}
        {{- .Values.hub.serviceAccount.name | default (include "jupyterhub.hub.fullname" .) }}
    {{- else }}
        {{- .Values.hub.serviceAccount.name | default "default" }}
    {{- end }}
{{- end }}

{{- /* hub-existing-secret Secret */}}
{{- define "jupyterhub.hub-existing-secret.fullname" -}}
    {{- /* A hack to avoid issues from invoking this from a parent Helm chart. */}}
    {{- $existing_secret := .Values.hub.existingSecret }}
    {{- if ne .Chart.Name "jupyterhub" }}
        {{- if .Values.jupyterhub }}
            {{- $existing_secret = .Values.jupyterhub.hub.existingSecret }}
        {{- end }}
    {{- end }}
    {{- if $existing_secret }}
        {{- $existing_secret }}
    {{- end }}
{{- end }}

{{- /* hub-existing-secret-or-default Secret */}}
{{- define "jupyterhub.hub-existing-secret-or-default.fullname" -}}
    {{- include "jupyterhub.hub-existing-secret.fullname" . | default (include "jupyterhub.hub.fullname" .) }}
{{- end }}

{{- /* hub PVC */}}
{{- define "jupyterhub.hub-pvc.fullname" -}}
    {{- include "jupyterhub.hub.fullname" . }}-db-dir
{{- end }}

{{- /* proxy Deployment */}}
{{- define "jupyterhub.proxy.fullname" -}}
    {{- include "jupyterhub.fullname.dash" . }}proxy
{{- end }}

{{- /* proxy-api Service */}}
{{- define "jupyterhub.proxy-api.fullname" -}}
    {{- include "jupyterhub.proxy.fullname" . }}-api
{{- end }}

{{- /* proxy-http Service */}}
{{- define "jupyterhub.proxy-http.fullname" -}}
    {{- include "jupyterhub.proxy.fullname" . }}-http
{{- end }}

{{- /* proxy-public Service */}}
{{- define "jupyterhub.proxy-public.fullname" -}}
    {{- include "jupyterhub.proxy.fullname" . }}-public
{{- end }}

{{- /* proxy-public-tls Secret */}}
{{- define "jupyterhub.proxy-public-tls.fullname" -}}
    {{- include "jupyterhub.proxy-public.fullname" . }}-tls-acme
{{- end }}

{{- /* proxy-public-manual-tls Secret */}}
{{- define "jupyterhub.proxy-public-manual-tls.fullname" -}}
    {{- include "jupyterhub.proxy-public.fullname" . }}-manual-tls
{{- end }}

{{- /* autohttps Deployment */}}
{{- define "jupyterhub.autohttps.fullname" -}}
    {{- include "jupyterhub.fullname.dash" . }}autohttps
{{- end }}

{{- /* autohttps-serviceaccount ServiceAccount */}}
{{- define "jupyterhub.autohttps-serviceaccount.fullname" -}}
    {{- if .Values.proxy.traefik.serviceAccount.create }}
        {{- .Values.proxy.traefik.serviceAccount.name | default (include "jupyterhub.autohttps.fullname" .) }}
    {{- else }}
        {{- .Values.proxy.traefik.serviceAccount.name | default "default" }}
    {{- end }}
{{- end }}

{{- /* user-scheduler Deployment */}}
{{- define "jupyterhub.user-scheduler-deploy.fullname" -}}
    {{- include "jupyterhub.fullname.dash" . }}user-scheduler
{{- end }}

{{- /* user-scheduler-serviceaccount ServiceAccount */}}
{{- define "jupyterhub.user-scheduler-serviceaccount.fullname" -}}
    {{- if .Values.scheduling.userScheduler.serviceAccount.create }}
        {{- .Values.scheduling.userScheduler.serviceAccount.name | default (include "jupyterhub.user-scheduler-deploy.fullname" .) }}
    {{- else }}
        {{- .Values.scheduling.userScheduler.serviceAccount.name | default "default" }}
    {{- end }}
{{- end }}

{{- /* user-scheduler leader election lock resource */}}
{{- define "jupyterhub.user-scheduler-lock.fullname" -}}
    {{- include "jupyterhub.user-scheduler-deploy.fullname" . }}-lock
{{- end }}

{{- /* user-placeholder StatefulSet */}}
{{- define "jupyterhub.user-placeholder.fullname" -}}
    {{- include "jupyterhub.fullname.dash" . }}user-placeholder
{{- end }}

{{- /* image-awaiter Job */}}
{{- define "jupyterhub.hook-image-awaiter.fullname" -}}
    {{- include "jupyterhub.fullname.dash" . }}hook-image-awaiter
{{- end }}

{{- /* image-awaiter-serviceaccount ServiceAccount */}}
{{- define "jupyterhub.hook-image-awaiter-serviceaccount.fullname" -}}
    {{- if .Values.prePuller.hook.serviceAccount.create }}
        {{- .Values.prePuller.hook.serviceAccount.name | default (include "jupyterhub.hook-image-awaiter.fullname" .) }}
    {{- else }}
        {{- .Values.prePuller.hook.serviceAccount.name | default "default" }}
    {{- end }}
{{- end }}

{{- /* hook-image-puller DaemonSet */}}
{{- define "jupyterhub.hook-image-puller.fullname" -}}
    {{- include "jupyterhub.fullname.dash" . }}hook-image-puller
{{- end }}

{{- /* continuous-image-puller DaemonSet */}}
{{- define "jupyterhub.continuous-image-puller.fullname" -}}
    {{- include "jupyterhub.fullname.dash" . }}continuous-image-puller
{{- end }}

{{- /* singleuser NetworkPolicy */}}
{{- define "jupyterhub.singleuser.fullname" -}}
    {{- include "jupyterhub.fullname.dash" . }}singleuser
{{- end }}

{{- /* image-pull-secret Secret */}}
{{- define "jupyterhub.image-pull-secret.fullname" -}}
    {{- include "jupyterhub.fullname.dash" . }}image-pull-secret
{{- end }}

{{- /* Ingress */}}
{{- define "jupyterhub.ingress.fullname" -}}
    {{- if (include "jupyterhub.fullname" .) }}
        {{- include "jupyterhub.fullname" . }}
    {{- else -}}
        jupyterhub
    {{- end }}
{{- end }}



{{- /*
  Cluster wide resources

  We enforce uniqueness of names for our cluster wide resources. We assume that
  the prefix from setting fullnameOverride to null or a string will be cluster
  unique.
*/}}

{{- /* Priority */}}
{{- define "jupyterhub.priority.fullname" -}}
    {{- if (include "jupyterhub.fullname" .) }}
        {{- include "jupyterhub.fullname" . }}
    {{- else }}
        {{- .Release.Name }}-default-priority
    {{- end }}
{{- end }}

{{- /* user-placeholder Priority */}}
{{- define "jupyterhub.user-placeholder-priority.fullname" -}}
    {{- if (include "jupyterhub.fullname" .) }}
        {{- include "jupyterhub.user-placeholder.fullname" . }}
    {{- else }}
        {{- .Release.Name }}-user-placeholder-priority
    {{- end }}
{{- end }}

{{- /* image-puller Priority */}}
{{- define "jupyterhub.image-puller-priority.fullname" -}}
    {{- if (include "jupyterhub.fullname" .) }}
        {{- include "jupyterhub.fullname.dash" . }}image-puller
    {{- else }}
        {{- .Release.Name }}-image-puller-priority
    {{- end }}
{{- end }}

{{- /* user-scheduler's registered name */}}
{{- define "jupyterhub.user-scheduler.fullname" -}}
    {{- if (include "jupyterhub.fullname" .) }}
        {{- include "jupyterhub.user-scheduler-deploy.fullname" . }}
    {{- else }}
        {{- .Release.Name }}-user-scheduler
    {{- end }}
{{- end }}



{{- /*
  A template to render all the named templates in this file for use in the
  hub's ConfigMap.

  It is important we keep this in sync with the available templates.
*/}}
{{- define "jupyterhub.name-templates" -}}
fullname: {{ include "jupyterhub.fullname" . | quote }}
fullname-dash: {{ include "jupyterhub.fullname.dash" . | quote }}
hub: {{ include "jupyterhub.hub.fullname" . | quote }}
hub-serviceaccount: {{ include "jupyterhub.hub-serviceaccount.fullname" . | quote }}
hub-existing-secret: {{ include "jupyterhub.hub-existing-secret.fullname" . | quote }}
hub-existing-secret-or-default: {{ include "jupyterhub.hub-existing-secret-or-default.fullname" . | quote }}
hub-pvc: {{ include "jupyterhub.hub-pvc.fullname" . | quote }}
proxy: {{ include "jupyterhub.proxy.fullname" . | quote }}
proxy-api: {{ include "jupyterhub.proxy-api.fullname" . | quote }}
proxy-http: {{ include "jupyterhub.proxy-http.fullname" . | quote }}
proxy-public: {{ include "jupyterhub.proxy-public.fullname" . | quote }}
proxy-public-tls: {{ include "jupyterhub.proxy-public-tls.fullname" . | quote }}
proxy-public-manual-tls: {{ include "jupyterhub.proxy-public-manual-tls.fullname" . | quote }}
autohttps: {{ include "jupyterhub.autohttps.fullname" . | quote }}
autohttps-serviceaccount: {{ include "jupyterhub.autohttps-serviceaccount.fullname" . | quote }}
user-scheduler-deploy: {{ include "jupyterhub.user-scheduler-deploy.fullname" . | quote }}
user-scheduler-serviceaccount: {{ include "jupyterhub.user-scheduler-serviceaccount.fullname" . | quote }}
user-scheduler-lock: {{ include "jupyterhub.user-scheduler-lock.fullname" . | quote }}
user-placeholder: {{ include "jupyterhub.user-placeholder.fullname" . | quote }}
image-puller-priority: {{ include "jupyterhub.image-puller-priority.fullname" . | quote }}
hook-image-awaiter: {{ include "jupyterhub.hook-image-awaiter.fullname" . | quote }}
hook-image-awaiter-serviceaccount: {{ include "jupyterhub.hook-image-awaiter-serviceaccount.fullname" . | quote }}
hook-image-puller: {{ include "jupyterhub.hook-image-puller.fullname" . | quote }}
continuous-image-puller: {{ include "jupyterhub.continuous-image-puller.fullname" . | quote }}
singleuser: {{ include "jupyterhub.singleuser.fullname" . | quote }}
image-pull-secret: {{ include "jupyterhub.image-pull-secret.fullname" . | quote }}
ingress: {{ include "jupyterhub.ingress.fullname" . | quote }}
priority: {{ include "jupyterhub.priority.fullname" . | quote }}
user-placeholder-priority: {{ include "jupyterhub.user-placeholder-priority.fullname" . | quote }}
user-scheduler: {{ include "jupyterhub.user-scheduler.fullname" . | quote }}
{{- end }}
```
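The core naming rule is easier to see stripped of template syntax: a string fullnameOverride wins outright; otherwise the release name is used alone when it already contains the chart name (or name override), and is prefixed onto it when it does not. A Python paraphrase of that rule (illustrative only, not part of the chart):

```python
# Python paraphrase of the "jupyterhub.fullname" naming rule above.
def fullname(release_name, chart_name="jupyterhub",
             fullname_override=None, name_override=None):
    if isinstance(fullname_override, str):  # includes "" -> empty prefix
        return fullname_override
    name = name_override or chart_name
    return release_name if name in release_name else f"{release_name}-{name}"

assert fullname("jhub") == "jhub-jupyterhub"
assert fullname("my-jupyterhub") == "my-jupyterhub"  # already contains it
assert fullname("jhub", fullname_override="") == ""  # empty prefix
# resource names then append a suffix via the dash helper, e.g. "<prefix>-hub"
```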
jupyter/base/charts/jupyterhub/templates/_helpers-netpol.tpl (new file, 101 lines)

```
{{- /*
  This named template renders egress rules for NetworkPolicy resources based on
  common configuration.

  It renders based on the `egressAllowRules` and `egress` keys of the
  passed networkPolicy config object. Each flag set to true under
  `egressAllowRules` is rendered to an egress rule placed next to any custom
  user-defined rules from the `egress` config.

  This named template needs to render based on a specific networkPolicy
  resource, but also needs access to the root context. Due to that, it
  accepts a list as its scope, where the first element is supposed to be the
  root context and the second element is supposed to be the networkPolicy
  configuration object.

  As an example, this is how you would render this named template from a
  NetworkPolicy resource under its egress:

    egress:
      # other rules here...

      {{- with (include "jupyterhub.networkPolicy.renderEgressRules" (list . .Values.hub.networkPolicy)) }}
      {{- . | nindent 4 }}
      {{- end }}

  Note that the reference to privateIPs and nonPrivateIPs relates to
  https://en.wikipedia.org/wiki/Private_network#Private_IPv4_addresses.
*/}}

{{- define "jupyterhub.networkPolicy.renderEgressRules" -}}
{{- $root := index . 0 }}
{{- $netpol := index . 1 }}
{{- if or (or $netpol.egressAllowRules.dnsPortsCloudMetadataServer $netpol.egressAllowRules.dnsPortsKubeSystemNamespace) $netpol.egressAllowRules.dnsPortsPrivateIPs }}
- ports:
    - port: 53
      protocol: UDP
    - port: 53
      protocol: TCP
  to:
    {{- if $netpol.egressAllowRules.dnsPortsCloudMetadataServer }}
    # Allow outbound connections to DNS ports on the cloud metadata server
    - ipBlock:
        cidr: {{ $root.Values.singleuser.cloudMetadata.ip }}/32
    {{- end }}
    {{- if $netpol.egressAllowRules.dnsPortsKubeSystemNamespace }}
    # Allow outbound connections to DNS ports on pods in the kube-system
    # namespace
    - namespaceSelector:
        matchLabels:
          kubernetes.io/metadata.name: kube-system
    {{- end }}
    {{- if $netpol.egressAllowRules.dnsPortsPrivateIPs }}
    # Allow outbound connections to DNS ports on destinations in the private IP
    # ranges
    - ipBlock:
        cidr: 10.0.0.0/8
    - ipBlock:
        cidr: 172.16.0.0/12
    - ipBlock:
        cidr: 192.168.0.0/16
    {{- end }}
{{- end }}

{{- if $netpol.egressAllowRules.nonPrivateIPs }}
# Allow outbound connections to non-private IP ranges
- to:
    - ipBlock:
        cidr: 0.0.0.0/0
        except:
          # As part of this rule:
          # - don't allow outbound connections to private IPs
          - 10.0.0.0/8
          - 172.16.0.0/12
          - 192.168.0.0/16
          # - don't allow outbound connections to the cloud metadata server
          - {{ $root.Values.singleuser.cloudMetadata.ip }}/32
{{- end }}

{{- if $netpol.egressAllowRules.privateIPs }}
# Allow outbound connections to private IP ranges
- to:
    - ipBlock:
        cidr: 10.0.0.0/8
    - ipBlock:
        cidr: 172.16.0.0/12
    - ipBlock:
        cidr: 192.168.0.0/16
{{- end }}

{{- if $netpol.egressAllowRules.cloudMetadataServer }}
# Allow outbound connections to the cloud metadata server
- to:
    - ipBlock:
        cidr: {{ $root.Values.singleuser.cloudMetadata.ip }}/32
{{- end }}

{{- with $netpol.egress }}
# Allow outbound connections based on user specified rules
{{ . | toYaml }}
{{- end }}
{{- end }}
```
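A Python paraphrase of how the DNS portion of renderEgressRules assembles one rule from the three dnsPorts* flags (illustrative only; the template above is authoritative):

```python
# Illustrative Python analogue of the DNS egress rule assembly above.
def dns_egress_rule(allow, cloud_metadata_ip):
    if not any(allow.get(f) for f in (
        "dnsPortsCloudMetadataServer",
        "dnsPortsKubeSystemNamespace",
        "dnsPortsPrivateIPs",
    )):
        return None
    to = []
    if allow.get("dnsPortsCloudMetadataServer"):
        to.append({"ipBlock": {"cidr": f"{cloud_metadata_ip}/32"}})
    if allow.get("dnsPortsKubeSystemNamespace"):
        to.append({"namespaceSelector": {
            "matchLabels": {"kubernetes.io/metadata.name": "kube-system"}}})
    if allow.get("dnsPortsPrivateIPs"):
        to += [{"ipBlock": {"cidr": c}}
               for c in ("10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16")]
    return {"ports": [{"port": 53, "protocol": p} for p in ("UDP", "TCP")],
            "to": to}

print(dns_egress_rule({"dnsPortsPrivateIPs": True}, "169.254.169.254"))
```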
402
jupyter/base/charts/jupyterhub/templates/_helpers.tpl
Normal file
402
jupyter/base/charts/jupyterhub/templates/_helpers.tpl
Normal file
|
@ -0,0 +1,402 @@
|
|||
{{- /*
  ## About
  This file contains helpers to systematically name, label and select Kubernetes
  objects we define in the .yaml template files.


  ## How helpers work
  Helm helper functions are a good way to avoid repeating something. They will
  generate some output based on one single dictionary of input that we call the
  helpers scope. When you are in Helm, you access your current scope with a
  single punctuation mark (.).

  When you ask a helper to render its content, one often forwards the current
  scope to the helper in order to allow it to access .Release.Name,
  .Values.rbac.create and similar values.

  #### Example - Passing the current scope
  {{ include "jupyterhub.commonLabels" . }}

  It would be possible to pass something specific instead of the current scope
  (.), but that would make .Release.Name etc. inaccessible by the helper, which
  is something we aim to avoid.

  #### Example - Passing a new scope
  {{ include "demo.bananaPancakes" (dict "pancakes" 5 "bananas" 3) }}

  To let a helper access the current scope along with additional values we have
  opted to create a dictionary containing the additional values that is then
  populated with values from the current scope through the merge function.

  #### Example - Passing a new scope augmented with the old
  {{- $_ := merge (dict "appLabel" "kube-lego") . }}
  {{- include "jupyterhub.matchLabels" $_ | nindent 6 }}

  In this way, the code within the definition of `jupyterhub.matchLabels` will
  be able to access .Release.Name and .appLabel.

  NOTE:
    The ordering of merge is crucial: the latter argument is merged into the
    former. So if you were to swap the order you would influence the current
    scope, risking unintentional behavior. Therefore, always put the fresh
    unreferenced dictionary (dict "key1" "value1") first and the current
    scope (.) last.


  ## Declared helpers
  - appLabel          |
  - componentLabel    |
  - commonLabels      | uses appLabel
  - labels            | uses commonLabels
  - matchLabels       | uses labels
  - podCullerSelector | uses matchLabels


  ## Example usage
  ```yaml
  # Excerpt from proxy/autohttps/deployment.yaml
  apiVersion: apps/v1
  kind: Deployment
  metadata:
    name: {{ include "jupyterhub.autohttps.fullname" . }}
    labels:
      {{- include "jupyterhub.labels" . | nindent 4 }}
  spec:
    selector:
      matchLabels:
        {{- include "jupyterhub.matchLabels" $_ | nindent 6 }}
    template:
      metadata:
        labels:
          {{- include "jupyterhub.labels" $_ | nindent 8 }}
          hub.jupyter.org/network-access-proxy-http: "true"
  ```

  NOTE:
    The "jupyterhub.matchLabels" and "jupyterhub.labels" helpers are passed an
    augmented scope that will influence their behavior. They get the current
    scope "." merged with a dictionary containing extra key/value pairs. In this
    case the "." scope was merged with a small dictionary containing only one
    key/value pair, "appLabel: kube-lego". It is required for kube-lego to
    function properly. It is a way to override the default app label's value.
*/}}


{{- /*
  jupyterhub.appLabel:
    Used by "jupyterhub.labels".
*/}}
{{- define "jupyterhub.appLabel" -}}
{{ .Values.nameOverride | default .Chart.Name | trunc 63 | trimSuffix "-" }}
{{- end }}


{{- /*
  jupyterhub.componentLabel:
    Used by "jupyterhub.labels".

    NOTE: The component label is determined by either...
    - 1: The provided scope's .componentLabel
    - 2: The template's filename if living in the root folder
    - 3: The template parent folder's name
    -  : ...and is combined with .componentPrefix and .componentSuffix
*/}}
{{- define "jupyterhub.componentLabel" -}}
{{- $file := .Template.Name | base | trimSuffix ".yaml" -}}
{{- $parent := .Template.Name | dir | base | trimPrefix "templates" -}}
{{- $component := .componentLabel | default $parent | default $file -}}
{{- $component := print (.componentPrefix | default "") $component (.componentSuffix | default "") -}}
{{ $component }}
{{- end }}
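{{- /*
  A sketch of how the fallbacks above combine, using templates/hub/deployment.yaml
  as the example (the "hook-" prefix is hypothetical here, not something this
  template sets): $parent resolves to "hub", no .componentLabel is set, so with
  a scope carrying componentPrefix "hook-" the rendered label would be
  "hook-hub".
*/}}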


{{- /*
  jupyterhub.commonLabels:
    Foundation for "jupyterhub.labels".
    Provides labels: app, release, (chart and heritage).
*/}}
{{- define "jupyterhub.commonLabels" -}}
app: {{ .appLabel | default (include "jupyterhub.appLabel" .) }}
release: {{ .Release.Name }}
{{- if not .matchLabels }}
chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
heritage: {{ .heritageLabel | default .Release.Service }}
{{- end }}
{{- end }}


{{- /*
  jupyterhub.labels:
    Provides labels: component, app, release, (chart and heritage).
*/}}
{{- define "jupyterhub.labels" -}}
component: {{ include "jupyterhub.componentLabel" . }}
{{ include "jupyterhub.commonLabels" . }}
{{- end }}


{{- /*
  jupyterhub.matchLabels:
    Used to provide pod selection labels: component, app, release.
*/}}
{{- define "jupyterhub.matchLabels" -}}
{{- $_ := merge (dict "matchLabels" true) . -}}
{{ include "jupyterhub.labels" $_ }}
{{- end }}
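{{- /*
  For orientation, a sketch of the rendered output (hypothetical release name
  "my-hub", rendered from templates/hub/deployment.yaml):

    jupyterhub.labels ->       component: hub
                               app: jupyterhub
                               release: my-hub
                               chart: jupyterhub-3.0.3
                               heritage: Helm

    jupyterhub.matchLabels ->  component: hub
                               app: jupyterhub
                               release: my-hub

  matchLabels omits chart and heritage since those may change between upgrades,
  while a Deployment's selector must stay stable.
*/}}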


{{- /*
  jupyterhub.dockerconfigjson:
    Creates a base64 encoded docker registry json blob for use in an image pull
    secret, just like the `kubectl create secret docker-registry` command does
    for the generated secret's data.dockerconfigjson field. The output is
    verified to be exactly the same even if you have a password spanning
    multiple lines, as you may need when using a private GCR registry.

    - https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
*/}}
{{- define "jupyterhub.dockerconfigjson" -}}
{{ include "jupyterhub.dockerconfigjson.yaml" . | b64enc }}
{{- end }}

{{- define "jupyterhub.dockerconfigjson.yaml" -}}
{{- with .Values.imagePullSecret -}}
{
  "auths": {
    {{ .registry | default "https://index.docker.io/v1/" | quote }}: {
      "username": {{ .username | quote }},
      "password": {{ .password | quote }},
      {{- if .email }}
      "email": {{ .email | quote }},
      {{- end }}
      "auth": {{ (print .username ":" .password) | b64enc | quote }}
    }
  }
}
{{- end }}
{{- end }}
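{{- /*
  A minimal sketch of the values this helper reads (the keys match the
  template above; the registry and credentials are placeholders):

    imagePullSecret:
      registry: quay.io
      username: someuser
      password: sometoken
      email: someuser@example.com

  Rendered through b64enc, the result is suitable for a
  kubernetes.io/dockerconfigjson Secret's .dockerconfigjson data field.
*/}}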

{{- /*
  jupyterhub.imagePullSecrets:
    Augments passed .pullSecrets with $.Values.imagePullSecrets
*/}}
{{- define "jupyterhub.imagePullSecrets" -}}
    {{- /*
        We have implemented a trick to allow a parent chart depending on this
        chart to call these named templates.

        Caveats and notes:

            1. While parent charts can reference these, grandparent charts can't.
            2. Parent charts must not use an alias for this chart.
            3. There is no failsafe workaround to the above due to
               https://github.com/helm/helm/issues/9214.
            4. .Chart is of its own type (*chart.Metadata) and needs to be cast
               using "toYaml | fromYaml" in order to be able to use normal helm
               template functions on it.
    */}}
    {{- $jupyterhub_values := .root.Values }}
    {{- if ne .root.Chart.Name "jupyterhub" }}
        {{- if .root.Values.jupyterhub }}
            {{- $jupyterhub_values = .root.Values.jupyterhub }}
        {{- end }}
    {{- end }}

    {{- /* Populate $_.list with all relevant entries */}}
    {{- $_ := dict "list" (concat .image.pullSecrets $jupyterhub_values.imagePullSecrets | uniq) }}
    {{- if and $jupyterhub_values.imagePullSecret.create $jupyterhub_values.imagePullSecret.automaticReferenceInjection }}
        {{- $__ := set $_ "list" (append $_.list (include "jupyterhub.image-pull-secret.fullname" .root) | uniq) }}
    {{- end }}

    {{- /* Decide if something should be written */}}
    {{- if not (eq ($_.list | toJson) "[]") }}

        {{- /* Process the $_.list where strings become dicts with a name key
        and the strings become the name keys' values into $_.res */}}
        {{- $_ := set $_ "res" list }}
        {{- range $_.list }}
            {{- if eq (typeOf .) "string" }}
                {{- $__ := set $_ "res" (append $_.res (dict "name" .)) }}
            {{- else }}
                {{- $__ := set $_ "res" (append $_.res .) }}
            {{- end }}
        {{- end }}

        {{- /* Write the results */}}
        {{- $_.res | toJson }}

    {{- end }}
{{- end }}
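{{- /*
  Usage sketch, matching how this chart's own templates call it (see the hub
  deployment below): pass the root scope and the image config whose
  .pullSecrets should be merged in.

    {{ include "jupyterhub.imagePullSecrets" (dict "root" . "image" .Values.hub.image) }}

  The output is a JSON list, e.g. [{"name":"my-pull-secret"}], which is also
  valid YAML for an imagePullSecrets field.
*/}}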

{{- /*
  jupyterhub.singleuser.resources:
    The resource request of a singleuser.
*/}}
{{- define "jupyterhub.singleuser.resources" -}}
{{- $r1 := .Values.singleuser.cpu.guarantee -}}
{{- $r2 := .Values.singleuser.memory.guarantee -}}
{{- $r3 := .Values.singleuser.extraResource.guarantees -}}
{{- $r := or $r1 $r2 $r3 -}}
{{- $l1 := .Values.singleuser.cpu.limit -}}
{{- $l2 := .Values.singleuser.memory.limit -}}
{{- $l3 := .Values.singleuser.extraResource.limits -}}
{{- $l := or $l1 $l2 $l3 -}}
{{- if $r -}}
requests:
  {{- if $r1 }}
  cpu: {{ .Values.singleuser.cpu.guarantee }}
  {{- end }}
  {{- if $r2 }}
  memory: {{ .Values.singleuser.memory.guarantee }}
  {{- end }}
  {{- if $r3 }}
  {{- range $key, $value := .Values.singleuser.extraResource.guarantees }}
  {{ $key | quote }}: {{ $value | quote }}
  {{- end }}
  {{- end }}
{{- end }}

{{- if $l }}
limits:
  {{- if $l1 }}
  cpu: {{ .Values.singleuser.cpu.limit }}
  {{- end }}
  {{- if $l2 }}
  memory: {{ .Values.singleuser.memory.limit }}
  {{- end }}
  {{- if $l3 }}
  {{- range $key, $value := .Values.singleuser.extraResource.limits }}
  {{ $key | quote }}: {{ $value | quote }}
  {{- end }}
  {{- end }}
{{- end }}
{{- end }}
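{{- /*
  Sketch of input and output (hypothetical values): with

    singleuser:
      cpu: { guarantee: 0.5, limit: 2 }
      memory: { guarantee: 1G }

  the helper renders

    requests:
      cpu: 0.5
      memory: 1G
    limits:
      cpu: 2

  i.e. guarantees map to k8s resource requests, limits map to k8s limits, and
  empty sections are omitted entirely.
*/}}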

{{- /*
  jupyterhub.extraEnv:
    Output YAML formatted EnvVar entries for use in a containers env field.
*/}}
{{- define "jupyterhub.extraEnv" -}}
{{- include "jupyterhub.extraEnv.withTrailingNewLine" . | trimSuffix "\n" }}
{{- end }}

{{- define "jupyterhub.extraEnv.withTrailingNewLine" -}}
    {{- if . }}
        {{- /* If extraEnv is a list, we inject it as it is. */}}
        {{- if eq (typeOf .) "[]interface {}" }}
            {{- . | toYaml }}

        {{- /* If extraEnv is a map, we differentiate two cases: */}}
        {{- else if eq (typeOf .) "map[string]interface {}" }}
            {{- range $key, $value := . }}
                {{- /*
                    - If extraEnv.someKey has a map value, then we add the value
                      as a YAML parsed list element and use the key as the name
                      value unless it's explicitly set.
                */}}
                {{- if eq (typeOf $value) "map[string]interface {}" }}
                    {{- merge (dict) $value (dict "name" $key) | list | toYaml | println }}
                {{- /*
                    - If extraEnv.someKey has a string value, then we use the key
                      as the environment variable name for the value.
                */}}
                {{- else if eq (typeOf $value) "string" -}}
- name: {{ $key | quote }}
  value: {{ $value | quote | println }}
                {{- else }}
                    {{- printf "?.extraEnv.%s had an unexpected type (%s)" $key (typeOf $value) | fail }}
                {{- end }}
            {{- end }} {{- /* end of range */}}
        {{- end }}
    {{- end }} {{- /* end of: if . */}}
{{- end }} {{- /* end of definition */}}
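{{- /*
  Both accepted shapes, sketched with placeholder variable names:

    # map form - keys become env var names unless the entry sets its own name
    extraEnv:
      MY_FLAG: "1"
      NAMESPACE:
        valueFrom:
          fieldRef:
            fieldPath: metadata.namespace

    # list form - injected as-is
    extraEnv:
      - name: MY_FLAG
        value: "1"

  Both render to valid EnvVar list entries for a container's env field.
*/}}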

{{- /*
  jupyterhub.extraFiles.data:
    Renders content for a k8s Secret's data field, coming from extraFiles with
    binaryData entries.
*/}}
{{- define "jupyterhub.extraFiles.data.withNewLineSuffix" -}}
    {{- range $file_key, $file_details := . }}
        {{- include "jupyterhub.extraFiles.validate-file" (list $file_key $file_details) }}
        {{- if $file_details.binaryData }}
            {{- $file_key | quote }}: {{ $file_details.binaryData | nospace | quote }}{{ println }}
        {{- end }}
    {{- end }}
{{- end }}
{{- define "jupyterhub.extraFiles.data" -}}
    {{- include "jupyterhub.extraFiles.data.withNewLineSuffix" . | trimSuffix "\n" }}
{{- end }}

{{- /*
  jupyterhub.extraFiles.stringData:
    Renders content for a k8s Secret's stringData field, coming from extraFiles
    with either data or stringData entries.
*/}}
{{- define "jupyterhub.extraFiles.stringData.withNewLineSuffix" -}}
    {{- range $file_key, $file_details := . }}
        {{- include "jupyterhub.extraFiles.validate-file" (list $file_key $file_details) }}
        {{- $file_name := $file_details.mountPath | base }}
        {{- if $file_details.stringData }}
            {{- $file_key | quote }}: |
              {{- $file_details.stringData | trimSuffix "\n" | nindent 2 }}{{ println }}
        {{- end }}
        {{- if $file_details.data }}
            {{- $file_key | quote }}: |
            {{- if or (eq (ext $file_name) ".yaml") (eq (ext $file_name) ".yml") }}
              {{- $file_details.data | toYaml | nindent 2 }}{{ println }}
            {{- else if eq (ext $file_name) ".json" }}
              {{- $file_details.data | toJson | nindent 2 }}{{ println }}
            {{- else if eq (ext $file_name) ".toml" }}
              {{- $file_details.data | toToml | trimSuffix "\n" | nindent 2 }}{{ println }}
            {{- else }}
              {{- print "\n\nextraFiles entries with 'data' (" $file_key " > " $file_details.mountPath ") need to have a filename extension of .yaml, .yml, .json, or .toml!" | fail }}
            {{- end }}
        {{- end }}
    {{- end }}
{{- end }}
{{- define "jupyterhub.extraFiles.stringData" -}}
    {{- include "jupyterhub.extraFiles.stringData.withNewLineSuffix" . | trimSuffix "\n" }}
{{- end }}

{{- define "jupyterhub.extraFiles.validate-file" -}}
    {{- $file_key := index . 0 }}
    {{- $file_details := index . 1 }}

    {{- /* Use of mountPath. */}}
    {{- if not ($file_details.mountPath) }}
        {{- print "\n\nextraFiles entries (" $file_key ") must contain the field 'mountPath'." | fail }}
    {{- end }}

    {{- /* Use one of stringData, binaryData, data. */}}
    {{- $field_count := 0 }}
    {{- if $file_details.data }}
        {{- $field_count = add1 $field_count }}
    {{- end }}
    {{- if $file_details.stringData }}
        {{- $field_count = add1 $field_count }}
    {{- end }}
    {{- if $file_details.binaryData }}
        {{- $field_count = add1 $field_count }}
    {{- end }}
    {{- if ne $field_count 1 }}
        {{- print "\n\nextraFiles entries (" $file_key ") must only contain one of the fields: 'data', 'stringData', and 'binaryData'." | fail }}
    {{- end }}
{{- end }}
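{{- /*
  Sketch of an extraFiles entry exercising the validation above (the mount
  path and content are hypothetical):

    hub:
      extraFiles:
        my-config:
          mountPath: /etc/jupyterhub/my-config.yaml
          data:
            some_key: some_value

  Exactly one of data, stringData, or binaryData is allowed per entry, and
  'data' entries must have a .yaml/.yml/.json/.toml filename extension so the
  helper knows how to serialize them.
*/}}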

{{- /*
  jupyterhub.chart-version-to-git-ref:
    Renders a valid git reference from a chartpress generated version string.
    In practice, either a git tag or a git commit hash will be returned.

    - The version string will follow a chartpress pattern, see
      https://github.com/jupyterhub/chartpress#examples-chart-versions-and-image-tags.

    - The regexReplaceAll function is a sprig library function, see
      https://masterminds.github.io/sprig/strings.html.

    - The regular expression is in golang syntax, but \d had to become \\d for
      example.
*/}}
{{- define "jupyterhub.chart-version-to-git-ref" -}}
{{- regexReplaceAll ".*[.-]n\\d+[.]h(.*)" . "${1}" }}
{{- end }}
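{{- /*
  Sketch with hypothetical version strings following the chartpress pattern
  linked above:

    "1.2.3-n217.h6601a80" -> "6601a80"  (a development version maps to its commit hash)
    "1.2.3"               -> "1.2.3"    (no match, so the tagged version passes through)

  regexReplaceAll returns its input untouched when the expression does not match.
*/}}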
@@ -0,0 +1,92 @@
{{- /*
  This file contains logic to look up already
  generated passwords or generate new ones.

  proxy.secretToken     / hub.config.ConfigurableHTTPProxy.auth_token
  hub.cookieSecret      / hub.config.JupyterHub.cookie_secret
  auth.state.cryptoKey* / hub.config.CryptKeeper.keys

  *Note that the entire auth section is deprecated and users
  are forced through "fail" in NOTES.txt to migrate to hub.config.

  Note that the lookup logic returns a falsy value when run with
  `helm diff upgrade`, so it is a bit troublesome to test.
*/}}

{{- /*
  Returns the given number of random hex characters.

  - randNumeric 4 | atoi generates a random number in [0, 10^4).
    This is a range evenly divisible by 16, but even if it were off by one,
    the last partial interval offsetting randomness would only be 1 part in 625.
  - mod N 16 maps to the range 0-15
  - printf "%x" represents a single number 0-15 as a single hex character
*/}}
{{- define "jupyterhub.randHex" -}}
    {{- $result := "" }}
    {{- range $i := until . }}
        {{- $rand_hex_char := mod (randNumeric 4 | atoi) 16 | printf "%x" }}
        {{- $result = print $result $rand_hex_char }}
    {{- end }}
    {{- $result }}
{{- end }}
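{{- /*
  Usage sketch (this is how the helpers below call it):

    {{ include "jupyterhub.randHex" 64 }}

  renders a 64 character hex string such as "9f3a..." - suitable as a freshly
  generated cookie secret or CryptKeeper key.
*/}}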

{{- define "jupyterhub.hub.config.ConfigurableHTTPProxy.auth_token" -}}
    {{- if (.Values.hub.config | dig "ConfigurableHTTPProxy" "auth_token" "") }}
        {{- .Values.hub.config.ConfigurableHTTPProxy.auth_token }}
    {{- else if .Values.proxy.secretToken }}
        {{- .Values.proxy.secretToken }}
    {{- else }}
        {{- $k8s_state := lookup "v1" "Secret" .Release.Namespace (include "jupyterhub.hub.fullname" .) | default (dict "data" (dict)) }}
        {{- if hasKey $k8s_state.data "hub.config.ConfigurableHTTPProxy.auth_token" }}
            {{- index $k8s_state.data "hub.config.ConfigurableHTTPProxy.auth_token" | b64dec }}
        {{- else }}
            {{- randAlphaNum 64 }}
        {{- end }}
    {{- end }}
{{- end }}

{{- define "jupyterhub.hub.config.JupyterHub.cookie_secret" -}}
    {{- if (.Values.hub.config | dig "JupyterHub" "cookie_secret" "") }}
        {{- .Values.hub.config.JupyterHub.cookie_secret }}
    {{- else if .Values.hub.cookieSecret }}
        {{- .Values.hub.cookieSecret }}
    {{- else }}
        {{- $k8s_state := lookup "v1" "Secret" .Release.Namespace (include "jupyterhub.hub.fullname" .) | default (dict "data" (dict)) }}
        {{- if hasKey $k8s_state.data "hub.config.JupyterHub.cookie_secret" }}
            {{- index $k8s_state.data "hub.config.JupyterHub.cookie_secret" | b64dec }}
        {{- else }}
            {{- include "jupyterhub.randHex" 64 }}
        {{- end }}
    {{- end }}
{{- end }}

{{- define "jupyterhub.hub.config.CryptKeeper.keys" -}}
    {{- if (.Values.hub.config | dig "CryptKeeper" "keys" "") }}
        {{- .Values.hub.config.CryptKeeper.keys | join ";" }}
    {{- else }}
        {{- $k8s_state := lookup "v1" "Secret" .Release.Namespace (include "jupyterhub.hub.fullname" .) | default (dict "data" (dict)) }}
        {{- if hasKey $k8s_state.data "hub.config.CryptKeeper.keys" }}
            {{- index $k8s_state.data "hub.config.CryptKeeper.keys" | b64dec }}
        {{- else }}
            {{- include "jupyterhub.randHex" 64 }}
        {{- end }}
    {{- end }}
{{- end }}

{{- define "jupyterhub.hub.services.get_api_token" -}}
    {{- $_ := index . 0 }}
    {{- $service_key := index . 1 }}
    {{- $explicitly_set_api_token := or ($_.Values.hub.services | dig $service_key "api_token" "") ($_.Values.hub.services | dig $service_key "apiToken" "") }}
    {{- if $explicitly_set_api_token }}
        {{- $explicitly_set_api_token }}
    {{- else }}
        {{- $k8s_state := lookup "v1" "Secret" $_.Release.Namespace (include "jupyterhub.hub.fullname" $_) | default (dict "data" (dict)) }}
        {{- $k8s_secret_key := print "hub.services." $service_key ".apiToken" }}
        {{- if hasKey $k8s_state.data $k8s_secret_key }}
            {{- index $k8s_state.data $k8s_secret_key | b64dec }}
        {{- else }}
            {{- include "jupyterhub.randHex" 64 }}
        {{- end }}
    {{- end }}
{{- end }}
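{{- /*
  Sketch of the precedence all four helpers above share, using a hypothetical
  service named "my-service":

    1. An explicitly configured value, e.g. hub.services.my-service.apiToken
    2. The value already stored in the chart managed k8s Secret (via lookup)
    3. A freshly generated random value

  so tokens stay stable across `helm upgrade` unless the user overrides them.
*/}}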
30
jupyter/base/charts/jupyterhub/templates/hub/configmap.yaml
Normal file
@@ -0,0 +1,30 @@
kind: ConfigMap
apiVersion: v1
metadata:
  name: {{ include "jupyterhub.hub.fullname" . }}
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
data:
  {{- /*
    Resource names exposed to reliably reference them.

    user-scheduler: "my-helm-release-user-scheduler"
    ...
  */}}
  {{- include "jupyterhub.name-templates" . | nindent 2 }}

  {{- /*
    Glob files to allow them to be mounted by the hub pod

    jupyterhub_config: |
      multi line string content...
    z2jh.py: |
      multi line string content...
  */}}
  {{- (.Files.Glob "files/hub/*").AsConfig | nindent 2 }}

  {{- /*
    Store away a checksum of the hook-image-puller daemonset so future upgrades
    can compare and decide if it should run or not using the `lookup` function.
  */}}
  checksum_hook-image-puller: {{ include "jupyterhub.imagePuller.daemonset.hook.checksum" . | quote }}
243
jupyter/base/charts/jupyterhub/templates/hub/deployment.yaml
Normal file
@@ -0,0 +1,243 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "jupyterhub.hub.fullname" . }}
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
spec:
  {{- if typeIs "int" .Values.hub.revisionHistoryLimit }}
  revisionHistoryLimit: {{ .Values.hub.revisionHistoryLimit }}
  {{- end }}
  replicas: 1
  selector:
    matchLabels:
      {{- include "jupyterhub.matchLabels" . | nindent 6 }}
  strategy:
    {{- .Values.hub.deploymentStrategy | toYaml | nindent 4 }}
  template:
    metadata:
      labels:
        {{- /* Changes here will cause the Deployment to restart the pods. */}}
        {{- include "jupyterhub.matchLabels" . | nindent 8 }}
        hub.jupyter.org/network-access-proxy-api: "true"
        hub.jupyter.org/network-access-proxy-http: "true"
        hub.jupyter.org/network-access-singleuser: "true"
        {{- with .Values.hub.labels }}
        {{- . | toYaml | nindent 8 }}
        {{- end }}
      annotations:
        {{- /* This lets us autorestart when the secret changes! */}}
        checksum/config-map: {{ include (print .Template.BasePath "/hub/configmap.yaml") . | sha256sum }}
        checksum/secret: {{ include (print .Template.BasePath "/hub/secret.yaml") . | sha256sum }}
        {{- with .Values.hub.annotations }}
        {{- . | toYaml | nindent 8 }}
        {{- end }}
    spec:
      {{- if .Values.scheduling.podPriority.enabled }}
      priorityClassName: {{ include "jupyterhub.priority.fullname" . }}
      {{- end }}
      {{- with .Values.hub.nodeSelector }}
      nodeSelector:
        {{- . | toYaml | nindent 8 }}
      {{- end }}
      {{- with concat .Values.scheduling.corePods.tolerations .Values.hub.tolerations }}
      tolerations:
        {{- . | toYaml | nindent 8 }}
      {{- end }}
      {{- include "jupyterhub.coreAffinity" . | nindent 6 }}
      volumes:
        - name: config
          configMap:
            name: {{ include "jupyterhub.hub.fullname" . }}
        - name: secret
          secret:
            secretName: {{ include "jupyterhub.hub.fullname" . }}
        {{- with (include "jupyterhub.hub-existing-secret.fullname" .) }}
        - name: existing-secret
          secret:
            secretName: {{ . }}
        {{- end }}
        {{- if .Values.hub.extraFiles }}
        - name: files
          secret:
            secretName: {{ include "jupyterhub.hub.fullname" . }}
            items:
              {{- range $file_key, $file_details := .Values.hub.extraFiles }}
              - key: {{ $file_key | quote }}
                path: {{ $file_key | quote }}
                {{- with $file_details.mode }}
                mode: {{ . }}
                {{- end }}
              {{- end }}
        {{- end }}
        {{- with .Values.hub.extraVolumes }}
        {{- . | toYaml | nindent 8 }}
        {{- end }}
        {{- if eq .Values.hub.db.type "sqlite-pvc" }}
        - name: pvc
          persistentVolumeClaim:
            claimName: {{ include "jupyterhub.hub-pvc.fullname" . }}
        {{- end }}
      {{- with include "jupyterhub.hub-serviceaccount.fullname" . }}
      serviceAccountName: {{ . }}
      {{- end }}
      {{- with .Values.hub.podSecurityContext }}
      securityContext:
        {{- . | toYaml | nindent 8 }}
      {{- end }}
      {{- with include "jupyterhub.imagePullSecrets" (dict "root" . "image" .Values.hub.image) }}
      imagePullSecrets: {{ . }}
      {{- end }}
      {{- with .Values.hub.initContainers }}
      initContainers:
        {{- . | toYaml | nindent 8 }}
      {{- end }}
      containers:
        {{- with .Values.hub.extraContainers }}
        {{- . | toYaml | nindent 8 }}
        {{- end }}
        - name: hub
          image: {{ .Values.hub.image.name }}:{{ .Values.hub.image.tag }}
          {{- with .Values.hub.command }}
          command:
            {{- range . }}
            - {{ tpl . $ }}
            {{- end }}
          {{- end }}
          args:
            {{- /* .Values.hub.args overrides everything the Helm chart otherwise would set */}}
            {{- if .Values.hub.args }}
            {{- range .Values.hub.args }}
            - {{ tpl . $ }}
            {{- end }}

            {{- /* .Values.hub.args didn't replace the default logic */}}
            {{- else }}
            - jupyterhub
            - --config
            - /usr/local/etc/jupyterhub/jupyterhub_config.py
            {{- if .Values.debug.enabled }}
            - --debug
            {{- end }}
            {{- /* NOTE:
              We want to do automatic upgrades for sqlite-pvc by default, but
              allow users to opt out of that if they want. Users using their own
              db need to opt in. Go templates treat nil, "", and false as
              'false', making this code complex. We can probably make this a
              one-liner, but doing combinations of boolean vars in go templates
              is very inelegant & hard to reason about.
            */}}
            {{- $upgradeType := typeOf .Values.hub.db.upgrade }}
            {{- if eq $upgradeType "bool" }}
            {{- /* .Values.hub.db.upgrade has been explicitly set to true or false */}}
            {{- if .Values.hub.db.upgrade }}
            - --upgrade-db
            {{- end }}
            {{- else if eq $upgradeType "<nil>" }}
            {{- /* .Values.hub.db.upgrade is nil */}}
            {{- if eq .Values.hub.db.type "sqlite-pvc" }}
            - --upgrade-db
            {{- end }}
            {{- end }}
            {{- end }}
          volumeMounts:
            - mountPath: /usr/local/etc/jupyterhub/jupyterhub_config.py
              subPath: jupyterhub_config.py
              name: config
            - mountPath: /usr/local/etc/jupyterhub/z2jh.py
              subPath: z2jh.py
              name: config
            - mountPath: /usr/local/etc/jupyterhub/config/
              name: config
            - mountPath: /usr/local/etc/jupyterhub/secret/
              name: secret
            {{- if (include "jupyterhub.hub-existing-secret.fullname" .) }}
            - mountPath: /usr/local/etc/jupyterhub/existing-secret/
              name: existing-secret
            {{- end }}
            {{- range $file_key, $file_details := .Values.hub.extraFiles }}
            - mountPath: {{ $file_details.mountPath }}
              subPath: {{ $file_key | quote }}
              name: files
            {{- end }}
            {{- with .Values.hub.extraVolumeMounts }}
            {{- . | toYaml | nindent 12 }}
            {{- end }}
            {{- if eq .Values.hub.db.type "sqlite-pvc" }}
            - mountPath: /srv/jupyterhub
              name: pvc
              {{- with .Values.hub.db.pvc.subPath }}
              subPath: {{ . | quote }}
              {{- end }}
            {{- end }}
          {{- with .Values.hub.resources }}
          resources:
            {{- . | toYaml | nindent 12 }}
          {{- end }}
          {{- with .Values.hub.image.pullPolicy }}
          imagePullPolicy: {{ . }}
          {{- end }}
          {{- with .Values.hub.containerSecurityContext }}
          securityContext:
            {{- . | toYaml | nindent 12 }}
          {{- end }}
          {{- with .Values.hub.lifecycle }}
          lifecycle:
            {{- . | toYaml | nindent 12 }}
          {{- end }}
          env:
            - name: PYTHONUNBUFFERED
              value: "1"
            - name: HELM_RELEASE_NAME
              value: {{ .Release.Name | quote }}
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: CONFIGPROXY_AUTH_TOKEN
              valueFrom:
                secretKeyRef:
                  {{- /* NOTE:
                    References the chart managed k8s Secret even if
                    hub.existingSecret is specified to avoid using the lookup
                    function on the user managed k8s Secret, which is assumed to
                    not be possible.
                  */}}
                  name: {{ include "jupyterhub.hub.fullname" . }}
                  key: hub.config.ConfigurableHTTPProxy.auth_token
            {{- with .Values.hub.extraEnv }}
            {{- include "jupyterhub.extraEnv" . | nindent 12 }}
            {{- end }}
          ports:
            - name: http
              containerPort: 8081
          {{- if .Values.hub.livenessProbe.enabled }}
          {{- /* NOTE:
            We don't know how long hub database upgrades could take, so having a
            liveness probe could be a bit risky unless we give
            initialDelaySeconds a long enough margin for that to not be an
            issue. If it is too short, we could end up aborting database
            upgrades midway or ending up in an infinite restart loop.
          */}}
          livenessProbe:
            initialDelaySeconds: {{ .Values.hub.livenessProbe.initialDelaySeconds }}
            periodSeconds: {{ .Values.hub.livenessProbe.periodSeconds }}
            timeoutSeconds: {{ .Values.hub.livenessProbe.timeoutSeconds }}
            failureThreshold: {{ .Values.hub.livenessProbe.failureThreshold }}
            httpGet:
              path: {{ .Values.hub.baseUrl | trimSuffix "/" }}/hub/health
              port: http
          {{- end }}
          {{- if .Values.hub.readinessProbe.enabled }}
          readinessProbe:
            initialDelaySeconds: {{ .Values.hub.readinessProbe.initialDelaySeconds }}
            periodSeconds: {{ .Values.hub.readinessProbe.periodSeconds }}
            timeoutSeconds: {{ .Values.hub.readinessProbe.timeoutSeconds }}
            failureThreshold: {{ .Values.hub.readinessProbe.failureThreshold }}
            httpGet:
              path: {{ .Values.hub.baseUrl | trimSuffix "/" }}/hub/health
              port: http
          {{- end }}
      {{- with .Values.hub.extraPodSpec }}
      {{- . | toYaml | nindent 6 }}
      {{- end }}
84
jupyter/base/charts/jupyterhub/templates/hub/netpol.yaml
Normal file
@@ -0,0 +1,84 @@
{{- if .Values.hub.networkPolicy.enabled -}}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: {{ include "jupyterhub.hub.fullname" . }}
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
spec:
  podSelector:
    matchLabels:
      {{- include "jupyterhub.matchLabels" . | nindent 6 }}
  policyTypes:
    - Ingress
    - Egress

  # IMPORTANT:
  # NetworkPolicy's ingress "from" and egress "to" rule specifications require
  # great attention to detail. A quick summary is:
  #
  # 1. You can provide "from"/"to" rules that provide access either to all
  #    ports or to a subset of ports.
  # 2. You can for each "from"/"to" rule provide any number of
  #    "sources"/"destinations" of four different kinds.
  #    - podSelector                       - targets pods with a certain label in the same namespace as the NetworkPolicy
  #    - namespaceSelector                 - targets all pods running in namespaces with a certain label
  #    - namespaceSelector and podSelector - targets pods with a certain label running in namespaces with a certain label
  #    - ipBlock                           - targets network traffic from/to a set of IP address ranges
  #
  # Read more at: https://kubernetes.io/docs/concepts/services-networking/network-policies/#behavior-of-to-and-from-selectors
  #
  ingress:
    {{- with .Values.hub.networkPolicy.allowedIngressPorts }}
    # allow incoming traffic to these ports independent of source
    - ports:
        {{- range $port := . }}
        - port: {{ $port }}
        {{- end }}
    {{- end }}

    # allowed pods (hub.jupyter.org/network-access-hub) --> hub
    - ports:
        - port: http
      from:
        # source 1 - labeled pods
        - podSelector:
            matchLabels:
              hub.jupyter.org/network-access-hub: "true"
          {{- if eq .Values.hub.networkPolicy.interNamespaceAccessLabels "accept" }}
          namespaceSelector:
            matchLabels: {}   # without this, the podSelector would only consider pods in the local namespace
        # source 2 - pods in labeled namespaces
        - namespaceSelector:
            matchLabels:
              hub.jupyter.org/network-access-hub: "true"
          {{- end }}

    {{- with .Values.hub.networkPolicy.ingress }}
    # depends, but default is nothing --> hub
    {{- . | toYaml | nindent 4 }}
    {{- end }}

  egress:
    # hub --> proxy
    - to:
        - podSelector:
            matchLabels:
              {{- $_ := merge (dict "componentLabel" "proxy") . }}
              {{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
      ports:
        - port: 8001

    # hub --> singleuser-server
    - to:
        - podSelector:
            matchLabels:
              {{- $_ := merge (dict "componentLabel" "singleuser-server") . }}
              {{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
      ports:
        - port: 8888

  {{- with (include "jupyterhub.networkPolicy.renderEgressRules" (list . .Values.hub.networkPolicy)) }}
  {{- . | nindent 4 }}
  {{- end }}
{{- end }}
18
jupyter/base/charts/jupyterhub/templates/hub/pdb.yaml
Normal file
@@ -0,0 +1,18 @@
{{- if .Values.hub.pdb.enabled -}}
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: {{ include "jupyterhub.hub.fullname" . }}
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
spec:
  {{- if not (typeIs "<nil>" .Values.hub.pdb.maxUnavailable) }}
  maxUnavailable: {{ .Values.hub.pdb.maxUnavailable }}
  {{- end }}
  {{- if not (typeIs "<nil>" .Values.hub.pdb.minAvailable) }}
  minAvailable: {{ .Values.hub.pdb.minAvailable }}
  {{- end }}
  selector:
    matchLabels:
      {{- include "jupyterhub.matchLabels" . | nindent 6 }}
{{- end }}
25
jupyter/base/charts/jupyterhub/templates/hub/pvc.yaml
Normal file
@@ -0,0 +1,25 @@
{{- if eq .Values.hub.db.type "sqlite-pvc" -}}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: {{ include "jupyterhub.hub-pvc.fullname" . }}
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
  {{- with .Values.hub.db.pvc.annotations }}
  annotations:
    {{- . | toYaml | nindent 4 }}
  {{- end }}
spec:
  {{- with .Values.hub.db.pvc.selector }}
  selector:
    {{- . | toYaml | nindent 4 }}
  {{- end }}
  {{- if typeIs "string" .Values.hub.db.pvc.storageClassName }}
  storageClassName: {{ .Values.hub.db.pvc.storageClassName | quote }}
  {{- end }}
  accessModes:
    {{- .Values.hub.db.pvc.accessModes | toYaml | nindent 4 }}
  resources:
    requests:
      storage: {{ .Values.hub.db.pvc.storage | quote }}
{{- end }}
30
jupyter/base/charts/jupyterhub/templates/hub/rbac.yaml
Normal file
@@ -0,0 +1,30 @@
{{- if .Values.rbac.create -}}
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ include "jupyterhub.hub.fullname" . }}
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
rules:
  - apiGroups: [""]   # "" indicates the core API group
    resources: ["pods", "persistentvolumeclaims", "secrets", "services"]
    verbs: ["get", "watch", "list", "create", "delete"]
  - apiGroups: [""]   # "" indicates the core API group
    resources: ["events"]
    verbs: ["get", "watch", "list"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ include "jupyterhub.hub.fullname" . }}
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
subjects:
  - kind: ServiceAccount
    name: {{ include "jupyterhub.hub-serviceaccount.fullname" . }}
    namespace: "{{ .Release.Namespace }}"
roleRef:
  kind: Role
  name: {{ include "jupyterhub.hub.fullname" . }}
  apiGroup: rbac.authorization.k8s.io
{{- end }}
50
jupyter/base/charts/jupyterhub/templates/hub/secret.yaml
Normal file
@@ -0,0 +1,50 @@
kind: Secret
apiVersion: v1
metadata:
  name: {{ include "jupyterhub.hub.fullname" . }}
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
type: Opaque
data:
  {{- $values := merge dict .Values }}
  {{- /* also passthrough a subset of Chart / Release */}}
  {{- $_ := set $values "Chart" (dict "Name" .Chart.Name "Version" .Chart.Version) }}
  {{- $_ := set $values "Release" (pick .Release "Name" "Namespace" "Service") }}
  values.yaml: {{ $values | toYaml | b64enc | quote }}

  {{- with .Values.hub.db.password }}
  # Used to mount MYSQL_PWD or PGPASSWORD on the hub pod, unless hub.existingSecret
  # is set, as then that k8s Secret's value must be specified instead.
  hub.db.password: {{ . | b64enc | quote }}
  {{- end }}

  # Any JupyterHub Services api_tokens are exposed in this k8s Secret as a
  # convenience for external services running in the k8s cluster that could
  # mount them directly from this k8s Secret.
  {{- range $key, $service := .Values.hub.services }}
  hub.services.{{ $key }}.apiToken: {{ include "jupyterhub.hub.services.get_api_token" (list $ $key) | b64enc | quote }}
  {{- end }}

  # During Helm template rendering, these values that can be autogenerated for
  # users are set using the following logic:
  #
  # 1. Use the chart configuration's value
  # 2. Use the k8s Secret's value
  # 3. Use a new autogenerated value
  #
  # hub.config.ConfigurableHTTPProxy.auth_token: for hub to proxy-api authorization (JupyterHub.proxy_auth_token is deprecated)
  # hub.config.JupyterHub.cookie_secret: for cookie encryption
  # hub.config.CryptKeeper.keys: for auth state encryption
  #
  hub.config.ConfigurableHTTPProxy.auth_token: {{ include "jupyterhub.hub.config.ConfigurableHTTPProxy.auth_token" . | required "This should not happen: blank output from 'jupyterhub.hub.config.ConfigurableHTTPProxy.auth_token' template" | b64enc | quote }}
  hub.config.JupyterHub.cookie_secret: {{ include "jupyterhub.hub.config.JupyterHub.cookie_secret" . | required "This should not happen: blank output from 'jupyterhub.hub.config.JupyterHub.cookie_secret' template" | b64enc | quote }}
  hub.config.CryptKeeper.keys: {{ include "jupyterhub.hub.config.CryptKeeper.keys" . | required "This should not happen: blank output from 'jupyterhub.hub.config.CryptKeeper.keys' template" | b64enc | quote }}

  {{- with include "jupyterhub.extraFiles.data" .Values.hub.extraFiles }}
  {{- . | nindent 2 }}
  {{- end }}

{{- with include "jupyterhub.extraFiles.stringData" .Values.hub.extraFiles }}
stringData:
  {{- . | nindent 2 }}
{{- end }}
37
jupyter/base/charts/jupyterhub/templates/hub/service.yaml
Normal file
@@ -0,0 +1,37 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ include "jupyterhub.hub.fullname" . }}
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
  annotations:
    {{- if not (index .Values.hub.service.annotations "prometheus.io/scrape") }}
    prometheus.io/scrape: "true"
    {{- end }}
    {{- if not (index .Values.hub.service.annotations "prometheus.io/path") }}
    prometheus.io/path: {{ .Values.hub.baseUrl | trimSuffix "/" }}/hub/metrics
    {{- end }}
    {{- if not (index .Values.hub.service.annotations "prometheus.io/port") }}
    prometheus.io/port: "8081"
    {{- end }}
    {{- with .Values.hub.service.annotations }}
    {{- . | toYaml | nindent 4 }}
    {{- end }}
spec:
  type: {{ .Values.hub.service.type }}
  {{- with .Values.hub.service.loadBalancerIP }}
  loadBalancerIP: {{ . }}
  {{- end }}
  selector:
    {{- include "jupyterhub.matchLabels" . | nindent 4 }}
  ports:
    - name: hub
      port: 8081
      targetPort: http
      {{- with .Values.hub.service.ports.nodePort }}
      nodePort: {{ . }}
      {{- end }}

    {{- with .Values.hub.service.extraPorts }}
    {{- . | toYaml | nindent 4 }}
    {{- end }}
@@ -0,0 +1,12 @@
{{- if .Values.hub.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "jupyterhub.hub-serviceaccount.fullname" . }}
  {{- with .Values.hub.serviceAccount.annotations }}
  annotations:
    {{- . | toYaml | nindent 4 }}
  {{- end }}
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
{{- end }}
@@ -0,0 +1,15 @@
{{- if .Values.imagePullSecret.create }}
kind: Secret
apiVersion: v1
metadata:
  name: {{ include "jupyterhub.image-pull-secret.fullname" . }}
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": pre-install,pre-upgrade
    "helm.sh/hook-delete-policy": before-hook-creation
    "helm.sh/hook-weight": "-20"
type: kubernetes.io/dockerconfigjson
data:
  .dockerconfigjson: {{ include "jupyterhub.dockerconfigjson" . }}
{{- end }}
@@ -0,0 +1,251 @@
{{- /*
  Returns an image-puller daemonset. Two daemonsets will be created like this:
  - hook-image-puller: for pre helm upgrade image pulling (lives temporarily)
  - continuous-image-puller: for newly added nodes image pulling
*/}}
{{- define "jupyterhub.imagePuller.daemonset" -}}
apiVersion: apps/v1
kind: DaemonSet
metadata:
  {{- if .hook }}
  name: {{ include "jupyterhub.hook-image-puller.fullname" . }}
  {{- else }}
  name: {{ include "jupyterhub.continuous-image-puller.fullname" . }}
  {{- end }}
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
    {{- if .hook }}
    hub.jupyter.org/deletable: "true"
    {{- end }}
  {{- if .hook }}
  annotations:
    {{- /*
      Allows the daemonset to be deleted when the image-awaiter job is completed.
    */}}
    "helm.sh/hook": pre-install,pre-upgrade
    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
    "helm.sh/hook-weight": "-10"
  {{- end }}
spec:
  selector:
    matchLabels:
      {{- include "jupyterhub.matchLabels" . | nindent 6 }}
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 100%
  {{- if typeIs "int" .Values.prePuller.revisionHistoryLimit }}
  revisionHistoryLimit: {{ .Values.prePuller.revisionHistoryLimit }}
  {{- end }}
  template:
    metadata:
      labels:
        {{- include "jupyterhub.matchLabels" . | nindent 8 }}
      {{- with .Values.prePuller.annotations }}
      annotations:
        {{- . | toYaml | nindent 8 }}
      {{- end }}
    spec:
      {{- /*
        image-puller pods are made evictable to save on the k8s pods
        per node limit all k8s clusters have, and have a higher priority
        than user-placeholder pods that could block an entire node.
      */}}
      {{- if .Values.scheduling.podPriority.enabled }}
      priorityClassName: {{ include "jupyterhub.image-puller-priority.fullname" . }}
      {{- end }}
      {{- with .Values.singleuser.nodeSelector }}
      nodeSelector:
        {{- . | toYaml | nindent 8 }}
      {{- end }}
      {{- with concat .Values.scheduling.userPods.tolerations .Values.singleuser.extraTolerations .Values.prePuller.extraTolerations }}
      tolerations:
        {{- . | toYaml | nindent 8 }}
      {{- end }}
      {{- if include "jupyterhub.userNodeAffinityRequired" . }}
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              {{- include "jupyterhub.userNodeAffinityRequired" . | nindent 14 }}
      {{- end }}
      terminationGracePeriodSeconds: 0
      automountServiceAccountToken: false
      {{- with include "jupyterhub.imagePullSecrets" (dict "root" . "image" .Values.singleuser.image) }}
      imagePullSecrets: {{ . }}
      {{- end }}
      initContainers:
        {{- /* --- Conditionally pull an image all user pods will use in an initContainer --- */}}
        {{- $blockWithIptables := hasKey .Values.singleuser.cloudMetadata "enabled" | ternary (not .Values.singleuser.cloudMetadata.enabled) .Values.singleuser.cloudMetadata.blockWithIptables }}
        {{- if $blockWithIptables }}
        - name: image-pull-metadata-block
          image: {{ .Values.singleuser.networkTools.image.name }}:{{ .Values.singleuser.networkTools.image.tag }}
          {{- with .Values.singleuser.networkTools.image.pullPolicy }}
          imagePullPolicy: {{ . }}
          {{- end }}
          command:
            - /bin/sh
            - -c
            - echo "Pulling complete"
          {{- with .Values.prePuller.resources }}
          resources:
            {{- . | toYaml | nindent 12 }}
          {{- end }}
          {{- with .Values.prePuller.containerSecurityContext }}
          securityContext:
            {{- . | toYaml | nindent 12 }}
          {{- end }}
        {{- end }}

        {{- /* --- Pull default image --- */}}
        - name: image-pull-singleuser
          image: {{ .Values.singleuser.image.name }}:{{ .Values.singleuser.image.tag }}
          command:
            - /bin/sh
            - -c
            - echo "Pulling complete"
          {{- with .Values.prePuller.resources }}
          resources:
            {{- . | toYaml | nindent 12 }}
          {{- end }}
          {{- with .Values.prePuller.containerSecurityContext }}
          securityContext:
            {{- . | toYaml | nindent 12 }}
          {{- end }}

        {{- /* --- Pull extra containers' images --- */}}
        {{- range $k, $container := concat .Values.singleuser.initContainers .Values.singleuser.extraContainers }}
        - name: image-pull-singleuser-init-and-extra-containers-{{ $k }}
          image: {{ $container.image }}
          command:
            - /bin/sh
            - -c
            - echo "Pulling complete"
          {{- with $.Values.prePuller.resources }}
          resources:
            {{- . | toYaml | nindent 12 }}
          {{- end }}
          {{- with $.Values.prePuller.containerSecurityContext }}
          securityContext:
            {{- . | toYaml | nindent 12 }}
          {{- end }}
        {{- end }}

        {{- /* --- Conditionally pull profileList images --- */}}
        {{- if .Values.prePuller.pullProfileListImages }}
        {{- range $k, $container := .Values.singleuser.profileList }}
        {{- if $container.kubespawner_override }}
        {{- if $container.kubespawner_override.image }}
        - name: image-pull-singleuser-profilelist-{{ $k }}
          image: {{ $container.kubespawner_override.image }}
          command:
            - /bin/sh
            - -c
            - echo "Pulling complete"
          {{- with $.Values.prePuller.resources }}
          resources:
            {{- . | toYaml | nindent 12 }}
          {{- end }}
          {{- with $.Values.prePuller.containerSecurityContext }}
          securityContext:
            {{- . | toYaml | nindent 12 }}
          {{- end }}
        {{- end }}
        {{- end }}
        {{- end }}
        {{- end }}

        {{- /* --- Pull extra images --- */}}
        {{- range $k, $v := .Values.prePuller.extraImages }}
        - name: image-pull-{{ $k }}
          image: {{ $v.name }}:{{ $v.tag }}
          command:
            - /bin/sh
            - -c
            - echo "Pulling complete"
          {{- with $.Values.prePuller.resources }}
          resources:
            {{- . | toYaml | nindent 12 }}
          {{- end }}
          {{- with $.Values.prePuller.containerSecurityContext }}
          securityContext:
            {{- . | toYaml | nindent 12 }}
          {{- end }}
        {{- end }}
      containers:
        - name: pause
          image: {{ .Values.prePuller.pause.image.name }}:{{ .Values.prePuller.pause.image.tag }}
          {{- with .Values.prePuller.resources }}
          resources:
            {{- . | toYaml | nindent 12 }}
          {{- end }}
          {{- with .Values.prePuller.pause.containerSecurityContext }}
          securityContext:
            {{- . | toYaml | nindent 12 }}
          {{- end }}
{{- end }}
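{{- /*
  Why a dummy command pulls images: kubelet must pull a container's image
  before it can start the container at all, so running each image as an init
  container with a no-op `echo "Pulling complete"` is enough to get the image
  cached on the node. The pause container then keeps the daemonset pod alive
  at near-zero cost. (This rationale is our reading of standard kubelet
  behavior, not something configured by the chart.)
*/}}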


{{- /*
  Returns a rendered k8s DaemonSet resource: continuous-image-puller
*/}}
{{- define "jupyterhub.imagePuller.daemonset.continuous" -}}
    {{- $_ := merge (dict "hook" false "componentPrefix" "continuous-") . }}
    {{- include "jupyterhub.imagePuller.daemonset" $_ }}
{{- end }}


{{- /*
  Returns a rendered k8s DaemonSet resource: hook-image-puller
*/}}
{{- define "jupyterhub.imagePuller.daemonset.hook" -}}
    {{- $_ := merge (dict "hook" true "componentPrefix" "hook-") . }}
    {{- include "jupyterhub.imagePuller.daemonset" $_ }}
{{- end }}


{{- /*
  Returns a checksum of the rendered k8s DaemonSet resource: hook-image-puller

  This checksum is used when prePuller.hook.pullOnlyOnChanges=true to decide if
  it is worth creating the hook-image-puller associated resources.
*/}}
{{- define "jupyterhub.imagePuller.daemonset.hook.checksum" -}}
    {{- /*
      We pin componentLabel and Chart.Version as doing so can pin labels
      of no importance if they would change. Chart.Name is also pinned as
      a harmless technical workaround when we compute the checksum.
    */}}
    {{- $_ := merge (dict "componentLabel" "pinned" "Chart" (dict "Name" "jupyterhub" "Version" "pinned")) . -}}
    {{- $yaml := include "jupyterhub.imagePuller.daemonset.hook" $_ }}
    {{- $yaml | sha256sum }}
{{- end }}


{{- /*
  Returns a truthy string or a blank string depending on if the
  hook-image-puller should be installed. The truthy strings are comments
  that summarize the state that led to returning a truthy string.

  - prePuller.hook.enabled must be true
  - if prePuller.hook.pullOnlyOnChanges is true, the checksum of the
    hook-image-puller daemonset must differ since last upgrade
*/}}
{{- define "jupyterhub.imagePuller.daemonset.hook.install" -}}
    {{- if .Values.prePuller.hook.enabled }}
        {{- if .Values.prePuller.hook.pullOnlyOnChanges }}
            {{- $new_checksum := include "jupyterhub.imagePuller.daemonset.hook.checksum" . }}
            {{- $k8s_state := lookup "v1" "ConfigMap" .Release.Namespace (include "jupyterhub.hub.fullname" .) | default (dict "data" (dict)) }}
            {{- $old_checksum := index $k8s_state.data "checksum_hook-image-puller" | default "" }}
            {{- if ne $new_checksum $old_checksum -}}
# prePuller.hook.enabled={{ .Values.prePuller.hook.enabled }}
# prePuller.hook.pullOnlyOnChanges={{ .Values.prePuller.hook.pullOnlyOnChanges }}
# post-upgrade checksum != pre-upgrade checksum (of the hook-image-puller DaemonSet)
# "{{ $new_checksum }}" != "{{ $old_checksum }}"
            {{- end }}
        {{- else -}}
# prePuller.hook.enabled={{ .Values.prePuller.hook.enabled }}
# prePuller.hook.pullOnlyOnChanges={{ .Values.prePuller.hook.pullOnlyOnChanges }}
        {{- end }}
    {{- end }}
{{- end }}
@@ -0,0 +1,8 @@
{{- /*
  The continuous-image-puller daemonset's task is to pull required images to
  nodes that are added in between helm upgrades, for example by manually adding
  a node or by the cluster autoscaler.
*/}}
{{- if .Values.prePuller.continuous.enabled }}
{{- include "jupyterhub.imagePuller.daemonset.continuous" . }}
{{- end }}
@@ -0,0 +1,9 @@
{{- /*
  The hook-image-puller daemonset will be created with the highest priority
  during helm upgrades. Its task is to pull the required images on all nodes.
  When the image-awaiter job confirms the required images to be pulled, the
  daemonset is deleted. Only then will the actual helm upgrade start.
*/}}
{{- if (include "jupyterhub.imagePuller.daemonset.hook.install" .) -}}
{{- include "jupyterhub.imagePuller.daemonset.hook" . }}
{{- end }}
@@ -0,0 +1,76 @@
{{- /*
  This job has a part to play in a helm upgrade process. It simply waits for
  the hook-image-puller daemonset, which is started slightly before this job,
  to get its pods running. If all those pods are running, they must have pulled
  all the required images on all nodes, as they are used as init containers
  with a dummy command.
*/}}
{{- if (include "jupyterhub.imagePuller.daemonset.hook.install" .) -}}
apiVersion: batch/v1
kind: Job
metadata:
  name: {{ include "jupyterhub.hook-image-awaiter.fullname" . }}
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
    hub.jupyter.org/deletable: "true"
  annotations:
    "helm.sh/hook": pre-install,pre-upgrade
    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
    "helm.sh/hook-weight": "10"
spec:
  template:
    # The hook-image-awaiter Job and hook-image-puller DaemonSet were
    # conditionally created based on this state:
    #
    {{- include "jupyterhub.imagePuller.daemonset.hook.install" . | nindent 4 }}
    #
    metadata:
      labels:
        {{- /* Changes here will cause the Job to restart the pods. */}}
        {{- include "jupyterhub.matchLabels" . | nindent 8 }}
        {{- with .Values.prePuller.labels }}
        {{- . | toYaml | nindent 8 }}
        {{- end }}
      {{- with .Values.prePuller.annotations }}
      annotations:
        {{- . | toYaml | nindent 8 }}
      {{- end }}
    spec:
      restartPolicy: Never
      {{- with include "jupyterhub.hook-image-awaiter-serviceaccount.fullname" . }}
      serviceAccountName: {{ . }}
      {{- end }}
      {{- with .Values.prePuller.hook.nodeSelector }}
      nodeSelector:
        {{- . | toYaml | nindent 8 }}
      {{- end }}
      {{- with concat .Values.scheduling.corePods.tolerations .Values.prePuller.hook.tolerations }}
      tolerations:
        {{- . | toYaml | nindent 8 }}
      {{- end }}
      {{- with include "jupyterhub.imagePullSecrets" (dict "root" . "image" .Values.prePuller.hook.image) }}
      imagePullSecrets: {{ . }}
      {{- end }}
      containers:
        - image: {{ .Values.prePuller.hook.image.name }}:{{ .Values.prePuller.hook.image.tag }}
          name: {{ include "jupyterhub.hook-image-awaiter.fullname" . }}
          {{- with .Values.prePuller.hook.image.pullPolicy }}
          imagePullPolicy: {{ . }}
          {{- end }}
          command:
            - /image-awaiter
            - -ca-path=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
            - -auth-token-path=/var/run/secrets/kubernetes.io/serviceaccount/token
            - -api-server-address=https://kubernetes.default.svc:$(KUBERNETES_SERVICE_PORT)
            - -namespace={{ .Release.Namespace }}
            - -daemonset={{ include "jupyterhub.hook-image-puller.fullname" . }}
            - -pod-scheduling-wait-duration={{ .Values.prePuller.hook.podSchedulingWaitDuration }}
          {{- with .Values.prePuller.hook.containerSecurityContext }}
          securityContext:
            {{- . | toYaml | nindent 12 }}
          {{- end }}
          {{- with .Values.prePuller.hook.resources }}
          resources:
            {{- . | toYaml | nindent 12 }}
          {{- end }}
{{- end }}
@@ -0,0 +1,18 @@
{{- if .Values.scheduling.podPriority.enabled }}
{{- if or .Values.prePuller.hook.enabled .Values.prePuller.continuous.enabled -}}
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
  name: {{ include "jupyterhub.image-puller-priority.fullname" . }}
  annotations:
    meta.helm.sh/release-name: "{{ .Release.Name }}"
    meta.helm.sh/release-namespace: "{{ .Release.Namespace }}"
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
value: {{ .Values.scheduling.podPriority.imagePullerPriority }}
globalDefault: false
description: >-
  Enables [hook|continuous]-image-puller pods to fit on nodes even though they
  are clogged by user-placeholder pods, while not evicting normal user pods.
{{- end }}
{{- end }}
@@ -0,0 +1,45 @@
{{- /*
  Permissions to be used by the hook-image-awaiter job
*/}}
{{- if .Values.rbac.create -}}
{{- if (include "jupyterhub.imagePuller.daemonset.hook.install" .) -}}
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ include "jupyterhub.hook-image-awaiter.fullname" . }}
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
    hub.jupyter.org/deletable: "true"
  annotations:
    "helm.sh/hook": pre-install,pre-upgrade
    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
    "helm.sh/hook-weight": "0"
rules:
  - apiGroups: ["apps"]
    resources: ["daemonsets"]
    verbs: ["get"]
---
{{- /*
  ... as declared by this binding.
*/}}
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ include "jupyterhub.hook-image-awaiter.fullname" . }}
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
    hub.jupyter.org/deletable: "true"
  annotations:
    "helm.sh/hook": pre-install,pre-upgrade
    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
    "helm.sh/hook-weight": "0"
subjects:
  - kind: ServiceAccount
    name: {{ include "jupyterhub.hook-image-awaiter-serviceaccount.fullname" . }}
    namespace: "{{ .Release.Namespace }}"
roleRef:
  kind: Role
  name: {{ include "jupyterhub.hook-image-awaiter.fullname" . }}
  apiGroup: rbac.authorization.k8s.io
{{- end }}
{{- end }}
@@ -0,0 +1,21 @@
{{- /*
  ServiceAccount for the pre-puller hook's image-awaiter-job
*/}}
{{- if .Values.prePuller.hook.serviceAccount.create -}}
{{- if (include "jupyterhub.imagePuller.daemonset.hook.install" .) -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "jupyterhub.hook-image-awaiter-serviceaccount.fullname" . }}
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
    hub.jupyter.org/deletable: "true"
  annotations:
    "helm.sh/hook": pre-install,pre-upgrade
    "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
    "helm.sh/hook-weight": "0"
    {{- with .Values.prePuller.hook.serviceAccount.annotations }}
    {{- . | toYaml | nindent 4 }}
    {{- end }}
{{- end }}
{{- end }}
35
jupyter/base/charts/jupyterhub/templates/ingress.yaml
Normal file
@@ -0,0 +1,35 @@
{{- if .Values.ingress.enabled -}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: {{ include "jupyterhub.ingress.fullname" . }}
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
  {{- with .Values.ingress.annotations }}
  annotations:
    {{- . | toYaml | nindent 4 }}
  {{- end }}
spec:
  {{- with .Values.ingress.ingressClassName }}
  ingressClassName: "{{ . }}"
  {{- end }}
  rules:
    {{- range $host := .Values.ingress.hosts | default (list "") }}
    - http:
        paths:
          - path: {{ $.Values.hub.baseUrl | trimSuffix "/" }}/{{ $.Values.ingress.pathSuffix }}
            pathType: {{ $.Values.ingress.pathType }}
            backend:
              service:
                name: {{ include "jupyterhub.proxy-public.fullname" $ }}
                port:
                  name: http
      {{- if $host }}
      host: {{ $host | quote }}
      {{- end }}
    {{- end }}
  {{- with .Values.ingress.tls }}
  tls:
    {{- . | toYaml | nindent 4 }}
  {{- end }}
{{- end }}
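For reference, this template only renders when `ingress.enabled` is set. A minimal `values.yaml` sketch that exercises it could look like the following; the hostname and class name are placeholders, and the keys mirror the `.Values.ingress.*` references in the template above:

```yaml
ingress:
  enabled: true
  ingressClassName: nginx     # rendered into spec.ingressClassName
  hosts:
    - hub.example.com         # each entry becomes a rules[].host
  tls:
    - secretName: hub-tls     # copied verbatim into spec.tls
      hosts:
        - hub.example.com
```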
@@ -0,0 +1,9 @@
# Automatic HTTPS Terminator

This directory has Kubernetes objects for automatic Let's Encrypt support.
When enabled, we create a new deployment object that has an nginx-ingress
and kube-lego container in it. This is responsible for requesting,
storing, and renewing certificates as needed from Let's Encrypt.

The only change required outside of this directory is in the `proxy-public`
service, which targets different hubs based on automatic HTTPS status.
@@ -0,0 +1,109 @@
{{- define "jupyterhub.dynamic.yaml" -}}
# Content of dynamic.yaml to be merged with
# proxy.traefik.extraDynamicConfig.
# ----------------------------------------------------------------------------
http:
  # Middlewares tweak requests. We define them here and reference them in
  # our routers. We use them to redirect http traffic and add headers to
  # proxied web requests.
  #
  # ref: https://docs.traefik.io/middlewares/overview/
  middlewares:
    hsts:
      # A middleware to add an HTTP Strict-Transport-Security (HSTS) response
      # header. It functions as a request for browsers to enforce HTTPS on
      # their end for a given time into the future, optionally also covering
      # requests to subdomains.
      #
      # ref: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security
      headers:
        stsIncludeSubdomains: {{ .Values.proxy.traefik.hsts.includeSubdomains }}
        stsPreload: {{ .Values.proxy.traefik.hsts.preload }}
        stsSeconds: {{ .Values.proxy.traefik.hsts.maxAge }}
    # A middleware to redirect to https
    redirect:
      redirectScheme:
        permanent: true
        scheme: https
    # A middleware to add an X-Scheme (X-Forwarded-Proto) header that
    # JupyterHub's Tornado web-server needs if expecting to serve https
    # traffic. Without it we would run into issues like:
    # https://github.com/jupyterhub/jupyterhub/issues/2284
    scheme:
      headers:
        customRequestHeaders:
          # DISCUSS ME: Can we use the X-Forwarded-Proto header instead? It
          # seems more recognized. Mozilla calls it the de-facto standard
          # header for this purpose, and Tornado recognizes both.
          #
          # ref: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-Proto
          # ref: https://www.tornadoweb.org/en/stable/httpserver.html#http-server
          X-Scheme: https

  # Routers route web requests to a service and optionally tweak them with
  # middleware.
  #
  # ref: https://docs.traefik.io/routing/routers/
  routers:
    # Route secure https traffic to the configurable-http-proxy managed by
    # JupyterHub.
    default:
      entrypoints:
        - "https"
      middlewares:
        - "hsts"
        - "scheme"
      rule: PathPrefix(`/`)
      service: default
      # Use our predefined TLS options and certificate resolver, enabling
      # this route to act as a TLS termination proxy with high security
      # standards.
      tls:
        certResolver: default
        domains:
          {{- range $host := .Values.proxy.https.hosts }}
          - main: {{ $host }}
          {{- end }}
        options: default

    # Route insecure http traffic to https
    insecure:
      entrypoints:
        - "http"
      middlewares:
        - "redirect"
      rule: PathPrefix(`/`)
      service: default

  # Services represent the destinations we route traffic to.
  #
  # ref: https://docs.traefik.io/routing/services/
  services:
    # Represents the configurable-http-proxy (chp) server that is managed by
    # JupyterHub to route traffic both to itself and to user pods.
    default:
      loadBalancer:
        servers:
          - url: 'http://proxy-http:8000/'

# Configure TLS to give us an A+ in the ssllabs.com test
#
# ref: https://www.ssllabs.com/ssltest/
tls:
  options:
    default:
      # Allowed ciphers adapted from Mozilla's SSL Configuration Generator,
      # configured for Intermediate support, which doesn't support very old
      # systems but doesn't require very modern ones either.
      #
      # ref: https://ssl-config.mozilla.org/#server=traefik&version=2.1.2&config=intermediate&guideline=5.4
      cipherSuites:
        - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
        - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
        - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
        - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
        - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
        - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305
      minVersion: VersionTLS12
      sniStrict: true
{{- end }}
@@ -0,0 +1,68 @@
{{- define "jupyterhub.traefik.yaml" -}}
# Content of traefik.yaml to be merged with
# proxy.traefik.extraStaticConfig.
# ----------------------------------------------------------------------------

# Config of logs about web requests
#
# ref: https://docs.traefik.io/observability/access-logs/
accessLog:
  # Redact commonly sensitive headers
  fields:
    headers:
      names:
        Authorization: redacted
        Cookie: redacted
        Set-Cookie: redacted
        X-Xsrftoken: redacted
  # Only log errors
  filters:
    statusCodes:
      - 500-599

# Automatically acquire certificates from a Certificate Authority (CA) like
# Let's Encrypt using the ACME protocol's HTTP-01 challenge.
#
# ref: https://docs.traefik.io/https/acme/#certificate-resolvers
certificatesResolvers:
  default:
    acme:
      caServer: {{ .Values.proxy.https.letsencrypt.acmeServer }}
      email: {{ .Values.proxy.https.letsencrypt.contactEmail }}
      httpChallenge:
        entryPoint: http
      storage: /etc/acme/acme.json

# Let Traefik listen to port 80 and port 443
#
# ref: https://docs.traefik.io/routing/entrypoints/
entryPoints:
  # Port 80, used for:
  # - ACME HTTP-01 challenges
  # - Redirects to HTTPS
  http:
    address: ':8080'
  # Port 443, used for:
  # - TLS Termination Proxy, where HTTPS transitions to HTTP.
  https:
    address: ':8443'
    # Configure a high idle timeout for our websockets connections
    transport:
      respondingTimeouts:
        idleTimeout: 10m0s

# Config of logs about what happens to Traefik itself (startup,
# configuration, events, shutdown, and so on).
#
# ref: https://docs.traefik.io/observability/logs
log:
  level: {{ if .Values.debug.enabled -}} DEBUG {{- else -}} WARN {{- end }}

# Let Traefik monitor another file we mount for dynamic configuration. As we
# mount this file through this configmap, we can make a `kubectl edit` on the
# configmap and have Traefik update on changes to dynamic.yaml.
providers:
  file:
    filename: /etc/traefik/dynamic.yaml
{{- end }}
@@ -0,0 +1,28 @@
{{- $HTTPS := (and .Values.proxy.https.hosts .Values.proxy.https.enabled) }}
{{- $autoHTTPS := (and $HTTPS (eq .Values.proxy.https.type "letsencrypt")) }}
{{- if $autoHTTPS -}}
{{- $_ := .Values.proxy.https.letsencrypt.contactEmail | required "proxy.https.letsencrypt.contactEmail is a required field" -}}

# This configmap contains Traefik configuration files to be mounted.
# - traefik.yaml will only be read during startup (static configuration)
# - dynamic.yaml will be read on change (dynamic configuration)
#
# ref: https://docs.traefik.io/getting-started/configuration-overview/
#
# The configuration files are first rendered with Helm templating to large YAML
# strings. Then we use the fromYaml function on these strings to get an object,
# that we in turn merge with user provided extra configuration.
#
kind: ConfigMap
apiVersion: v1
metadata:
  name: {{ include "jupyterhub.autohttps.fullname" . }}
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
data:
  traefik.yaml: |
    {{- include "jupyterhub.traefik.yaml" . | fromYaml | merge .Values.proxy.traefik.extraStaticConfig | toYaml | nindent 4 }}
  dynamic.yaml: |
    {{- include "jupyterhub.dynamic.yaml" . | fromYaml | merge .Values.proxy.traefik.extraDynamicConfig | toYaml | nindent 4 }}

{{- end }}
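As the `required` check above enforces, `proxy.https.letsencrypt.contactEmail` must be set for this ConfigMap (and the autohttps machinery as a whole) to render. A sketch of the matching values, where the hostname and email are placeholders:

```yaml
proxy:
  https:
    enabled: true
    type: letsencrypt              # selects the autohttps path
    hosts:
      - hub.example.com            # becomes a TLS domain in dynamic.yaml
    letsencrypt:
      contactEmail: admin@example.com   # the required field checked above
```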
@@ -0,0 +1,154 @@
{{- $HTTPS := (and .Values.proxy.https.hosts .Values.proxy.https.enabled) }}
{{- $autoHTTPS := (and $HTTPS (eq .Values.proxy.https.type "letsencrypt")) }}
{{- if $autoHTTPS -}}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "jupyterhub.autohttps.fullname" . }}
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
spec:
  {{- if typeIs "int" .Values.proxy.traefik.revisionHistoryLimit }}
  revisionHistoryLimit: {{ .Values.proxy.traefik.revisionHistoryLimit }}
  {{- end }}
  replicas: 1
  selector:
    matchLabels:
      {{- include "jupyterhub.matchLabels" . | nindent 6 }}
  template:
    metadata:
      labels:
        {{- include "jupyterhub.matchLabels" . | nindent 8 }}
        hub.jupyter.org/network-access-proxy-http: "true"
        {{- with .Values.proxy.traefik.labels }}
        {{- . | toYaml | nindent 8 }}
        {{- end }}
      annotations:
        # Only force a restart through a change to this checksum when the static
        # configuration is changed, as the dynamic can be updated after start.
        # Any disruption to this deployment impacts everything, as it is the
        # entrypoint of all network traffic.
        checksum/static-config: {{ include "jupyterhub.traefik.yaml" . | fromYaml | merge .Values.proxy.traefik.extraStaticConfig | toYaml | sha256sum }}
    spec:
      {{- with include "jupyterhub.autohttps-serviceaccount.fullname" . }}
      serviceAccountName: {{ . }}
      {{- end }}
      {{- if .Values.scheduling.podPriority.enabled }}
      priorityClassName: {{ include "jupyterhub.priority.fullname" . }}
      {{- end }}
      {{- with .Values.proxy.traefik.nodeSelector }}
      nodeSelector:
        {{- . | toYaml | nindent 8 }}
      {{- end }}
      {{- with concat .Values.scheduling.corePods.tolerations .Values.proxy.traefik.tolerations }}
      tolerations:
        {{- . | toYaml | nindent 8 }}
      {{- end }}
      {{- include "jupyterhub.coreAffinity" . | nindent 6 }}
      volumes:
        - name: certificates
          emptyDir: {}
        - name: traefik-config
          configMap:
            name: {{ include "jupyterhub.autohttps.fullname" . }}
        {{- with .Values.proxy.traefik.extraVolumes }}
        {{- . | toYaml | nindent 8 }}
        {{- end }}
      {{- with include "jupyterhub.imagePullSecrets" (dict "root" . "image" .Values.proxy.traefik.image) }}
      imagePullSecrets: {{ . }}
      {{- end }}
      initContainers:
        - name: load-acme
          image: "{{ .Values.proxy.secretSync.image.name }}:{{ .Values.proxy.secretSync.image.tag }}"
          {{- with .Values.proxy.secretSync.image.pullPolicy }}
          imagePullPolicy: {{ . }}
          {{- end }}
          args:
            - load
            - {{ include "jupyterhub.proxy-public-tls.fullname" . }}
            - acme.json
            - /etc/acme/acme.json
          env:
            # We need this to get logs immediately
            - name: PYTHONUNBUFFERED
              value: "True"
            {{- with .Values.proxy.traefik.extraEnv }}
            {{- include "jupyterhub.extraEnv" . | nindent 12 }}
            {{- end }}
          volumeMounts:
            - name: certificates
              mountPath: /etc/acme
          {{- with .Values.proxy.secretSync.containerSecurityContext }}
          securityContext:
            {{- . | toYaml | nindent 12 }}
          {{- end }}
        {{- with .Values.proxy.traefik.extraInitContainers }}
        {{- . | toYaml | nindent 8 }}
        {{- end }}
      containers:
        - name: traefik
          image: "{{ .Values.proxy.traefik.image.name }}:{{ .Values.proxy.traefik.image.tag }}"
          {{- with .Values.proxy.traefik.image.pullPolicy }}
          imagePullPolicy: {{ . }}
          {{- end }}
          {{- with .Values.proxy.traefik.resources }}
          resources:
            {{- . | toYaml | nindent 12 }}
          {{- end }}
          ports:
            - name: http
              containerPort: 8080
            - name: https
              containerPort: 8443
            {{- with .Values.proxy.traefik.extraPorts }}
            {{- . | toYaml | nindent 12 }}
            {{- end }}
          volumeMounts:
            - name: traefik-config
              mountPath: /etc/traefik
            - name: certificates
              mountPath: /etc/acme
            {{- with .Values.proxy.traefik.extraVolumeMounts }}
            {{- . | toYaml | nindent 12 }}
            {{- end }}
          {{- with .Values.proxy.traefik.extraEnv }}
          env:
            {{- include "jupyterhub.extraEnv" . | nindent 12 }}
          {{- end }}
          {{- with .Values.proxy.traefik.containerSecurityContext }}
          securityContext:
            {{- . | toYaml | nindent 12 }}
          {{- end }}
        - name: secret-sync
          image: "{{ .Values.proxy.secretSync.image.name }}:{{ .Values.proxy.secretSync.image.tag }}"
          {{- with .Values.proxy.secretSync.image.pullPolicy }}
          imagePullPolicy: {{ . }}
          {{- end }}
          {{- with .Values.proxy.secretSync.resources }}
          resources:
            {{- . | toYaml | nindent 12 }}
          {{- end }}
          args:
            - watch-save
            - --label=app={{ include "jupyterhub.appLabel" . }}
            - --label=release={{ .Release.Name }}
            - --label=chart={{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
            - --label=heritage=secret-sync
            - {{ include "jupyterhub.proxy-public-tls.fullname" . }}
            - acme.json
            - /etc/acme/acme.json
          env:
            # We need this to get logs immediately
            - name: PYTHONUNBUFFERED
              value: "True"
          volumeMounts:
            - name: certificates
              mountPath: /etc/acme
          {{- with .Values.proxy.secretSync.containerSecurityContext }}
          securityContext:
            {{- . | toYaml | nindent 12 }}
          {{- end }}
      {{- with .Values.proxy.traefik.extraPodSpec }}
      {{- . | toYaml | nindent 6 }}
      {{- end }}
{{- end }}
@@ -0,0 +1,78 @@
{{- $HTTPS := .Values.proxy.https.enabled -}}
{{- $autoHTTPS := and $HTTPS (and (eq .Values.proxy.https.type "letsencrypt") .Values.proxy.https.hosts) -}}
{{- if and $autoHTTPS .Values.proxy.traefik.networkPolicy.enabled -}}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: {{ include "jupyterhub.autohttps.fullname" . }}
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
spec:
  podSelector:
    matchLabels:
      {{- include "jupyterhub.matchLabels" . | nindent 6 }}
  policyTypes:
    - Ingress
    - Egress

  # IMPORTANT:
  # NetworkPolicy's ingress "from" and egress "to" rule specifications require
  # great attention to detail. A quick summary is:
  #
  # 1. You can provide "from"/"to" rules that provide access either to all
  #    ports or a subset of ports.
  # 2. You can for each "from"/"to" rule provide any number of
  #    "sources"/"destinations" of four different kinds.
  #    - podSelector                       - targets pods with a certain label in the same namespace as the NetworkPolicy
  #    - namespaceSelector                 - targets all pods running in namespaces with a certain label
  #    - namespaceSelector and podSelector - targets pods with a certain label running in namespaces with a certain label
  #    - ipBlock                           - targets network traffic from/to a set of IP address ranges
  #
  # Read more at: https://kubernetes.io/docs/concepts/services-networking/network-policies/#behavior-of-to-and-from-selectors
  #
  ingress:
    {{- with .Values.proxy.traefik.networkPolicy.allowedIngressPorts }}
    # allow incoming traffic to these ports independent of source
    - ports:
        {{- range $port := . }}
        - port: {{ $port }}
        {{- end }}
    {{- end }}

    # allowed pods (hub.jupyter.org/network-access-proxy-http) --> proxy (http/https port)
    - ports:
        - port: http
        - port: https
      from:
        # source 1 - labeled pods
        - podSelector:
            matchLabels:
              hub.jupyter.org/network-access-proxy-http: "true"
          {{- if eq .Values.proxy.traefik.networkPolicy.interNamespaceAccessLabels "accept" }}
          namespaceSelector:
            matchLabels: {}   # without this, the podSelector would only consider pods in the local namespace
        # source 2 - pods in labeled namespaces
        - namespaceSelector:
            matchLabels:
              hub.jupyter.org/network-access-proxy-http: "true"
          {{- end }}

  {{- with .Values.proxy.traefik.networkPolicy.ingress }}
  # depends, but default is nothing --> proxy
  {{- . | toYaml | nindent 4 }}
  {{- end }}

  egress:
    # autohttps --> proxy (http port)
    - to:
        - podSelector:
            matchLabels:
              {{- $_ := merge (dict "componentLabel" "proxy") . }}
              {{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
      ports:
        - port: 8000

    {{- with (include "jupyterhub.networkPolicy.renderEgressRules" (list . .Values.proxy.traefik.networkPolicy)) }}
    {{- . | nindent 4 }}
    {{- end }}
{{- end }}
@@ -0,0 +1,18 @@
{{- if .Values.proxy.traefik.pdb.enabled -}}
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: proxy
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
spec:
  {{- if not (typeIs "<nil>" .Values.proxy.traefik.pdb.maxUnavailable) }}
  maxUnavailable: {{ .Values.proxy.traefik.pdb.maxUnavailable }}
  {{- end }}
  {{- if not (typeIs "<nil>" .Values.proxy.traefik.pdb.minAvailable) }}
  minAvailable: {{ .Values.proxy.traefik.pdb.minAvailable }}
  {{- end }}
  selector:
    matchLabels:
      {{- include "jupyterhub.matchLabels" . | nindent 6 }}
{{- end }}
@@ -0,0 +1,35 @@
{{- $HTTPS := (and .Values.proxy.https.hosts .Values.proxy.https.enabled) -}}
{{- $autoHTTPS := (and $HTTPS (eq .Values.proxy.https.type "letsencrypt")) -}}
{{- if $autoHTTPS -}}
{{- if .Values.rbac.create -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: {{ include "jupyterhub.autohttps.fullname" . }}
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
  {{- with .Values.proxy.traefik.serviceAccount.annotations }}
  annotations:
    {{- . | toYaml | nindent 4 }}
  {{- end }}
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "patch", "list", "create"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: {{ include "jupyterhub.autohttps.fullname" . }}
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
subjects:
  - kind: ServiceAccount
    name: {{ include "jupyterhub.autohttps-serviceaccount.fullname" . }}
    apiGroup:
roleRef:
  kind: Role
  name: {{ include "jupyterhub.autohttps.fullname" . }}
  apiGroup: rbac.authorization.k8s.io
{{- end }}
{{- end }}
@@ -0,0 +1,25 @@
{{- $HTTPS := (and .Values.proxy.https.hosts .Values.proxy.https.enabled) }}
{{- $autoHTTPS := (and $HTTPS (eq .Values.proxy.https.type "letsencrypt")) }}
{{- if $autoHTTPS -}}
apiVersion: v1
kind: Service
metadata:
  name: {{ include "jupyterhub.proxy-http.fullname" . }}
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
    {{- with .Values.proxy.service.labels }}
    {{- . | toYaml | nindent 4 }}
    {{- end }}
  {{- with .Values.proxy.service.annotations }}
  annotations:
    {{- . | toYaml | nindent 4 }}
  {{- end }}
spec:
  type: ClusterIP
  selector:
    {{- $_ := merge (dict "componentLabel" "proxy") . }}
    {{- include "jupyterhub.matchLabels" $_ | nindent 4 }}
  ports:
    - port: 8000
      targetPort: http
{{- end }}
@@ -0,0 +1,12 @@
{{- $HTTPS := (and .Values.proxy.https.hosts .Values.proxy.https.enabled) -}}
{{- $autoHTTPS := (and $HTTPS (eq .Values.proxy.https.type "letsencrypt")) -}}
{{- if $autoHTTPS -}}
{{- if .Values.proxy.traefik.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "jupyterhub.autohttps-serviceaccount.fullname" . }}
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
{{- end }}
{{- end }}
178
jupyter/base/charts/jupyterhub/templates/proxy/deployment.yaml
Normal file
@@ -0,0 +1,178 @@
{{- $manualHTTPS := and .Values.proxy.https.enabled (eq .Values.proxy.https.type "manual") -}}
{{- $manualHTTPSwithsecret := and .Values.proxy.https.enabled (eq .Values.proxy.https.type "secret") -}}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "jupyterhub.proxy.fullname" . }}
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
spec:
  {{- if typeIs "int" .Values.proxy.chp.revisionHistoryLimit }}
  revisionHistoryLimit: {{ .Values.proxy.chp.revisionHistoryLimit }}
  {{- end }}
  replicas: 1
  selector:
    matchLabels:
      {{- include "jupyterhub.matchLabels" . | nindent 6 }}
  strategy:
    {{- .Values.proxy.deploymentStrategy | toYaml | nindent 4 }}
  template:
    metadata:
      labels:
        {{- /* Changes here will cause the Deployment to restart the pods. */}}
        {{- include "jupyterhub.matchLabels" . | nindent 8 }}
        hub.jupyter.org/network-access-hub: "true"
        hub.jupyter.org/network-access-singleuser: "true"
        {{- with .Values.proxy.labels }}
        {{- . | toYaml | nindent 8 }}
        {{- end }}
      annotations:
        # We want to restart the proxy only if the auth token changes.
        # Other changes to the hub config should not restart it.
        # We truncate to 4 chars to avoid leaking auth token info,
        # since someone could brute force the hash to obtain the token.
        #
        # Note that if auth_token has to be generated at random, it will be
        # generated at random here separately from being generated at random in
        # the k8s Secret template. This will cause this annotation to change to
        # match the k8s Secret during the first upgrade after an auth_token
        # was generated.
        checksum/auth-token: {{ include "jupyterhub.hub.config.ConfigurableHTTPProxy.auth_token" . | sha256sum | trunc 4 | quote }}
        checksum/proxy-secret: {{ include (print $.Template.BasePath "/proxy/secret.yaml") . | sha256sum | quote }}
        {{- with .Values.proxy.annotations }}
        {{- . | toYaml | nindent 8 }}
        {{- end }}
    spec:
      terminationGracePeriodSeconds: 60
      {{- if .Values.scheduling.podPriority.enabled }}
      priorityClassName: {{ include "jupyterhub.priority.fullname" . }}
      {{- end }}
      {{- with .Values.proxy.chp.nodeSelector }}
      nodeSelector:
        {{- . | toYaml | nindent 8 }}
      {{- end }}
      {{- with concat .Values.scheduling.corePods.tolerations .Values.proxy.chp.tolerations }}
      tolerations:
        {{- . | toYaml | nindent 8 }}
      {{- end }}
      {{- include "jupyterhub.coreAffinity" . | nindent 6 }}
      {{- if $manualHTTPS }}
      volumes:
        - name: tls-secret
          secret:
            secretName: {{ include "jupyterhub.proxy-public-manual-tls.fullname" . }}
      {{- else if $manualHTTPSwithsecret }}
      volumes:
        - name: tls-secret
          secret:
            secretName: {{ .Values.proxy.https.secret.name }}
      {{- end }}
      {{- with include "jupyterhub.imagePullSecrets" (dict "root" . "image" .Values.proxy.chp.image) }}
      imagePullSecrets: {{ . }}
      {{- end }}
      containers:
        - name: chp
          image: {{ .Values.proxy.chp.image.name }}:{{ .Values.proxy.chp.image.tag }}
          {{- $hubNameAsEnv := include "jupyterhub.hub.fullname" . | upper | replace "-" "_" }}
          {{- $hubHost := printf "http://%s:$(%s_SERVICE_PORT)" (include "jupyterhub.hub.fullname" .) $hubNameAsEnv }}
          command:
            - configurable-http-proxy
            - "--ip="
            - "--api-ip="
            - --api-port=8001
            - --default-target={{ .Values.proxy.chp.defaultTarget | default $hubHost }}
            - --error-target={{ .Values.proxy.chp.errorTarget | default (printf "%s/hub/error" $hubHost) }}
            {{- if $manualHTTPS }}
            - --port=8443
            - --redirect-port=8000
            - --redirect-to=443
            - --ssl-key=/etc/chp/tls/tls.key
            - --ssl-cert=/etc/chp/tls/tls.crt
            {{- else if $manualHTTPSwithsecret }}
            - --port=8443
            - --redirect-port=8000
            - --redirect-to=443
            - --ssl-key=/etc/chp/tls/{{ .Values.proxy.https.secret.key }}
            - --ssl-cert=/etc/chp/tls/{{ .Values.proxy.https.secret.crt }}
            {{- else }}
            - --port=8000
            {{- end }}
            {{- if .Values.debug.enabled }}
            - --log-level=debug
            {{- end }}
            {{- range .Values.proxy.chp.extraCommandLineFlags }}
            - {{ tpl . $ }}
            {{- end }}
          {{- if or $manualHTTPS $manualHTTPSwithsecret }}
          volumeMounts:
            - name: tls-secret
              mountPath: /etc/chp/tls
              readOnly: true
          {{- end }}
          {{- with .Values.proxy.chp.resources }}
          resources:
            {{- . | toYaml | nindent 12 }}
          {{- end }}
          env:
            - name: CONFIGPROXY_AUTH_TOKEN
              valueFrom:
                secretKeyRef:
                  # NOTE: References the chart managed k8s Secret even if
                  #       hub.existingSecret is specified to avoid using the
                  #       lookup function on the user managed k8s Secret.
                  name: {{ include "jupyterhub.hub.fullname" . }}
                  key: hub.config.ConfigurableHTTPProxy.auth_token
            {{- with .Values.proxy.chp.extraEnv }}
            {{- include "jupyterhub.extraEnv" . | nindent 12 }}
            {{- end }}
          {{- with .Values.proxy.chp.image.pullPolicy }}
          imagePullPolicy: {{ . }}
          {{- end }}
          ports:
            {{- if or $manualHTTPS $manualHTTPSwithsecret }}
            - name: https
              containerPort: 8443
            {{- end }}
            - name: http
              containerPort: 8000
            - name: api
              containerPort: 8001
          {{- if .Values.proxy.chp.livenessProbe.enabled }}
          livenessProbe:
            initialDelaySeconds: {{ .Values.proxy.chp.livenessProbe.initialDelaySeconds }}
            periodSeconds: {{ .Values.proxy.chp.livenessProbe.periodSeconds }}
            timeoutSeconds: {{ .Values.proxy.chp.livenessProbe.timeoutSeconds }}
            failureThreshold: {{ .Values.proxy.chp.livenessProbe.failureThreshold }}
            httpGet:
              path: /_chp_healthz
              {{- if or $manualHTTPS $manualHTTPSwithsecret }}
              port: https
              scheme: HTTPS
              {{- else }}
              port: http
              scheme: HTTP
              {{- end }}
          {{- end }}
          {{- if .Values.proxy.chp.readinessProbe.enabled }}
          readinessProbe:
            initialDelaySeconds: {{ .Values.proxy.chp.readinessProbe.initialDelaySeconds }}
            periodSeconds: {{ .Values.proxy.chp.readinessProbe.periodSeconds }}
            timeoutSeconds: {{ .Values.proxy.chp.readinessProbe.timeoutSeconds }}
            failureThreshold: {{ .Values.proxy.chp.readinessProbe.failureThreshold }}
            httpGet:
              path: /_chp_healthz
              {{- if or $manualHTTPS $manualHTTPSwithsecret }}
              port: https
              scheme: HTTPS
              {{- else }}
              port: http
              scheme: HTTP
              {{- end }}
          {{- end }}
          {{- with .Values.proxy.chp.containerSecurityContext }}
          securityContext:
            {{- . | toYaml | nindent 12 }}
          {{- end }}
      {{- with .Values.proxy.chp.extraPodSpec }}
      {{- . | toYaml | nindent 6 }}
      {{- end }}
108
jupyter/base/charts/jupyterhub/templates/proxy/netpol.yaml
Normal file
@@ -0,0 +1,108 @@
{{- $HTTPS := .Values.proxy.https.enabled -}}
{{- $autoHTTPS := and $HTTPS (and (eq .Values.proxy.https.type "letsencrypt") .Values.proxy.https.hosts) -}}
{{- $manualHTTPS := and $HTTPS (eq .Values.proxy.https.type "manual") -}}
{{- $manualHTTPSwithsecret := and $HTTPS (eq .Values.proxy.https.type "secret") -}}
{{- if .Values.proxy.chp.networkPolicy.enabled -}}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: {{ include "jupyterhub.proxy.fullname" . }}
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
spec:
  podSelector:
    matchLabels:
      {{- include "jupyterhub.matchLabels" . | nindent 6 }}
  policyTypes:
    - Ingress
    - Egress

  # IMPORTANT:
  # NetworkPolicy's ingress "from" and egress "to" rule specifications require
  # great attention to detail. A quick summary is:
  #
  # 1. You can provide "from"/"to" rules that provide access either to all
  #    ports or a subset of ports.
  # 2. You can for each "from"/"to" rule provide any number of
  #    "sources"/"destinations" of four different kinds.
  #    - podSelector                       - targets pods with a certain label in the same namespace as the NetworkPolicy
  #    - namespaceSelector                 - targets all pods running in namespaces with a certain label
  #    - namespaceSelector and podSelector - targets pods with a certain label running in namespaces with a certain label
  #    - ipBlock                           - targets network traffic from/to a set of IP address ranges
  #
  # Read more at: https://kubernetes.io/docs/concepts/services-networking/network-policies/#behavior-of-to-and-from-selectors
  #
  ingress:
    {{- with .Values.proxy.chp.networkPolicy.allowedIngressPorts }}
    # allow incoming traffic to these ports independent of source
    - ports:
        {{- range $port := . }}
        - port: {{ $port }}
        {{- end }}
    {{- end }}

    # allowed pods (hub.jupyter.org/network-access-proxy-http) --> proxy (http/https port)
    - ports:
        - port: http
        {{- if or $manualHTTPS $manualHTTPSwithsecret }}
        - port: https
        {{- end }}
      from:
        # source 1 - labeled pods
        - podSelector:
            matchLabels:
              hub.jupyter.org/network-access-proxy-http: "true"
          {{- if eq .Values.proxy.chp.networkPolicy.interNamespaceAccessLabels "accept" }}
          namespaceSelector:
            matchLabels: {}   # without this, the podSelector would only consider pods in the local namespace
        # source 2 - pods in labeled namespaces
        - namespaceSelector:
            matchLabels:
              hub.jupyter.org/network-access-proxy-http: "true"
          {{- end }}

    # allowed pods (hub.jupyter.org/network-access-proxy-api) --> proxy (api port)
    - ports:
        - port: api
      from:
        # source 1 - labeled pods
        - podSelector:
            matchLabels:
              hub.jupyter.org/network-access-proxy-api: "true"
          {{- if eq .Values.proxy.chp.networkPolicy.interNamespaceAccessLabels "accept" }}
          namespaceSelector:
            matchLabels: {}   # without this, the podSelector would only consider pods in the local namespace
        # source 2 - pods in labeled namespaces
        - namespaceSelector:
            matchLabels:
              hub.jupyter.org/network-access-proxy-api: "true"
          {{- end }}

  {{- with .Values.proxy.chp.networkPolicy.ingress }}
  # depends, but default is nothing --> proxy
  {{- . | toYaml | nindent 4 }}
  {{- end }}

  egress:
    # proxy --> hub
    - to:
        - podSelector:
            matchLabels:
              {{- $_ := merge (dict "componentLabel" "hub") . }}
              {{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
      ports:
        - port: 8081

    # proxy --> singleuser-server
    - to:
        - podSelector:
            matchLabels:
              {{- $_ := merge (dict "componentLabel" "singleuser-server") . }}
              {{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
      ports:
        - port: 8888

    {{- with (include "jupyterhub.networkPolicy.renderEgressRules" (list . .Values.proxy.chp.networkPolicy)) }}
    {{- . | nindent 4 }}
    {{- end }}
{{- end }}
18
jupyter/base/charts/jupyterhub/templates/proxy/pdb.yaml
Normal file
@@ -0,0 +1,18 @@
{{- if .Values.proxy.chp.pdb.enabled -}}
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: {{ include "jupyterhub.proxy.fullname" . }}
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
spec:
  {{- if not (typeIs "<nil>" .Values.proxy.chp.pdb.maxUnavailable) }}
  maxUnavailable: {{ .Values.proxy.chp.pdb.maxUnavailable }}
  {{- end }}
  {{- if not (typeIs "<nil>" .Values.proxy.chp.pdb.minAvailable) }}
  minAvailable: {{ .Values.proxy.chp.pdb.minAvailable }}
  {{- end }}
  selector:
    matchLabels:
      {{- include "jupyterhub.matchLabels" . | nindent 6 }}
{{- end }}
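The `<nil>` checks above mean only explicitly set budget fields are rendered. A values sketch (the number is illustrative, and `minAvailable` is left unset so only `maxUnavailable` renders):

```yaml
proxy:
  chp:
    pdb:
      enabled: true
      maxUnavailable: 1   # allow at most one proxy pod to be disrupted
```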
13
jupyter/base/charts/jupyterhub/templates/proxy/secret.yaml
Normal file
@@ -0,0 +1,13 @@
{{- $manualHTTPS := and .Values.proxy.https.enabled (eq .Values.proxy.https.type "manual") -}}
{{- if $manualHTTPS -}}
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "jupyterhub.proxy-public-manual-tls.fullname" . }}
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
type: kubernetes.io/tls
data:
  tls.crt: {{ .Values.proxy.https.manual.cert | required "Required configuration missing: proxy.https.manual.cert" | b64enc }}
  tls.key: {{ .Values.proxy.https.manual.key | required "Required configuration missing: proxy.https.manual.key" | b64enc }}
{{- end }}
83
jupyter/base/charts/jupyterhub/templates/proxy/service.yaml
Normal file
@@ -0,0 +1,83 @@
{{- $enabled := .Values.proxy.https.enabled -}}
{{- $autoHTTPS := and $enabled (and (eq .Values.proxy.https.type "letsencrypt") .Values.proxy.https.hosts) -}}
{{- $manualHTTPS := and $enabled (eq .Values.proxy.https.type "manual") -}}
{{- $manualHTTPSwithsecret := and $enabled (eq .Values.proxy.https.type "secret") -}}
{{- $offloadHTTPS := and $enabled (eq .Values.proxy.https.type "offload") -}}
{{- $valid := or $autoHTTPS (or $manualHTTPS (or $manualHTTPSwithsecret $offloadHTTPS)) -}}
{{- $HTTPS := and $enabled $valid -}}
apiVersion: v1
kind: Service
metadata:
  name: {{ include "jupyterhub.proxy-api.fullname" . }}
  labels:
    {{- $_ := merge (dict "componentSuffix" "-api") . }}
    {{- include "jupyterhub.labels" $_ | nindent 4 }}
spec:
  selector:
    {{- include "jupyterhub.matchLabels" . | nindent 4 }}
  ports:
    - port: 8001
      targetPort: api
---
apiVersion: v1
kind: Service
metadata:
  name: {{ include "jupyterhub.proxy-public.fullname" . }}
  labels:
    {{- $_ := merge (dict "componentSuffix" "-public") . }}
    {{- include "jupyterhub.labels" $_ | nindent 4 }}
    {{- with .Values.proxy.service.labels }}
    {{- . | toYaml | nindent 4 }}
    {{- end }}
  {{- with .Values.proxy.service.annotations }}
  annotations:
    {{- . | toYaml | nindent 4 }}
  {{- end }}
spec:
  selector:
    # This service will target the autohttps pod if autohttps is configured, and
    # the proxy pod if not. When autohttps is configured, the service proxy-http
    # will be around to target the proxy pod directly.
    {{- if $autoHTTPS }}
    {{- $_ := merge (dict "componentLabel" "autohttps") . -}}
    {{- include "jupyterhub.matchLabels" $_ | nindent 4 }}
    {{- else }}
    {{- include "jupyterhub.matchLabels" . | nindent 4 }}
    {{- end }}
  ports:
    {{- if $HTTPS }}
    - name: https
      port: 443
      # When HTTPS termination is handled outside our helm chart, pass traffic
      # coming in via this Service's port 443 to the targeted pod's port meant
      # for HTTP traffic.
      {{- if $offloadHTTPS }}
      targetPort: http
      {{- else }}
      targetPort: https
      {{- end }}
      {{- with .Values.proxy.service.nodePorts.https }}
      nodePort: {{ . }}
      {{- end }}
    {{- end }}
    {{- if ne .Values.proxy.service.disableHttpPort true }}
    - name: http
      port: 80
      targetPort: http
      {{- with .Values.proxy.service.nodePorts.http }}
      nodePort: {{ . }}
      {{- end }}
    {{- end }}
    {{- with .Values.proxy.service.extraPorts }}
    {{- . | toYaml | nindent 4 }}
    {{- end }}
  type: {{ .Values.proxy.service.type }}
  {{- with .Values.proxy.service.loadBalancerIP }}
  loadBalancerIP: {{ . }}
  {{- end }}
  {{- if eq .Values.proxy.service.type "LoadBalancer" }}
  {{- with .Values.proxy.service.loadBalancerSourceRanges }}
  loadBalancerSourceRanges:
    {{- . | toYaml | nindent 4 }}
  {{- end }}
  {{- end }}
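The `offload` branch above sends the Service's port 443 to the pod's HTTP port, for setups where TLS terminates at an external load balancer. A hedged values sketch; the annotation comment only indicates where cloud-specific certificate annotations would go:

```yaml
proxy:
  https:
    enabled: true
    type: offload        # TLS is terminated outside the chart
  service:
    type: LoadBalancer
    annotations: {}      # e.g. cloud load-balancer certificate annotations
```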
@@ -0,0 +1,138 @@
{{- define "jupyterhub.userNodeAffinityRequired" -}}
{{- if eq .Values.scheduling.userPods.nodeAffinity.matchNodePurpose "require" -}}
- matchExpressions:
    - key: hub.jupyter.org/node-purpose
      operator: In
      values: [user]
{{- end }}
{{- with .Values.singleuser.extraNodeAffinity.required }}
{{- . | toYaml | nindent 0 }}
{{- end }}
{{- end }}

{{- define "jupyterhub.userNodeAffinityPreferred" -}}
{{- if eq .Values.scheduling.userPods.nodeAffinity.matchNodePurpose "prefer" -}}
- weight: 100
  preference:
    matchExpressions:
      - key: hub.jupyter.org/node-purpose
        operator: In
        values: [user]
{{- end }}
{{- with .Values.singleuser.extraNodeAffinity.preferred }}
{{- . | toYaml | nindent 0 }}
{{- end }}
{{- end }}

{{- define "jupyterhub.userPodAffinityRequired" -}}
{{- with .Values.singleuser.extraPodAffinity.required -}}
{{ . | toYaml }}
{{- end }}
{{- end }}

{{- define "jupyterhub.userPodAffinityPreferred" -}}
{{- with .Values.singleuser.extraPodAffinity.preferred -}}
{{ . | toYaml }}
{{- end }}
{{- end }}

{{- define "jupyterhub.userPodAntiAffinityRequired" -}}
{{- with .Values.singleuser.extraPodAntiAffinity.required -}}
{{ . | toYaml }}
{{- end }}
{{- end }}

{{- define "jupyterhub.userPodAntiAffinityPreferred" -}}
{{- with .Values.singleuser.extraPodAntiAffinity.preferred -}}
{{ . | toYaml }}
{{- end }}
{{- end }}


{{- /*
jupyterhub.userAffinity:
  Used by user-placeholder pods to get the same affinity as the user pods
  spawned by kubespawner.
*/}}
{{- define "jupyterhub.userAffinity" -}}

{{- $dummy := set . "nodeAffinityRequired" (include "jupyterhub.userNodeAffinityRequired" .) -}}
{{- $dummy := set . "podAffinityRequired" (include "jupyterhub.userPodAffinityRequired" .) -}}
{{- $dummy := set . "podAntiAffinityRequired" (include "jupyterhub.userPodAntiAffinityRequired" .) -}}
{{- $dummy := set . "nodeAffinityPreferred" (include "jupyterhub.userNodeAffinityPreferred" .) -}}
{{- $dummy := set . "podAffinityPreferred" (include "jupyterhub.userPodAffinityPreferred" .) -}}
{{- $dummy := set . "podAntiAffinityPreferred" (include "jupyterhub.userPodAntiAffinityPreferred" .) -}}
{{- $dummy := set . "hasNodeAffinity" (or .nodeAffinityRequired .nodeAffinityPreferred) -}}
{{- $dummy := set . "hasPodAffinity" (or .podAffinityRequired .podAffinityPreferred) -}}
{{- $dummy := set . "hasPodAntiAffinity" (or .podAntiAffinityRequired .podAntiAffinityPreferred) -}}

{{- if .hasNodeAffinity -}}
nodeAffinity:
  {{- if .nodeAffinityRequired }}
  requiredDuringSchedulingIgnoredDuringExecution:
    nodeSelectorTerms:
      {{- .nodeAffinityRequired | nindent 6 }}
  {{- end }}

  {{- if .nodeAffinityPreferred }}
  preferredDuringSchedulingIgnoredDuringExecution:
    {{- .nodeAffinityPreferred | nindent 4 }}
  {{- end }}
{{- end }}

{{- if .hasPodAffinity }}
podAffinity:
  {{- if .podAffinityRequired }}
  requiredDuringSchedulingIgnoredDuringExecution:
    {{- .podAffinityRequired | nindent 4 }}
  {{- end }}

  {{- if .podAffinityPreferred }}
  preferredDuringSchedulingIgnoredDuringExecution:
    {{- .podAffinityPreferred | nindent 4 }}
  {{- end }}
{{- end }}

{{- if .hasPodAntiAffinity }}
podAntiAffinity:
  {{- if .podAntiAffinityRequired }}
  requiredDuringSchedulingIgnoredDuringExecution:
    {{- .podAntiAffinityRequired | nindent 4 }}
  {{- end }}

  {{- if .podAntiAffinityPreferred }}
  preferredDuringSchedulingIgnoredDuringExecution:
    {{- .podAntiAffinityPreferred | nindent 4 }}
  {{- end }}
{{- end }}

{{- end }}


{{- define "jupyterhub.coreAffinity" -}}
{{- $require := eq .Values.scheduling.corePods.nodeAffinity.matchNodePurpose "require" -}}
{{- $prefer := eq .Values.scheduling.corePods.nodeAffinity.matchNodePurpose "prefer" -}}
{{- if or $require $prefer -}}
affinity:
  nodeAffinity:
    {{- if $require }}
    requiredDuringSchedulingIgnoredDuringExecution:
      nodeSelectorTerms:
        - matchExpressions:
            - key: hub.jupyter.org/node-purpose
              operator: In
              values: [core]
    {{- end }}
    {{- if $prefer }}
    preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 100
        preference:
          matchExpressions:
            - key: hub.jupyter.org/node-purpose
              operator: In
              values: [core]
    {{- end }}
{{- end }}
{{- end }}
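These helpers key off the `matchNodePurpose` values, which steer pods toward nodes labeled `hub.jupyter.org/node-purpose`. A minimal sketch of how that is configured:

```yaml
scheduling:
  corePods:
    nodeAffinity:
      matchNodePurpose: require   # hard requirement: core pods only on core-labeled nodes
  userPods:
    nodeAffinity:
      matchNodePurpose: prefer    # soft preference: user pods favor user-labeled nodes
```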
@@ -0,0 +1,15 @@
{{- if .Values.scheduling.podPriority.enabled }}
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
  name: {{ include "jupyterhub.priority.fullname" . }}
  annotations:
    meta.helm.sh/release-name: "{{ .Release.Name }}"
    meta.helm.sh/release-namespace: "{{ .Release.Namespace }}"
  labels:
    {{- $_ := merge (dict "componentLabel" "default-priority") . }}
    {{- include "jupyterhub.labels" $_ | nindent 4 }}
value: {{ .Values.scheduling.podPriority.defaultPriority }}
globalDefault: {{ .Values.scheduling.podPriority.globalDefault }}
description: "A default priority higher than the user placeholders' priority."
{{- end }}
@@ -0,0 +1,17 @@
{{- /*
The cluster autoscaler should be allowed to evict and reschedule these pods if
it would help in order to scale down a node.
*/}}
{{- if .Values.scheduling.userPlaceholder.enabled -}}
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: {{ include "jupyterhub.user-placeholder.fullname" . }}
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
spec:
  minAvailable: 0
  selector:
    matchLabels:
      {{- include "jupyterhub.matchLabels" . | nindent 6 }}
{{- end }}
@@ -0,0 +1,16 @@
{{- if .Values.scheduling.podPriority.enabled }}
{{- if .Values.scheduling.userPlaceholder.enabled -}}
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
  name: {{ include "jupyterhub.user-placeholder-priority.fullname" . }}
  annotations:
    meta.helm.sh/release-name: "{{ .Release.Name }}"
    meta.helm.sh/release-namespace: "{{ .Release.Namespace }}"
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
value: {{ .Values.scheduling.podPriority.userPlaceholderPriority }}
globalDefault: false
description: "With a priority higher than or equal to a cluster autoscaler's priority cutoff, a pod can trigger a cluster scale up. At the same time, placeholder pods' priority should be lower than other pods to make them evictable."
{{- end }}
{{- end }}
@@ -0,0 +1,80 @@

{{- /*
These user-placeholder pods can be used to test cluster autoscaling in a
controlled fashion.

Example:
$ echo 'Simulating four users...'
$ kubectl scale sts/user-placeholder --replicas 4
*/}}
{{- if .Values.scheduling.userPlaceholder.enabled -}}
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: {{ include "jupyterhub.user-placeholder.fullname" . }}
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
spec:
  podManagementPolicy: Parallel
  {{- if typeIs "int" .Values.scheduling.userPlaceholder.revisionHistoryLimit }}
  revisionHistoryLimit: {{ .Values.scheduling.userPlaceholder.revisionHistoryLimit }}
  {{- end }}
  replicas: {{ .Values.scheduling.userPlaceholder.replicas }}
  selector:
    matchLabels:
      {{- include "jupyterhub.matchLabels" . | nindent 6 }}
  serviceName: {{ include "jupyterhub.user-placeholder.fullname" . }}
  template:
    metadata:
      {{- with .Values.scheduling.userPlaceholder.annotations }}
      annotations:
        {{- . | toYaml | nindent 8 }}
      {{- end }}
      labels:
        {{- /* Changes here will cause the StatefulSet to restart the pods. */}}
        {{- include "jupyterhub.matchLabels" . | nindent 8 }}
        {{- with .Values.scheduling.userPlaceholder.labels }}
        {{- . | toYaml | nindent 8 }}
        {{- end }}
    spec:
      {{- if .Values.scheduling.podPriority.enabled }}
      priorityClassName: {{ include "jupyterhub.user-placeholder-priority.fullname" . }}
      {{- end }}
      {{- if .Values.scheduling.userScheduler.enabled }}
      schedulerName: {{ include "jupyterhub.user-scheduler.fullname" . }}
      {{- end }}
      {{- with .Values.singleuser.nodeSelector }}
      nodeSelector:
        {{- . | toYaml | nindent 8 }}
      {{- end }}
      {{- with concat .Values.scheduling.userPods.tolerations .Values.singleuser.extraTolerations }}
      tolerations:
        {{- . | toYaml | nindent 8 }}
      {{- end }}
      {{- if include "jupyterhub.userAffinity" . }}
      affinity:
        {{- include "jupyterhub.userAffinity" . | nindent 8 }}
      {{- end }}
      terminationGracePeriodSeconds: 0
      automountServiceAccountToken: false
      {{- with include "jupyterhub.imagePullSecrets" (dict "root" . "image" .Values.scheduling.userPlaceholder.image) }}
      imagePullSecrets: {{ . }}
      {{- end }}
      containers:
        - name: pause
          image: {{ .Values.scheduling.userPlaceholder.image.name }}:{{ .Values.scheduling.userPlaceholder.image.tag }}
          {{- if .Values.scheduling.userPlaceholder.resources }}
          resources:
            {{- .Values.scheduling.userPlaceholder.resources | toYaml | nindent 12 }}
          {{- else if (include "jupyterhub.singleuser.resources" .) }}
          resources:
            {{- include "jupyterhub.singleuser.resources" . | nindent 12 }}
          {{- end }}
          {{- with .Values.scheduling.userPlaceholder.image.pullPolicy }}
          imagePullPolicy: {{ . }}
          {{- end }}
          {{- with .Values.scheduling.userPlaceholder.containerSecurityContext }}
          securityContext:
            {{- . | toYaml | nindent 12 }}
          {{- end }}
{{- end }}
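Since the replica count comes straight from values, the headroom can also be pinned declaratively instead of using the `kubectl scale` example in the template's comment (the count here is illustrative):

```yaml
scheduling:
  userPlaceholder:
    enabled: true
    replicas: 4   # keeps room on the cluster for roughly four typical user pods
```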
@@ -0,0 +1,45 @@
{{- if .Values.scheduling.userScheduler.enabled -}}
kind: ConfigMap
apiVersion: v1
metadata:
  name: {{ include "jupyterhub.user-scheduler-deploy.fullname" . }}
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
data:
  {{- /*
  This is the configuration of the official k8s kube-scheduler binary running
  in the user-scheduler.

  The config version and kube-scheduler binary version have a fallback for k8s
  clusters versioned v1.23 or lower because:

  - the v1 config requires a kube-scheduler binary >=1.25, and the v1beta3
    config requires a binary >=1.23
  - kube-scheduler binaries >=1.25 require storage.k8s.io/v1/CSIStorageCapacity,
    available first in k8s >=1.24

  ref: https://kubernetes.io/docs/reference/scheduling/config/
  ref: https://kubernetes.io/docs/reference/config-api/kube-scheduler-config.v1/
  ref: https://kubernetes.io/docs/reference/config-api/kube-scheduler-config.v1beta3/
  */}}
  config.yaml: |
    {{- if semverCompare ">=1.24.0-0" .Capabilities.KubeVersion.Version }}
    apiVersion: kubescheduler.config.k8s.io/v1
    {{- else }}
    apiVersion: kubescheduler.config.k8s.io/v1beta3
    {{- end }}
    kind: KubeSchedulerConfiguration
    leaderElection:
      resourceLock: endpointsleases
      resourceName: {{ include "jupyterhub.user-scheduler-lock.fullname" . }}
      resourceNamespace: "{{ .Release.Namespace }}"
    profiles:
      - schedulerName: {{ include "jupyterhub.user-scheduler.fullname" . }}
        {{- with .Values.scheduling.userScheduler.plugins }}
        plugins:
          {{- . | toYaml | nindent 10 }}
        {{- end }}
        {{- with .Values.scheduling.userScheduler.pluginConfig }}
        pluginConfig:
          {{- . | toYaml | nindent 10 }}
        {{- end }}
{{- end }}
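`plugins` and `pluginConfig` are passed through verbatim into the KubeSchedulerConfiguration profile above. A sketch of what a values-level override could look like; the plugin name is from upstream kube-scheduler's scoring plugins, and disabling it here is purely illustrative:

```yaml
scheduling:
  userScheduler:
    enabled: true
    plugins:
      score:
        disabled:
          - name: NodeResourcesBalancedAllocation   # bias toward packing pods tightly
```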
@@ -0,0 +1,102 @@
{{- if .Values.scheduling.userScheduler.enabled -}}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "jupyterhub.user-scheduler-deploy.fullname" . }}
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
spec:
  {{- if typeIs "int" .Values.scheduling.userScheduler.revisionHistoryLimit }}
  revisionHistoryLimit: {{ .Values.scheduling.userScheduler.revisionHistoryLimit }}
  {{- end }}
  replicas: {{ .Values.scheduling.userScheduler.replicas }}
  selector:
    matchLabels:
      {{- include "jupyterhub.matchLabels" . | nindent 6 }}
  template:
    metadata:
      labels:
        {{- include "jupyterhub.matchLabels" . | nindent 8 }}
        {{- with .Values.scheduling.userScheduler.labels }}
        {{- . | toYaml | nindent 8 }}
        {{- end }}
      annotations:
        checksum/config-map: {{ include (print $.Template.BasePath "/scheduling/user-scheduler/configmap.yaml") . | sha256sum }}
        {{- with .Values.scheduling.userScheduler.annotations }}
        {{- . | toYaml | nindent 8 }}
        {{- end }}
    spec:
      {{- with include "jupyterhub.user-scheduler-serviceaccount.fullname" . }}
      serviceAccountName: {{ . }}
      {{- end }}
      {{- if .Values.scheduling.podPriority.enabled }}
      priorityClassName: {{ include "jupyterhub.priority.fullname" . }}
      {{- end }}
      {{- with .Values.scheduling.userScheduler.nodeSelector }}
      nodeSelector:
        {{- . | toYaml | nindent 8 }}
      {{- end }}
      {{- with concat .Values.scheduling.corePods.tolerations .Values.scheduling.userScheduler.tolerations }}
      tolerations:
        {{- . | toYaml | nindent 8 }}
      {{- end }}
      {{- include "jupyterhub.coreAffinity" . | nindent 6 }}
      volumes:
        - name: config
          configMap:
            name: {{ include "jupyterhub.user-scheduler-deploy.fullname" . }}
      {{- with include "jupyterhub.imagePullSecrets" (dict "root" . "image" .Values.scheduling.userScheduler.image) }}
      imagePullSecrets: {{ . }}
      {{- end }}
      containers:
        - name: kube-scheduler
          {{- if semverCompare ">=1.24.0-0" .Capabilities.KubeVersion.Version }}
          image: {{ .Values.scheduling.userScheduler.image.name }}:{{ .Values.scheduling.userScheduler.image.tag }}
          {{- else }}
          # WARNING: The tag of this image is hardcoded, and the
          #          "scheduling.userScheduler.image.tag" configuration of the
          #          Helm chart that generated this resource manifest isn't
          #          respected. If you install the Helm chart in a k8s cluster
          #          versioned 1.24 or higher, your configuration will be
          #          respected.
          image: {{ .Values.scheduling.userScheduler.image.name }}:v1.23.14
          {{- end }}
          {{- with .Values.scheduling.userScheduler.image.pullPolicy }}
          imagePullPolicy: {{ . }}
          {{- end }}
          command:
            - /usr/local/bin/kube-scheduler
            # NOTE: --authentication-skip-lookup=true is used to avoid a
            #       seemingly harmless error. If we need to not skip
            #       "authentication lookup" in the future, see the linked issue.
            #
            # ref: https://github.com/jupyterhub/zero-to-jupyterhub-k8s/issues/1894
            - --config=/etc/user-scheduler/config.yaml
            - --authentication-skip-lookup=true
            - --v={{ .Values.scheduling.userScheduler.logLevel }}
          volumeMounts:
            - mountPath: /etc/user-scheduler
              name: config
          livenessProbe:
            httpGet:
              path: /healthz
              scheme: HTTPS
              port: 10259
            initialDelaySeconds: 15
          readinessProbe:
            httpGet:
              path: /healthz
              scheme: HTTPS
              port: 10259
          {{- with .Values.scheduling.userScheduler.resources }}
          resources:
            {{- . | toYaml | nindent 12 }}
          {{- end }}
          {{- with .Values.scheduling.userScheduler.containerSecurityContext }}
          securityContext:
            {{- . | toYaml | nindent 12 }}
          {{- end }}
      {{- with .Values.scheduling.userScheduler.extraPodSpec }}
      {{- . | toYaml | nindent 6 }}
      {{- end }}
{{- end }}
@@ -0,0 +1,18 @@
{{- if and .Values.scheduling.userScheduler.enabled .Values.scheduling.userScheduler.pdb.enabled -}}
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: {{ include "jupyterhub.user-scheduler-deploy.fullname" . }}
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
spec:
  {{- if not (typeIs "<nil>" .Values.scheduling.userScheduler.pdb.maxUnavailable) }}
  maxUnavailable: {{ .Values.scheduling.userScheduler.pdb.maxUnavailable }}
  {{- end }}
  {{- if not (typeIs "<nil>" .Values.scheduling.userScheduler.pdb.minAvailable) }}
  minAvailable: {{ .Values.scheduling.userScheduler.pdb.minAvailable }}
  {{- end }}
  selector:
    matchLabels:
      {{- include "jupyterhub.matchLabels" . | nindent 6 }}
{{- end }}
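The `typeIs "<nil>"` guards mean a field left as null is omitted from the rendered spec entirely, so only one of the two budget fields needs to be set. A sketch of an override that tolerates one disrupted replica (values mirror the chart defaults):

```
scheduling:
  userScheduler:
    pdb:
      enabled: true
      maxUnavailable: 1
      # null is dropped by the template, so no minAvailable is rendered
      minAvailable: null
```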
@@ -0,0 +1,233 @@
{{- if .Values.scheduling.userScheduler.enabled -}}
{{- if .Values.rbac.create -}}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ include "jupyterhub.user-scheduler.fullname" . }}
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
rules:
  # Copied from the system:kube-scheduler ClusterRole of the k8s version
  # matching the kube-scheduler binary we use. A modification has been made to
  # resourceName fields to remain relevant for how we have named our resources
  # in this Helm chart.
  #
  # NOTE: These rules have been:
  #       - unchanged between 1.12 and 1.15
  #       - changed in 1.16
  #       - changed in 1.17
  #       - unchanged between 1.18 and 1.20
  #       - changed in 1.21: get/list/watch permission for namespace,
  #         csidrivers, csistoragecapacities was added.
  #       - unchanged between 1.22 and 1.27
  #
  # ref: https://github.com/kubernetes/kubernetes/blob/v1.27.0/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml#L736-L892
  - apiGroups:
      - ""
      - events.k8s.io
    resources:
      - events
    verbs:
      - create
      - patch
      - update
  - apiGroups:
      - coordination.k8s.io
    resources:
      - leases
    verbs:
      - create
  - apiGroups:
      - coordination.k8s.io
    resourceNames:
      - {{ include "jupyterhub.user-scheduler-lock.fullname" . }}
    resources:
      - leases
    verbs:
      - get
      - update
  - apiGroups:
      - ""
    resources:
      - endpoints
    verbs:
      - create
  - apiGroups:
      - ""
    resourceNames:
      - {{ include "jupyterhub.user-scheduler-lock.fullname" . }}
    resources:
      - endpoints
    verbs:
      - get
      - update
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - delete
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - bindings
      - pods/binding
    verbs:
      - create
  - apiGroups:
      - ""
    resources:
      - pods/status
    verbs:
      - patch
      - update
  - apiGroups:
      - ""
    resources:
      - replicationcontrollers
      - services
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - apps
      - extensions
    resources:
      - replicasets
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - apps
    resources:
      - statefulsets
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - policy
    resources:
      - poddisruptionbudgets
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - persistentvolumeclaims
      - persistentvolumes
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - authentication.k8s.io
    resources:
      - tokenreviews
    verbs:
      - create
  - apiGroups:
      - authorization.k8s.io
    resources:
      - subjectaccessreviews
    verbs:
      - create
  - apiGroups:
      - storage.k8s.io
    resources:
      - csinodes
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - namespaces
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - storage.k8s.io
    resources:
      - csidrivers
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - storage.k8s.io
    resources:
      - csistoragecapacities
    verbs:
      - get
      - list
      - watch

  # Copied from the system:volume-scheduler ClusterRole of the k8s version
  # matching the kube-scheduler binary we use.
  #
  # NOTE: These rules have not changed between 1.12 and 1.27.
  #
  # ref: https://github.com/kubernetes/kubernetes/blob/v1.27.0/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/cluster-roles.yaml#L1311-L1338
  - apiGroups:
      - ""
    resources:
      - persistentvolumes
    verbs:
      - get
      - list
      - patch
      - update
      - watch
  - apiGroups:
      - storage.k8s.io
    resources:
      - storageclasses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - persistentvolumeclaims
    verbs:
      - get
      - list
      - patch
      - update
      - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ include "jupyterhub.user-scheduler.fullname" . }}
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
subjects:
  - kind: ServiceAccount
    name: {{ include "jupyterhub.user-scheduler-serviceaccount.fullname" . }}
    namespace: "{{ .Release.Namespace }}"
roleRef:
  kind: ClusterRole
  name: {{ include "jupyterhub.user-scheduler.fullname" . }}
  apiGroup: rbac.authorization.k8s.io
{{- end }}
{{- end }}
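Both resources are gated on `rbac.create`, so clusters where RBAC objects are managed out of band can switch them off while keeping the user scheduler itself enabled. A sketch of such an override:

```
# Illustrative: keep the user scheduler, but let a cluster admin manage
# the ClusterRole/ClusterRoleBinding outside of Helm.
rbac:
  create: false
scheduling:
  userScheduler:
    enabled: true
```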
@@ -0,0 +1,14 @@
{{- if .Values.scheduling.userScheduler.enabled -}}
{{- if .Values.scheduling.userScheduler.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "jupyterhub.user-scheduler-serviceaccount.fullname" . }}
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
  {{- with .Values.scheduling.userScheduler.serviceAccount.annotations }}
  annotations:
    {{- . | toYaml | nindent 4 }}
  {{- end }}
{{- end }}
{{- end }}
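The optional annotations map is the usual hook for attaching cloud IAM bindings to the service account. A sketch, where the annotation key is real but the role ARN is a hypothetical placeholder:

```
scheduling:
  userScheduler:
    serviceAccount:
      create: true
      annotations:
        # hypothetical IAM binding; replace with your own role
        eks.amazonaws.com/role-arn: arn:aws:iam::123456789012:role/user-scheduler
```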
@@ -0,0 +1,99 @@
{{- if and .Values.singleuser.networkPolicy.enabled -}}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: {{ include "jupyterhub.singleuser.fullname" . }}
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
spec:
  podSelector:
    matchLabels:
      {{- $_ := merge (dict "componentLabel" "singleuser-server") . }}
      {{- include "jupyterhub.matchLabels" $_ | nindent 6 }}
  policyTypes:
    - Ingress
    - Egress

  # IMPORTANT:
  # NetworkPolicy's ingress "from" and egress "to" rule specifications require
  # great attention to detail. A quick summary is:
  #
  # 1. You can provide "from"/"to" rules that provide access either to all
  #    ports or to a subset of ports.
  # 2. You can for each "from"/"to" rule provide any number of
  #    "sources"/"destinations" of four different kinds.
  #    - podSelector                       - targets pods with a certain label in the same namespace as the NetworkPolicy
  #    - namespaceSelector                 - targets all pods running in namespaces with a certain label
  #    - namespaceSelector and podSelector - targets pods with a certain label running in namespaces with a certain label
  #    - ipBlock                           - targets network traffic from/to a set of IP address ranges
  #
  # Read more at: https://kubernetes.io/docs/concepts/services-networking/network-policies/#behavior-of-to-and-from-selectors
  #
  ingress:
    {{- with .Values.singleuser.networkPolicy.allowedIngressPorts }}
    # allow incoming traffic to these ports independent of source
    - ports:
        {{- range $port := . }}
        - port: {{ $port }}
        {{- end }}
    {{- end }}

    # allowed pods (hub.jupyter.org/network-access-singleuser) --> singleuser-server
    - ports:
        - port: notebook-port
      from:
        # source 1 - labeled pods
        - podSelector:
            matchLabels:
              hub.jupyter.org/network-access-singleuser: "true"
          {{- if eq .Values.singleuser.networkPolicy.interNamespaceAccessLabels "accept" }}
          namespaceSelector:
            matchLabels: {} # without this, the podSelector would only consider pods in the local namespace
        # source 2 - pods in labeled namespaces
        - namespaceSelector:
            matchLabels:
              hub.jupyter.org/network-access-singleuser: "true"
          {{- end }}

    {{- with .Values.singleuser.networkPolicy.ingress }}
    # depends, but default is nothing --> singleuser-server
    {{- . | toYaml | nindent 4 }}
    {{- end }}

  egress:
    # singleuser-server --> hub
    - to:
        - podSelector:
            matchLabels:
              {{- $_ := merge (dict "componentLabel" "hub") . }}
              {{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
      ports:
        - port: 8081

    # singleuser-server --> proxy
    # singleuser-server --> autohttps
    #
    # While not critical for core functionality, a user or library code may
    # rely on communicating with the proxy or autohttps pods via a k8s Service
    # it can detect from well-known environment variables.
    #
    - to:
        - podSelector:
            matchLabels:
              {{- $_ := merge (dict "componentLabel" "proxy") . }}
              {{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
      ports:
        - port: 8000
    - to:
        - podSelector:
            matchLabels:
              {{- $_ := merge (dict "componentLabel" "autohttps") . }}
              {{- include "jupyterhub.matchLabels" $_ | nindent 14 }}
      ports:
        - port: 8080
        - port: 8443

    {{- with (include "jupyterhub.networkPolicy.renderEgressRules" (list . .Values.singleuser.networkPolicy)) }}
    {{- . | nindent 4 }}
    {{- end }}
{{- end }}
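With the defaults, only pods in the release namespace that carry the hub.jupyter.org/network-access-singleuser label can reach a user server. A sketch of an override that also admits labeled namespaces and opens an extra port to any source (port number is illustrative):

```
singleuser:
  networkPolicy:
    enabled: true
    # also accept pods from namespaces labeled
    # hub.jupyter.org/network-access-singleuser=true
    interNamespaceAccessLabels: accept
    # open to any source regardless of labels
    allowedIngressPorts: [8888]
```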
@@ -0,0 +1,17 @@
{{- if .Values.singleuser.extraFiles }}
kind: Secret
apiVersion: v1
metadata:
  name: {{ include "jupyterhub.singleuser.fullname" . }}
  labels:
    {{- include "jupyterhub.labels" . | nindent 4 }}
type: Opaque
{{- with include "jupyterhub.extraFiles.data" .Values.singleuser.extraFiles }}
data:
  {{- . | nindent 2 }}
{{- end }}
{{- with include "jupyterhub.extraFiles.stringData" .Values.singleuser.extraFiles }}
stringData:
  {{- . | nindent 2 }}
{{- end }}
{{- end }}
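This Secret only renders when `singleuser.extraFiles` is non-empty; each entry becomes a key under data or stringData and is mounted into user pods at its mountPath. A sketch of the documented shape (the file name and contents here are illustrative):

```
singleuser:
  extraFiles:
    # the key is an arbitrary identifier; mountPath decides where it lands
    jupyter_notebook_config.json:
      mountPath: /etc/jupyter/jupyter_notebook_config.json
      data:
        MappingKernelManager:
          cull_idle_timeout: 1200
```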
1
jupyter/base/charts/jupyterhub/values.schema.json
Normal file
File diff suppressed because one or more lines are too long
674
jupyter/base/charts/jupyterhub/values.yaml
Normal file
@@ -0,0 +1,674 @@
# fullnameOverride and nameOverride distinguish blank strings, null values,
# and non-blank strings. For more details, see the configuration reference.
fullnameOverride: ""
nameOverride:

# enabled is ignored by the jupyterhub chart itself, but a chart depending on
# the jupyterhub chart conditionally can make use of this config option as the
# condition.
enabled:

# custom can contain anything you want to pass to the hub pod, as all passed
# Helm template values will be made available there.
custom: {}

# imagePullSecret is configuration to create a k8s Secret that Helm chart's pods
# can get credentials from to pull their images.
imagePullSecret:
  create: false
  automaticReferenceInjection: true
  registry:
  username:
  password:
  email:
# imagePullSecrets is configuration to reference the k8s Secret resources the
# Helm chart's pods can get credentials from to pull their images.
imagePullSecrets: []

# hub relates to the hub pod, responsible for running JupyterHub, its configured
# Authenticator class, its configured Spawner class KubeSpawner, and its
# configured Proxy class ConfigurableHTTPProxy. KubeSpawner creates the user
# pods, and ConfigurableHTTPProxy speaks with the actual ConfigurableHTTPProxy
# server in the proxy pod.
hub:
  revisionHistoryLimit:
  config:
    JupyterHub:
      admin_access: true
      authenticator_class: dummy
  service:
    type: ClusterIP
    annotations: {}
    ports:
      nodePort:
    extraPorts: []
    loadBalancerIP:
  baseUrl: /
  cookieSecret:
  initContainers: []
  nodeSelector: {}
  tolerations: []
  concurrentSpawnLimit: 64
  consecutiveFailureLimit: 5
  activeServerLimit:
  deploymentStrategy:
    ## type: Recreate
    ## - sqlite-pvc backed hubs require the Recreate deployment strategy, as a
    ##   typical PVC storage can only be bound to one pod at a time.
    ## - JupyterHub isn't designed to support being run in parallel. More work
    ##   needs to be done in JupyterHub itself before a fully highly available
    ##   (HA) deployment of JupyterHub on k8s is possible.
    type: Recreate
  db:
    type: sqlite-pvc
    upgrade:
    pvc:
      annotations: {}
      selector: {}
      accessModes:
        - ReadWriteOnce
      storage: 1Gi
      subPath:
      storageClassName:
    url:
    password:
  labels: {}
  annotations: {}
  command: []
  args: []
  extraConfig: {}
  extraFiles: {}
  extraEnv: {}
  extraContainers: []
  extraVolumes: []
  extraVolumeMounts: []
  image:
    name: jupyterhub/k8s-hub
    tag: "3.0.3"
    pullPolicy:
    pullSecrets: []
  resources: {}
  podSecurityContext:
    fsGroup: 1000
  containerSecurityContext:
    runAsUser: 1000
    runAsGroup: 1000
    allowPrivilegeEscalation: false
  lifecycle: {}
  loadRoles: {}
  services: {}
  pdb:
    enabled: false
    maxUnavailable:
    minAvailable: 1
  networkPolicy:
    enabled: true
    ingress: []
    egress: []
    egressAllowRules:
      cloudMetadataServer: true
      dnsPortsCloudMetadataServer: true
      dnsPortsKubeSystemNamespace: true
      dnsPortsPrivateIPs: true
      nonPrivateIPs: true
      privateIPs: true
    interNamespaceAccessLabels: ignore
    allowedIngressPorts: []
  allowNamedServers: false
  namedServerLimitPerUser:
  authenticatePrometheus:
  redirectToServer:
  shutdownOnLogout:
  templatePaths: []
  templateVars: {}
  livenessProbe:
    # The livenessProbe's aim is to give JupyterHub sufficient time to start up
    # but be able to restart it if it becomes unresponsive for ~5 min.
    enabled: true
    initialDelaySeconds: 300
    periodSeconds: 10
    failureThreshold: 30
    timeoutSeconds: 3
  readinessProbe:
    # The readinessProbe's aim is to provide a successful startup indication,
    # but following that never become unready before its livenessProbe fails
    # and restarts it if needed. Becoming unready after startup serves no
    # purpose, as there are no other pods to fall back to in our non-HA
    # deployment.
    enabled: true
    initialDelaySeconds: 0
    periodSeconds: 2
    failureThreshold: 1000
    timeoutSeconds: 1
  existingSecret:
  serviceAccount:
    create: true
    name:
    annotations: {}
  extraPodSpec: {}

rbac:
  create: true

# proxy relates to the proxy pod, the proxy-public service, and the autohttps
# pod and proxy-http service.
proxy:
  secretToken:
  annotations: {}
  deploymentStrategy:
    ## type: Recreate
    ## - JupyterHub's interaction with the CHP proxy becomes a lot more robust
    ##   with this configuration. To understand this, consider that JupyterHub
    ##   during startup will interact a lot with the k8s service to reach a
    ##   ready proxy pod. If the hub pod during a helm upgrade is restarting
    ##   directly while the proxy pod is making a rolling upgrade, the hub pod
    ##   could end up running a sequence of interactions with the old proxy pod
    ##   and finishing up the sequence of interactions with the new proxy pod.
    ##   As CHP proxy pods carry individual state this is very error prone. One
    ##   outcome when not using Recreate as a strategy has been that user pods
    ##   have been deleted by the hub pod because it considered them unreachable,
    ##   as it only configured the old proxy pod but not the new one before
    ##   trying to reach them.
    type: Recreate
    ## rollingUpdate:
    ## - WARNING:
    ##   This is required to be set explicitly blank! Without it being
    ##   explicitly blank, k8s will let eventual old values under rollingUpdate
    ##   remain, and then the Deployment becomes invalid and a helm upgrade
    ##   would fail with an error like this:
    ##
    ##     UPGRADE FAILED
    ##     Error: Deployment.apps "proxy" is invalid: spec.strategy.rollingUpdate: Forbidden: may not be specified when strategy `type` is 'Recreate'
    ##     Error: UPGRADE FAILED: Deployment.apps "proxy" is invalid: spec.strategy.rollingUpdate: Forbidden: may not be specified when strategy `type` is 'Recreate'
    rollingUpdate:
  # service relates to the proxy-public service
  service:
    type: LoadBalancer
    labels: {}
    annotations: {}
    nodePorts:
      http:
      https:
    disableHttpPort: false
    extraPorts: []
    loadBalancerIP:
    loadBalancerSourceRanges: []
  # chp relates to the proxy pod, which is responsible for routing traffic based
  # on dynamic configuration sent from JupyterHub to CHP's REST API.
  chp:
    revisionHistoryLimit:
    containerSecurityContext:
      runAsUser: 65534 # nobody user
      runAsGroup: 65534 # nobody group
      allowPrivilegeEscalation: false
    image:
      name: jupyterhub/configurable-http-proxy
      # tag is automatically bumped to new patch versions by the
      # watch-dependencies.yaml workflow.
      #
      tag: "4.5.6" # https://github.com/jupyterhub/configurable-http-proxy/tags
      pullPolicy:
      pullSecrets: []
    extraCommandLineFlags: []
    livenessProbe:
      enabled: true
      initialDelaySeconds: 60
      periodSeconds: 10
      failureThreshold: 30
      timeoutSeconds: 3
    readinessProbe:
      enabled: true
      initialDelaySeconds: 0
      periodSeconds: 2
      failureThreshold: 1000
      timeoutSeconds: 1
    resources: {}
    defaultTarget:
    errorTarget:
    extraEnv: {}
    nodeSelector: {}
    tolerations: []
    networkPolicy:
      enabled: true
      ingress: []
      egress: []
      egressAllowRules:
        cloudMetadataServer: true
        dnsPortsCloudMetadataServer: true
        dnsPortsKubeSystemNamespace: true
        dnsPortsPrivateIPs: true
        nonPrivateIPs: true
        privateIPs: true
      interNamespaceAccessLabels: ignore
      allowedIngressPorts: [http, https]
    pdb:
      enabled: false
      maxUnavailable:
      minAvailable: 1
    extraPodSpec: {}
  # traefik relates to the autohttps pod, which is responsible for TLS
  # termination when proxy.https.type=letsencrypt.
  traefik:
    revisionHistoryLimit:
    containerSecurityContext:
      runAsUser: 65534 # nobody user
      runAsGroup: 65534 # nobody group
      allowPrivilegeEscalation: false
    image:
      name: traefik
      # tag is automatically bumped to new patch versions by the
      # watch-dependencies.yaml workflow.
      #
      tag: "v2.10.4" # ref: https://hub.docker.com/_/traefik?tab=tags
      pullPolicy:
      pullSecrets: []
    hsts:
      includeSubdomains: false
      preload: false
      maxAge: 15724800 # About 6 months
    resources: {}
    labels: {}
    extraInitContainers: []
    extraEnv: {}
    extraVolumes: []
    extraVolumeMounts: []
    extraStaticConfig: {}
    extraDynamicConfig: {}
    nodeSelector: {}
    tolerations: []
    extraPorts: []
    networkPolicy:
      enabled: true
      ingress: []
      egress: []
      egressAllowRules:
        cloudMetadataServer: true
        dnsPortsCloudMetadataServer: true
        dnsPortsKubeSystemNamespace: true
        dnsPortsPrivateIPs: true
        nonPrivateIPs: true
        privateIPs: true
      interNamespaceAccessLabels: ignore
      allowedIngressPorts: [http, https]
    pdb:
      enabled: false
      maxUnavailable:
      minAvailable: 1
    serviceAccount:
      create: true
      name:
      annotations: {}
    extraPodSpec: {}
  secretSync:
    containerSecurityContext:
      runAsUser: 65534 # nobody user
      runAsGroup: 65534 # nobody group
      allowPrivilegeEscalation: false
    image:
      name: jupyterhub/k8s-secret-sync
      tag: "3.0.3"
      pullPolicy:
      pullSecrets: []
    resources: {}
  labels: {}
  https:
    enabled: false
    type: letsencrypt
    #type: letsencrypt, manual, offload, secret
    letsencrypt:
      contactEmail:
      # Specify a custom server here (https://acme-staging-v02.api.letsencrypt.org/directory) to hit the LE staging environment
      acmeServer: https://acme-v02.api.letsencrypt.org/directory
    manual:
      key:
      cert:
    secret:
      name:
      key: tls.key
      crt: tls.crt
    hosts: []

# singleuser relates to the configuration of KubeSpawner, which runs in the hub
# pod, and its spawning of user pods such as jupyter-myusername.
singleuser:
  podNameTemplate:
  extraTolerations: []
  nodeSelector: {}
  extraNodeAffinity:
    required: []
    preferred: []
  extraPodAffinity:
    required: []
    preferred: []
  extraPodAntiAffinity:
    required: []
    preferred: []
  networkTools:
    image:
      name: jupyterhub/k8s-network-tools
      tag: "3.0.3"
      pullPolicy:
      pullSecrets: []
    resources: {}
  cloudMetadata:
    # blockWithIptables set to true will append a privileged initContainer
    # that uses iptables to block the sensitive metadata server at the
    # provided ip.
    blockWithIptables: true
    ip: 169.254.169.254
  networkPolicy:
    enabled: true
    ingress: []
    egress: []
    egressAllowRules:
      cloudMetadataServer: false
      dnsPortsCloudMetadataServer: true
      dnsPortsKubeSystemNamespace: true
      dnsPortsPrivateIPs: true
      nonPrivateIPs: true
      privateIPs: false
    interNamespaceAccessLabels: ignore
    allowedIngressPorts: []
  events: true
  extraAnnotations: {}
  extraLabels:
    hub.jupyter.org/network-access-hub: "true"
  extraFiles: {}
  extraEnv: {}
  lifecycleHooks: {}
  initContainers: []
  extraContainers: []
  allowPrivilegeEscalation: false
  uid: 1000
  fsGid: 100
  serviceAccountName:
  storage:
    type: dynamic
    extraLabels: {}
    extraVolumes: []
    extraVolumeMounts: []
    static:
      pvcName:
      subPath: "{username}"
    capacity: 10Gi
    homeMountPath: /home/jovyan
    dynamic:
      storageClass:
      pvcNameTemplate: claim-{username}{servername}
      volumeNameTemplate: volume-{username}{servername}
      storageAccessModes: [ReadWriteOnce]
  image:
    name: jupyterhub/k8s-singleuser-sample
    tag: "3.0.3"
    pullPolicy:
    pullSecrets: []
  startTimeout: 300
  cpu:
    limit:
    guarantee:
  memory:
    limit:
    guarantee: 1G
  extraResource:
    limits: {}
    guarantees: {}
  cmd: jupyterhub-singleuser
  defaultUrl:
  extraPodConfig: {}
  profileList: []

# scheduling relates to the user-scheduler pods and user-placeholder pods.
scheduling:
  userScheduler:
    enabled: true
    revisionHistoryLimit:
    replicas: 2
    logLevel: 4
    # plugins are configured on the user-scheduler to score nodes so that user
    # pods get scheduled onto the most busy node. By doing this, we help scale
    # down more effectively. It isn't obvious how to enable/disable scoring
    # plugins, and configure them, to accomplish this.
    #
    # plugins ref: https://kubernetes.io/docs/reference/scheduling/config/#scheduling-plugins-1
    # migration ref: https://kubernetes.io/docs/reference/scheduling/config/#scheduler-configuration-migrations
    #
    plugins:
      score:
        # These scoring plugins are enabled by default according to
        # https://kubernetes.io/docs/reference/scheduling/config/#scheduling-plugins
        # 2022-02-22.
        #
        # Enabled with high priority:
        # - NodeAffinity
        # - InterPodAffinity
        # - NodeResourcesFit
        # - ImageLocality
        # Remains enabled with low default priority:
        # - TaintToleration
        # - PodTopologySpread
        # - VolumeBinding
        # Disabled for scoring:
        # - NodeResourcesBalancedAllocation
        #
        disabled:
          # We disable these plugins (with regards to scoring) to not interfere
          # with or complicate our use of NodeResourcesFit.
          - name: NodeResourcesBalancedAllocation
          # Disable plugins to be allowed to enable them again with a different
          # weight and avoid an error.
          - name: NodeAffinity
          - name: InterPodAffinity
          - name: NodeResourcesFit
          - name: ImageLocality
        enabled:
          - name: NodeAffinity
            weight: 14631
          - name: InterPodAffinity
            weight: 1331
          - name: NodeResourcesFit
            weight: 121
          - name: ImageLocality
            weight: 11
    pluginConfig:
      # Here we declare that we should optimize pods to fit based on a
      # MostAllocated strategy instead of the default LeastAllocated.
      - name: NodeResourcesFit
        args:
          scoringStrategy:
            resources:
              - name: cpu
                weight: 1
              - name: memory
                weight: 1
            type: MostAllocated
    containerSecurityContext:
      runAsUser: 65534 # nobody user
      runAsGroup: 65534 # nobody group
      allowPrivilegeEscalation: false
    image:
      # IMPORTANT: Bumping the minor version of this binary should go hand in
      #            hand with an inspection of the user-scheduler's RBAC
      #            resources that we have forked in
      #            templates/scheduling/user-scheduler/rbac.yaml.
      #
      #            Debugging advice:
      #
      #            - Is the configuration of kube-scheduler broken in
      #              templates/scheduling/user-scheduler/configmap.yaml?
      #
      #            - Is the kube-scheduler binary incompatible with a k8s
      #              api-server that is too new or too old?
      #
      #            - You can update the GitHub workflow that runs tests to
      #              include "deploy/user-scheduler" in the k8s namespace report
      #              and reduce the user-scheduler deployment's replicas to 1 in
      #              dev-config.yaml to get relevant logs from the user-scheduler
      #              pods. Inspect the "Kubernetes namespace report" action!
      #
      #            - Typical failures are that kube-scheduler fails to search for
      #              resources via its "informers", and won't start trying to
      #              schedule pods before they succeed, which may require
      #              additional RBAC permissions or that the k8s api-server is
      #              aware of the resources.
      #
      #            - If "successfully acquired lease" can be seen in the logs, it
      #              is a good sign kube-scheduler is ready to schedule pods.
      #
      name: registry.k8s.io/kube-scheduler
      # tag is automatically bumped to new patch versions by the
      # watch-dependencies.yaml workflow. The minor version is pinned in the
      # workflow, and should be updated there if a minor version bump is done
      # here. We aim to stay around 1 minor version behind the latest k8s
      # version.
      #
      tag: "v1.26.8" # ref: https://github.com/kubernetes/kubernetes/tree/master/CHANGELOG
      pullPolicy:
      pullSecrets: []
    nodeSelector: {}
    tolerations: []
    labels: {}
    annotations: {}
    pdb:
      enabled: true
      maxUnavailable: 1
      minAvailable:
    resources: {}
    serviceAccount:
      create: true
      name:
      annotations: {}
    extraPodSpec: {}
  podPriority:
    enabled: false
    globalDefault: false
    defaultPriority: 0
    imagePullerPriority: -5
    userPlaceholderPriority: -10
  userPlaceholder:
    enabled: true
    image:
      name: registry.k8s.io/pause
      # tag is automatically bumped to new patch versions by the
      # watch-dependencies.yaml workflow.
      #
      # If you update this, also update prePuller.pause.image.tag
      #
      tag: "3.9"
      pullPolicy:
      pullSecrets: []
    revisionHistoryLimit:
    replicas: 0
    labels: {}
    annotations: {}
    containerSecurityContext:
      runAsUser: 65534 # nobody user
      runAsGroup: 65534 # nobody group
      allowPrivilegeEscalation: false
    resources: {}
  corePods:
    tolerations:
      - key: hub.jupyter.org/dedicated
        operator: Equal
        value: core
        effect: NoSchedule
      - key: hub.jupyter.org_dedicated
        operator: Equal
        value: core
        effect: NoSchedule
    nodeAffinity:
      matchNodePurpose: prefer
  userPods:
    tolerations:
      - key: hub.jupyter.org/dedicated
        operator: Equal
        value: user
        effect: NoSchedule
      - key: hub.jupyter.org_dedicated
        operator: Equal
        value: user
        effect: NoSchedule
    nodeAffinity:
      matchNodePurpose: prefer

# prePuller relates to the hook|continuous-image-puller DaemonSets
prePuller:
  revisionHistoryLimit:
  labels: {}
  annotations: {}
  resources: {}
  containerSecurityContext:
    runAsUser: 65534 # nobody user
    runAsGroup: 65534 # nobody group
    allowPrivilegeEscalation: false
  extraTolerations: []
  # hook relates to the hook-image-awaiter Job and hook-image-puller DaemonSet
  hook:
    enabled: true
    pullOnlyOnChanges: true
    # image and the configuration below relate to the hook-image-awaiter Job
    image:
      name: jupyterhub/k8s-image-awaiter
      tag: "3.0.3"
      pullPolicy:
      pullSecrets: []
    containerSecurityContext:
      runAsUser: 65534 # nobody user
      runAsGroup: 65534 # nobody group
      allowPrivilegeEscalation: false
    podSchedulingWaitDuration: 10
    nodeSelector: {}
    tolerations: []
    resources: {}
    serviceAccount:
      create: true
      name:
      annotations: {}
  continuous:
    enabled: true
  pullProfileListImages: true
  extraImages: {}
  pause:
    containerSecurityContext:
      runAsUser: 65534 # nobody user
      runAsGroup: 65534 # nobody group
      allowPrivilegeEscalation: false
    image:
      name: registry.k8s.io/pause
      # tag is automatically bumped to new patch versions by the
      # watch-dependencies.yaml workflow.
      #
      # If you update this, also update scheduling.userPlaceholder.image.tag
      #
      tag: "3.9"
      pullPolicy:
      pullSecrets: []

ingress:
  enabled: false
  annotations: {}
  ingressClassName:
  hosts: []
  pathSuffix:
  pathType: Prefix
  tls: []

# cull relates to the jupyterhub-idle-culler service, responsible for evicting
# inactive singleuser pods.
#
# The configuration below, except for enabled, corresponds to command-line flags
# for jupyterhub-idle-culler as documented here:
# https://github.com/jupyterhub/jupyterhub-idle-culler#as-a-standalone-script
#
cull:
  enabled: true
  users: false # --cull-users
  adminUsers: true # --cull-admin-users
  removeNamedServers: false # --remove-named-servers
  timeout: 3600 # --timeout
  every: 600 # --cull-every
  concurrency: 10 # --concurrency
  maxAge: 0 # --max-age

debug:
  enabled: false

global:
  safeToShowValues: false
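With these defaults, authentication uses the insecure dummy authenticator and chart-managed TLS is off. A minimal production-leaning override file, with the hostname and contact email as hypothetical placeholders, could look like this:

```
hub:
  config:
    JupyterHub:
      authenticator_class: dummy  # replace with a real authenticator in production
proxy:
  https:
    enabled: true
    type: letsencrypt
    letsencrypt:
      contactEmail: ops@example.org  # placeholder
    hosts:
      - jupyter.example.org          # placeholder
cull:
  enabled: true
  timeout: 3600
```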
32
jupyter/base/jupyterhub-ingress.yml
Normal file
@@ -0,0 +1,32 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: jupyterhub-ingress
  namespace: jupyterhub
  annotations:
    kubernetes.io/ingress.class: traefik
    traefik.ingress.kubernetes.io/router.entrypoints: websecure
    traefik.ingress.kubernetes.io/router.tls: "true"
spec:
  defaultBackend:
    service:
      name: proxy-public
      port:
        number: 8443
  tls:
    - hosts:
        - jupyter.drive.test.sunet.se
      secretName: tls-secret

  rules:
    - host: jupyter.drive.test.sunet.se
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: proxy-public
                port:
                  number: 80
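The kubernetes.io/ingress.class annotation is deprecated in favor of the spec.ingressClassName field. A sketch of the forward-compatible spelling, assuming the cluster exposes an IngressClass named traefik:

```
spec:
  ingressClassName: traefik
  # ...tls and rules as above; the annotation form still works with Traefik,
  # but the field is the forward-compatible choice.
```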
25
jupyter/base/jupyterhub-service.yml
Normal file
@@ -0,0 +1,25 @@
---
apiVersion: v1
items:
  - apiVersion: v1
    kind: Service
    metadata:
      labels:
        app: jupyterhub-node
      name: jupyterhub-node
      namespace: jupyterhub
    spec:
      ports:
        - port: 8080
          protocol: TCP
          targetPort: 8080
      selector:
        app: jupyterhub-node
      sessionAffinity: None
      type: ClusterIP
    status:
      loadBalancer: {}
kind: List
metadata:
  resourceVersion: ""
  selfLink: ""
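The List wrapper, the empty status block, and the blank resourceVersion/selfLink fields are artifacts of a kubectl export. A functionally equivalent standalone manifest would be:

```
apiVersion: v1
kind: Service
metadata:
  name: jupyterhub-node
  namespace: jupyterhub
  labels:
    app: jupyterhub-node
spec:
  type: ClusterIP
  selector:
    app: jupyterhub-node
  ports:
    - port: 8080
      targetPort: 8080
      protocol: TCP
```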
15
jupyter/base/kustomization.yaml
Normal file
@@ -0,0 +1,15 @@
resources:
  - jupyterhub-ingress.yml
  - jupyterhub-service.yml

apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
helmCharts:
  - includeCRDs: true
    name: jupyterhub
    namespace: jupyterhub
    releaseName: jupyterhub
    valuesFile: ./values/values.yaml
    version: 3.0.3
helmGlobals:
  chartHome: ./charts/
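This kustomization inflates the vendored chart from chartHome instead of fetching it at build time, which is why this commit checks the 3.0.3 chart into jupyter/base/charts/; rendering the helmCharts field requires kustomize's Helm support (kustomize build --enable-helm). Small overrides can also live inline instead of in the referenced values file, as in this sketch using kustomize's valuesInline field (assumed available in recent kustomize releases):

```
helmCharts:
  - name: jupyterhub
    namespace: jupyterhub
    releaseName: jupyterhub
    version: 3.0.3
    includeCRDs: true
    # inline alternative to valuesFile for small tweaks (illustrative)
    valuesInline:
      debug:
        enabled: true
```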