Compare commits
15 commits
cosmos-ops...main
1c22bfb722
46ade449bb
ca2a3ef1c9
5df6b70bbf
46a7ccc30f
d71a71f226
22871236cb
d19f81d0c4
d6b200faad
1449221f43
487770e350
c19aaa5a97
980adbf867
c27a2195cc
1c72cff364
33 changed files with 1608 additions and 514 deletions
IaC-prod/dnsoutput.tf (new file, 22 lines added)
@@ -0,0 +1,22 @@
output "control_ip_addr_dco" {
  value = [ for node in resource.openstack_compute_instance_v2.controller-nodes-dco : "knotctl -z sunet.se --ttl 360 -r A -d ${node.access_ip_v4} -n ${node.name}\nknotctl -z sunet.se --ttl 360 -r AAAA -d ${node.access_ip_v6} -n ${node.name}" ]
}

output "worker_ip_addr_dco" {
  value = [ for node in resource.openstack_compute_instance_v2.worker-nodes-dco : "knotctl -z sunet.se --ttl 360 -r A -d ${node.access_ip_v4} -n ${node.name}\nknotctl -z sunet.se --ttl 360 -r AAAA -d ${node.access_ip_v6} -n ${node.name}" ]
}

output "control_ip_addr_sto3" {
  value = [ for node in resource.openstack_compute_instance_v2.controller-nodes-sto3 : "knotctl -z sunet.se --ttl 360 -r A -d ${node.access_ip_v4} -n ${node.name}\nknotctl -z sunet.se --ttl 360 -r AAAA -d ${node.access_ip_v6} -n ${node.name}" ]
}

output "worker_ip_addr_sto3" {
  value = [ for node in resource.openstack_compute_instance_v2.worker-nodes-sto3 : "knotctl -z sunet.se --ttl 360 -r A -d ${node.access_ip_v4} -n ${node.name}\nknotctl -z sunet.se --ttl 360 -r AAAA -d ${node.access_ip_v6} -n ${node.name}" ]
}

output "control_ip_addr_sto4" {
  value = [ for node in resource.openstack_compute_instance_v2.controller-nodes-sto4 : "knotctl -z sunet.se --ttl 360 -r A -d ${node.access_ip_v4} -n ${node.name}\nknotctl -z sunet.se --ttl 360 -r AAAA -d ${node.access_ip_v6} -n ${node.name}" ]
}

output "worker_ip_addr_sto4" {
  value = [ for node in resource.openstack_compute_instance_v2.worker-nodes-sto4 : "knotctl -z sunet.se --ttl 360 -r A -d ${node.access_ip_v4} -n ${node.name}\nknotctl -z sunet.se --ttl 360 -r AAAA -d ${node.access_ip_v6} -n ${node.name}" ]
}
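These outputs render one knotctl invocation per A and AAAA record, so the DNS entries for every node can be pasted straight into the zone tooling after an apply. A rough sketch of what a single rendered element could look like, using a made-up node name and documentation addresses rather than anything from the repository:

# Hypothetical element of control_ip_addr_dco (node name and addresses invented):
# "knotctl -z sunet.se --ttl 360 -r A -d 192.0.2.10 -n k8sc1.matrix.test.sunet.se
# knotctl -z sunet.se --ttl 360 -r AAAA -d 2001:db8::10 -n k8sc1.matrix.test.sunet.se"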
@@ -3,3 +3,22 @@ data "openstack_images_image_v2" "debian12image" {
  name = "debian-12" # Name of image to be used
  most_recent = true
}

data "openstack_images_image_v2" "debian12image-dco" {
  name = "debian-12" # Name of image to be used
  most_recent = true
  provider = openstack.dco
}

data "openstack_images_image_v2" "debian12image-sto4" {
  name = "debian-12" # Name of image to be used
  most_recent = true
  provider = openstack.sto4
}

data "openstack_images_image_v2" "debian12image-sto3" {
  name = "debian-12" # Name of image to be used
  most_recent = true
  provider = openstack.sto3
}
IaC-prod/k8snodes-dco.tf (new file, 138 lines added)
@@ -0,0 +1,138 @@
#
# Global DCO definitions
#

locals {
  dcodc = "dco"
  dconodenrbase = index(var.datacenters, "dco")
  dcoindexjump = length(var.datacenters)
}

#
# Control node resources DCO
#

resource "openstack_networking_port_v2" "kubecport-dco" {
  name = "${var.controller_name}${count.index * local.dcoindexjump + 1 + local.dconodenrbase}-${replace(var.dns_suffix,".","-")}-${local.dcodc}-port"
  # We create as many ports as there are instances created
  count = var.controllerdcreplicas
  network_id = data.openstack_networking_network_v2.public-dco.id
  # A list of security group IDs
  security_group_ids = [
    resource.openstack_networking_secgroup_v2.ssh-from-jump-hosts-dco.id,
    resource.openstack_networking_secgroup_v2.microk8s-dco.id,
    resource.openstack_networking_secgroup_v2.k8s-external-control-dco.id
  ]
  admin_state_up = "true"
  provider = openstack.dco
}

# Boot volume for node
resource "openstack_blockstorage_volume_v3" "kubecvolumeboot-dco" {
  count = var.controllerdcreplicas # Replicas per datacenter
  name = "${var.controller_name}${count.index * local.dcoindexjump + 1 + local.dconodenrbase}-${replace(var.dns_suffix,".","-")}-${local.dcodc}-vol"
  description = "OS volume for kubernetes control node ${count.index * local.dcoindexjump + 1 + local.dconodenrbase}"
  size = 100
  image_id = data.openstack_images_image_v2.debian12image-dco.id
  enable_online_resize = true # Allow us to resize volume while attached.
  provider = openstack.dco
}

resource "openstack_compute_instance_v2" "controller-nodes-dco" {
  count = var.controllerdcreplicas # Replicas per datacenter
  name = "${var.controller_name}${count.index * local.dcoindexjump + 1 + local.dconodenrbase}.${var.dns_suffix}"
  flavor_name = "${var.controller_instance_type}"
  key_pair = "${var.keynameworkers}"
  provider = openstack.dco
  security_groups = [
    resource.openstack_networking_secgroup_v2.microk8s-dco.name,
    resource.openstack_networking_secgroup_v2.ssh-from-jump-hosts-dco.name,
    resource.openstack_networking_secgroup_v2.k8s-external-control-dco.name
  ]

  block_device {
    uuid = resource.openstack_blockstorage_volume_v3.kubecvolumeboot-dco[count.index].id
    source_type = "volume"
    destination_type = "volume"
    boot_index = 0
  }

  scheduler_hints {
    group = openstack_compute_servergroup_v2.controllers-dco.id
  }
  network {
    port = resource.openstack_networking_port_v2.kubecport-dco[count.index].id
  }
}

#
# Worker node resources DCO
#

resource "openstack_networking_port_v2" "kubewport-dco" {
  name = "${var.worker_name}${count.index * local.dcoindexjump + 1 + local.dconodenrbase}-${replace(var.dns_suffix,".","-")}-${local.dcodc}-port"
  # We create as many ports as there are instances created
  count = var.workerdcreplicas
  network_id = data.openstack_networking_network_v2.public-dco.id
  # A list of security group IDs
  security_group_ids = [
    resource.openstack_networking_secgroup_v2.ssh-from-jump-hosts-dco.id,
    resource.openstack_networking_secgroup_v2.microk8s-dco.id,
    resource.openstack_networking_secgroup_v2.k8s-external-worker-dco.id
  ]
  admin_state_up = "true"
  provider = openstack.dco
}

# Boot volume for node
resource "openstack_blockstorage_volume_v3" "kubewvolumeboot-dco" {
  count = var.workerdcreplicas # Replicas per datacenter
  name = "${var.worker_name}${count.index * local.dcoindexjump + 1 + local.dconodenrbase}-${replace(var.dns_suffix,".","-")}-${local.dcodc}-vol"
  description = "OS volume for kubernetes worker node ${count.index * local.dcoindexjump + 1 + local.dconodenrbase}"
  size = 100
  image_id = data.openstack_images_image_v2.debian12image-dco.id
  enable_online_resize = true # Allow us to resize volume while attached.
  provider = openstack.dco
}

resource "openstack_blockstorage_volume_v3" "kubewvolumerook-dco" {
  count = var.workerdcreplicas # Replicas per datacenter
  name = "${var.worker_name}${count.index * local.dcoindexjump + 1 + local.dconodenrbase}-${replace(var.dns_suffix,".","-")}-${local.dcodc}-rook-vol"
  description = "Rook storage volume for kubernetes worker node ${count.index * local.dcoindexjump + 1 + local.dconodenrbase}"
  size = 100
  enable_online_resize = true # Allow us to resize volume while attached.
  provider = openstack.dco
}

resource "openstack_compute_instance_v2" "worker-nodes-dco" {
  count = var.workerdcreplicas # Replicas per datacenter
  name = "${var.worker_name}${count.index * local.dcoindexjump + 1 + local.dconodenrbase}.${var.dns_suffix}"
  flavor_name = "${var.worker_instance_type}"
  key_pair = "${var.keynameworkers}"
  provider = openstack.dco
  security_groups = [
    resource.openstack_networking_secgroup_v2.microk8s-dco.name,
    resource.openstack_networking_secgroup_v2.ssh-from-jump-hosts-dco.name,
    resource.openstack_networking_secgroup_v2.k8s-external-worker-dco.name
  ]

  block_device {
    uuid = resource.openstack_blockstorage_volume_v3.kubewvolumeboot-dco[count.index].id
    source_type = "volume"
    destination_type = "volume"
    boot_index = 0
  }
  block_device {
    uuid = resource.openstack_blockstorage_volume_v3.kubewvolumerook-dco[count.index].id
    source_type = "volume"
    destination_type = "volume"
    boot_index = 1
  }

  scheduler_hints {
    group = openstack_compute_servergroup_v2.workers-dco.id
  }
  network {
    port = resource.openstack_networking_port_v2.kubewport-dco[count.index].id
  }
}
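The name arithmetic above interleaves node numbers across datacenters: dconodenrbase is the position of "dco" in var.datacenters and dcoindexjump is the number of datacenters. A small worked sketch, assuming var.datacenters = ["dco", "sto3", "sto4"] and var.controller_name = "k8sc" (both assumptions, neither variable is shown in this diff):

# Assumed: var.datacenters = ["dco", "sto3", "sto4"], var.controller_name = "k8sc"
# dconodenrbase = index(var.datacenters, "dco") = 0, dcoindexjump = 3
# count.index 0 -> 0 * 3 + 1 + 0 = 1 -> "k8sc1.<dns_suffix>"
# count.index 1 -> 1 * 3 + 1 + 0 = 4 -> "k8sc4.<dns_suffix>"
# sto3 (base 1) would get 2 and 5, sto4 (base 2) would get 3 and 6, so numbers never collide.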
IaC-prod/k8snodes-sto3.tf (new file, 139 lines added)
@@ -0,0 +1,139 @@
#
# Global definitions sto3
#

locals {
  sto3dc = "sto3"
  sto3nodenrbase = index(var.datacenters, "sto3")
  sto3indexjump = length(var.datacenters)
}

#
# Control node resources STO3
#

resource "openstack_networking_port_v2" "kubecport-sto3" {
  name = "${var.controller_name}${count.index * local.sto3indexjump + 1 + local.sto3nodenrbase}-${replace(var.dns_suffix,".","-")}-${local.sto3dc}-port"
  # We create as many ports as there are instances created
  count = var.controllerdcreplicas
  network_id = data.openstack_networking_network_v2.public-sto3.id
  # A list of security group IDs
  security_group_ids = [
    resource.openstack_networking_secgroup_v2.ssh-from-jump-hosts-sto3.id,
    resource.openstack_networking_secgroup_v2.microk8s-sto3.id,
    resource.openstack_networking_secgroup_v2.k8s-external-control-sto3.id
  ]
  admin_state_up = "true"
  provider = openstack.sto3
}

# Boot volume for node
resource "openstack_blockstorage_volume_v3" "kubecvolumeboot-sto3" {
  count = var.controllerdcreplicas # Replicas per datacenter
  name = "${var.controller_name}${count.index * local.sto3indexjump + 1 + local.sto3nodenrbase}-${replace(var.dns_suffix,".","-")}-${local.sto3dc}-vol"
  description = "OS volume for kubernetes control node ${count.index * local.sto3indexjump + 1 + local.sto3nodenrbase}"
  size = 100
  image_id = data.openstack_images_image_v2.debian12image-sto3.id
  enable_online_resize = true # Allow us to resize volume while attached.
  provider = openstack.sto3
}

resource "openstack_compute_instance_v2" "controller-nodes-sto3" {
  count = var.controllerdcreplicas # Replicas per datacenter
  name = "${var.controller_name}${count.index * local.sto3indexjump + 1 + local.sto3nodenrbase}.${var.dns_suffix}"
  flavor_name = "${var.controller_instance_type}"
  key_pair = "${var.keynameworkers}"
  provider = openstack.sto3
  security_groups = [
    resource.openstack_networking_secgroup_v2.microk8s-sto3.name,
    resource.openstack_networking_secgroup_v2.ssh-from-jump-hosts-sto3.name,
    resource.openstack_networking_secgroup_v2.k8s-external-control-sto3.name
  ]

  block_device {
    uuid = resource.openstack_blockstorage_volume_v3.kubecvolumeboot-sto3[count.index].id
    source_type = "volume"
    destination_type = "volume"
    boot_index = 0
  }

  scheduler_hints {
    group = openstack_compute_servergroup_v2.controllers-sto3.id
  }
  network {
    port = resource.openstack_networking_port_v2.kubecport-sto3[count.index].id
  }
}


#
# Worker node resources STO3
#

resource "openstack_networking_port_v2" "kubewport-sto3" {
  name = "${var.worker_name}${count.index * local.sto3indexjump + 1 + local.sto3nodenrbase}-${replace(var.dns_suffix,".","-")}-${local.sto3dc}-port"
  # We create as many ports as there are instances created
  count = var.workerdcreplicas
  network_id = data.openstack_networking_network_v2.public-sto3.id
  # A list of security group IDs
  security_group_ids = [
    resource.openstack_networking_secgroup_v2.ssh-from-jump-hosts-sto3.id,
    resource.openstack_networking_secgroup_v2.microk8s-sto3.id,
    resource.openstack_networking_secgroup_v2.k8s-external-worker-sto3.id
  ]
  admin_state_up = "true"
  provider = openstack.sto3
}

# Boot volume for node
resource "openstack_blockstorage_volume_v3" "kubewvolumeboot-sto3" {
  count = var.workerdcreplicas # Replicas per datacenter
  name = "${var.worker_name}${count.index * local.sto3indexjump + 1 + local.sto3nodenrbase}-${replace(var.dns_suffix,".","-")}-${local.sto3dc}-vol"
  description = "OS volume for kubernetes worker node ${count.index * local.sto3indexjump + 1 + local.sto3nodenrbase}"
  size = 100
  image_id = data.openstack_images_image_v2.debian12image-sto3.id
  enable_online_resize = true # Allow us to resize volume while attached.
  provider = openstack.sto3
}

resource "openstack_blockstorage_volume_v3" "kubewvolumerook-sto3" {
  count = var.workerdcreplicas # Replicas per datacenter
  name = "${var.worker_name}${count.index * local.sto3indexjump + 1 + local.sto3nodenrbase}-${replace(var.dns_suffix,".","-")}-${local.sto3dc}-rook-vol"
  description = "Rook storage volume for kubernetes worker node ${count.index * local.sto3indexjump + 1 + local.sto3nodenrbase}"
  size = 100
  enable_online_resize = true # Allow us to resize volume while attached.
  provider = openstack.sto3
}

resource "openstack_compute_instance_v2" "worker-nodes-sto3" {
  count = var.workerdcreplicas # Replicas per datacenter
  name = "${var.worker_name}${count.index * local.sto3indexjump + 1 + local.sto3nodenrbase}.${var.dns_suffix}"
  flavor_name = "${var.worker_instance_type}"
  key_pair = "${var.keynameworkers}"
  provider = openstack.sto3
  security_groups = [
    resource.openstack_networking_secgroup_v2.microk8s-sto3.name,
    resource.openstack_networking_secgroup_v2.ssh-from-jump-hosts-sto3.name,
    resource.openstack_networking_secgroup_v2.k8s-external-worker-sto3.name
  ]

  block_device {
    uuid = resource.openstack_blockstorage_volume_v3.kubewvolumeboot-sto3[count.index].id
    source_type = "volume"
    destination_type = "volume"
    boot_index = 0
  }
  block_device {
    uuid = resource.openstack_blockstorage_volume_v3.kubewvolumerook-sto3[count.index].id
    source_type = "volume"
    destination_type = "volume"
    boot_index = 1
  }

  scheduler_hints {
    group = openstack_compute_servergroup_v2.workers-sto3.id
  }
  network {
    port = resource.openstack_networking_port_v2.kubewport-sto3[count.index].id
  }
}
IaC-prod/k8snodes-sto4.tf (new file, 138 lines added)
@@ -0,0 +1,138 @@
#
# Global definitions for sto4
#
locals {
  sto4dc = "sto4"
  sto4nodenrbase = index(var.datacenters, "sto4")
  sto4indexjump = length(var.datacenters)
}

#
# Controller node resources
#

resource "openstack_networking_port_v2" "kubecport-sto4" {
  name = "${var.controller_name}${count.index * local.sto4indexjump + 1 + local.sto4nodenrbase}-${replace(var.dns_suffix,".","-")}-${local.sto4dc}-port"
  # We create as many ports as there are instances created
  count = var.controllerdcreplicas
  network_id = data.openstack_networking_network_v2.public-sto4.id
  # A list of security group IDs
  security_group_ids = [
    resource.openstack_networking_secgroup_v2.ssh-from-jump-hosts-sto4.id,
    resource.openstack_networking_secgroup_v2.microk8s-sto4.id,
    resource.openstack_networking_secgroup_v2.k8s-external-control-sto4.id
  ]
  admin_state_up = "true"
  provider = openstack.sto4
}

# Boot volume for node
resource "openstack_blockstorage_volume_v3" "kubecvolumeboot-sto4" {
  count = var.controllerdcreplicas # Replicas per datacenter
  name = "${var.controller_name}${count.index * local.sto4indexjump + 1 + local.sto4nodenrbase}-${replace(var.dns_suffix,".","-")}-${local.sto4dc}-vol"
  description = "OS volume for kubernetes control node ${count.index * local.sto4indexjump + 1 + local.sto4nodenrbase}"
  size = 100
  image_id = data.openstack_images_image_v2.debian12image-sto4.id
  enable_online_resize = true # Allow us to resize volume while attached.
  provider = openstack.sto4
}

resource "openstack_compute_instance_v2" "controller-nodes-sto4" {
  count = var.controllerdcreplicas # Replicas per datacenter
  name = "${var.controller_name}${count.index * local.sto4indexjump + 1 + local.sto4nodenrbase}.${var.dns_suffix}"
  flavor_name = "${var.controller_instance_type}"
  key_pair = "${var.keynameworkers}"
  provider = openstack.sto4
  security_groups = [
    resource.openstack_networking_secgroup_v2.microk8s-sto4.name,
    resource.openstack_networking_secgroup_v2.ssh-from-jump-hosts-sto4.name,
    resource.openstack_networking_secgroup_v2.k8s-external-control-sto4.name
  ]

  block_device {
    uuid = resource.openstack_blockstorage_volume_v3.kubecvolumeboot-sto4[count.index].id
    source_type = "volume"
    destination_type = "volume"
    boot_index = 0
  }

  scheduler_hints {
    group = openstack_compute_servergroup_v2.controllers-sto4.id
  }
  network {
    port = resource.openstack_networking_port_v2.kubecport-sto4[count.index].id
  }
}


#
# Worker node resources
#

resource "openstack_networking_port_v2" "kubewport-sto4" {
  name = "${var.worker_name}${count.index * local.sto4indexjump + 1 + local.sto4nodenrbase}-${replace(var.dns_suffix,".","-")}-${local.sto4dc}-port"
  # We create as many ports as there are instances created
  count = var.workerdcreplicas
  network_id = data.openstack_networking_network_v2.public-sto4.id
  # A list of security group IDs
  security_group_ids = [
    resource.openstack_networking_secgroup_v2.ssh-from-jump-hosts-sto4.id,
    resource.openstack_networking_secgroup_v2.microk8s-sto4.id,
    resource.openstack_networking_secgroup_v2.k8s-external-worker-sto4.id
  ]
  admin_state_up = "true"
  provider = openstack.sto4
}

# Boot volume for node
resource "openstack_blockstorage_volume_v3" "kubewvolumeboot-sto4" {
  count = var.workerdcreplicas # Replicas per datacenter
  name = "${var.worker_name}${count.index * local.sto4indexjump + 1 + local.sto4nodenrbase}-${replace(var.dns_suffix,".","-")}-${local.sto4dc}-vol"
  description = "OS volume for kubernetes worker node ${count.index * local.sto4indexjump + 1 + local.sto4nodenrbase}"
  size = 100
  image_id = data.openstack_images_image_v2.debian12image-sto4.id
  enable_online_resize = true # Allow us to resize volume while attached.
  provider = openstack.sto4
}

resource "openstack_blockstorage_volume_v3" "kubewvolumerook-sto4" {
  count = var.workerdcreplicas # Replicas per datacenter
  name = "${var.worker_name}${count.index * local.sto4indexjump + 1 + local.sto4nodenrbase}-${replace(var.dns_suffix,".","-")}-${local.sto4dc}-rook-vol"
  description = "Rook storage volume for kubernetes worker node ${count.index * local.sto4indexjump + 1 + local.sto4nodenrbase}"
  size = 100
  enable_online_resize = true # Allow us to resize volume while attached.
  provider = openstack.sto4
}

resource "openstack_compute_instance_v2" "worker-nodes-sto4" {
  count = var.workerdcreplicas # Replicas per datacenter
  name = "${var.worker_name}${count.index * local.sto4indexjump + 1 + local.sto4nodenrbase}.${var.dns_suffix}"
  flavor_name = "${var.worker_instance_type}"
  key_pair = "${var.keynameworkers}"
  provider = openstack.sto4
  security_groups = [
    resource.openstack_networking_secgroup_v2.microk8s-sto4.name,
    resource.openstack_networking_secgroup_v2.ssh-from-jump-hosts-sto4.name,
    resource.openstack_networking_secgroup_v2.k8s-external-worker-sto4.name
  ]

  block_device {
    uuid = resource.openstack_blockstorage_volume_v3.kubewvolumeboot-sto4[count.index].id
    source_type = "volume"
    destination_type = "volume"
    boot_index = 0
  }
  block_device {
    uuid = resource.openstack_blockstorage_volume_v3.kubewvolumerook-sto4[count.index].id
    source_type = "volume"
    destination_type = "volume"
    boot_index = 1
  }

  scheduler_hints {
    group = openstack_compute_servergroup_v2.workers-sto4.id
  }
  network {
    port = resource.openstack_networking_port_v2.kubewport-sto4[count.index].id
  }
}
IaC-prod/lb.tf (new file, 48 lines added)
@@ -0,0 +1,48 @@

# Network port
resource "openstack_networking_port_v2" "lb1-port-dco" {
  name = "lb1-${replace(var.dns_suffix,".","-")}-${local.dcodc}-port"
  network_id = data.openstack_networking_network_v2.public-dco.id
  # A list of security group IDs
  security_group_ids = [
    resource.openstack_networking_secgroup_v2.ssh-from-jump-hosts-dco.id,
    resource.openstack_networking_secgroup_v2.lb-dco.id
  ]
  admin_state_up = "true"
  provider = openstack.dco
}

# Boot volume for node
resource "openstack_blockstorage_volume_v3" "lb1volumeboot-dco" {
  name = "lb1-${replace(var.dns_suffix,".","-")}-${local.dcodc}-vol"
  description = "OS volume for lb1.matrix.test.sunet.se"
  size = 50
  image_id = data.openstack_images_image_v2.debian12image-dco.id
  enable_online_resize = true # Allow us to resize volume while attached.
  provider = openstack.dco
}

resource "openstack_compute_instance_v2" "lb1-node-dco" {
  name = "lb1.${var.dns_suffix}"
  flavor_name = "${var.lb_instance_type}"
  key_pair = "${var.keynameworkers}"
  provider = openstack.dco
  security_groups = [
    resource.openstack_networking_secgroup_v2.ssh-from-jump-hosts-dco.name,
    resource.openstack_networking_secgroup_v2.lb-dco.name
  ]

  block_device {
    uuid = resource.openstack_blockstorage_volume_v3.lb1volumeboot-dco.id
    source_type = "volume"
    destination_type = "volume"
    boot_index = 0
  }

  network {
    port = resource.openstack_networking_port_v2.lb1-port-dco.id
  }
}
@@ -11,5 +11,23 @@ required_version = ">= 0.14.0"

# Configure the OpenStack Provider
provider "openstack" {
  cloud = "${var.cloud_name}"
  cloud = "${var.clouddco_name}"
}

# DCO Matrix Test
provider "openstack" {
  cloud = "${var.clouddco_name}"
  alias = "dco"
}

# STO3 Matrix test
provider "openstack" {
  cloud = "${var.cloudsto3_name}"
  alias = "sto3"
}

# STO4 Matrix test
provider "openstack" {
  cloud = "${var.cloudsto4_name}"
  alias = "sto4"
}
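The aliased provider blocks above reference three region-specific cloud name variables; their declarations are not part of this diff. A minimal sketch of what those declarations presumably look like (types and descriptions are assumptions, not taken from the repository), with each resource then selecting a region via provider = openstack.dco, openstack.sto3 or openstack.sto4 as in the files above:

# Assumed variable declarations; the actual definitions live elsewhere in the repo.
variable "clouddco_name" {
  type        = string
  description = "clouds.yaml entry for the DCO region (assumed)"
}

variable "cloudsto3_name" {
  type        = string
  description = "clouds.yaml entry for the STO3 region (assumed)"
}

variable "cloudsto4_name" {
  type        = string
  description = "clouds.yaml entry for the STO4 region (assumed)"
}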
IaC-prod/mgmt.tf (new file, 46 lines added)
@@ -0,0 +1,46 @@

# Network port
resource "openstack_networking_port_v2" "mgmt1-port-dco" {
  name = "mgmt1-${replace(var.dns_suffix,".","-")}-${local.dcodc}-port"
  network_id = data.openstack_networking_network_v2.public-dco.id
  # A list of security group IDs
  security_group_ids = [
    resource.openstack_networking_secgroup_v2.ssh-from-jump-hosts-dco.id
  ]
  admin_state_up = "true"
  provider = openstack.dco
}

# Boot volume for node
resource "openstack_blockstorage_volume_v3" "mgmt1volumeboot-dco" {
  name = "mgmt1-${replace(var.dns_suffix,".","-")}-${local.dcodc}-vol"
  description = "OS volume for mgmt1.matrix.test.sunet.se"
  size = 50
  image_id = data.openstack_images_image_v2.debian12image-dco.id
  enable_online_resize = true # Allow us to resize volume while attached.
  provider = openstack.dco
}

resource "openstack_compute_instance_v2" "mgmt1-node-dco" {
  name = "mgmt1.${var.dns_suffix}"
  flavor_name = "${var.lb_instance_type}"
  key_pair = "${var.keynameworkers}"
  provider = openstack.dco
  security_groups = [
    resource.openstack_networking_secgroup_v2.ssh-from-jump-hosts-dco.name
  ]

  block_device {
    uuid = resource.openstack_blockstorage_volume_v3.mgmt1volumeboot-dco.id
    source_type = "volume"
    destination_type = "volume"
    boot_index = 0
  }

  network {
    port = resource.openstack_networking_port_v2.mgmt1-port-dco.id
  }
}
@@ -1,3 +1,18 @@
data "openstack_networking_network_v2" "public" {
  name = "public" # Name of network to use.
}

data "openstack_networking_network_v2" "public-dco" {
  name = "public" # Name of network to use.
  provider = openstack.dco
}

data "openstack_networking_network_v2" "public-sto4" {
  name = "public" # Name of network to use.
  provider = openstack.sto4
}

data "openstack_networking_network_v2" "public-sto3" {
  name = "public" # Name of network to use.
  provider = openstack.sto3
}
@@ -1,111 +0,0 @@

#
# Controller node resources
#

resource "openstack_networking_port_v2" "kubecport" {
  name = "${var.controller_name}${count.index+1}-${replace(var.dns_suffix,".","-")}-port"
  # We create as many ports as there are instances created
  count = var.controller_instance_count
  network_id = data.openstack_networking_network_v2.public.id
  # A list of security group ID
  security_group_ids = [
    data.openstack_networking_secgroup_v2.sshfromjumphosts.id,
    data.openstack_networking_secgroup_v2.allegress.id,
    resource.openstack_networking_secgroup_v2.microk8s.id,
    resource.openstack_networking_secgroup_v2.https.id
  ]
  admin_state_up = "true"
}

resource "openstack_blockstorage_volume_v3" "kubecvolumeboot" {
  count = var.controller_instance_count # size of cluster
  name = "${var.controller_name}${count.index+1}-${replace(var.dns_suffix,".","-")}-vol"
  description = "OS volume for kubernetes control node ${count.index + 1}"
  size = 100
  image_id = data.openstack_images_image_v2.debian12image.id
  enable_online_resize = true # Allow us to resize volume while attached.
}

resource "openstack_compute_instance_v2" "controller-nodes" {
  count = var.controller_instance_count
  name = "${var.controller_name}${count.index+1}.${var.dns_suffix}"
  flavor_name = "${var.controller_instance_type}"
  key_pair = "${var.keyname}"
  security_groups = [
    data.openstack_networking_secgroup_v2.sshfromjumphosts.name,
    data.openstack_networking_secgroup_v2.allegress.name,
    resource.openstack_networking_secgroup_v2.microk8s.name,
    resource.openstack_networking_secgroup_v2.https.name
  ]
  block_device {
    uuid = resource.openstack_blockstorage_volume_v3.kubecvolumeboot[count.index].id
    source_type = "volume"
    destination_type = "volume"
    boot_index = 0
  }
  scheduler_hints {
    group = openstack_compute_servergroup_v2.controllers.id
  }
  network {
    port = resource.openstack_networking_port_v2.kubecport[count.index].id
  }
}

#
# Worker node resources
#


#
# Controller node resources
#

resource "openstack_networking_port_v2" "kubewport" {
  name = "${var.worker_name}${count.index+1}-${replace(var.dns_suffix,".","-")}-port"
  # We create as many ports as there are instances created
  count = var.controller_instance_count
  network_id = data.openstack_networking_network_v2.public.id
  # A list of security group ID
  security_group_ids = [
    data.openstack_networking_secgroup_v2.sshfromjumphosts.id,
    data.openstack_networking_secgroup_v2.allegress.id,
    resource.openstack_networking_secgroup_v2.microk8s.id
  ]
  admin_state_up = "true"
}

resource "openstack_blockstorage_volume_v3" "kubewvolumeboot" {
  count = var.controller_instance_count # size of cluster
  name = "${var.worker_name}${count.index+1}-${replace(var.dns_suffix,".","-")}-vol"
  description = "OS volume for kubernetes worker node ${count.index + 1}"
  size = 100
  image_id = data.openstack_images_image_v2.debian12image.id
  enable_online_resize = true # Allow us to resize volume while attached.
}


resource "openstack_compute_instance_v2" "worker-nodes" {
  count = var.worker_instance_count
  name = "${var.worker_name}${count.index+1}.${var.dns_suffix}"
  flavor_name = "${var.worker_instance_type}"
  key_pair = "${var.keyname}"
  security_groups = [
    data.openstack_networking_secgroup_v2.sshfromjumphosts.name,
    data.openstack_networking_secgroup_v2.allegress.name,
    resource.openstack_networking_secgroup_v2.microk8s.name
  ]

  block_device {
    uuid = resource.openstack_blockstorage_volume_v3.kubewvolumeboot[count.index].id
    source_type = "volume"
    destination_type = "volume"
    boot_index = 0
  }
  scheduler_hints {
    group = openstack_compute_servergroup_v2.workers.id
  }
  network {
    port = resource.openstack_networking_port_v2.kubewport[count.index].id
  }
}
IaC-prod/securitygroups-k8s-dco.tf (new file, 177 lines added)
@@ -0,0 +1,177 @@

# Security groups dco
resource "openstack_networking_secgroup_v2" "microk8s-dco" {
  name = "microk8s"
  description = "Traffic to allow between microk8s hosts"
  provider = openstack.dco
}

resource "openstack_networking_secgroup_v2" "ssh-from-jump-hosts-dco" {
  name = "ssh-from-jumphosts"
  description = "Allow ssh traffic from sunet jumphosts."
  provider = openstack.dco
}

#
# Security group rules for microk8s
#
resource "openstack_networking_secgroup_rule_v2" "microk8s_rule_v4_dco" {
  count = length(var.k8sports)
  direction = "ingress"
  ethertype = "IPv4"
  protocol = var.k8sports[count.index][keys(var.k8sports[count.index])[0]]
  port_range_min = keys(var.k8sports[count.index])[0]
  port_range_max = keys(var.k8sports[count.index])[0]
  provider = openstack.dco
  remote_group_id = openstack_networking_secgroup_v2.microk8s-dco.id
  security_group_id = openstack_networking_secgroup_v2.microk8s-dco.id
}

resource "openstack_networking_secgroup_rule_v2" "microk8s_rule_v6_dco" {
  count = length(var.k8sports)
  direction = "ingress"
  ethertype = "IPv6"
  protocol = var.k8sports[count.index][keys(var.k8sports[count.index])[0]]
  port_range_min = keys(var.k8sports[count.index])[0]
  port_range_max = keys(var.k8sports[count.index])[0]
  provider = openstack.dco
  remote_group_id = openstack_networking_secgroup_v2.microk8s-dco.id
  security_group_id = openstack_networking_secgroup_v2.microk8s-dco.id
}

#
# From STO3 to DCO
#

# Control nodes
resource "openstack_networking_secgroup_rule_v2" "microk8s_controller_rule_v4_sto3_to_dco" {
  count = length(var.k8sports) * length(resource.openstack_compute_instance_v2.controller-nodes-sto3)
  direction = "ingress"
  ethertype = "IPv4"
  protocol = var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-sto3))][keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-sto3))])[0]]
  port_range_min = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-sto3))])[0]
  port_range_max = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-sto3))])[0]
  provider = openstack.dco
  remote_ip_prefix = join("/", [ resource.openstack_compute_instance_v2.controller-nodes-sto3[count.index % length(resource.openstack_compute_instance_v2.controller-nodes-sto3)].access_ip_v4, "32" ])
  security_group_id = openstack_networking_secgroup_v2.microk8s-dco.id
}

resource "openstack_networking_secgroup_rule_v2" "microk8s_controller_rule_v6_sto3_to_dco" {
  count = length(var.k8sports) * length(resource.openstack_compute_instance_v2.controller-nodes-sto3)
  direction = "ingress"
  ethertype = "IPv6"
  protocol = var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-sto3))][keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-sto3))])[0]]
  port_range_min = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-sto3))])[0]
  port_range_max = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-sto3))])[0]
  provider = openstack.dco
  remote_ip_prefix = join("/",[ replace(resource.openstack_compute_instance_v2.controller-nodes-sto3[count.index % length(resource.openstack_compute_instance_v2.controller-nodes-sto3)].access_ip_v6, "/[\\[\\]']/",""), "128"])
  security_group_id = openstack_networking_secgroup_v2.microk8s-dco.id
}

# Worker nodes
resource "openstack_networking_secgroup_rule_v2" "microk8s_worker_rule_v4_sto3_to_dco" {
  count = length(var.k8sports) * length(resource.openstack_compute_instance_v2.worker-nodes-sto3)
  direction = "ingress"
  ethertype = "IPv4"
  protocol = var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-sto3))][keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-sto3))])[0]]
  port_range_min = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-sto3))])[0]
  port_range_max = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-sto3))])[0]
  provider = openstack.dco
  remote_ip_prefix = join("/", [ resource.openstack_compute_instance_v2.worker-nodes-sto3[count.index % length(resource.openstack_compute_instance_v2.worker-nodes-sto3)].access_ip_v4, "32" ])
  security_group_id = openstack_networking_secgroup_v2.microk8s-dco.id
}

resource "openstack_networking_secgroup_rule_v2" "microk8s_worker_rule_v6_sto3_to_dco" {
  count = length(var.k8sports) * length(resource.openstack_compute_instance_v2.worker-nodes-sto3)
  direction = "ingress"
  ethertype = "IPv6"
  protocol = var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-sto3))][keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-sto3))])[0]]
  port_range_min = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-sto3))])[0]
  port_range_max = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-sto3))])[0]
  provider = openstack.dco
  remote_ip_prefix = join("/",[ replace(resource.openstack_compute_instance_v2.worker-nodes-sto3[count.index % length(resource.openstack_compute_instance_v2.worker-nodes-sto3)].access_ip_v6, "/[\\[\\]']/",""), "128"])
  security_group_id = openstack_networking_secgroup_v2.microk8s-dco.id
}

#
# From STO4 to DCO
#

# Controllers
resource "openstack_networking_secgroup_rule_v2" "microk8s_controller_rule_v4_sto4_to_dco" {
  count = length(var.k8sports) * length(resource.openstack_compute_instance_v2.controller-nodes-sto4)
  direction = "ingress"
  ethertype = "IPv4"
  protocol = var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-sto4))][keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-sto4))])[0]]
  port_range_min = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-sto4))])[0]
  port_range_max = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-sto4))])[0]
  provider = openstack.dco
  remote_ip_prefix = join("/", [ resource.openstack_compute_instance_v2.controller-nodes-sto4[count.index % length(resource.openstack_compute_instance_v2.controller-nodes-sto4)].access_ip_v4, "32" ])
  security_group_id = openstack_networking_secgroup_v2.microk8s-dco.id
}

resource "openstack_networking_secgroup_rule_v2" "microk8s_controller_rule_v6_sto4_to_dco" {
  count = length(var.k8sports) * length(resource.openstack_compute_instance_v2.controller-nodes-sto4)
  direction = "ingress"
  ethertype = "IPv6"
  protocol = var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-sto4))][keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-sto4))])[0]]
  port_range_min = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-sto4))])[0]
  port_range_max = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-sto4))])[0]
  provider = openstack.dco
  remote_ip_prefix = join("/",[ replace(resource.openstack_compute_instance_v2.controller-nodes-sto4[count.index % length(resource.openstack_compute_instance_v2.controller-nodes-sto4)].access_ip_v6, "/[\\[\\]']/",""), "128"])
  security_group_id = openstack_networking_secgroup_v2.microk8s-dco.id
}

# Workers
resource "openstack_networking_secgroup_rule_v2" "microk8s_worker_rule_v4_sto4_to_dco" {
  count = length(var.k8sports) * length(resource.openstack_compute_instance_v2.worker-nodes-sto4)
  direction = "ingress"
  ethertype = "IPv4"
  protocol = var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-sto4))][keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-sto4))])[0]]
  port_range_min = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-sto4))])[0]
  port_range_max = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-sto4))])[0]
  provider = openstack.dco
  remote_ip_prefix = join("/", [ resource.openstack_compute_instance_v2.worker-nodes-sto4[count.index % length(resource.openstack_compute_instance_v2.worker-nodes-sto4)].access_ip_v4, "32" ])
  security_group_id = openstack_networking_secgroup_v2.microk8s-dco.id
}

resource "openstack_networking_secgroup_rule_v2" "microk8s_worker_rule_v6_sto4_to_dco" {
  count = length(var.k8sports) * length(resource.openstack_compute_instance_v2.worker-nodes-sto4)
  direction = "ingress"
  ethertype = "IPv6"
  protocol = var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-sto4))][keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-sto4))])[0]]
  port_range_min = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-sto4))])[0]
  port_range_max = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-sto4))])[0]
  provider = openstack.dco
  remote_ip_prefix = join("/",[ replace(resource.openstack_compute_instance_v2.worker-nodes-sto4[count.index % length(resource.openstack_compute_instance_v2.worker-nodes-sto4)].access_ip_v6, "/[\\[\\]']/",""), "128"])
  security_group_id = openstack_networking_secgroup_v2.microk8s-dco.id
}

#
# Security group rules for ssh-from-jump-hosts
#

resource "openstack_networking_secgroup_rule_v2" "ssh-from-jumphosts-v4rules-dco" {
  count = length(var.jumphostv4-ips)
  direction = "ingress"
  ethertype = "IPv4"
  protocol = "tcp"
  port_range_min = "22"
  port_range_max = "22"
  provider = openstack.dco
  remote_ip_prefix = "${var.jumphostv4-ips[count.index]}/32"
  security_group_id = openstack_networking_secgroup_v2.ssh-from-jump-hosts-dco.id
}

resource "openstack_networking_secgroup_rule_v2" "ssh-from-jumphosts-v6rules-dco" {
  count = length(var.jumphostv6-ips)
  direction = "ingress"
  ethertype = "IPv6"
  protocol = "tcp"
  port_range_min = "22"
  port_range_max = "22"
  provider = openstack.dco
  remote_ip_prefix = "${var.jumphostv6-ips[count.index]}/128"
  security_group_id = openstack_networking_secgroup_v2.ssh-from-jump-hosts-dco.id
}
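The microk8s rules above derive both the protocol and the port range from var.k8sports, and the cross-datacenter rules flatten ports x remote nodes into a single count, then use floor() and % to pick the port entry and the remote node for each rule. A minimal sketch of the list-of-single-entry-maps shape this indexing assumes; the example ports and protocols are illustrative, not taken from the repository:

# Assumed shape of var.k8sports (not part of this change): one map per rule,
# keyed by port number, valued by protocol.
variable "k8sports" {
  type = list(map(string))
  default = [
    { "16443" = "tcp" }, # hypothetical example entries
    { "10250" = "tcp" },
    { "4789" = "udp" },
  ]
}

# For a cross-DC rule with 3 ports and 2 remote nodes, count = 6; at count.index = 4:
#   floor(4 / 2) = 2 -> third port entry ("4789" = "udp") sets protocol and port range
#   4 % 2 = 0        -> first remote node's address becomes remote_ip_prefix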
IaC-prod/securitygroups-k8s-external.tf (new file, 125 lines added)
@@ -0,0 +1,125 @@
# Security groups for external access to k8s control nodes in dco.
resource "openstack_networking_secgroup_v2" "k8s-external-control-dco" {
  name = "k8s-external"
  description = "External ingress traffic to k8s control nodes."
  provider = openstack.dco
}

# Security groups for external access to k8s control nodes in sto3.
resource "openstack_networking_secgroup_v2" "k8s-external-control-sto3" {
  name = "k8s-external"
  description = "External ingress traffic to k8s control nodes."
  provider = openstack.sto3
}
# Security groups for external access to k8s control nodes in sto4.
resource "openstack_networking_secgroup_v2" "k8s-external-control-sto4" {
  name = "k8s-external"
  description = "External ingress traffic to k8s control nodes."
  provider = openstack.sto4
}

# Rules dco
resource "openstack_networking_secgroup_rule_v2" "k8s_external_ingress_control_rule1_v4_dco" {
  direction = "ingress"
  ethertype = "IPv4"
  protocol = "tcp"
  port_range_min = "16443"
  port_range_max = "16443"
  provider = openstack.dco
  remote_ip_prefix = "89.47.191.43/32"
  security_group_id = openstack_networking_secgroup_v2.k8s-external-control-dco.id
}

# Rules sto3
resource "openstack_networking_secgroup_rule_v2" "k8s_external_ingress_control_rule1_v4_sto3" {
  direction = "ingress"
  ethertype = "IPv4"
  protocol = "tcp"
  port_range_min = "16443"
  port_range_max = "16443"
  provider = openstack.sto3
  remote_ip_prefix = "89.47.191.43/32"
  security_group_id = openstack_networking_secgroup_v2.k8s-external-control-sto3.id
}

# Rules sto4
resource "openstack_networking_secgroup_rule_v2" "k8s_external_ingress_control_rule1_v4_sto4" {
  direction = "ingress"
  ethertype = "IPv4"
  protocol = "tcp"
  port_range_min = "16443"
  port_range_max = "16443"
  provider = openstack.sto4
  remote_ip_prefix = "89.47.191.43/32"
  security_group_id = openstack_networking_secgroup_v2.k8s-external-control-sto4.id
}


# Security groups for external access to k8s worker nodes in dco.
resource "openstack_networking_secgroup_v2" "k8s-external-worker-dco" {
  name = "k8s-external-worker"
  description = "External ingress traffic to k8s worker nodes."
  provider = openstack.dco
}

# Security groups for external access to k8s worker nodes in sto3.
resource "openstack_networking_secgroup_v2" "k8s-external-worker-sto3" {
  name = "k8s-external-worker"
  description = "External ingress traffic to k8s worker nodes."
  provider = openstack.sto3
}
# Security groups for external access to k8s worker nodes in sto4.
resource "openstack_networking_secgroup_v2" "k8s-external-worker-sto4" {
  name = "k8s-external-worker"
  description = "External ingress traffic to k8s worker nodes."
  provider = openstack.sto4
}

# Rules dco
resource "openstack_networking_secgroup_rule_v2" "k8s_external_ingress_worker_rule1_v4_dco" {
  direction = "ingress"
  ethertype = "IPv4"
  protocol = "tcp"
  port_range_min = "443"
  port_range_max = "443"
  provider = openstack.dco
  remote_ip_prefix = "89.47.191.43/32"
  security_group_id = openstack_networking_secgroup_v2.k8s-external-worker-dco.id
}

# Rules sto3
resource "openstack_networking_secgroup_rule_v2" "k8s_external_ingress_worker_rule1_v4_sto3" {
  direction = "ingress"
  ethertype = "IPv4"
  protocol = "tcp"
  port_range_min = "443"
  port_range_max = "443"
  provider = openstack.sto3
  remote_ip_prefix = "89.47.191.43/32"
  security_group_id = openstack_networking_secgroup_v2.k8s-external-worker-sto3.id
}

# Rules sto4
resource "openstack_networking_secgroup_rule_v2" "k8s_external_ingress_worker_rule1_v4_sto4" {
  direction = "ingress"
  ethertype = "IPv4"
  protocol = "tcp"
  port_range_min = "443"
  port_range_max = "443"
  provider = openstack.sto4
  remote_ip_prefix = "89.47.191.43/32"
  security_group_id = openstack_networking_secgroup_v2.k8s-external-worker-sto4.id
}

# Rules dco
resource "openstack_networking_secgroup_rule_v2" "k8s_external_ingress_worker_rule2_v4_dco" {
  direction = "ingress"
  ethertype = "IPv4"
  protocol = "tcp"
  port_range_min = "443"
  port_range_max = "443"
  provider = openstack.dco
  remote_ip_prefix = "0.0.0.0/0"
  security_group_id = openstack_networking_secgroup_v2.k8s-external-worker-dco.id
}
IaC-prod/securitygroups-k8s-sto3.tf (new file, 177 lines added)
@@ -0,0 +1,177 @@

# Security groups sto3
resource "openstack_networking_secgroup_v2" "microk8s-sto3" {
  name = "microk8s"
  description = "Traffic to allow between microk8s hosts"
  provider = openstack.sto3
}

resource "openstack_networking_secgroup_v2" "ssh-from-jump-hosts-sto3" {
  name = "ssh-from-jumphosts"
  description = "Allow ssh traffic from sunet jumphosts."
  provider = openstack.sto3
}

#
# Security group rules for microk8s
#
resource "openstack_networking_secgroup_rule_v2" "microk8s_rule_v4_sto3" {
  count = length(var.k8sports)
  direction = "ingress"
  ethertype = "IPv4"
  protocol = var.k8sports[count.index][keys(var.k8sports[count.index])[0]]
  port_range_min = keys(var.k8sports[count.index])[0]
  port_range_max = keys(var.k8sports[count.index])[0]
  provider = openstack.sto3
  remote_group_id = openstack_networking_secgroup_v2.microk8s-sto3.id
  security_group_id = openstack_networking_secgroup_v2.microk8s-sto3.id
}

resource "openstack_networking_secgroup_rule_v2" "microk8s_rule_v6_sto3" {
  count = length(var.k8sports)
  direction = "ingress"
  ethertype = "IPv6"
  protocol = var.k8sports[count.index][keys(var.k8sports[count.index])[0]]
  port_range_min = keys(var.k8sports[count.index])[0]
  port_range_max = keys(var.k8sports[count.index])[0]
  provider = openstack.sto3
  remote_group_id = openstack_networking_secgroup_v2.microk8s-sto3.id
  security_group_id = openstack_networking_secgroup_v2.microk8s-sto3.id
}

#
# From DCO to STO3
#

resource "openstack_networking_secgroup_rule_v2" "microk8s_controller_rule_v4_dco_to_sto3" {
  count = length(var.k8sports) * length(resource.openstack_compute_instance_v2.controller-nodes-dco)
  direction = "ingress"
  ethertype = "IPv4"
  protocol = var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-dco))][keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-dco))])[0]]
  port_range_min = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-dco))])[0]
  port_range_max = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-dco))])[0]
  provider = openstack.sto3
  remote_ip_prefix = join("/", [ resource.openstack_compute_instance_v2.controller-nodes-dco[count.index % length(resource.openstack_compute_instance_v2.controller-nodes-dco)].access_ip_v4, "32" ])
  security_group_id = openstack_networking_secgroup_v2.microk8s-sto3.id
}

resource "openstack_networking_secgroup_rule_v2" "microk8s_controller_rule_v6_dco_to_sto3" {
  count = length(var.k8sports) * length(resource.openstack_compute_instance_v2.controller-nodes-dco)
  direction = "ingress"
  ethertype = "IPv6"
  protocol = var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-dco))][keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-dco))])[0]]
  port_range_min = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-dco))])[0]
  port_range_max = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-dco))])[0]
  provider = openstack.sto3
  remote_ip_prefix = join("/",[ replace(resource.openstack_compute_instance_v2.controller-nodes-dco[count.index % length(resource.openstack_compute_instance_v2.controller-nodes-dco)].access_ip_v6, "/[\\[\\]']/",""), "128"])
  security_group_id = openstack_networking_secgroup_v2.microk8s-sto3.id
}

# Worker nodes
resource "openstack_networking_secgroup_rule_v2" "microk8s_worker_rule_v4_dco_to_sto3" {
  count = length(var.k8sports) * length(resource.openstack_compute_instance_v2.worker-nodes-dco)
  direction = "ingress"
  ethertype = "IPv4"
  protocol = var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-dco))][keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-dco))])[0]]
  port_range_min = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-dco))])[0]
  port_range_max = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-dco))])[0]
  provider = openstack.sto3
  remote_ip_prefix = join("/", [ resource.openstack_compute_instance_v2.worker-nodes-dco[count.index % length(resource.openstack_compute_instance_v2.worker-nodes-dco)].access_ip_v4, "32" ])
  security_group_id = openstack_networking_secgroup_v2.microk8s-sto3.id
}

resource "openstack_networking_secgroup_rule_v2" "microk8s_worker_rule_v6_dco_to_sto3" {
  count = length(var.k8sports) * length(resource.openstack_compute_instance_v2.worker-nodes-dco)
  direction = "ingress"
  ethertype = "IPv6"
  protocol = var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-dco))][keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-dco))])[0]]
  port_range_min = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-dco))])[0]
  port_range_max = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-dco))])[0]
  provider = openstack.sto3
  remote_ip_prefix = join("/",[ replace(resource.openstack_compute_instance_v2.worker-nodes-dco[count.index % length(resource.openstack_compute_instance_v2.worker-nodes-dco)].access_ip_v6, "/[\\[\\]']/",""), "128"])
  security_group_id = openstack_networking_secgroup_v2.microk8s-sto3.id
}

#
# From STO4 to STO3
#

# Controllers
resource "openstack_networking_secgroup_rule_v2" "microk8s_controller_rule_v4_sto4_to_sto3" {
  count = length(var.k8sports) * length(resource.openstack_compute_instance_v2.controller-nodes-sto4)
  direction = "ingress"
  ethertype = "IPv4"
  protocol = var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-sto4))][keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-sto4))])[0]]
  port_range_min = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-sto4))])[0]
  port_range_max = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-sto4))])[0]
  provider = openstack.sto3
  remote_ip_prefix = join("/", [ resource.openstack_compute_instance_v2.controller-nodes-sto4[count.index % length(resource.openstack_compute_instance_v2.controller-nodes-sto4)].access_ip_v4, "32" ])
  security_group_id = openstack_networking_secgroup_v2.microk8s-sto3.id
}

resource "openstack_networking_secgroup_rule_v2" "microk8s_controller_rule_v6_sto4_to_sto3" {
  count = length(var.k8sports) * length(resource.openstack_compute_instance_v2.controller-nodes-sto4)
  direction = "ingress"
  ethertype = "IPv6"
  protocol = var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-sto4))][keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-sto4))])[0]]
  port_range_min = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-sto4))])[0]
  port_range_max = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-sto4))])[0]
  provider = openstack.sto3
  remote_ip_prefix = join("/",[ replace(resource.openstack_compute_instance_v2.controller-nodes-sto4[count.index % length(resource.openstack_compute_instance_v2.controller-nodes-sto4)].access_ip_v6, "/[\\[\\]']/",""), "128"])
  security_group_id = openstack_networking_secgroup_v2.microk8s-sto3.id
}


# Workers
resource "openstack_networking_secgroup_rule_v2" "microk8s_worker_rule_v4_sto4_to_sto3" {
  count = length(var.k8sports) * length(resource.openstack_compute_instance_v2.worker-nodes-sto4)
  direction = "ingress"
  ethertype = "IPv4"
  protocol = var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-sto4))][keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-sto4))])[0]]
  port_range_min = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-sto4))])[0]
  port_range_max = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-sto4))])[0]
  provider = openstack.sto3
  remote_ip_prefix = join("/", [ resource.openstack_compute_instance_v2.worker-nodes-sto4[count.index % length(resource.openstack_compute_instance_v2.worker-nodes-sto4)].access_ip_v4, "32" ])
  security_group_id = openstack_networking_secgroup_v2.microk8s-sto3.id
}

resource "openstack_networking_secgroup_rule_v2" "microk8s_worker_rule_v6_sto4_to_sto3" {
  count = length(var.k8sports) * length(resource.openstack_compute_instance_v2.worker-nodes-sto4)
  direction = "ingress"
  ethertype = "IPv6"
  protocol = var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-sto4))][keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-sto4))])[0]]
  port_range_min = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-sto4))])[0]
  port_range_max = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-sto4))])[0]
  provider = openstack.sto3
  remote_ip_prefix = join("/",[ replace(resource.openstack_compute_instance_v2.worker-nodes-sto4[count.index % length(resource.openstack_compute_instance_v2.worker-nodes-sto4)].access_ip_v6, "/[\\[\\]']/",""), "128"])
  security_group_id = openstack_networking_secgroup_v2.microk8s-sto3.id
}

#
# Security group rules for ssh-from-jump-hosts
#

resource "openstack_networking_secgroup_rule_v2" "ssh-from-jumphosts-v4rules-sto3" {
  count = length(var.jumphostv4-ips)
  direction = "ingress"
  ethertype = "IPv4"
  protocol = "tcp"
  port_range_min = "22"
  port_range_max = "22"
  provider = openstack.sto3
  remote_ip_prefix = "${var.jumphostv4-ips[count.index]}/32"
  security_group_id = openstack_networking_secgroup_v2.ssh-from-jump-hosts-sto3.id
}

resource "openstack_networking_secgroup_rule_v2" "ssh-from-jumphosts-v6rules-sto3" {
  count = length(var.jumphostv6-ips)
  direction = "ingress"
  ethertype = "IPv6"
  protocol = "tcp"
  port_range_min = "22"
  port_range_max = "22"
  provider = openstack.sto3
  remote_ip_prefix = "${var.jumphostv6-ips[count.index]}/128"
  security_group_id = openstack_networking_secgroup_v2.ssh-from-jump-hosts-sto3.id
}
177
IaC-prod/securitygroups-k8s-sto4.tf
Normal file
@ -0,0 +1,177 @@
# Security groups sto4
resource "openstack_networking_secgroup_v2" "microk8s-sto4" {
  name = "microk8s"
  description = "Traffic to allow between microk8s hosts"
  provider = openstack.sto4
}

resource "openstack_networking_secgroup_v2" "ssh-from-jump-hosts-sto4" {
  name = "ssh-from-jumphosts"
  description = "Allow ssh traffic from sunet jumphosts."
  provider = openstack.sto4
}

#
# Security group rules for microk8s
#
resource "openstack_networking_secgroup_rule_v2" "microk8s_rule_v4_sto4" {
  count = length(var.k8sports)
  direction = "ingress"
  ethertype = "IPv4"
  protocol = var.k8sports[count.index][keys(var.k8sports[count.index])[0]]
  port_range_min = keys(var.k8sports[count.index])[0]
  port_range_max = keys(var.k8sports[count.index])[0]
  provider = openstack.sto4
  remote_group_id = openstack_networking_secgroup_v2.microk8s-sto4.id
  security_group_id = openstack_networking_secgroup_v2.microk8s-sto4.id
}

resource "openstack_networking_secgroup_rule_v2" "microk8s_rule_v6_sto4" {
  count = length(var.k8sports)
  direction = "ingress"
  ethertype = "IPv6"
  protocol = var.k8sports[count.index][keys(var.k8sports[count.index])[0]]
  port_range_min = keys(var.k8sports[count.index])[0]
  port_range_max = keys(var.k8sports[count.index])[0]
  provider = openstack.sto4
  remote_group_id = openstack_networking_secgroup_v2.microk8s-sto4.id
  security_group_id = openstack_networking_secgroup_v2.microk8s-sto4.id
}

#
# DCO to STO4
#

# Controllers
resource "openstack_networking_secgroup_rule_v2" "microk8s_controller_rule_v4_dco_to_sto4" {
  count = length(var.k8sports) * length(resource.openstack_compute_instance_v2.controller-nodes-dco)
  direction = "ingress"
  ethertype = "IPv4"
  protocol = var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-dco))][keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-dco))])[0]]
  port_range_min = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-dco))])[0]
  port_range_max = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-dco))])[0]
  provider = openstack.sto4
  remote_ip_prefix = join("/", [ resource.openstack_compute_instance_v2.controller-nodes-dco[count.index % length(resource.openstack_compute_instance_v2.controller-nodes-dco)].access_ip_v4, "32" ])
  security_group_id = openstack_networking_secgroup_v2.microk8s-sto4.id
}

resource "openstack_networking_secgroup_rule_v2" "microk8s_controller_rule_v6_dco_to_sto4" {
  count = length(var.k8sports) * length(resource.openstack_compute_instance_v2.controller-nodes-dco)
  direction = "ingress"
  ethertype = "IPv6"
  protocol = var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-dco))][keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-dco))])[0]]
  port_range_min = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-dco))])[0]
  port_range_max = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-dco))])[0]
  provider = openstack.sto4
  remote_ip_prefix = join("/",[ replace(resource.openstack_compute_instance_v2.controller-nodes-dco[count.index % length(resource.openstack_compute_instance_v2.controller-nodes-dco)].access_ip_v6, "/[\\[\\]']/",""), "128"])
  security_group_id = openstack_networking_secgroup_v2.microk8s-sto4.id
}

# Workers
resource "openstack_networking_secgroup_rule_v2" "microk8s_worker_rule_v4_dco_to_sto4" {
  count = length(var.k8sports) * length(resource.openstack_compute_instance_v2.worker-nodes-dco)
  direction = "ingress"
  ethertype = "IPv4"
  protocol = var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-dco))][keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-dco))])[0]]
  port_range_min = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-dco))])[0]
  port_range_max = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-dco))])[0]
  provider = openstack.sto4
  remote_ip_prefix = join("/", [ resource.openstack_compute_instance_v2.worker-nodes-dco[count.index % length(resource.openstack_compute_instance_v2.worker-nodes-dco)].access_ip_v4, "32" ])
  security_group_id = openstack_networking_secgroup_v2.microk8s-sto4.id
}

resource "openstack_networking_secgroup_rule_v2" "microk8s_worker_rule_v6_dco_to_sto4" {
  count = length(var.k8sports) * length(resource.openstack_compute_instance_v2.worker-nodes-dco)
  direction = "ingress"
  ethertype = "IPv6"
  protocol = var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-dco))][keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-dco))])[0]]
  port_range_min = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-dco))])[0]
  port_range_max = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-dco))])[0]
  provider = openstack.sto4
  remote_ip_prefix = join("/",[ replace(resource.openstack_compute_instance_v2.worker-nodes-dco[count.index % length(resource.openstack_compute_instance_v2.worker-nodes-dco)].access_ip_v6, "/[\\[\\]']/",""), "128"])
  security_group_id = openstack_networking_secgroup_v2.microk8s-sto4.id
}

#
# From STO3 to STO4
#

# Control nodes
resource "openstack_networking_secgroup_rule_v2" "microk8s_controller_rule_v4_sto3_to_sto4" {
  count = length(var.k8sports) * length(resource.openstack_compute_instance_v2.controller-nodes-sto3)
  direction = "ingress"
  ethertype = "IPv4"
  protocol = var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-sto3))][keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-sto3))])[0]]
  port_range_min = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-sto3))])[0]
  port_range_max = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-sto3))])[0]
  provider = openstack.sto4
  remote_ip_prefix = join("/", [ resource.openstack_compute_instance_v2.controller-nodes-sto3[count.index % length(resource.openstack_compute_instance_v2.controller-nodes-sto3)].access_ip_v4, "32" ])
  security_group_id = openstack_networking_secgroup_v2.microk8s-sto4.id
}

resource "openstack_networking_secgroup_rule_v2" "microk8s_controller_rule_v6_sto3_to_sto4" {
  count = length(var.k8sports) * length(resource.openstack_compute_instance_v2.controller-nodes-sto3)
  direction = "ingress"
  ethertype = "IPv6"
  protocol = var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-sto3))][keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-sto3))])[0]]
  port_range_min = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-sto3))])[0]
  port_range_max = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.controller-nodes-sto3))])[0]
  provider = openstack.sto4
  remote_ip_prefix = join("/",[ replace(resource.openstack_compute_instance_v2.controller-nodes-sto3[count.index % length(resource.openstack_compute_instance_v2.controller-nodes-sto3)].access_ip_v6, "/[\\[\\]']/",""), "128"])
  security_group_id = openstack_networking_secgroup_v2.microk8s-sto4.id
}

# Worker nodes
resource "openstack_networking_secgroup_rule_v2" "microk8s_worker_rule_v4_sto3_to_sto4" {
  count = length(var.k8sports) * length(resource.openstack_compute_instance_v2.worker-nodes-sto3)
  direction = "ingress"
  ethertype = "IPv4"
  protocol = var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-sto3))][keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-sto3))])[0]]
  port_range_min = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-sto3))])[0]
  port_range_max = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-sto3))])[0]
  provider = openstack.sto4
  remote_ip_prefix = join("/", [ resource.openstack_compute_instance_v2.worker-nodes-sto3[count.index % length(resource.openstack_compute_instance_v2.worker-nodes-sto3)].access_ip_v4, "32" ])
  security_group_id = openstack_networking_secgroup_v2.microk8s-sto4.id
}

resource "openstack_networking_secgroup_rule_v2" "microk8s_worker_rule_v6_sto3_to_sto4" {
  count = length(var.k8sports) * length(resource.openstack_compute_instance_v2.worker-nodes-sto3)
  direction = "ingress"
  ethertype = "IPv6"
  protocol = var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-sto3))][keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-sto3))])[0]]
  port_range_min = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-sto3))])[0]
  port_range_max = keys(var.k8sports[floor(count.index/length(resource.openstack_compute_instance_v2.worker-nodes-sto3))])[0]
  provider = openstack.sto4
  remote_ip_prefix = join("/",[ replace(resource.openstack_compute_instance_v2.worker-nodes-sto3[count.index % length(resource.openstack_compute_instance_v2.worker-nodes-sto3)].access_ip_v6, "/[\\[\\]']/",""), "128"])
  security_group_id = openstack_networking_secgroup_v2.microk8s-sto4.id
}

#
# Security group rules for ssh-from-jump-hosts
#

resource "openstack_networking_secgroup_rule_v2" "ssh-from-jumphosts-v4rules-sto4" {
  count = length(var.jumphostv4-ips)
  direction = "ingress"
  ethertype = "IPv4"
  protocol = "tcp"
  port_range_min = "22"
  port_range_max = "22"
  provider = openstack.sto4
  remote_ip_prefix = "${var.jumphostv4-ips[count.index]}/32"
  security_group_id = openstack_networking_secgroup_v2.ssh-from-jump-hosts-sto4.id
}

resource "openstack_networking_secgroup_rule_v2" "ssh-from-jumphosts-v6rules-sto4" {
  count = length(var.jumphostv6-ips)
  direction = "ingress"
  ethertype = "IPv6"
  protocol = "tcp"
  port_range_min = "22"
  port_range_max = "22"
  provider = openstack.sto4
  remote_ip_prefix = "${var.jumphostv6-ips[count.index]}/128"
  security_group_id = openstack_networking_secgroup_v2.ssh-from-jump-hosts-sto4.id
}
109
IaC-prod/securitygroups-lb.tf
Normal file
@ -0,0 +1,109 @@
# Security groups lb-frontend
resource "openstack_networking_secgroup_v2" "lb-dco" {
  name = "lb-frontend"
  description = "Ingress lb traffic to allow."
  provider = openstack.dco
}

resource "openstack_networking_secgroup_rule_v2" "lb_ingress_rule_v4_dco" {
  direction = "ingress"
  ethertype = "IPv4"
  protocol = "tcp"
  port_range_min = "8443"
  port_range_max = "8443"
  provider = openstack.dco
  remote_ip_prefix = "87.251.31.118/32"
  security_group_id = openstack_networking_secgroup_v2.lb-dco.id
}

resource "openstack_networking_secgroup_rule_v2" "lb_ingress_rule2_v4_dco" {
  direction = "ingress"
  ethertype = "IPv4"
  protocol = "tcp"
  port_range_min = "16443"
  port_range_max = "16443"
  provider = openstack.dco
  remote_ip_prefix = "87.251.31.118/32"
  security_group_id = openstack_networking_secgroup_v2.lb-dco.id
}

# From mgmt1

resource "openstack_networking_secgroup_rule_v2" "lb_ingress_rule3_v4_dco" {
  direction = "ingress"
  ethertype = "IPv4"
  protocol = "tcp"
  port_range_min = "16443"
  port_range_max = "16443"
  provider = openstack.dco
  remote_ip_prefix = "89.47.191.66/32"
  security_group_id = openstack_networking_secgroup_v2.lb-dco.id
}

resource "openstack_networking_secgroup_rule_v2" "lb_ingress_rule4_v4_dco" {
  direction = "ingress"
  ethertype = "IPv4"
  protocol = "tcp"
  port_range_min = "80"
  port_range_max = "80"
  provider = openstack.dco
  remote_ip_prefix = "89.47.191.66/32"
  security_group_id = openstack_networking_secgroup_v2.lb-dco.id
}

resource "openstack_networking_secgroup_rule_v2" "lb_ingress_rule5_v4_dco" {
  direction = "ingress"
  ethertype = "IPv4"
  protocol = "tcp"
  port_range_min = "443"
  port_range_max = "443"
  provider = openstack.dco
  remote_ip_prefix = "89.47.191.66/32"
  security_group_id = openstack_networking_secgroup_v2.lb-dco.id
}

resource "openstack_networking_secgroup_rule_v2" "lb_ingress_rule6_v4_dco" {
  direction = "ingress"
  ethertype = "IPv4"
  protocol = "tcp"
  port_range_min = "8443"
  port_range_max = "8443"
  provider = openstack.dco
  remote_ip_prefix = "89.47.191.66/32"
  security_group_id = openstack_networking_secgroup_v2.lb-dco.id
}

resource "openstack_networking_secgroup_rule_v2" "lb_ingress_rule7_v4_dco" {
  direction = "ingress"
  ethertype = "IPv4"
  protocol = "tcp"
  port_range_min = "8080"
  port_range_max = "8080"
  provider = openstack.dco
  remote_ip_prefix = "89.47.191.66/32"
  security_group_id = openstack_networking_secgroup_v2.lb-dco.id
}

resource "openstack_networking_secgroup_rule_v2" "lb_ingress_rule8_v4_dco" {
  direction = "ingress"
  ethertype = "IPv4"
  protocol = "tcp"
  port_range_min = "16443"
  port_range_max = "16443"
  provider = openstack.dco
  remote_ip_prefix = "89.47.184.88/32"
  security_group_id = openstack_networking_secgroup_v2.lb-dco.id
}

resource "openstack_networking_secgroup_rule_v2" "lb_ingress_rule9_v4_dco" {
  direction = "ingress"
  ethertype = "IPv4"
  protocol = "tcp"
  port_range_min = "16443"
  port_range_max = "16443"
  provider = openstack.dco
  remote_ip_prefix = "130.242.121.23/32"
  security_group_id = openstack_networking_secgroup_v2.lb-dco.id
}
@ -1,194 +0,0 @@
|
|||
# Datasource of sunet ssh-from-jumphost security group.
|
||||
data "openstack_networking_secgroup_v2" "sshfromjumphosts" {
|
||||
name = "ssh-from-jumphost"
|
||||
}
|
||||
|
||||
data "openstack_networking_secgroup_v2" "allegress" {
|
||||
name = "allegress"
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_v2" "microk8s" {
|
||||
name = "microk8s"
|
||||
description = "Traffic to allow between microk8s hosts"
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "microk8s_rule1" {
|
||||
direction = "ingress"
|
||||
ethertype = "IPv4"
|
||||
protocol = "tcp"
|
||||
port_range_min = 16443
|
||||
port_range_max = 16443
|
||||
remote_group_id = openstack_networking_secgroup_v2.microk8s.id
|
||||
security_group_id = openstack_networking_secgroup_v2.microk8s.id
|
||||
}
|
||||
resource "openstack_networking_secgroup_rule_v2" "microk8s_rule2" {
|
||||
direction = "ingress"
|
||||
ethertype = "IPv6"
|
||||
protocol = "tcp"
|
||||
port_range_min = 16443
|
||||
port_range_max = 16443
|
||||
remote_group_id = openstack_networking_secgroup_v2.microk8s.id
|
||||
security_group_id = openstack_networking_secgroup_v2.microk8s.id
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "microk8s_rule3" {
|
||||
direction = "ingress"
|
||||
ethertype = "IPv4"
|
||||
protocol = "tcp"
|
||||
port_range_min = 10250
|
||||
port_range_max = 10250
|
||||
remote_group_id = openstack_networking_secgroup_v2.microk8s.id
|
||||
security_group_id = openstack_networking_secgroup_v2.microk8s.id
|
||||
}
|
||||
resource "openstack_networking_secgroup_rule_v2" "microk8s_rule4" {
|
||||
direction = "ingress"
|
||||
ethertype = "IPv6"
|
||||
protocol = "tcp"
|
||||
port_range_min = 10250
|
||||
port_range_max = 10250
|
||||
remote_group_id = openstack_networking_secgroup_v2.microk8s.id
|
||||
security_group_id = openstack_networking_secgroup_v2.microk8s.id
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "microk8s_rule5" {
|
||||
direction = "ingress"
|
||||
ethertype = "IPv4"
|
||||
protocol = "tcp"
|
||||
port_range_min = 10255
|
||||
port_range_max = 10255
|
||||
remote_group_id = openstack_networking_secgroup_v2.microk8s.id
|
||||
security_group_id = openstack_networking_secgroup_v2.microk8s.id
|
||||
}
|
||||
resource "openstack_networking_secgroup_rule_v2" "microk8s_rule6" {
|
||||
direction = "ingress"
|
||||
ethertype = "IPv6"
|
||||
protocol = "tcp"
|
||||
port_range_min = 10255
|
||||
port_range_max = 10255
|
||||
remote_group_id = openstack_networking_secgroup_v2.microk8s.id
|
||||
security_group_id = openstack_networking_secgroup_v2.microk8s.id
|
||||
}
|
||||
resource "openstack_networking_secgroup_rule_v2" "microk8s_rule7" {
|
||||
direction = "ingress"
|
||||
ethertype = "IPv4"
|
||||
protocol = "tcp"
|
||||
port_range_min = 25000
|
||||
port_range_max = 25000
|
||||
remote_group_id = openstack_networking_secgroup_v2.microk8s.id
|
||||
security_group_id = openstack_networking_secgroup_v2.microk8s.id
|
||||
}
|
||||
resource "openstack_networking_secgroup_rule_v2" "microk8s_rule8" {
|
||||
direction = "ingress"
|
||||
ethertype = "IPv6"
|
||||
protocol = "tcp"
|
||||
port_range_min = 25000
|
||||
port_range_max = 25000
|
||||
remote_group_id = openstack_networking_secgroup_v2.microk8s.id
|
||||
security_group_id = openstack_networking_secgroup_v2.microk8s.id
|
||||
}
|
||||
resource "openstack_networking_secgroup_rule_v2" "microk8s_rule9" {
|
||||
direction = "ingress"
|
||||
ethertype = "IPv4"
|
||||
protocol = "tcp"
|
||||
port_range_min = 12379
|
||||
port_range_max = 12379
|
||||
remote_group_id = openstack_networking_secgroup_v2.microk8s.id
|
||||
security_group_id = openstack_networking_secgroup_v2.microk8s.id
|
||||
}
|
||||
resource "openstack_networking_secgroup_rule_v2" "microk8s_rule10" {
|
||||
direction = "ingress"
|
||||
ethertype = "IPv6"
|
||||
protocol = "tcp"
|
||||
port_range_min = 12379
|
||||
port_range_max = 12379
|
||||
remote_group_id = openstack_networking_secgroup_v2.microk8s.id
|
||||
security_group_id = openstack_networking_secgroup_v2.microk8s.id
|
||||
}
|
||||
resource "openstack_networking_secgroup_rule_v2" "microk8s_rule11" {
|
||||
direction = "ingress"
|
||||
ethertype = "IPv4"
|
||||
protocol = "tcp"
|
||||
port_range_min = 10257
|
||||
port_range_max = 10257
|
||||
remote_group_id = openstack_networking_secgroup_v2.microk8s.id
|
||||
security_group_id = openstack_networking_secgroup_v2.microk8s.id
|
||||
}
|
||||
resource "openstack_networking_secgroup_rule_v2" "microk8s_rule12" {
|
||||
direction = "ingress"
|
||||
ethertype = "IPv6"
|
||||
protocol = "tcp"
|
||||
port_range_min = 10257
|
||||
port_range_max = 10257
|
||||
remote_group_id = openstack_networking_secgroup_v2.microk8s.id
|
||||
security_group_id = openstack_networking_secgroup_v2.microk8s.id
|
||||
}
|
||||
resource "openstack_networking_secgroup_rule_v2" "microk8s_rule13" {
|
||||
direction = "ingress"
|
||||
ethertype = "IPv4"
|
||||
protocol = "tcp"
|
||||
port_range_min = 10259
|
||||
port_range_max = 10259
|
||||
remote_group_id = openstack_networking_secgroup_v2.microk8s.id
|
||||
security_group_id = openstack_networking_secgroup_v2.microk8s.id
|
||||
}
|
||||
resource "openstack_networking_secgroup_rule_v2" "microk8s_rule14" {
|
||||
direction = "ingress"
|
||||
ethertype = "IPv6"
|
||||
protocol = "tcp"
|
||||
port_range_min = 10259
|
||||
port_range_max = 10259
|
||||
remote_group_id = openstack_networking_secgroup_v2.microk8s.id
|
||||
security_group_id = openstack_networking_secgroup_v2.microk8s.id
|
||||
}
|
||||
resource "openstack_networking_secgroup_rule_v2" "microk8s_rule15" {
|
||||
direction = "ingress"
|
||||
ethertype = "IPv4"
|
||||
protocol = "tcp"
|
||||
port_range_min = 19001
|
||||
port_range_max = 19001
|
||||
remote_group_id = openstack_networking_secgroup_v2.microk8s.id
|
||||
security_group_id = openstack_networking_secgroup_v2.microk8s.id
|
||||
}
|
||||
resource "openstack_networking_secgroup_rule_v2" "microk8s_rule16" {
|
||||
direction = "ingress"
|
||||
ethertype = "IPv6"
|
||||
protocol = "tcp"
|
||||
port_range_min = 19001
|
||||
port_range_max = 19001
|
||||
remote_group_id = openstack_networking_secgroup_v2.microk8s.id
|
||||
security_group_id = openstack_networking_secgroup_v2.microk8s.id
|
||||
}
|
||||
resource "openstack_networking_secgroup_rule_v2" "microk8s_rule17" {
|
||||
direction = "ingress"
|
||||
ethertype = "IPv4"
|
||||
protocol = "udp"
|
||||
port_range_min = 4789
|
||||
port_range_max = 4789
|
||||
remote_group_id = openstack_networking_secgroup_v2.microk8s.id
|
||||
security_group_id = openstack_networking_secgroup_v2.microk8s.id
|
||||
}
|
||||
resource "openstack_networking_secgroup_rule_v2" "microk8s_rule18" {
|
||||
direction = "ingress"
|
||||
ethertype = "IPv6"
|
||||
protocol = "udp"
|
||||
port_range_min = 4789
|
||||
port_range_max = 4789
|
||||
remote_group_id = openstack_networking_secgroup_v2.microk8s.id
|
||||
security_group_id = openstack_networking_secgroup_v2.microk8s.id
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_v2" "https" {
|
||||
name = "https"
|
||||
description = "Allow https to ingress controller"
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "https_rule1" {
|
||||
# External traffic
|
||||
direction = "ingress"
|
||||
ethertype = "IPv4"
|
||||
protocol = "tcp"
|
||||
port_range_min = 443
|
||||
port_range_max = 443
|
||||
remote_ip_prefix = "0.0.0.0/0"
|
||||
security_group_id = openstack_networking_secgroup_v2.https.id
|
||||
}
|
11
IaC-prod/servergroups-dco.tf
Normal file
@ -0,0 +1,11 @@
resource "openstack_compute_servergroup_v2" "workers-dco" {
  name = "workers"
  policies = ["anti-affinity"]
  provider = openstack.dco
}
resource "openstack_compute_servergroup_v2" "controllers-dco" {
  name = "controllers"
  policies = ["anti-affinity"]
  provider = openstack.dco
}
11
IaC-prod/servergroups-sto3.tf
Normal file
@ -0,0 +1,11 @@
resource "openstack_compute_servergroup_v2" "workers-sto3" {
  name = "workers"
  policies = ["anti-affinity"]
  provider = openstack.sto3
}
resource "openstack_compute_servergroup_v2" "controllers-sto3" {
  name = "controllers"
  policies = ["anti-affinity"]
  provider = openstack.sto3
}
11
IaC-prod/servergroups-sto4.tf
Normal file
@ -0,0 +1,11 @@
resource "openstack_compute_servergroup_v2" "workers-sto4" {
  name = "workers"
  policies = ["anti-affinity"]
  provider = openstack.sto4
}
resource "openstack_compute_servergroup_v2" "controllers-sto4" {
  name = "controllers"
  policies = ["anti-affinity"]
  provider = openstack.sto4
}
@ -3,16 +3,45 @@ variable "datacenter_name" {
  default = "dco"
}

variable "keyname" {
  type = string
  default = "manderssonpub"
variable "datacenters" {
  type = list(string)
  default = [ "dco", "sto3", "sto4" ]
}

variable "worker_instance_count" {
  default = "3"
# Cloud names in clouds.yaml file
variable "clouddco_name" {
  type = string
  default = "dco-matrixprod"
}
variable "controller_instance_count" {
  default = "3"

variable "cloudsto3_name" {
  type = string
  default = "sto3-matrixprod"
}

variable "cloudsto4_name" {
  type = string
  default = "sto4-matrixprod"
}

variable "keyname" {
  type = string
  default = "pettai-7431497"
}
variable "keynameworkers" {
  type = string
  default = "pettai-7431497"
}

# Replicas per datacenter
variable "workerdcreplicas" {
  default = "2"
}

# Replicas per datacenter
variable "controllerdcreplicas" {
  default = "1"
}

variable "controller_instance_type" {
@ -23,6 +52,14 @@ variable "worker_instance_type" {
  default = "b2.c4r16"
}

variable "lb_instance_type" {
  default = "b2.c2r4"
}

variable "mgmt_instance_type" {
  default = "b2.c2r4"
}

variable "worker_name" {
  default = "k8sw"
}
@ -32,9 +69,30 @@ variable "controller_name" {
}

variable "dns_suffix" {
  default = "matrix.test.sunet.se"
  default = "matrix.sunet.se"
}

variable "cloud_name" {
  default = "dco-matrixtest"
variable "k8sports" {
  default = [
    {"16443" = "tcp"},
    {"10250" = "tcp"},
    {"10255" = "tcp"},
    {"25000" = "tcp"},
    {"12379" = "tcp"},
    {"10257" = "tcp"},
    {"10259" = "tcp"},
    {"19001" = "tcp"},
    {"4789" = "udp"},
    {"51820" = "udp"}
  ]
}

variable jumphostv4-ips {
  type = list(string)
  default = []
}

variable jumphostv6-ips {
  type = list(string)
  default = []
}
|
@ -111,7 +111,7 @@ resource "openstack_networking_secgroup_rule_v2" "k8s_external_ingress_worker_ru
  security_group_id = openstack_networking_secgroup_v2.k8s-external-worker-sto4.id
}

# Rules sto4
# Rules dco
resource "openstack_networking_secgroup_rule_v2" "k8s_external_ingress_worker_rule2_v4_dco" {
  direction = "ingress"
  ethertype = "IPv4"
@ -123,3 +123,15 @@ resource "openstack_networking_secgroup_rule_v2" "k8s_external_ingress_worker_ru
  security_group_id = openstack_networking_secgroup_v2.k8s-external-worker-dco.id
}

# Rules dco
resource "openstack_networking_secgroup_rule_v2" "k8s_external_ingress_worker_rule3_v4_dco" {
  direction = "ingress"
  ethertype = "IPv4"
  protocol = "tcp"
  port_range_min = "80"
  port_range_max = "80"
  provider = openstack.dco
  remote_ip_prefix = "0.0.0.0/0"
  security_group_id = openstack_networking_secgroup_v2.k8s-external-worker-dco.id
}
@ -37,33 +37,6 @@
      - 89.45.236.6 k8sw5
      - 89.46.21.195 k8sw6
    traefik: false
'^k8sc[1-9].matrix.sunet.se$':
  sunet::microk8s::node:
    channel: 1.30/stable
    peers:
      - 89.47.190.40 k8sc1
      - 89.47.190.118 k8sc2
      - 89.47.191.240 k8sc3
      - 89.47.190.86 k8sw1
      - 89.47.190.135 k8sw2
      - 89.47.191.12 k8sw3
  sunet::frontend::register_sites:
    sites:
      'kube-matrix.matrix.sunet.se':
        frontends:
          - 'sthb-lb-1.sunet.se'
          - 'tug-lb-1.sunet.se'
        port: '443'
'^k8sw[1-9].matrix.sunet.se$':
  sunet::microk8s::node:
    channel: 1.30/stable
    peers:
      - 89.47.190.40 k8sc1
      - 89.47.190.118 k8sc2
      - 89.47.191.240 k8sc3
      - 89.47.190.86 k8sw1
      - 89.47.190.135 k8sw2
      - 89.47.191.12 k8sw3
'^lb[1-9]\.matrix\.test\.sunet\.se$':
  matrix::lb:
'^mgmt[1-9]\.matrix\.test\.sunet\.se$':
@ -71,3 +44,34 @@
      rootless: true
      rlusers:
        - matrixinstaller

'^k8sc[1-9].matrix.sunet.se$':
  sunet::microk8s::node:
    channel: 1.31/stable
    peers:
      - 89.47.190.119 k8sc1
      - 89.45.237.43 k8sc2
      - 89.46.21.148 k8sc3
      - 89.47.190.103 k8sw1
      - 89.45.237.161 k8sw2
      - 89.46.20.60 k8sw3
      - 89.47.190.237 k8sw4
      - 89.45.236.55 k8sw5
      - 89.46.20.191 k8sw6
    traefik: false
'^k8sw[1-9].matrix.sunet.se$':
  sunet::microk8s::node:
    channel: 1.31/stable
    peers:
      - 89.47.190.119 k8sc1
      - 89.45.237.43 k8sc2
      - 89.46.21.148 k8sc3
      - 89.47.190.103 k8sw1
      - 89.45.237.161 k8sw2
      - 89.46.20.60 k8sw3
      - 89.47.190.237 k8sw4
      - 89.45.236.55 k8sw5
      - 89.46.20.191 k8sw6
    traefik: false
'^lb[1-9]\.matrix\.sunet\.se$':
  matrix::lb:
@ -20,6 +20,7 @@ if ! test -f "${stamp}" -a -f /usr/bin/puppet; then
      puppet-module-puppetlabs-apt \
      puppet-module-puppetlabs-concat \
      puppet-module-puppetlabs-cron-core \
      puppet-module-puppetlabs-sshkeys-core \
      puppet-module-puppetlabs-stdlib \
      puppet-module-puppetlabs-vcsrepo
6
k8s/cert-manager/README.md
Normal file
@ -0,0 +1,6 @@
# install cert-manager addon
microk8s enable cert-manager
microk8s enable ingress dns
# init the clusterissuer
kubectl apply -f clusterissuer.yaml
kubectl get clusterissuer -o wide
16
k8s/cert-manager/clusterissuer.yaml
Normal file
@ -0,0 +1,16 @@
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt
spec:
  acme:
    email: someemailaddress+element@sunet.se
    server: https://acme-v02.api.letsencrypt.org/directory
    privateKeySecretRef:
      name: lets-encrypt-private-key
    # Add a single challenge solver, HTTP01 using nginx
    solvers:
    - http01:
        ingress:
          class: public
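Once the ClusterIssuer reports Ready, an Ingress can request a certificate from it by referencing the issuer name in an annotation. A minimal sketch for reference only; the hostname, service name, secret name and backend port are placeholders, not taken from this repository:

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: example-ingress          # placeholder name
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt   # issuer defined in clusterissuer.yaml
spec:
  ingressClassName: public       # matches the http01 solver ingress class above
  tls:
    - hosts:
        - example.matrix.sunet.se    # placeholder hostname
      secretName: example-ingress-tls
  rules:
    - host: example.matrix.sunet.se
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: example-service   # placeholder backend service
                port:
                  number: 80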
@ -1,3 +1,7 @@
### Postgres password
To create the postgres password secret you can use the following commands.
kubectl apply -f postgres-namespace.yaml
kubectl apply -f postgres-pvc.yaml
kubectl create secret generic postgres-secret --from-literal=postgres-password=xxXxXxX -n postgres
kubectl apply -f postgres-deployment.yaml
kubectl apply -f postgres-service.yaml
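A consumer in the postgres namespace can then read the password from that secret instead of hard-coding it. A minimal sketch; only the secret name and key come from the command above, the pod name and image are placeholders:

apiVersion: v1
kind: Pod
metadata:
  name: postgres-client      # placeholder name
  namespace: postgres
spec:
  containers:
    - name: client
      image: postgres:16     # placeholder image
      command: ["sleep", "infinity"]
      env:
        - name: PGPASSWORD
          valueFrom:
            secretKeyRef:
              name: postgres-secret      # created by the kubectl create secret command
              key: postgres-password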
@ -1,3 +1,74 @@
### Rook deployment

In the operator.yaml change ROOK_CSI_KUBELET_DIR_PATH to "/var/snap/microk8s/common/var/lib/kubelet"

# initialize rook operator
kubectl create -f crds.yaml -f common.yaml -f operator.yaml

kubectl get pods -n rook-ceph
NAME READY STATUS RESTARTS AGE
rook-ceph-operator-6668b75686-l4zlh 1/1 Running 0 60s

# initialize rook cluster
kubectl create -f cluster-multizone.yaml

It takes quite a while before the multizone cluster is initialized
(there should be around 47 pods...)

NAME READY STATUS RESTARTS AGE
csi-cephfsplugin-6xhjh 2/2 Running 1 (3m42s ago) 4m16s
csi-cephfsplugin-cgmqs 2/2 Running 0 4m16s
csi-cephfsplugin-hs2rx 2/2 Running 1 (3m43s ago) 4m16s
csi-cephfsplugin-km7k6 2/2 Running 0 4m16s
csi-cephfsplugin-ms8c2 2/2 Running 1 (3m42s ago) 4m16s
csi-cephfsplugin-provisioner-dc97f9d65-6tvkn 5/5 Running 2 (3m35s ago) 4m15s
csi-cephfsplugin-provisioner-dc97f9d65-bwdkn 5/5 Running 0 4m15s
csi-cephfsplugin-wlks6 2/2 Running 0 4m16s
csi-rbdplugin-ckgnc 2/2 Running 0 4m18s
csi-rbdplugin-hmfhc 2/2 Running 1 (3m42s ago) 4m18s
csi-rbdplugin-mclsz 2/2 Running 0 4m18s
csi-rbdplugin-nt7rk 2/2 Running 1 (3m42s ago) 4m18s
csi-rbdplugin-provisioner-7f5767b9d5-gvbkr 5/5 Running 0 4m17s
csi-rbdplugin-provisioner-7f5767b9d5-n5mwc 5/5 Running 0 4m17s
csi-rbdplugin-rzk9v 2/2 Running 1 (3m44s ago) 4m18s
csi-rbdplugin-z9dmh 2/2 Running 0 4m18s
rook-ceph-crashcollector-k8sw1-5fd979dcf9-w9g2x 1/1 Running 0 119s
rook-ceph-crashcollector-k8sw2-68f48b45b-dwld5 1/1 Running 0 109s
rook-ceph-crashcollector-k8sw3-7f5d749cbf-kxswk 1/1 Running 0 96s
rook-ceph-crashcollector-k8sw4-84fd486bb6-pfkgm 1/1 Running 0 2m3s
rook-ceph-crashcollector-k8sw5-58c7b74b4c-pdf2j 1/1 Running 0 110s
rook-ceph-crashcollector-k8sw6-578ffc7cfb-bpzgl 1/1 Running 0 2m27s
rook-ceph-exporter-k8sw1-66746d6cf-pljkx 1/1 Running 0 119s
rook-ceph-exporter-k8sw2-6cc5d955d4-k7xx5 1/1 Running 0 104s
rook-ceph-exporter-k8sw3-5d6f7d49b9-rvvbd 1/1 Running 0 96s
rook-ceph-exporter-k8sw4-5bf54d5b86-cn6v7 1/1 Running 0 118s
rook-ceph-exporter-k8sw5-547898b8d7-l7cmc 1/1 Running 0 110s
rook-ceph-exporter-k8sw6-596f7d956d-n426q 1/1 Running 0 2m27s
rook-ceph-mgr-a-6cfc895565-h9qfg 2/2 Running 0 2m37s
rook-ceph-mgr-b-85fc4df4b5-fv6z9 2/2 Running 0 2m37s
rook-ceph-mon-a-868c8f5cff-2tk7l 1/1 Running 0 4m10s
rook-ceph-mon-b-6f9776cf9b-w4dtq 1/1 Running 0 3m12s
rook-ceph-mon-c-8457f5cc77-8mbpj 1/1 Running 0 2m57s
rook-ceph-operator-6668b75686-l4zlh 1/1 Running 0 7m36s
rook-ceph-osd-0-79d7b6c764-shwtd 1/1 Running 0 2m4s
rook-ceph-osd-1-65d99447b5-bnhln 1/1 Running 0 119s
rook-ceph-osd-2-69dbd98748-5vrwn 1/1 Running 0 114s
rook-ceph-osd-3-596b58cf7d-j2qgj 1/1 Running 0 115s
rook-ceph-osd-4-858bc8df6d-wrlsx 1/1 Running 0 2m
rook-ceph-osd-5-7f6fbfd96-65gpl 1/1 Running 0 96s
rook-ceph-osd-prepare-k8sw1-5pgh9 0/1 Completed 0 2m14s
rook-ceph-osd-prepare-k8sw2-6sdrc 0/1 Completed 0 2m14s
rook-ceph-osd-prepare-k8sw3-mfzsh 0/1 Completed 0 2m13s
rook-ceph-osd-prepare-k8sw4-dn8gn 0/1 Completed 0 2m13s
rook-ceph-osd-prepare-k8sw5-lj5tj 0/1 Completed 0 2m13s
rook-ceph-osd-prepare-k8sw6-8hw4k 0/1 Completed 0 2m12s

# init rook toolbox
kubectl create -f toolbox.yaml

# jump into toolbox
kubectl -n rook-ceph exec -it rook-ceph-tools-5f4464f87-zbd5p -- /bin/bash

# init rook filesystem & storageclass
kubectl create -f filesystem.yaml
kubectl create -f storageclass.yaml
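After the filesystem and storageclass exist, workloads claim Ceph-backed storage through an ordinary PVC. A minimal sketch, assuming the class created by storageclass.yaml is named rook-cephfs (the claim name, size and class name are assumptions, not confirmed by this repository):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-cephfs-pvc     # placeholder name
spec:
  accessModes:
    - ReadWriteMany            # CephFS supports shared access across nodes
  storageClassName: rook-cephfs   # assumed name of the class from storageclass.yaml
  resources:
    requests:
      storage: 1Gi             # placeholder size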
@ -1,169 +0,0 @@
|
|||
---
|
||||
tls-secret: &tls
|
||||
- key: tls.crt
|
||||
value: >
|
||||
ENC[PKCS7,MIISnQYJKoZIhvcNAQcDoIISjjCCEooCAQAxggKBMIICfQIBAD
|
||||
BlME0xCzAJBgNVBAYTAlNFMQ4wDAYDVQQKDAVTVU5FVDEOMAwGA1UECwwFRV
|
||||
lBTUwxHjAcBgNVBAMMFWs4c2MxLm1hdHJpeC5zdW5ldC5zZQIUUXNN3AgoU9
|
||||
LDj51WjMKFKDBvyGgwDQYJKoZIhvcNAQEBBQAEggIATZD52IMRDXX4kFc1ic
|
||||
sujwttiASzziDBvsZD1x3GDcXHTSfucMYIcHLavnZxlaON/SP113pQxBPALc
|
||||
A5ZXpLsTIxnd3iGBJJk3mwGz1U1CFEM4QZSOuh2f9cFlD5f+p9i8ggvQ8QvK
|
||||
jut0N11EwFlbGd3A1jq4s5B1pu8S0obEIrayeoK6iUUBbk8nEKnILyKf908X
|
||||
1Nnhv4XalD6rvgSZDrWzU8XpAHZ0b+ttpwE+dinu160rVELWpM8dZCqkD1SB
|
||||
ByNKdSpQHnIobHpg6e1XPLvFvFqJ0ljcKXbr54ju4YmIPEu6fh4n1TgB5jMs
|
||||
q9QUIW0xUq53Qk6QYUiasy90NUMWO/zrhV6BsTAwKSUCmvcq0YGioBtuhixt
|
||||
/CEbtyr3EqqrYJ7B9MHQ5M+4wl8TfalZTyVjiFk51itzyQI5j2Sow7q2/Fia
|
||||
mDyUR54Ks6UqbKTk6JFhj+LWCAOjz0bH2bk+2CIZfXcCD/em/wFWpS4h4Tx9
|
||||
l03iC42rG4VA0Fv/9i9W4/cd2v4j4/hYbf7m55xY5Iy83Ijbghcmo8OOyha9
|
||||
Sxb6ecCfPlO+8hAXm7xspWF0oFXfKYuVG2S/Z+OIseAaw4XFxUJ5lagqBnhL
|
||||
9hymFQXp+IAZBnNPM4W1ewOKEEfxZ3GL3flM+qYbf/TRlW4BiJ9LpqiidC4b
|
||||
LsNy0wgg/+BgkqhkiG9w0BBwEwHQYJYIZIAWUDBAEqBBBPh4TbOm74T+Z5NK
|
||||
NU+cGEgIIP0CAVBJmuzDH6Fsh4rg0dnqT1RshGyyttY2jk89WQ9R0K0rAvYp
|
||||
lN/5ykITDRPEjqTxzOxoC6X+G6yE6C6ILHgK9nQvuBDblMSeRaBB+f46GNrX
|
||||
BAQzsjfTD4ePk7hNx8jKaoPrzwK7eXcK+e4jwxhnAdPxBSavEWv+JFECbdvf
|
||||
43x5Rpyf4CIxhG84pFBKKejPOC/xHg2g/RQHip6iI7IN5SlirEdANQNZriWr
|
||||
dqN1fMns69VA3729T1SP9ntUlu6LB8BaJoCiexF+OTLvSvhFCPPuzIJF4zN7
|
||||
IWwCy/rrxXOLXZnHOu4ZFiV2ocDx1H8pxlsCmXq6JkNViBpddMixRfEJIJs5
|
||||
GGw59coNef3WDIyfURuvNlN5PktdPmqFs4Y1M3BUjJYDRPuLTI7FGt9QKpEt
|
||||
K2NlnJiRvspnXOzhNTV8WX/NTt831Lw3iDS2ZAMNzY6OsoGkcRmqLW4T7uM2
|
||||
+unkrnx8GitUZui6hkm+5dTUh43l+EDuv3qeNiw4HxcjwceSklZ8JYjOJuQ4
|
||||
VHX6Z2yza0EL8CACh+4KZTZhXMXfxt/w1bHVzZzKekQHtz+LOmprF0PbFxln
|
||||
xmxOg2ZRofGtMeRgtVXGpHGj7dA0FiODSRUOBgZbcaMVxSdpCnhm9q+mUXh+
|
||||
AS4YZnXKuIEDZ0zQI9iq/oNfgqtbv7taPZPgBucOdkQm9LDIGY5iRWYvMNsn
|
||||
6F4EXzBwK4zbHkiwSquzPUEtylQ8xlECwGmHZCiwNi9lvtjlNIL7syYnyqhL
|
||||
0tuBGTwaGWJVSw2yPb3GtfZ/f0wUQcQjSNNWwHolrjExpasa0PMQSRZ93+V/
|
||||
sU9/YjtVBGoKP1ldAOyEL1WmIJJC5GOl49nJjixpHwOB1DneHvc4B9JAlK/e
|
||||
4TmIqP4mEetAuA2nLJ3Y9HURVWxlA7eIc3rc4/TVkSjtq1A+bnYdDc535Pw1
|
||||
vf/N8wlU9VDCtYPhROLghlwYfMByvuOonMV6Fnd9ILkMcRJzT7zqE85hn4Hx
|
||||
dLaSBFSwcNKcvXsbtnDwPuVioZ/58ygaT+dIr0uRVfkR60S46CcHXum73IBL
|
||||
9/KsRzLsQ6ys7bjKITE2u0d3zSI87wJ7EbwiM+ljz1BRUxjP7AwGehH24Btr
|
||||
nDk2sIzdp0jRv5JxIrLuHc5TKiki9l2hnR4Owx1NxoKy2dquww1xp07G6S+/
|
||||
LK+BTWmnE/rSKJoCEcKDGC1wgItwqT+wEkxfGVibVrNb1yv94jtsdz0M4r1D
|
||||
IBB2uhZqPhIrfhG3Dgg7XYVMLXJkRF/o+WoCfD43IcyTcA961g2CXczUaQmL
|
||||
BfOoC1bx+k17u+/qNJGXvhf0J8A7v24yjQmVwR2ceHP2KAfhSsUKTrJFlS7o
|
||||
R+k4QmsvR1qzT+KgcPLkBO75cbvbBnL1e5x5rrE97TSHLvC1NrFJZodJYbeP
|
||||
kL6eyEy/aXIkFEUS44EBOsVD7cNni35crdW960PWxrh25+fyOAHvUwXfCkDq
|
||||
SYo7nnkglbR2UwbdSGfhFypUO/W0IQtu0D1liGt9W0e5k38LmnpbfDDaf5mP
|
||||
ei3uaukC+8Dss7drRCCA9ay90XhX2GACmzGPdGpfFFdihtXiYBhgfKjCCUj+
|
||||
b6M+bz8sUsKLOmyByXtKMRH072j7BxIPzre25pldbs1RxfwaVRb7TmA5e/kp
|
||||
Ldp1mE5nQXQWzcvoIAeIW1mWxQi717ncXGeN9hJ83hbW32sSNnuIoRl+CYBr
|
||||
gO3oT/ypP8wgoJ/ZS0xE99lie7/ZMELFUmhNR5MvRQExO02uvJTfRf3REuyA
|
||||
fVrw0rXRSlP/Gg+WN0rt+2vuk1qEZkZghlMTMGttt3jugC5mTYnXtXA3qqLV
|
||||
hirE+6rmDs+jAMhc+WoGVaWbwRkn/2RVU1Ne4DsI+GIKZ7GuRSWiYDvJySNH
|
||||
bL/fAt7S+xafiLuihZKTC477TIef6LBOqKzMf1+OrPna4iAmFhWSU/VKvCJX
|
||||
0EAynNNhn3gfOG6zcQy/W/walNB5ZWNRNTjM+913ZsqgtgxA6/7ZaT3DTH11
|
||||
kczhamLZHZzko5oSqpokWSPV0DKJN3/2i0j2Hj6cWBCzGYz+yGbEkDq3Kqqh
|
||||
Q0RKvBLwe6VAStdRXjvlb2swbm4rtLPoXooWrVALy5g8o0OKZCsOlLYbMk6x
|
||||
Enf9g5jgCoJcd5u14hR7b4foCu8nnfr0/2ngmpCvvRNs+UEX83a3WmKFVVzv
|
||||
acFnAx5imn1MdFFvMNcox+98TjMLAftOzxmxaR3x46NPB+69wRh1Z/0xxI0c
|
||||
Yz1pqjBYs/rQVyF3fh+5jsQmRs6ueohfvsAkwOxKcVpOzgRSY3StzFdMSMmU
|
||||
HKEmoNiGQNrnr63OGdY1wJRZ6vwtlUm9mZ6AyR9Nrd09kbJfV7bsDgydfU3P
|
||||
8n+8jdZM6HLUcl5YOOXhAFILgrDS90u7LZuUTOA4iT3KN41J/qqaSTbgUZDb
|
||||
eN73yHUcBP0cFF1fSO6+HzO6bfmEjoe/SS+w4BBjEZb12isTT5pkMHPRW9Ue
|
||||
duiJJ6DB+ebPFBAZEcIZOe2F55d64trIw/4X7IM/A/88d8UtpbYfBWlaE95I
|
||||
wb0e+Vbg380TcKo7ZwnV2QsSWe8pSpcUjADiJtlMGE2FIq9/BbwDEVqXKHNg
|
||||
zCbx1oARanwS5vfKUl6gpPi7rJONvV6j3uAFJxMgdrS6aHFWecy/24FNLxgp
|
||||
dlkv5Zo9fwWOJIvFSu/XogjGIQqCQEaSGIQIuzLefLn92ea94LJORDWdPho+
|
||||
AfnyGfovQ3PpPPHxEBIMVaL3x61/Di45s4SxYXrxWp39VJa2J0396J5JQ1r2
|
||||
NOre1SspPXg9mtBFw0UrK5uCGWLAkJUlgKdGZdL3sX8X7n0UYpEp4wQgf1Dl
|
||||
P0HU4FJvIgUsWlNB6sm3Rg3a7XBnnAXDB7jGPrM8C/TJQg1AMiPSMhSf3icI
|
||||
9pt3PW5InjUCgl87FuBshJ5CHiLOQh+QlX/8+34EbziBRocyBVu8hbjwXILM
|
||||
6p7/MEwJNmlhmEFObsKPqvQpi9lbYRNUwIZqFPWr3DIBOTkO3zatimXwdCDm
|
||||
LLJG5Au2h/shTB8Mksm9eqyb8vbnUiyOy+Yk3YDNUauMU7+S5KHT9Q4Ht0hn
|
||||
eTd7d+2e7MyXwD8gCWNFfnBpK7WBR0w1Ix3qgS7QEsCbaCGZ/yBPN39TA1XV
|
||||
u5re15RskGcYjv3L1vcLHnzn5hToe/IQm0YGglJqXauqxUMaLfjaYTyKGNFS
|
||||
QtYwt9tePAiCGHGVLJJFG72EhXhiMZ1Cl+uPixaNAp2TEhGazkaeU3egChhI
|
||||
2WItO/f/F5tYInvbPPJqkXRjL6kQvojgKSD2fmIwm/GV6GVInw1Mb/n+jzv6
|
||||
5ozwtm/pI7eLXi7aWpUjZGx64Pivn+prfQqH95k/vikv8Bdnaxg9MZd5UKK4
|
||||
CT8CWXxHxgAl/CniobU91IPqTdiDbvtfRr0EslSS4bGVV27ztfzjmXpf6bCX
|
||||
MJy3lalBGtI0tWhXo33dsZzS2L5kjTk4wnK8vF47lLfIZ5x+dah2vAohLL6G
|
||||
XXcYkKpm3NnxfmRw3i3DAKRBEs+SHwJXdB9SfrccpuKcbPqrC9Zn/DKiu09O
|
||||
+ED3T7oczUm0zqgGLPh0FWmVbQ/JIjYmkGZKHWJluczDDucacOLZUNTbJN/0
|
||||
IKWSZH20vduKQBqDkIqPpOWNmvXyuH7a1Zlh7EXRZBZCOWSPTP+HI1xnIOUN
|
||||
/m6oFKRfiVHgmA/oLkRQ6griV3M/aNnIypudS8uZW559T7T2wXj9j+GZjZAe
|
||||
gKi+qdd6P6KDlHbR1f1lu56dvrjBK+UeLeK2omXiRjHIYX8jTBtwjYUR2BQu
|
||||
mq4KZcmfPeRe+8aP1zT1uMVJBQQuSTofCiyupxNCEyhSgb/OgmfXB3TdQ5CN
|
||||
NQuhRbVzMMo20gmJJ+6hlg9m7tpenhDkqIBtzKB4f4/veXZHr8b5+/RbYMDV
|
||||
qoxmQk4H6lZ/E80EjmsgD/wsADzkhUXTMiRlSLIujPL9m8FgO5SeUnUN49Q6
|
||||
c1W8LAOenr0Xkkiv+cbfzTPLN4D+ACzb9z0LBR1KAD4ZyD+GeB8Ou5hOGE0w
|
||||
WM3GeBdimkHeyE3QrO6xhdrscHsid9sJe7yFnG15kkQZXPbYomiur8yqh+QG
|
||||
zIHauL8RyZEEu5R9/lsq/DVOwP7vDxPX1eztDhT/i+64Z64hd/twGVFk5JxF
|
||||
iDfx6yJN4SlmbFjeaQ1Zl2cHDQCaUlFiALC/SFk9Mdt+EaRYJEZhZnSgpZ+f
|
||||
YI8RWgLlD4DC10JQ5zmFjfnzMMiVMhAbT0W6C2LdVd8zPQXT+kQrlaq8s+b6
|
||||
d2s8IoYx3x0kTi7gM6OP1etVM2L2ocdWvVHippsQXfzOLQ747bbLI028vR1B
|
||||
oEucGS/RlxNqnOgmujMRty94mt+U7fU/u3uVg07meIv2Wc3nQl/tpbG8olLt
|
||||
r17lAiPsP1tyblo4iqx//GOdB6jCg1zO5KDmJH1ZgSJVCNNW+tLhVmtfB1v2
|
||||
bRnincyr7842d4l+kmuu69Y+10THMltWH63JWnBrFyiG/T5zD/EH8XXOgSC7
|
||||
0MlWYPiLtNkCcBi5lrvgISTwUGeW/qIMNTkbmJxpZ5TjBjcyYQpORtnoAXQj
|
||||
3SLgNiZdXeR4D5j0/kYrI6blz6fw1DV1AcfWUvCr684oYF8Rg+vtl3xJMA5l
|
||||
BEQwyZHxGPWgAP8nzQ66ofyktkYOskw/1KCszKv/i4myitGVwEc2PqoTva/Q
|
||||
tdjWXqdh4lwBxLvYFt19EvO76cbwnXUBNFLXkyFHtis//q7mgTwi+13Y6LbR
|
||||
bUm4JRBBxT1Fx3XmTCL+th8EI1XVM7wh+NKZkZak4ummJgf0KXC+QXkynJXb
|
||||
B+28oGU9mUW3DXBv2ceW49nLY66iJzI8YzyP+YfTDiJLtmB2hVA02ZFcKMyN
|
||||
4UGa5erlTGixAFPBcDrw603f1fjYrDVQZfPUvjl8F+jIRheevy5b7VhcQxr/
|
||||
KbNFCqarHTsO98NbRIV+2VU4nip+9Fot7TXvpYloMBp7EyDKlSpI4JLD1rRH
|
||||
Y2EjtRVyNDNubWBLo4fKQ4CwsQLaIjWdD4RD5K1O0vsgk+NWoPid/BHguF/p
|
||||
U/gn1VwGydI3T3UhkUZo/c9OnBTHzn/1BRwbLCO1zLE8aE+D4HDCyKRDuAPP
|
||||
D5iJLnrWbhaFMJMJ2/7t/fgE75mrSyWA/12ERhQiS0mTPhvvKaNM5qv5POhd
|
||||
C4pqjSnXf9C89YX8T6DTHDKF2qknGBGzpKHPWX5NGFGc6SCiIiZrTVp/PLHL
|
||||
JjJcO0N8qCmlsIWRPPgkPKa6uRndIq8dux+fjZVUruMKmQF2DwArbOWLSCWL
|
||||
ZheKiC8dHs6ObcNYh5kON3nEp9mGHfKYNKzR89o7qwP7Lfy1uKc4+y1YTj2F
|
||||
RsCaJuUiM=]
|
||||
- key: tls.key
|
||||
value: >
|
||||
ENC[PKCS7,MIID7QYJKoZIhvcNAQcDoIID3jCCA9oCAQAxggKBMIICfQIBAD
|
||||
BlME0xCzAJBgNVBAYTAlNFMQ4wDAYDVQQKDAVTVU5FVDEOMAwGA1UECwwFRV
|
||||
lBTUwxHjAcBgNVBAMMFWs4c2MxLm1hdHJpeC5zdW5ldC5zZQIUUXNN3AgoU9
|
||||
LDj51WjMKFKDBvyGgwDQYJKoZIhvcNAQEBBQAEggIAWMkR/oj9qeRsaHR7rH
|
||||
fY4RhGy36hkAO2H24s7OeuGRWvSppZCjpSQ/oQhhlX2w54pmNrpEwQiD7FRY
|
||||
85PNucFzC81uSxGg9woj4p6py0LiWagv7Q+jZFE6NaHL3W/Y0x6HR+4uC84t
|
||||
LInng4BTXZybUT6qZW2JZBzCOrcCyBfwQdIaulX0lL7yHljnnB54rXZ7JW2Q
|
||||
LFeWe8FqCC6ZDYcVFrzD/fQGgJfnJics98ATaSAyNTuEpZr75O6z3HbcS53C
|
||||
VxNyJ6O6xeYxcvijF4ykyYpTbp3iUCWGs7JuVWm1XXJNHT3VtIyvI6oqGhoC
|
||||
JKUuXX4bD4BrLjTu0uCKmRilQhMnQ4mq5zr+e1mMGTfEj9nGciFY9N9uvke7
|
||||
Ig+jIvYQ8OqGnKuVsAUEvrmnacrXUNuvaOtLcitpOBg4eROiF5Sw6xnxcHSk
|
||||
ug4FZmv2H+xpBZrjNmhCLFwDhkHOZoT2JYbS8ui9/DuR3GoJ2uSvHuGvWbsO
|
||||
FGTMOFYR5HJ15zivc18vPko3hjWCmPOJXBEBGsSgawSoCa50b+bxql0Z3t03
|
||||
2FboNGNloqi0+Hw7gNrL2VeRyp6QgWkILy0vJHjq0QOzUcCXcfzBcj1jqAli
|
||||
NU/iKCXVwUesIwU44eMBBUMqm9KF1L5Jk0vbxXo5/8WuCYAZkpbTdH5heks4
|
||||
TWNZgwggFOBgkqhkiG9w0BBwEwHQYJYIZIAWUDBAEqBBAQJUXVen+bf8+ePd
|
||||
0FkUEsgIIBIM/KpUADCCPTZOfBQmNkwyIt00Flw6Ko7Vm523NHu0ASzzWD4n
|
||||
rE/nA6TvwiAzL8dti77ujP3YvsCwrA7LCMsbBVJun7IUUqytdcVMhM+6LLHk
|
||||
Sy8Qj4mLsq330TA/7G8JFJLj4HmR1JYOwe4Ua2yxAyey8UxILJ71RvEyPJLk
|
||||
QxD5NMejTkD/OeN574SRQQX3Mu7RMjasImlrG/9DPdnS0friOPlDG4djPYHQ
|
||||
S5mItq4eA4NfhCrE7bLtyZuDYa0yIgaEQ/zjMFvWCMiw5r0+vhXs8pjUqg3Y
|
||||
U3S3XIFAftDVHUnfcprG9zLcfytifL3W7lr7BO64qRbZumtkxPpJx3KK+ui/
|
||||
0Vxi1eI5MXsS25gc+TIlfVmiUhhvEZyYVNCw==]
|
||||
microk8s_secrets:
|
||||
default:
|
||||
tls-secret : *tls
|
||||
health:
|
||||
tls-secret : *tls
|
||||
kube-system:
|
||||
cloud-config:
|
||||
- key: cloud.conf
|
||||
value: >
|
||||
ENC[PKCS7,MIIEHQYJKoZIhvcNAQcDoIIEDjCCBAoCAQAxggKBMIICfQIBAD
|
||||
BlME0xCzAJBgNVBAYTAlNFMQ4wDAYDVQQKDAVTVU5FVDEOMAwGA1UECwwFRV
|
||||
lBTUwxHjAcBgNVBAMMFWs4c2MxLm1hdHJpeC5zdW5ldC5zZQIUUXNN3AgoU9
|
||||
LDj51WjMKFKDBvyGgwDQYJKoZIhvcNAQEBBQAEggIAzfe0zL+en5RwlhWAIo
|
||||
z+rpqysj4xEKFhIdLOZVJouEzfzhj8eoiQheGpk8tstA51FBY9ndvGb8pguH
|
||||
bbd/mmrSU9w+U8SXj3IJGFPCm/GtooFoJk5Omo317gcSX7MbFijKUjiTGRip
|
||||
9iAsaOlfAZlRbVou6HLXLRH77w1WIa5wgkWKsmazAsdP9iD1EZHaQhqgHA7S
|
||||
XbqMO0Y0Cxz428NLDj0sW5YQW/3YsUYpMYSLSBwMMdTcxh3WOiDcvkSX6ZAZ
|
||||
z1XVaVtV5zvg1HNWgb+10mBsGhDyPdynH4jyzq7dGR7GAYOkY7BbKAQjwxJg
|
||||
3bpnLftzXPb6G/TohmJFZ9HnaL4oJFsWpsMTB9maUjmze23F9X3cqoW2Kuud
|
||||
RG2svizbNJCHgM65ja4Njaw9Xsu+OSLdY1Q9CjXWcJupUYelZu9oV5hEYXHU
|
||||
wrV3LAZBbOWA7l5zoBimmDwZMQAhIQujJylfVscjSbtM0L0HXlODT9gQ0ANp
|
||||
jhndvgH8ziHPlID8SPdjKU64QSwFpWOo9e4/Nslptx29OvunCWeh5a9iep+Y
|
||||
EAZxiVgL1s+NsizkdZHFDYrMXPeH1V+ZtgXaX6UGEQSbVPZXzQzAFK4T15jP
|
||||
IAigbknSSUOWne3VDiQ1pjY+n0bZLuk0VhcTQtKKBnjBxln/dSw+ADoF++Hq
|
||||
EuzjcwggF+BgkqhkiG9w0BBwEwHQYJYIZIAWUDBAEqBBBT9qTTK5NMbSXdDl
|
||||
rI+52FgIIBUNxHt3AmQB+hLX8DB/+hLDgO4HC6qw5j5Pj30rvcOcwZ5NjQ1/
|
||||
uRCJyp3jATLH3hmCITgi0iCUj0BR1lxglR9rto8Tx2zDoqTWzgbFBfhjf9k5
|
||||
rKIX08Wdqg6J+ztk6ElWsc2ciC0wqhEQLpGoO0iNHFTZ4CaFG5g37OKHesMg
|
||||
cdfE8rpusXFPdj8ysntTduuvdE3KyPpsYteF8ej8vHLv3B+6eoqxEviWuOl1
|
||||
wWse4tSQCcb0kwBQ+UXHrmG5oxGBdXjTfwytBbaKPo0ol+NjBpfsVyCPK/Dz
|
||||
QnESVnIFXtHgdtefLNcHukUPECa7GQtd8B+7+w50ZzWq/8v7ypMBC5S9GfWO
|
||||
OUHtONp18qIynPFyHUlsAQ0FSPXxGAX/xtTuwpkYJwYLSp0Mlupqhyq7CZUn
|
||||
gSrZGn5u/4OJhwmYJoJlF9GCa9Z/OEHs6JRCcsLA==]
|
3
k8sw4.matrix.sunet.se/README
Normal file
@ -0,0 +1,3 @@
The system documentation is in the docs directory of the multiverse repository.
3
k8sw5.matrix.sunet.se/README
Normal file
@ -0,0 +1,3 @@
The system documentation is in the docs directory of the multiverse repository.
3
k8sw6.matrix.sunet.se/README
Normal file
@ -0,0 +1,3 @@
The system documentation is in the docs directory of the multiverse repository.
3
lb1.matrix.sunet.se/README
Normal file
@ -0,0 +1,3 @@
The system documentation is in the docs directory of the multiverse repository.
3
mgmt1.matrix.sunet.se/README
Normal file
@ -0,0 +1,3 @@
The system documentation is in the docs directory of the multiverse repository.