diff --git a/functions/get_customer.pp b/functions/get_customer.pp
new file mode 100644
index 0000000..aef58a1
--- /dev/null
+++ b/functions/get_customer.pp
@@ -0,0 +1,23 @@
+# Let's determine who the customer is by looking at the hostname
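+# Hypothetical examples (hostname shapes assumed, not taken from inventory):
+#   gss.drive.sunet.se            -> 'gss'
+#   lookup.drive.sunet.se         -> 'lookup'
+#   node1.drive.sunet.se          -> 'common'
+#   idp-proxy1.sunet.se           -> 'common'
+#   node1.kau.drive.test.sunet.se -> 'kau'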
+function sunetdrive::get_customer() >> String {
+ $hostnameparts = split($facts['fqdn'],'\.')
+ if $hostnameparts[1] == 'drive' {
+ if $hostnameparts[0] =~ /^gss/ {
+ return 'gss'
+ } elsif $hostnameparts[0] =~ /^lookup/ {
+ return 'lookup'
+ } else {
+ return 'common'
+ }
+ } elsif $hostnameparts[0] =~ /idp-proxy/ {
+ return 'common'
+ }
+
+ return $hostnameparts[1]
+}
diff --git a/functions/get_environment.pp b/functions/get_environment.pp
new file mode 100644
index 0000000..42fa60f
--- /dev/null
+++ b/functions/get_environment.pp
@@ -0,0 +1,16 @@
+# Let's determine which environment we are in by looking at the hostname
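+# Illustrative mapping (hostnames assumed):
+#   node1.kau.drive.sunet.se       -> 'prod'
+#   node1.kau.pilot.drive.sunet.se -> 'pilot'
+#   anything else                  -> 'test'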
+function sunetdrive::get_environment() >> String {
+ $hostname = $facts['fqdn']
+ if $hostname =~ /^.*\.drive\.sunet\.se$/ {
+ if $hostname =~ /^.*\.pilot\.drive\.sunet\.se$/ {
+ return 'pilot'
+ } else {
+ return 'prod'
+ }
+ }
+ return 'test'
+}
diff --git a/functions/get_node_number.pp b/functions/get_node_number.pp
new file mode 100644
index 0000000..4c256fe
--- /dev/null
+++ b/functions/get_node_number.pp
@@ -0,0 +1,5 @@
+# Let's determine the node number by looking at the hostname
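+# e.g. node3.kau.drive.test.sunet.se -> 3 (first digit after the leading letters and hyphens)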
+function sunetdrive::get_node_number() >> Integer {
+ Integer(regsubst($::fqdn, /^[a-zA-Z\-]+(\d).*$/, '\\1'))
+}
diff --git a/manifests/app_type.pp b/manifests/app_type.pp
new file mode 100644
index 0000000..1b11b10
--- /dev/null
+++ b/manifests/app_type.pp
@@ -0,0 +1,348 @@
+#Resource type for SUNET-Drive-Application
+define sunetdrive::app_type (
+ $bootstrap = undef,
+ $location = undef,
+ $override_config = undef,
+ $override_compose = undef
+) {
+ # Config from group.yaml and customer specific conf
+ $environment = sunetdrive::get_environment()
+ $customer = sunetdrive::get_customer()
+ $nodenumber = sunetdrive::get_node_number()
+ $is_multinode = (($override_config != undef) and ($override_compose != undef))
+ if $is_multinode {
+ # The config used
+ $config = $override_config
+ # Other settings
+ $admin_password = $config[ 'admin_password' ]
+ $dbhost = $config[ 'dbhost' ]
+ $instanceid = $config[ 'instanceid' ]
+ $mysql_user_password = $config[ 'mysql_user_password' ]
+ $passwordsalt = $config[ 'passwordsalt' ]
+ $redis_host_password = $config[ 'redis_host_password' ]
+ $s3_key = $config[ 's3_key' ]
+ $s3_secret = $config[ 's3_secret' ]
+ $secret = $config[ 'secret' ]
+ } else {
+ # The config used
+ $config = hiera_hash($environment)
+ $skeletondirectory = $config['skeletondirectory']
+ # Other settings
+ $admin_password = safe_hiera('admin_password')
+ $dbhost = 'proxysql_proxysql_1'
+ $instanceid = safe_hiera('instanceid')
+ $mysql_user_password = safe_hiera('mysql_user_password')
+ $passwordsalt = safe_hiera('passwordsalt')
+ $redis_host_password = safe_hiera('redis_host_password')
+ $redis_cluster_password = safe_hiera('redis_cluster_password')
+ $s3_key = safe_hiera('s3_key')
+ $s3_secret = safe_hiera('s3_secret')
+ $secret = safe_hiera('secret')
+ }
+ $twofactor_enforced_groups = hiera_array('twofactor_enforced_groups')
+ $twofactor_enforced_excluded_groups = hiera_array('twofactor_enforced_excluded_groups')
+ $nextcloud_version = hiera("nextcloud_version_${environment}")
+ $nextcloud_version_string = split($nextcloud_version, '[-]')[0]
+ # Common settings for multinode and full nodes
+ $nextcloud_ip = $config['app']
+ $redis_host = $config['redis_host']
+ $s3_bucket = $config['s3_bucket']
+ $s3_host = $config['s3_host']
+ $site_name = $config['site_name']
+ $trusted_domains = $config['trusted_domains']
+ $trusted_proxies = $config['trusted_proxies']
+
+ # These are encrypted values from local.eyaml
+
+ $gss_jwt_key = safe_hiera('gss_jwt_key')
+ $smtppassword = safe_hiera('smtp_password')
+
+ #These are global values from common.yaml
+ $gs_enabled = hiera('gs_enabled')
+ $gs_federation = hiera('gs_federation')
+ $gss_master_admin = hiera_array('gss_master_admin')
+ $gss_master_url = hiera("gss_master_url_${environment}")
+ $lookup_server = hiera("lookup_server_${environment}")
+ $mail_domain = hiera("mail_domain_${environment}")
+ $mail_smtphost = hiera("mail_smtphost_${environment}")
+ $mail_from_address = hiera("mail_from_address_${environment}")
+ $s3_usepath = hiera('s3_usepath')
+ $smtpuser = hiera("smtp_user_${environment}")
+ $tug_office = hiera_array('tug_office')
+
+ # This is a global value from common.yaml but overridden in the gss-servers local.yaml
+ $gss_mode = hiera('gss_mode')
+
+ # These are global values from common.yaml but can be overridden in group.yaml
+ $drive_email_template_text_left = $config['drive_email_template_text_left']
+ $drive_email_template_plain_text_left = $config['drive_email_template_plain_text_left']
+ $drive_email_template_url_left = $config['drive_email_template_url_left']
+ $lb_servers = hiera_hash($environment)['lb_servers']
+ $document_servers = hiera_hash($environment)['document_servers']
+ # set up cronjob on node3
+ if $::fqdn[0,5] == 'node3' {
+ file { '/opt/nextcloud/cron.sh':
+ ensure => file,
+ owner => 'root',
+ group => 'root',
+ mode => '0700',
+ content => template('sunetdrive/application/cron.erb.sh'),
+ }
+ cron { 'cron.sh':
+ command => '/opt/nextcloud/cron.sh',
+ user => 'root',
+ minute => '*/5',
+ }
+ if $location =~ /^extern/ {
+ file { '/opt/nextcloud/user-sync.sh':
+ ensure => file,
+ owner => 'root',
+ group => 'root',
+ mode => '0700',
+ content => template('sunetdrive/application/user-sync.erb.sh'),
+ }
+ -> cron { 'gss_user_sync':
+ command => '/opt/nextcloud/user-sync.sh',
+ user => 'root',
+ minute => '*/5',
+ }
+ }
+ }
+ #Create users
+ unless $is_multinode {
+ user { 'www-data': ensure => present, system => true }
+ package { 'aufs-tools': ensure => latest, provider => 'apt' }
+
+ file { '/usr/local/bin/occ':
+ ensure => present,
+ force => true,
+ owner => 'root',
+ group => 'root',
+ content => template('sunetdrive/application/occ.erb'),
+ mode => '0740',
+ }
+ file { '/etc/sudoers.d/99-occ':
+ ensure => file,
+ content => "script ALL=(root) NOPASSWD: /usr/local/bin/occ\n",
+ mode => '0440',
+ owner => 'root',
+ group => 'root',
+ }
+ file { '/usr/local/bin/upgrade23-25.sh':
+ ensure => present,
+ force => true,
+ owner => 'root',
+ group => 'root',
+ content => template('sunetdrive/application/upgrade23-25.erb.sh'),
+ mode => '0744',
+ }
+ file { '/opt/rotate/conf.d/nextcloud.conf':
+ ensure => file,
+ force => true,
+ owner => 'root',
+ group => 'root',
+ content => "#This file is managed by puppet\n#filename:retention days:maxsize mb\n/opt/nextcloud/nextcloud.log:180:256\n",
+ mode => '0644',
+ }
+ file { '/opt/rotate/conf.d/redis.conf':
+ ensure => file,
+ force => true,
+ owner => 'root',
+ group => 'root',
+ content => "#This file is managed by puppet
+#filename:retention days:maxsize mb\n/opt/redis/server/server.log:180:256\n/opt/redis/sentinel/sentinel.log:180:256\n",
+ mode => '0644',
+ }
+ file { '/opt/nextcloud/000-default.conf':
+ ensure => file,
+ force => true,
+ owner => 'www-data',
+ group => 'root',
+ content => template('sunetdrive/application/000-default.conf.erb'),
+ mode => '0644',
+ }
+ file { '/opt/nextcloud/mpm_prefork.conf':
+ ensure => file,
+ force => true,
+ owner => 'www-data',
+ group => 'root',
+ content => template('sunetdrive/application/mpm_prefork.conf.erb'),
+ mode => '0644',
+ }
+ file { '/opt/nextcloud/404.html':
+ ensure => file,
+ force => true,
+ owner => 'www-data',
+ group => 'root',
+ content => template('sunetdrive/application/404.html.erb'),
+ mode => '0644',
+ }
+ file { '/opt/nextcloud/config.php':
+ ensure => file,
+ force => true,
+ owner => 'www-data',
+ group => 'root',
+ content => template('sunetdrive/application/config.php.erb'),
+ mode => '0644',
+ }
+ file { '/opt/nextcloud/nextcloud.log':
+ ensure => file,
+ force => true,
+ owner => 'www-data',
+ group => 'root',
+ mode => '0644',
+ }
+ file { '/opt/nextcloud/rclone.conf':
+ ensure => file,
+ owner => 'www-data',
+ group => 'root',
+ content => template('sunetdrive/application/rclone.conf.erb'),
+ mode => '0644',
+ }
+ file { '/opt/nextcloud/apache.php.ini':
+ ensure => file,
+ force => true,
+ owner => 'www-data',
+ group => 'root',
+ content => template('sunetdrive/application/apache.php.ini.erb'),
+ mode => '0644',
+ }
+
+ file { '/opt/nextcloud/apcu.ini':
+ ensure => file,
+ force => true,
+ owner => 'www-data',
+ group => 'root',
+ content => template('sunetdrive/application/apcu.ini.erb'),
+ mode => '0644',
+ }
+
+ file { '/opt/nextcloud/cli.php.ini':
+ ensure => file,
+ force => true,
+ owner => 'www-data',
+ group => 'root',
+ content => template('sunetdrive/application/cli.php.ini.erb'),
+ mode => '0644',
+ }
+ file { '/usr/local/bin/migrate_external_mounts':
+ ensure => file,
+ force => true,
+ owner => 'root',
+ group => 'root',
+ content => template('sunetdrive/application/migrate_external_mounts.erb'),
+ mode => '0744',
+ }
+ file { '/opt/nextcloud/complete_reinstall.sh':
+ ensure => file,
+ force => true,
+ owner => 'root',
+ group => 'root',
+ content => template('sunetdrive/application/complete_reinstall.erb.sh'),
+ mode => '0744',
+ }
+ file { '/etc/sudoers.d/99-run-cosmos':
+ ensure => file,
+ content => "script ALL=(root) NOPASSWD: /usr/local/bin/run-cosmos\n",
+ mode => '0440',
+ owner => 'root',
+ group => 'root',
+ }
+ file { '/usr/local/bin/redis-cli':
+ ensure => present,
+ force => true,
+ owner => 'root',
+ group => 'root',
+ content => template('sunetdrive/application/redis-cli.erb'),
+ mode => '0740',
+ }
+ file { '/etc/sudoers.d/99-redis-cli':
+ ensure => file,
+ content => "script ALL=(root) NOPASSWD: /usr/local/bin/redis-cli\n",
+ mode => '0440',
+ owner => 'root',
+ group => 'root',
+ }
+ file { '/usr/local/bin/add_admin_user':
+ ensure => present,
+ force => true,
+ owner => 'root',
+ group => 'root',
+ content => template('sunetdrive/application/add_admin_user.erb'),
+ mode => '0744',
+ }
+ file { '/etc/sudoers.d/99-no_mysql_servers':
+ ensure => file,
+ content => "script ALL=(root) NOPASSWD: /home/script/bin/get_no_mysql_servers.sh\n",
+ mode => '0440',
+ owner => 'root',
+ group => 'root',
+ }
+ file { '/home/script/bin/get_no_mysql_servers.sh':
+ ensure => present,
+ force => true,
+ owner => 'script',
+ group => 'script',
+ content => template('sunetdrive/application/get_no_mysql_servers.erb.sh'),
+ mode => '0744',
+ }
+ }
+ if $location =~ /^gss-test/ {
+ file { '/opt/nextcloud/mappingfile.json':
+ ensure => present,
+ owner => 'www-data',
+ group => 'root',
+ content => template('sunetdrive/application/mappingfile-test.json.erb'),
+ mode => '0644',
+ }
+ } elsif $location =~ /^gss/ {
+ file { '/opt/nextcloud/mappingfile.json':
+ ensure => present,
+ owner => 'www-data',
+ group => 'root',
+ content => template('sunetdrive/application/mappingfile-prod.json.erb'),
+ mode => '0644',
+ }
+ } elsif $location =~ /^kau/ {
+ file { '/mnt':
+ ensure => directory,
+ owner => 'www-data',
+ group => 'www-data',
+ mode => '0755',
+ }
+
+ }
+ if $skeletondirectory {
+ file { '/opt/nextcloud/skeleton':
+ ensure => directory,
+ owner => 'www-data',
+ group => 'www-data',
+ mode => '0755',
+ }
+ }
+ if $customer == 'mdu' {
+ file { '/opt/nextcloud/skeleton/README.md':
+ ensure => present,
+ require => File['/opt/nextcloud/skeleton'],
+ owner => 'www-data',
+ group => 'www-data',
+ content => template('sunetdrive/application/MDU-README.md.erb'),
+ mode => '0644',
+ }
+ }
+ if $is_multinode {
+ $compose = $override_compose
+ } else {
+ $compose = sunet::docker_compose { 'drive_application_docker_compose':
+ content => template('sunetdrive/application/docker-compose_nextcloud.yml.erb'),
+ service_name => 'nextcloud',
+ compose_dir => '/opt/',
+ compose_filename => 'docker-compose.yml',
+ description => 'Nextcloud application',
+ }
+ sunet::misc::ufw_allow { 'https':
+ from => '0.0.0.0/0',
+ port => 443,
+ }
+ }
+
+}
diff --git a/manifests/application.pp b/manifests/application.pp
new file mode 100644
index 0000000..4d7b55c
--- /dev/null
+++ b/manifests/application.pp
@@ -0,0 +1,13 @@
+# A class using the app resource
+class sunetdrive::application (
+ $bootstrap = undef,
+ $location = undef,
+ $override_config = undef,
+ $override_compose = undef
+) {
+
+ $app = sunetdrive::app_type { 'base_app':
+ bootstrap => $bootstrap,
+ location => $location,
+ }
+}
diff --git a/manifests/cache.pp b/manifests/cache.pp
new file mode 100644
index 0000000..ddb3426
--- /dev/null
+++ b/manifests/cache.pp
@@ -0,0 +1,13 @@
+#Class for SUNET-Drive-Cache
+class sunetdrive::cache (
+ $bootstrap = undef,
+ $location = undef,
+ $override_config = undef,
+ $override_compose = undef,
+ $override_redis_conf = undef
+) {
+ $cache = sunetdrive::cache_type { 'base_cache':
+ bootstrap => $bootstrap,
+ location => $location,
+ }
+}
diff --git a/manifests/cache_type.pp b/manifests/cache_type.pp
new file mode 100644
index 0000000..a38749f
--- /dev/null
+++ b/manifests/cache_type.pp
@@ -0,0 +1,74 @@
+#Resource type for SUNET-Drive-Cache
+define sunetdrive::cache_type (
+ $bootstrap = undef,
+ $location = undef,
+ $override_config = undef,
+ $override_compose = undef,
+ $override_redis_conf = undef
+) {
+ $environment = sunetdrive::get_environment()
+ $is_multinode = (($override_config != undef) and ($override_compose != undef) and ($override_redis_conf != undef))
+ # Now we get the corresponding config from group.yaml
+ if $is_multinode {
+ $config = $override_config
+ } else {
+ $config = hiera_hash($environment)
+ $nextcloud_ip = $config['app']
+
+ # Pick out the first host to be redis leader
+ $leader_address = $nextcloud_ip[0]
+
+ }
+ #Static variable defined here
+ $leader_name = 'cache1'
+
+ if $is_multinode {
+ $redis_config = $override_redis_conf
+ } else {
+ $redis_host_password = safe_hiera('redis_host_password')
+ $replica_of = hiera('replica_of')
+ $announce_address = hiera('announce_address')
+
+ file { '/opt/redis/server':
+ ensure => directory,
+ recurse => true,
+ }
+ $redis_config = file { '/opt/redis/server/server.conf':
+ ensure => present,
+ content => template('sunetdrive/cache/server.conf.erb'),
+ mode => '0666',
+ require => File['/opt/redis/server'],
+ }
+ file { '/opt/redis/sentinel':
+ ensure => directory,
+ recurse => true,
+ }
+ file { '/opt/redis/sentinel/sentinel.conf':
+ ensure => present,
+ content => template('sunetdrive/cache/sentinel.conf.erb'),
+ mode => '0666',
+ require => File['/opt/redis/sentinel'],
+ }
+ sunet::misc::ufw_allow { 'redis_server_port':
+ from => '0.0.0.0/0',
+ port => 6379,
+ }
+ sunet::misc::ufw_allow { 'redis_sentinel_port':
+ from => '0.0.0.0/0',
+ port => 26379,
+ }
+ }
+ if $is_multinode {
+ $compose = $override_compose
+ } else {
+ $compose = sunet::docker_compose { 'drive_redis_docker_compose':
+ content => template('sunetdrive/cache/docker-compose_cache.yml.erb'),
+ service_name => 'redis',
+ compose_dir => '/opt/',
+ compose_filename => 'docker-compose.yml',
+ description => 'Redis cache cluster',
+ }
+ }
+
+}
+
diff --git a/manifests/cloudimage.pp b/manifests/cloudimage.pp
new file mode 100644
index 0000000..aa9cada
--- /dev/null
+++ b/manifests/cloudimage.pp
@@ -0,0 +1,57 @@
+# Wrapper around sunet::cloudimage with this module's common settings
+define sunetdrive::cloudimage(
+ String $mac,
+ String $cpus = '1',
+ String $memory = '1024',
+ Optional[String] $description = undef,
+ Optional[String] $ip = undef,
+ Optional[String] $netmask = undef,
+ Optional[String] $gateway = undef,
+ Optional[String] $ip6 = undef,
+ Optional[String] $netmask6 = '64',
+ Optional[String] $gateway6 = undef,
+ Array[String] $search = ['komreg.net'],
+ String $bridge = 'br0',
+ String $size = '40G',
+ String $local_size = '0',
+ String $image_url = 'https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img',
+) {
+ # This is a hack, use SSH keys from KVM host?
+ $_ssh_key = hiera('ssh_authorized_keys')['berra+96E0A9D4']
+ $cloudimage_ssh_keys = [sprintf('%s %s %s', $_ssh_key['type'], $_ssh_key['key'], $_ssh_key['name'])]
+
+ sunet::cloudimage { $name:
+ image_url => $image_url,
+ ssh_keys => $cloudimage_ssh_keys,
+ apt_dir => '/etc/cosmos/apt',
+ disable_ec2 => true,
+ #
+ bridge => $bridge,
+ dhcp => false,
+ mac => $mac,
+ ip => $ip,
+ netmask => $netmask,
+ gateway => $gateway,
+ ip6 => $ip6,
+ netmask6 => $netmask6,
+ gateway6 => $gateway6,
+ resolver => ['130.242.80.14', '130.242.80.99'],
+ search => $search,
+ #
+ repo => $::cosmos_repo_origin_url,
+ tagpattern => $::cosmos_tag_pattern,
+ #
+ cpus => $cpus,
+ memory => $memory,
+ description => $description,
+ size => $size,
+ local_size => $local_size,
+ }
+}
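+# Example use (illustrative values only):
+#   sunetdrive::cloudimage { 'drive-test1.komreg.net':
+#     mac     => '52:54:00:00:00:01',
+#     ip      => '89.45.20.10',
+#     netmask => '255.255.255.0',
+#     gateway => '89.45.20.1',
+#   }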
diff --git a/manifests/db_type.pp b/manifests/db_type.pp
new file mode 100644
index 0000000..a819c28
--- /dev/null
+++ b/manifests/db_type.pp
@@ -0,0 +1,123 @@
+# Mariadb cluster class for SUNET Drive
+define sunetdrive::db_type(
+ $tag_mariadb=undef,
+ $bootstrap=undef,
+ $location=undef,
+ $override_config = undef,
+ $override_compose = undef)
+{
+
+ # Config from group.yaml
+ $environment = sunetdrive::get_environment()
+ $mariadb_version = hiera("mariadb_version_${environment}")
+ $is_multinode = (($override_config != undef) and ($override_compose != undef))
+ if $is_multinode {
+ $config = $override_config
+ $mysql_root_password = $config['mysql_root_password']
+ $mysql_user_password = $config['mysql_user_password']
+ $backup_password = $config['backup_password']
+ $mariadb_dir = $config['mariadb_dir']
+ $mycnf_path = $config['mycnf_path']
+ $server_id = '1000'
+ } else {
+ $config = hiera_hash($environment)
+ $mysql_root_password = safe_hiera('mysql_root_password')
+ $backup_password = safe_hiera('backup_password')
+ $proxysql_password = safe_hiera('proxysql_password')
+ $mysql_user_password = safe_hiera('mysql_user_password')
+ $mariadb_dir = '/etc/mariadb'
+ $mycnf_path = 'sunetdrive/mariadb/my.cnf.erb'
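+ # Derive a unique server id from the last character of the hostname, e.g. 'db3' -> 1003 (assumes hostnames end in a single digit)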
+ $server_id = 1000 + Integer($facts['hostname'][-1])
+ ensure_resource('file',"${mariadb_dir}", { ensure => directory, recurse => true } )
+ $dirs = ['datadir', 'init', 'conf', 'backups', 'scripts' ]
+ $dirs.each |$dir| {
+ ensure_resource('file',"${mariadb_dir}/${dir}", { ensure => directory, recurse => true } )
+ }
+ }
+
+ $nextcloud_ip = $config['app']
+
+ unless $is_multinode {
+ $db_ip = $config['db']
+ $db_ipv6 = $config['db_v6']
+ $backup_ip = $config['backup']
+ $backup_ipv6 = $config['backup_v6']
+ $ports = [3306, 4444, 4567, 4568]
+
+ sunet::misc::ufw_allow { 'mariadb_ports':
+ from => $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6,
+ port => $ports,
+ }
+ sunet::system_user {'mysql': username => 'mysql', group => 'mysql' }
+ }
+
+
+ if $location =~ /^lookup/ {
+ $sql_files = ['02-backup_user.sql', '03-proxysql.sql', '05-lookup.sql']
+ } else {
+ $sql_files = ['02-backup_user.sql', '03-proxysql.sql', '04-nextcloud.sql']
+ }
+ $sql_files.each |$sql_file| {
+ file { "${mariadb_dir}/init/${sql_file}":
+ ensure => present,
+ content => template("sunetdrive/mariadb/${sql_file}.erb"),
+ mode => '0744',
+ }
+ }
+ file { "${mariadb_dir}/conf/credentials.cnf":
+ ensure => present,
+ content => template('sunetdrive/mariadb/credentials.cnf.erb'),
+ mode => '0744',
+ }
+ file { "${mariadb_dir}/conf/my.cnf":
+ ensure => present,
+ content => template($mycnf_path),
+ mode => '0744',
+ }
+ file { "${mariadb_dir}/scripts/run_manual_backup_dump.sh":
+ ensure => present,
+ content => template('sunetdrive/mariadb/run_manual_backup_dump.erb.sh'),
+ mode => '0744',
+ }
+ file { "${mariadb_dir}/scripts/rename-docker.sh":
+ ensure => present,
+ content => template('sunetdrive/mariadb/rename-docker.sh'),
+ mode => '0744',
+ }
+ if $is_multinode {
+ $docker_compose = $override_compose
+ } else {
+ file { '/usr/local/bin/size-test':
+ ensure => present,
+ content => template('sunetdrive/mariadb/size-test.erb'),
+ mode => '0744',
+ }
+ file { '/usr/local/bin/status-test':
+ ensure => present,
+ content => template('sunetdrive/mariadb/status-test.erb'),
+ mode => '0744',
+ }
+ file { "/etc/sudoers.d/99-size-test":
+ ensure => file,
+ content => "script ALL=(root) NOPASSWD: /usr/local/bin/size-test\n",
+ mode => '0440',
+ owner => 'root',
+ group => 'root',
+ }
+ file { "/etc/sudoers.d/99-status-test":
+ ensure => file,
+ content => "script ALL=(root) NOPASSWD: /usr/local/bin/status-test\n",
+ mode => '0440',
+ owner => 'root',
+ group => 'root',
+ }
+ $docker_compose = sunet::docker_compose { 'drive_mariadb_docker_compose':
+ content => template('sunetdrive/mariadb/docker-compose_mariadb.yml.erb'),
+ service_name => 'mariadb',
+ compose_dir => '/opt/',
+ compose_filename => 'docker-compose.yml',
+ description => 'Mariadb server',
+ }
+ }
+}
diff --git a/manifests/dockerhost.pp b/manifests/dockerhost.pp
new file mode 100644
index 0000000..2060daa
--- /dev/null
+++ b/manifests/dockerhost.pp
@@ -0,0 +1,36 @@
+include apt
+# Wrapper for sunet::dockerhost to handle this module's specifics
+class sunetdrive::dockerhost(
+ String $version = safe_hiera('docker_version'),
+ String $package_name = hiera('docker_package_name', 'docker-ce'),
+ Enum['stable', 'edge', 'test'] $docker_repo = hiera('docker_repo', 'stable'),
+ String $compose_version = safe_hiera('docker_compose_version'),
+ String $docker_args = '',
+ Optional[String] $docker_dns = undef,
+ String $storage_driver = 'aufs',
+) {
+ if $version == 'NOT_SET_IN_HIERA' {
+ fail('Docker version not set in Hiera')
+ }
+ if $compose_version == 'NOT_SET_IN_HIERA' {
+ fail('Docker-compose version not set in Hiera')
+ }
+ class { 'sunet::dockerhost':
+ docker_version => $version,
+ docker_package_name => $package_name,
+ docker_repo => $docker_repo,
+ run_docker_cleanup => true,
+ manage_dockerhost_unbound => true,
+ docker_extra_parameters => $docker_args,
+ docker_dns => $docker_dns,
+ storage_driver => $storage_driver,
+ docker_network => true, # let docker choose a network for the 'docker' bridge
+ compose_version => $compose_version,
+ }
+ file { '/etc/apt/preferences.d/containerd.io.pref':
+ ensure => absent,
+ }
+ -> package { 'containerd.io':
+ ensure => 'held',
+ }
+}
diff --git a/manifests/infra_script.pp b/manifests/infra_script.pp
new file mode 100644
index 0000000..fac5198
--- /dev/null
+++ b/manifests/infra_script.pp
@@ -0,0 +1,89 @@
+#Class for SUNET-Drive-Infra-Script
+class sunetdrive::infra_script (
+ $bootstrap = undef,
+ $location = undef
+) {
+ $environment = sunetdrive::get_environment()
+ $customer = "common"
+ $config = hiera_hash($environment)
+ $gss_backup_server = $config['gss_backup_server']
+ $lookup_backup_server = $config['lookup_backup_server']
+ $ssh_config = "Host *.sunet.se
+ User script
+ IdentityFile /root/.ssh/id_script"
+ $site_name = $config['site_name']
+ package { 'python3-pip':
+ ensure => installed,
+ provider => apt,
+ }
+ package { 'drive-utils':
+ ensure => installed,
+ provider => pip3,
+ source => 'https://pypi.sunet.se/packages/drive-utils-0.1.3.tar.gz',
+ require => Package['python3-pip'],
+ }
+ file { '/root/.ssh/':
+ ensure => directory,
+ mode => '0700',
+ }
+ file { '/root/tasks/':
+ ensure => directory,
+ mode => '0700',
+ }
+ file { '/root/.ssh/id_script':
+ ensure => file,
+ content => safe_hiera('ssh_priv_key'),
+ owner => 'root',
+ group => 'root',
+ mode => '0600',
+ }
+ file { '/root/.ssh/config':
+ ensure => file,
+ content => $ssh_config,
+ owner => 'root',
+ group => 'root',
+ mode => '0600',
+ }
+ file { '/root/tasks/backupdb.sh':
+ ensure => file,
+ content => template('sunetdrive/script/backupdb.erb.sh'),
+ owner => 'root',
+ group => 'root',
+ mode => '0700',
+ }
+ file { '/root/tasks/maintenance.sh':
+ ensure => file,
+ content => template('sunetdrive/script/maintenance.erb.sh'),
+ owner => 'root',
+ group => 'root',
+ mode => '0700',
+ }
+ sunet::scriptherder::cronjob { 'backupgssdb':
+ cmd => "/root/tasks/backupdb.sh ${gss_backup_server}",
+ hour => '2',
+ minute => '0',
+ ok_criteria => ['exit_status=0','max_age=2d'],
+ warn_criteria => ['exit_status=1','max_age=3d'],
+ }
+ sunet::scriptherder::cronjob { 'backuplookupdb':
+ cmd => "/root/tasks/backupdb.sh ${lookup_backup_server}",
+ hour => '2',
+ minute => '0',
+ ok_criteria => ['exit_status=0','max_age=2d'],
+ warn_criteria => ['exit_status=1','max_age=3d'],
+ }
+ # sunet::scriptherder::cronjob { 'scriptherder_daily':
+ # cmd => '/bin/true',
+ # special => 'daily',
+ # ok_criteria => ['exit_status=0','max_age=4d'],
+ # warn_criteria => ['exit_status=1','max_age=8d'],
+ # }
+ # cron { 'example_job':
+ # ensure => 'present',
+ # command => '/bin/true',
+ # hour => ['0'],
+ # target => 'root',
+ # user => 'root',
+ # }
+}
+
diff --git a/manifests/lb.pp b/manifests/lb.pp
new file mode 100644
index 0000000..7116a19
--- /dev/null
+++ b/manifests/lb.pp
@@ -0,0 +1,49 @@
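+# Load balancer class for SUNET Drive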
+class sunetdrive::lb($location=undef) {
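+ # Node number is the third character of the fqdn, e.g. 'lb3.drive.sunet.se' -> '3' (assumes lbN naming)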
+ $nodenumber = $::fqdn[2,1]
+
+ sunet::nagios::nrpe_command {'check_exabgp_announce':
+ command_line => '/usr/lib/nagios/plugins/check_exabgp_announce -w 1 -c 10',
+ require => File['/usr/lib/nagios/plugins/check_exabgp_announce'],
+ }
+ sunet::nagios::nrpe_command {'check_sarimner':
+ command_line => '/usr/lib/nagios/plugins/check_sarimner',
+ require => File['/usr/lib/nagios/plugins/check_sarimner'],
+ }
+
+ file { '/etc/sudoers.d/99-docker-logs':
+ ensure => file,
+ content => "nagios ALL=(root) NOPASSWD: /usr/bin/docker logs*\n",
+ mode => '0440',
+ owner => 'root',
+ group => 'root',
+ }
+ file { '/usr/lib/nagios/plugins/check_exabgp_announce':
+ ensure => 'file',
+ mode => '0755',
+ owner => 'root',
+ group => 'root',
+ content => template('sunetdrive/lb/check_exabgp_announce.erb'),
+ }
+ file { '/usr/lib/nagios/plugins/check_sarimner':
+ ensure => 'file',
+ mode => '0755',
+ owner => 'root',
+ group => 'root',
+ content => template('sunetdrive/lb/check_sarimner.erb'),
+ }
+ file { '/opt/frontend/errorfiles':
+ ensure => 'directory',
+ mode => '0755',
+ owner => 'root',
+ group => 'root',
+ }
+ -> file { '/opt/frontend/errorfiles/503.http':
+ ensure => 'file',
+ mode => '0644',
+ owner => 'root',
+ group => 'root',
+ content => template('sunetdrive/lb/503.http.erb'),
+ }
+}
diff --git a/manifests/lookup.pp b/manifests/lookup.pp
new file mode 100644
index 0000000..ccd7b43
--- /dev/null
+++ b/manifests/lookup.pp
@@ -0,0 +1,42 @@
+#Class for SUNET-Drive-Lookup-Server
+class sunetdrive::lookup (
+ $bootstrap = undef,
+ $location = undef
+) {
+
+ $environment = sunetdrive::get_environment()
+
+
+ # Firewall settings
+ $nextcloud_ip = hiera_array("${location}_app", [])
+ $tug_office = hiera_array('tug_office')
+
+ $dbhost = 'proxysql_proxysql_1'
+ $gss_jwt_key = safe_hiera('gss_jwt_key')
+ $mysql_user_password = safe_hiera('mysql_user_password')
+ $lookup_version = hiera("lookup_version_${environment}")
+
+ #Create users
+ user { 'www-data': ensure => present, system => true }
+
+ file { '/opt/lookup/config.php':
+ ensure => present,
+ owner => 'www-data',
+ group => 'root',
+ content => template('sunetdrive/lookup/config.php.erb'),
+ mode => '0644',
+ }
+
+ sunet::docker_compose { 'drive_lookup_docker_compose':
+ content => template('sunetdrive/lookup/docker-compose_lookup.yml.erb'),
+ service_name => 'lookup',
+ compose_dir => '/opt/',
+ compose_filename => 'docker-compose.yml',
+ description => 'Lookup server',
+ }
+
+ sunet::misc::ufw_allow { 'https':
+ from => '0.0.0.0/0',
+ port => 443,
+ }
+}
diff --git a/manifests/mariadb.pp b/manifests/mariadb.pp
new file mode 100644
index 0000000..81f1cf2
--- /dev/null
+++ b/manifests/mariadb.pp
@@ -0,0 +1,27 @@
+# A class using the db resource
+class sunetdrive::mariadb (
+ $bootstrap = undef,
+ $location = undef,
+ $tag_mariadb = undef,
+ $override_config = undef,
+ $override_compose = undef
+) {
+
+ $quorum_id = $::fqdn
+ $quorum_password = safe_hiera('quorum_password')
+ $db = sunetdrive::db_type { 'base_db':
+ bootstrap => $bootstrap,
+ tag_mariadb => $tag_mariadb,
+ location => $location,
+ }
+ file { '/etc/quorum.conf':
+ ensure => file,
+ mode => '0644',
+ content => template('sunetdrive/mariadb/quorum.conf.erb'),
+ }
+ file { '/usr/local/bin/quorum':
+ ensure => file,
+ mode => '0700',
+ content => template('sunetdrive/mariadb/quorum.erb.sh'),
+ }
+}
diff --git a/manifests/mariadb_backup.pp b/manifests/mariadb_backup.pp
new file mode 100644
index 0000000..2a9dd6c
--- /dev/null
+++ b/manifests/mariadb_backup.pp
@@ -0,0 +1,126 @@
+# This is an asynchronous replica of the MariaDB cluster for SUNET Drive
+class sunetdrive::mariadb_backup($tag_mariadb=undef, $location=undef) {
+ $dirs = [ 'datadir', 'init', 'conf', 'backups' ]
+ $dirs.each | $dir | {
+ ensure_resource('file',"/opt/mariadb_backup/${dir}", { ensure => directory, recurse => true } )
+ }
+ # Config from group.yaml
+ $environment = sunetdrive::get_environment()
+ $config = hiera_hash($environment)
+ $first_db = $config['first_db']
+
+ # Secrets from local.eyaml
+ $mysql_root_password = safe_hiera('mysql_root_password')
+ $backup_password = safe_hiera('backup_password')
+ $mysql_user_password = safe_hiera('mysql_user_password')
+ $statistics_secret = safe_hiera('statistics_secret')
+
+ sunet::system_user {'mysql': username => 'mysql', group => 'mysql' }
+
+ $sql_files = ['02-backup_user.sql']
+ $sql_files.each |$sql_file| {
+ file { "/opt/mariadb_backup/init/${sql_file}":
+ ensure => present,
+ content => template("sunetdrive/mariadb_backup/${sql_file}.erb"),
+ mode => '0744',
+ }
+ }
+ $conf_files = ['credentials.cnf', 'my.cnf']
+ $conf_files.each |$conf_file| {
+ file { "/opt/mariadb_backup/conf/${conf_file}":
+ ensure => present,
+ content => template("sunetdrive/mariadb_backup/${conf_file}.erb"),
+ mode => '0744',
+ }
+ }
+ file { '/opt/mariadb_backup/start_replica_from_init.sh':
+ ensure => present,
+ content => template('sunetdrive/mariadb_backup/start_replica_from_init.erb.sh'),
+ mode => '0744',
+ }
+ file { '/opt/mariadb_backup/do_backup.sh':
+ ensure => present,
+ content => template('sunetdrive/mariadb_backup/do_backup.erb.sh'),
+ mode => '0744',
+ }
+ file { '/opt/mariadb_backup/check_replication.sh':
+ ensure => absent,
+ }
+ file { '/etc/sudoers.d/99-check_replication':
+ ensure => absent,
+ }
+ file { '/usr/local/bin/check_replication':
+ ensure => present,
+ content => template('sunetdrive/mariadb_backup/check_replication.erb'),
+ mode => '0744',
+ }
+ file { '/usr/local/bin/status-test':
+ ensure => present,
+ content => template('sunetdrive/mariadb_backup/status-test.erb'),
+ mode => '0744',
+ }
+ file { '/etc/sudoers.d/99-status-test':
+ ensure => file,
+ content => "script ALL=(root) NOPASSWD: /usr/local/bin/status-test\n",
+ mode => '0440',
+ owner => 'root',
+ group => 'root',
+ }
+ sunet::docker_compose { 'mariadb_backup':
+ content => template('sunetdrive/mariadb_backup/docker-compose_mariadb_backup.yml.erb'),
+ service_name => 'mariadb_backup',
+ compose_dir => '/opt/',
+ compose_filename => 'docker-compose.yml',
+ description => 'Mariadb replica',
+ }
+
+ # Rclone stuff
+ $rclone_url = 'https://downloads.rclone.org/rclone-current-linux-amd64.deb'
+ $local_path = '/tmp/rclone-current-linux-amd64.deb'
+ exec { 'rclone_deb':
+ command => "/usr/bin/wget -q ${rclone_url} -O ${local_path}",
+ creates => $local_path,
+ }
+ package { 'rclone':
+ ensure => installed,
+ provider => dpkg,
+ source => $local_path,
+ require => Exec['rclone_deb'],
+ }
+
+ file { '/root/.rclone.conf':
+ ensure => file,
+ content => template('sunetdrive/mariadb_backup/rclone.conf.erb'),
+ owner => 'root',
+ group => 'root',
+ mode => '0600',
+ }
+ file { '/opt/mariadb_backup/listusers.sh':
+ ensure => file,
+ content => template('sunetdrive/mariadb_backup/listusers.erb.sh'),
+ owner => 'root',
+ group => 'root',
+ mode => '0700',
+ }
+ file { '/opt/mariadb_backup/find_disabled_sharing.sh':
+ ensure => file,
+ content => template('sunetdrive/mariadb_backup/find_disabled_sharing.erb.sh'),
+ owner => 'root',
+ group => 'root',
+ mode => '0700',
+ }
+ sunet::scriptherder::cronjob { 'listusers':
+ cmd => '/opt/mariadb_backup/listusers.sh',
+ minute => '*/5',
+ ok_criteria => ['exit_status=0','max_age=30m'],
+ warn_criteria => ['exit_status=1', 'max_age=60m'],
+ }
+ sunet::scriptherder::cronjob { 'disabledsharing':
+ cmd => '/opt/mariadb_backup/find_disabled_sharing.sh',
+ minute => '5',
+ hour => '3',
+ ok_criteria => ['exit_status=0','max_age=2d'],
+ warn_criteria => ['exit_status=1','max_age=3d'],
+ }
+
+}
diff --git a/manifests/multinode.pp b/manifests/multinode.pp
new file mode 100644
index 0000000..9ee5c78
--- /dev/null
+++ b/manifests/multinode.pp
@@ -0,0 +1,366 @@
+# This class uses all the other classes to create a multinode server
+class sunetdrive::multinode (
+ $bootstrap = undef,
+ $location = undef
+)
+{
+ $myname = $facts['hostname']
+ $is_multinode = true
+ $environment = sunetdrive::get_environment()
+ $lb_servers = hiera_hash($environment)['lb_servers']
+ $document_servers = hiera_hash($environment)['document_servers']
+
+ $twofactor_enforced_groups = []
+ $twofactor_enforced_excluded_groups = []
+ $allcustomers = hiera_hash('multinode_mapping')
+ $allnames = $allcustomers.keys
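+ # Map each customer to its name if this host serves it, otherwise to the bareword 'nil', which is filtered out below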
+ $tempcustomers = $allnames.map | $index, $potential | {
+ if $myname =~ $allcustomers[$potential]['server'] {
+ $potential
+ }
+ else {
+ nil
+ }
+ }
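+ # Node number is the tenth character of the fqdn, e.g. 'multinode2.drive.test.sunet.se' -> '2' (assumes multinodeN naming)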
+ $nodenumber = $::fqdn[9,1]
+ $customers = $tempcustomers - nil
+ user { 'www-data': ensure => present, system => true }
+ sunet::system_user {'mysql': username => 'mysql', group => 'mysql' }
+ ensure_resource('file', '/opt/nextcloud' , { ensure => directory, recurse => true } )
+ file { '/usr/local/bin/occ':
+ ensure => present,
+ force => true,
+ owner => 'root',
+ group => 'root',
+ content => template('sunetdrive/application/occ.erb'),
+ mode => '0740',
+ }
+ file { '/etc/sudoers.d/99-occ':
+ ensure => file,
+ content => "script ALL=(root) NOPASSWD: /usr/local/bin/occ\n",
+ mode => '0440',
+ owner => 'root',
+ group => 'root',
+ }
+ file { '/usr/local/bin/upgrade23-25.sh':
+ ensure => present,
+ force => true,
+ owner => 'root',
+ group => 'root',
+ content => template('sunetdrive/multinode/upgrade23-25.erb.sh'),
+ mode => '0744',
+ }
+ file { '/usr/local/bin/get_containers':
+ ensure => present,
+ force => true,
+ owner => 'root',
+ group => 'root',
+ content => template('sunetdrive/multinode/get_containers'),
+ mode => '0744',
+ }
+ file { '/usr/local/bin/add_admin_user':
+ ensure => present,
+ force => true,
+ owner => 'root',
+ group => 'root',
+ content => template('sunetdrive/application/add_admin_user.erb'),
+ mode => '0744',
+ }
+ file { '/opt/nextcloud/prune.sh':
+ ensure => file,
+ force => true,
+ owner => 'root',
+ group => 'root',
+ content => template('sunetdrive/multinode/prune.erb.sh'),
+ mode => '0744',
+ }
+ cron { 'multinode_prune':
+ command => '/opt/nextcloud/prune.sh',
+ require => File['/opt/nextcloud/prune.sh'],
+ user => 'root',
+ minute => '25',
+ hour => '3',
+ }
+ file { '/opt/nextcloud/apache.php.ini':
+ ensure => file,
+ force => true,
+ owner => 'www-data',
+ group => 'root',
+ content => template('sunetdrive/application/apache.php.ini.erb'),
+ mode => '0644',
+ }
+
+ file { '/opt/nextcloud/apcu.ini':
+ ensure => file,
+ force => true,
+ owner => 'www-data',
+ group => 'root',
+ content => template('sunetdrive/application/apcu.ini.erb'),
+ mode => '0644',
+ }
+
+ file { '/opt/nextcloud/cli.php.ini':
+ ensure => file,
+ force => true,
+ owner => 'www-data',
+ group => 'root',
+ content => template('sunetdrive/application/cli.php.ini.erb'),
+ mode => '0644',
+ }
+
+ file { '/opt/nextcloud/cron.sh':
+ ensure => file,
+ owner => 'root',
+ group => 'root',
+ mode => '0700',
+ content => template('sunetdrive/application/cron.erb.sh'),
+ }
+
+ file { '/opt/nextcloud/000-default.conf':
+ ensure => file,
+ force => true,
+ owner => 'www-data',
+ group => 'root',
+ content => template('sunetdrive/application/000-default.conf.erb'),
+ mode => '0644',
+ }
+
+ file { '/opt/nextcloud/404.html':
+ ensure => file,
+ force => true,
+ owner => 'www-data',
+ group => 'root',
+ content => template('sunetdrive/application/404.html.erb'),
+ mode => '0644',
+ }
+ $link_content = '[Match]
+Driver=bridge veth
+
+[Link]
+MACAddressPolicy=none'
+ file { '/etc/systemd/network/98-default.link':
+ ensure => file,
+ force => true,
+ owner => 'root',
+ group => 'root',
+ content => $link_content,
+ mode => '0744',
+ }
+ file { '/opt/nextcloud/compress-logs.sh':
+ ensure => file,
+ force => true,
+ owner => 'root',
+ group => 'root',
+ content => template('sunetdrive/multinode/compress-logs.erb.sh'),
+ mode => '0744',
+ }
+ cron { 'multinode_compress_logs':
+ command => '/opt/nextcloud/compress-logs.sh',
+ require => File['/opt/nextcloud/compress-logs.sh'],
+ user => 'root',
+ minute => '10',
+ hour => '0',
+ weekday => '0',
+ }
+ cron { 'multinode_cron':
+ command => '/opt/nextcloud/cron.sh',
+ require => File['/opt/nextcloud/cron.sh'],
+ user => 'root',
+ minute => '*/10',
+ }
+ $customers.each | $index, $customer | {
+ if $environment == 'prod' {
+ $s3_bucket = "primary-${customer}-drive.sunet.se"
+ $site_name = "${customer}.drive.sunet.se"
+ $trusted_proxies = ['lb1.drive.sunet.se','lb2.drive.sunet.se', 'lb3.drive.sunet.se', 'lb4.drive.sunet.se']
+ } else {
+ $s3_bucket = "primary-${customer}-${environment}.sunet.se"
+ $site_name = "${customer}.drive.${environment}.sunet.se"
+ $trusted_proxies = ["lb1.drive.${environment}.sunet.se","lb2.drive.${environment}.sunet.se",
+ "lb3.drive.${environment}.sunet.se","lb4.drive.${environment}.sunet.se"]
+ }
+ $apache_default_path = "/opt/multinode/${customer}/000-default.conf"
+ $apache_error_path = "/opt/multinode/${customer}/404.html"
+ $config_php_path = "/opt/multinode/${customer}/config.php"
+ $cron_log_path ="/opt/multinode/${customer}/cron.log"
+ $customer_config_full = hiera_hash($customer)
+ $customer_config = $customer_config_full[$environment]
+ $dbhost = "mariadb${customer}_db_1"
+
+
+
+ $gs_enabled = hiera('gs_enabled')
+ $gs_federation = hiera('gs_federation')
+ $gss_master_admin = hiera_array('gss_master_admin')
+ $gss_master_url = hiera("gss_master_url_${environment}")
+ $https_port = hiera_hash('multinode_mapping')[$customer]['port']
+ $lookup_server = hiera("lookup_server_${environment}")
+ $mail_domain = hiera("mail_domain_${environment}")
+ $mail_from_address = hiera("mail_from_address_${environment}")
+ $mail_smtphost = hiera("mail_smtphost_${environment}")
+ $nextcloud_log_path ="/opt/multinode/${customer}/nextcloud.log"
+ $nextcloud_version = hiera("nextcloud_version_${environment}")
+ $nextcloud_version_string = split($nextcloud_version, '[-]')[0]
+ $rclone_conf_path = "/opt/multinode/${customer}/rclone.conf"
+ $redis_conf_dir = "/opt/multinode/${customer}/server"
+ $redis_conf_path = "${redis_conf_dir}/redis.conf"
+ $redis_host= "redis${customer}_redis-server_1"
+ $s3_host = $customer_config['s3_host']
+ $s3_usepath = hiera('s3_usepath')
+ $smtpuser = hiera("smtp_user_${environment}")
+ $trusted_domains = [$site_name, $facts['fqdn'], 'localhost']
+ $tug_office = hiera_array('tug_office')
+
+ # Secrets from local.eyaml
+ $admin_password = safe_hiera("${customer}_admin_password")
+ $instanceid = safe_hiera("${customer}_instanceid")
+ $mysql_root_password = safe_hiera("${customer}_mysql_root_password")
+ $backup_password = safe_hiera("${customer}_backup_password")
+ $mysql_user_password = safe_hiera("${customer}_mysql_user_password")
+ $s3_key = safe_hiera("${customer}_s3_key")
+ $s3_secret = safe_hiera("${customer}_s3_secret")
+ $secret = safe_hiera("${customer}_secret")
+ $passwordsalt= safe_hiera("${customer}_passwordsalt")
+ $redis_host_password = safe_hiera("${customer}_redis_host_password")
+ $gss_jwt_key = safe_hiera('gss_jwt_key')
+ $smtppassword = safe_hiera('smtp_password')
+
+ $extra_config = {
+ admin_password => $admin_password,
+ backup_password => $backup_password,
+ dbhost => $dbhost,
+ drive_email_template_plain_text_left => hiera($environment)['drive_email_template_plain_text_left'],
+ drive_email_template_text_left => hiera($environment)['drive_email_template_text_left'],
+ drive_email_template_url_left => hiera($environment)['drive_email_template_url_left'],
+ mariadb_dir => "/opt/multinode/${customer}/mariadb-${customer}",
+ mycnf_path => 'sunetdrive/multinode/my.cnf.erb',
+ mysql_root_password => $mysql_root_password,
+ mysql_user_password => $mysql_user_password,
+ trusted_domains => $trusted_domains,
+ trusted_proxies => $trusted_proxies,
+ }
+ $config = deep_merge($customer_config, $extra_config)
+ ensure_resource('file', "/opt/multinode/${customer}" , { ensure => directory, recurse => true } )
+ $dirs = ['datadir', 'init', 'conf', 'scripts' ]
+ $dirs.each |$dir| {
+ ensure_resource('file',"${config['mariadb_dir']}/${dir}", { ensure => directory, recurse => true } )
+ }
+
+ ensure_resource('file',"${config['mariadb_dir']}/backups", {
+ ensure => directory,
+ owner => 'root',
+ group => 'script',
+ mode => '0750',
+ recurse => true
+ } )
+ # Use the other sunetdrive classes with overridden config
+ $db_ip = ['127.0.0.1']
+ $app_compose = sunet::docker_compose { "drive_${customer}_app_docker_compose":
+ content => template('sunetdrive/multinode/docker-compose_nextcloud.yml.erb'),
+ service_name => "nextcloud-${customer}",
+ compose_dir => "/opt/multinode/${customer}",
+ compose_filename => 'docker-compose.yml',
+ description => "Nextcloud application for ${customer}",
+ require => File[$config_php_path,
+ '/opt/nextcloud/apache.php.ini',
+ '/opt/nextcloud/cli.php.ini',
+ "/opt/multinode/${customer}/complete_reinstall.sh",
+ ],
+ }
+ $cache_compose = sunet::docker_compose { "drive_${customer}_redis_docker_compose":
+ content => template('sunetdrive/multinode/docker-compose_cache.yml.erb'),
+ service_name => "redis-${customer}",
+ compose_dir => "/opt/multinode/${customer}",
+ compose_filename => 'docker-compose.yml',
+ description => "Redis cache server for ${customer}",
+ require => File[$redis_conf_path],
+ }
+ $mariadb_compose = sunet::docker_compose { "drive_mariadb_${customer}_compose":
+ content => template('sunetdrive/multinode/docker-compose_mariadb.yml.erb'),
+ service_name => "mariadb-${customer}",
+ compose_dir => "/opt/multinode/${customer}",
+ compose_filename => 'docker-compose.yml',
+ description => "Mariadb server for ${customer}",
+ owner => 'root',
+ group => 'script',
+ mode => '0750',
+ }
+
+ file { "/opt/multinode/${customer}/mariadb-${customer}/do_backup.sh":
+ ensure => present,
+ content => template('sunetdrive/mariadb_backup/do_backup.erb.sh'),
+ mode => '0744',
+ }
+ sunetdrive::app_type { "app_${customer}":
+ location => $location,
+ override_config => $config,
+ override_compose => $app_compose,
+ }
+
+ sunetdrive::db_type { "db_${customer}":
+ location => $location,
+ override_config => $config,
+ override_compose => $mariadb_compose,
+ }
+ file { $redis_conf_dir:
+ ensure => directory,
+ recurse => true,
+ }
+ $redis_config = file { $redis_conf_path:
+ ensure => present,
+ content => template('sunetdrive/multinode/redis.conf.erb'),
+ mode => '0666',
+ require => [ File[$redis_conf_dir]]
+ }
+ sunetdrive::cache_type { "cache_${customer}":
+ location => $location,
+ override_config => $config,
+ override_compose => $cache_compose,
+ override_redis_conf => $redis_config,
+ require => File[$redis_conf_path],
+ }
+ file { $config_php_path:
+ ensure => present,
+ owner => 'www-data',
+ group => 'root',
+ content => template('sunetdrive/application/config.php.erb'),
+ mode => '0644',
+ }
+ file { $cron_log_path:
+ ensure => file,
+ force => true,
+ owner => 'www-data',
+ group => 'root',
+ mode => '0644',
+ }
+ file { $nextcloud_log_path:
+ ensure => file,
+ force => true,
+ owner => 'www-data',
+ group => 'root',
+ mode => '0644',
+ }
+ file { $rclone_conf_path:
+ ensure => present,
+ owner => 'www-data',
+ group => 'root',
+ content => template('sunetdrive/multinode/rclone.conf.erb'),
+ mode => '0644',
+ }
+ file { "/opt/multinode/${customer}/complete_reinstall.sh":
+ ensure => file,
+ force => true,
+ owner => 'root',
+ group => 'root',
+ content => template('sunetdrive/multinode/complete_reinstall.erb.sh'),
+ mode => '0744',
+ }
+ # Open ports
+ sunet::misc::ufw_allow { "https_port_${customer}":
+ from => '0.0.0.0/0',
+ port => $https_port,
+ }
+ }
+}
diff --git a/manifests/nagios.pp b/manifests/nagios.pp
new file mode 100644
index 0000000..f3067da
--- /dev/null
+++ b/manifests/nagios.pp
@@ -0,0 +1,22 @@
+# Nagios health check
+define sunetdrive::nagios::check_mdq_health ($url=undef) {
+ $mdq_url = $url ? {
+ undef => $title,
+ default => $url
+ }
+ ensure_resource('file', '/usr/lib/nagios/plugins/check_mdq_health', {
+ ensure => 'file',
+ mode => '0555',
+ group => 'nagios',
+ require => Package['nagios-nrpe-server'],
+ content => template('thiss/mdq/check_mdq_health.erb'),
+ })
+ ensure_resource('nagioscfg::command','check_mdq_health', {
+ command_line => "/usr/lib/nagios/plugins/check_mdq_health '\$HOSTNAME\'"
+ })
+ nagioscfg::service {"check_mdq_health_${name}":
+ host_name => [$name],
+ check_command => 'check_mdq_health',
+ description => "Check MDQ health at ${name}"
+ }
+}
diff --git a/manifests/ni.pp b/manifests/ni.pp
new file mode 100644
index 0000000..d9fd4bb
--- /dev/null
+++ b/manifests/ni.pp
@@ -0,0 +1,75 @@
+# This is NI (network inventory) for SUNET Drive
+class sunetdrive::ni() {
+ $environment = sunetdrive::get_environment()
+ if $environment == 'prod' {
+ $domain = 'ni.drive.sunet.se'
+ } else {
+ $domain = 'ni.drive.test.sunet.se'
+ }
+
+ file { '/opt/sri/postgresql':
+ ensure => directory,
+ }
+ -> file { '/opt/sri/postgresql/data':
+ ensure => directory,
+ }
+ -> file { '/opt/sri/neo4j':
+ ensure => directory,
+ }
+ -> file { '/opt/sri/neo4j/data':
+ ensure => directory,
+ }
+ -> file { '/opt/sri/ni':
+ ensure => directory,
+ }
+ -> file { '/opt/sri/ni/etc':
+ ensure => directory,
+ }
+ -> file { '/opt/sri/ni/log':
+ ensure => directory,
+ }
+ -> file { '/opt/sri/backup':
+ ensure => directory,
+ }
+ -> file { '/opt/sri/backup/neo4j':
+ ensure => directory,
+ }
+ -> file { '/opt/sri/staticfiles':
+ ensure => directory,
+ }
+ -> file { '/opt/sri/srifrontfiles':
+ ensure => directory,
+ }
+ -> file { '/opt/sri/nginx':
+ ensure => directory,
+ }
+ -> file { '/opt/sri/nginx/etc':
+ ensure => directory,
+ }
+ -> file { '/opt/sri/nginx/log':
+ ensure => directory,
+ }
+ -> file { '/opt/sri/nginx/etc/ni.http':
+ ensure => present,
+ content => '',
+ }
+ -> file { '/opt/sri/nginx/etc/dhparams.pem':
+ ensure => present,
+ content => '',
+ }
+ -> file { '/opt/sri/ni/etc/dotenv':
+ ensure => present,
+ content => '',
+ }
+ -> file { '/opt/sri/postgresql/init/init-noclook-db.sh':
+ ensure => present,
+ content => '',
+ }
+ sunet::docker_compose { 'drive_ni_compose':
+ content => template('sunetdrive/ni/docker-compose.yml.erb'),
+ service_name => 'sri',
+ compose_dir => '/opt/',
+ compose_filename => 'docker-compose.yml',
+ description => 'NI',
+ }
+}
diff --git a/manifests/onlyoffice.pp b/manifests/onlyoffice.pp
new file mode 100644
index 0000000..6ffc009
--- /dev/null
+++ b/manifests/onlyoffice.pp
@@ -0,0 +1,23 @@
+#Class for SUNET-Drive-OnlyOffice (deploys Collabora coolwsd)
+class sunetdrive::onlyoffice () {
+ $environment = sunetdrive::get_environment()
+ $extra_hosts = hiera_hash($environment)['extra_hosts']
+ $docker_tag = hiera_hash($environment)['collabora_tag']
+ $customers = hiera('fullnodes')
+ $multinode_customers = keys(hiera_hash('multinode_mapping'))
+ if $environment == 'prod' {
+ $domain = 'drive.sunet.se'
+ } else {
+ $domain = 'drive.test.sunet.se'
+ }
+ sunet::collabora::docs { 'sunet-onlyoffice':
+ dns => [ '89.32.32.32' ],
+ extra_hosts => $extra_hosts,
+ extra_volumes => ['/opt/collabora/coolwsd.xml:/etc/coolwsd/coolwsd.xml'],
+ docker_tag => $docker_tag,
+ }
+ file {'/opt/collabora/coolwsd.xml':
+ ensure => present,
+ content => template('sunetdrive/document/coolwsd.xml.erb'),
+ }
+}
diff --git a/manifests/proxysql.pp b/manifests/proxysql.pp
new file mode 100644
index 0000000..38f370c
--- /dev/null
+++ b/manifests/proxysql.pp
@@ -0,0 +1,99 @@
+#Class for SUNET-Drive-Proxysql
+class sunetdrive::proxysql (
+ $bootstrap = undef,
+ $location = undef,
+ $proxysql_container_name = 'proxysql_proxysql_1',
+) {
+
+ # Config from group.yaml
+ $environment = sunetdrive::get_environment()
+ $config = hiera_hash($environment)
+ $db_ip = $config['db']
+ $nextcloud_ip = $config['app']
+ $proxysql_ok_num = length($nextcloud_ip)
+ $proxysql_warn_num = $proxysql_ok_num - 1
+
+ # Global config from common.yaml
+ $proxysql_version = hiera('proxysql_version')
+ $tug_office = hiera_array('tug_office')
+
+ # Config from local.yaml and local.eyaml
+ $admin_password = safe_hiera('admin_password')
+ $cluster_admin_password = safe_hiera('cluster_admin_password')
+ $monitor_password = safe_hiera('proxysql_password')
+ $mysql_user_password = safe_hiera('mysql_user_password')
+ $mysql_user = safe_hiera('mysql_user')
+
+ $transaction_persistent = 1
+
+ file { '/usr/local/bin/proxysql':
+ ensure => file,
+ owner => 'root',
+ group => 'root',
+ content => template('sunetdrive/proxysql/proxysql.erb.sh'),
+ mode => '0755',
+ }
+ file { '/opt/proxysql/insert_server_in_proxysql.sh':
+ ensure => file,
+ owner => 'root',
+ group => 'root',
+ content => template('sunetdrive/proxysql/insert_server_in_proxysql.erb.sh'),
+ mode => '0755',
+ }
+ file {'/usr/lib/nagios/plugins/check_proxysql_server':
+ ensure => 'file',
+ mode => '0755',
+ group => 'nagios',
+ require => Package['nagios-nrpe-server'],
+ content => template('sunetdrive/proxysql/check_proxysql_server.erb'),
+ }
+ file {'/usr/lib/nagios/plugins/check_mysql_server_status':
+ ensure => 'file',
+ mode => '0755',
+ group => 'nagios',
+ require => Package['nagios-nrpe-server'],
+ content => template('sunetdrive/proxysql/check_mysql_server_status.erb'),
+ }
+ file { '/opt/proxysql/proxysql.cnf':
+ ensure => present,
+ content => template('sunetdrive/proxysql/proxysql.cnf.erb'),
+ mode => '0644',
+ }
+
+ file { '/opt/proxysql/my.cnf':
+ ensure => present,
+ content => template('sunetdrive/proxysql/my.cnf.erb'),
+ mode => '0644',
+ }
+ sunet::misc::ufw_allow { 'stats_ports':
+ from => $tug_office,
+ port => 6080,
+ }
+
+ sunet::docker_compose { 'drive_proxysql_docker_compose':
+ content => template('sunetdrive/proxysql/docker-compose_proxysql.yml.erb'),
+ service_name => 'proxysql',
+ compose_dir => '/opt/',
+ compose_filename => 'docker-compose.yml',
+ description => 'Proxysql',
+ }
+ if $::fqdn[0,5] == 'node1' {
+ sunet::scriptherder::cronjob { 'insert_server_in_proxysql':
+ cmd => '/opt/proxysql/insert_server_in_proxysql.sh',
+ hour => '*',
+ minute => '*/5',
+ ok_criteria => ['exit_status=0','max_age=1h'],
+ warn_criteria => ['exit_status=1','max_age=3h'],
+ }
+ } else {
+ sunet::scriptherder::cronjob { 'insert_server_in_proxysql':
+ ensure => 'absent',
+ cmd => '/opt/proxysql/insert_server_in_proxysql.sh',
+ purge_results => true,
+ }
+
+ }
+
+
+}
+
diff --git a/manifests/redis_cluster.pp b/manifests/redis_cluster.pp
new file mode 100644
index 0000000..b0bdf06
--- /dev/null
+++ b/manifests/redis_cluster.pp
@@ -0,0 +1,26 @@
+#Class for SUNET-Drive-Redis-Cluster
+class sunetdrive::redis_cluster (
+ $location = undef,
+)
+{
+ $customer = sunetdrive::get_customer()
+ $redis_password = safe_hiera('redis_password')
+ package { 'redis-tools': ensure => latest, provider => 'apt' }
+
+ file { '/usr/local/bin/bootstrap_cluster':
+ ensure => present,
+ content => template('sunetdrive/redis_cluster/bootstrap_cluster.erb.sh'),
+ mode => '0700',
+ }
+ file { '/usr/local/bin/reset_cluster':
+ ensure => present,
+ content => template('sunetdrive/redis_cluster/reset_cluster.erb.sh'),
+ mode => '0700',
+ }
+ file { '/root/.bashrc':
+ ensure => present,
+ content => template('sunetdrive/redis_cluster/bashrc.erb'),
+ mode => '0644',
+ }
+
+}
diff --git a/manifests/resolve.pp b/manifests/resolve.pp
new file mode 100644
index 0000000..557c54d
--- /dev/null
+++ b/manifests/resolve.pp
@@ -0,0 +1,40 @@
+include stdlib
+# Sunet drive resolver
+class sunetdrive::resolve($location=undef) {
+ $unbound_conf = '# This file is managed by puppet
+server:
+ interface: 0.0.0.0
+ interface: ::0
+ access-control: 37.156.195.0/24 allow
+ access-control: 89.45.237.0/24 allow
+ access-control: 89.45.20.0/24 allow
+ access-control: 89.45.21.0/24 allow
+ access-control: 2001:6b0:1c::/64 allow
+ access-control: 2001:6b0:6c::/64 allow'
+
+ file { 'sunetdrive_unbound_conf' :
+ ensure => 'file',
+ name => '/etc/unbound/unbound.conf.d/sunetdrive.conf',
+ mode => '0644',
+ content => $unbound_conf,
+ }
+ file_line {'disable_systemd_stubresolver':
+ line => 'DNSStubListener=no',
+ path => '/etc/systemd/resolved.conf'
+ }
+ -> exec {'disable_systemd_resolved':
+ command => 'systemctl disable --now systemd-resolved.service',
+ onlyif => 'systemctl is-enabled systemd-resolved.service',
+ }
+ sunet::misc::ufw_allow { 'dns_port_ufw_udp':
+ from => 'any',
+ port => 53,
+ proto => 'udp',
+ }
+ sunet::misc::ufw_allow { 'dns_port_ufw_tcp':
+ from => 'any',
+ port => 53,
+ proto => 'tcp',
+ }
+
+}
diff --git a/manifests/reva.pp b/manifests/reva.pp
new file mode 100644
index 0000000..99f8d77
--- /dev/null
+++ b/manifests/reva.pp
@@ -0,0 +1,46 @@
+#Class for SUNET-Drive-Reva-Server (Sciencemesh)
+class sunetdrive::reva (
+ String $domain = '',
+ String $reva_domain = ''
+) {
+
+ $environment = sunetdrive::get_environment()
+ $shared_secret = safe_hiera('shared_secret')
+ $iopsecret = safe_hiera('iopsecret')
+
+ # Firewall settings
+ #Create users
+ user { 'www-data': ensure => present, system => true }
+
+ file { '/opt/reva/revad.toml':
+ ensure => present,
+ owner => 'www-data',
+ group => 'root',
+ content => template('sunetdrive/reva/revad.toml.erb'),
+ mode => '0644',
+ }
+ file { '/opt/reva/data':
+ ensure => directory,
+ owner => 'www-data',
+ }
+ file { '/opt/reva/ocm-providers.json':
+ ensure => present,
+ owner => 'www-data',
+ group => 'root',
+ content => template('sunetdrive/reva/ocm-providers.json.erb'),
+ mode => '0644',
+ }
+
+ sunet::docker_compose { 'drive_reva_docker_compose':
+ content => template('sunetdrive/reva/docker-compose.yml.erb'),
+ service_name => 'reva',
+ compose_dir => '/opt/',
+ compose_filename => 'docker-compose.yml',
+ description => 'Sciencemesh reva server',
+ }
+
+ sunet::misc::ufw_allow { 'https_reva':
+ from => '0.0.0.0/0',
+ port => 443,
+ }
+}
diff --git a/manifests/satosa.pp b/manifests/satosa.pp
new file mode 100644
index 0000000..08b0a9e
--- /dev/null
+++ b/manifests/satosa.pp
@@ -0,0 +1,98 @@
+# IDP proxy used in SUNET Drive
+class sunetdrive::satosa($dehydrated_name=undef,$image='docker.sunet.se/satosa',$tag=undef) {
+
+ $proxy_conf = hiera('satosa_proxy_conf')
+ $default_conf = {
+ 'STATE_ENCRYPTION_KEY' => hiera('satosa_state_encryption_key'),
+ 'USER_ID_HASH_SALT' => hiera('satosa_user_id_hash_salt'),
+ 'CUSTOM_PLUGIN_MODULE_PATHS' => ['plugins'],
+ 'COOKIE_STATE_NAME' => 'SATOSA_STATE'
+ }
+ $merged_conf = merge($proxy_conf,$default_conf)
+
+ ensure_resource('file','/etc', { ensure => directory } )
+ ensure_resource('file','/etc/satosa', { ensure => directory } )
+ ensure_resource('file','/etc/satosa/run', { ensure => directory } )
+ ensure_resource('file','/etc/satosa/plugins', { ensure => directory } )
+ ensure_resource('file','/etc/satosa/metadata', { ensure => directory } )
+
+ ['backend','frontend','metadata'].each |$id| {
+ if hiera("satosa_${id}_key",undef) != undef {
+ sunet::snippets::secret_file { "/etc/satosa/${id}.key": hiera_key => "satosa_${id}_key" }
+ # assume cert is in cosmos repo
+ } else {
+ # make key pair
+ sunet::snippets::keygen {"satosa_${id}":
+ key_file => "/etc/satosa/${id}.key",
+ cert_file => "/etc/satosa/${id}.crt"
+ }
+ }
+ }
+ sunet::docker_run {'satosa':
+ image => $image,
+ imagetag => $tag,
+ volumes => ['/etc/satosa:/etc/satosa','/etc/dehydrated:/etc/dehydrated'],
+ ports => ['443:8000'],
+ env => ['METADATA_DIR=/etc/satosa/metadata', 'WORKER_TIMEOUT=120']
+ }
+ file {'/etc/satosa/proxy_conf.yaml':
+ content => inline_template("<%= @merged_conf.to_yaml %>\n"),
+ notify => Sunet::Docker_run['satosa']
+ }
+ $plugins = hiera('satosa_config')
+ sort(keys($plugins)).each |$n| {
+ $conf = hiera($n)
+ $fn = $plugins[$n]
+ file { $fn:
+ content => inline_template("<%= @conf.to_yaml %>\n"),
+ notify => Sunet::Docker_run['satosa']
+ }
+ }
+ ufw::allow { 'satosa-allow-https':
+ ip => 'any',
+ port => '443'
+ }
+ $dehydrated_status = $dehydrated_name ? {
+ undef => 'absent',
+ default => 'present'
+ }
+ sunet::docker_run {'alwayshttps':
+ ensure => $dehydrated_status,
+ image => 'docker.sunet.se/always-https',
+ ports => ['80:80'],
+ env => ['ACME_URL=http://acme-c.sunet.se']
+ }
+ ufw::allow { 'satosa-allow-http':
+ ensure => $dehydrated_status,
+ ip => 'any',
+ port => '80'
+ }
+ if ($dehydrated_name) {
+ file { '/etc/satosa/https.key': ensure => link, target => "/etc/dehydrated/certs/${dehydrated_name}.key" }
+ file { '/etc/satosa/https.crt': ensure => link, target => "/etc/dehydrated/certs/${dehydrated_name}/fullchain.pem" }
+ } else {
+ sunet::snippets::keygen {'satosa_https':
+ key_file => '/etc/satosa/https.key',
+ cert_file => '/etc/satosa/https.crt'
+ }
+ }
+ file { '/opt/satosa':
+ ensure => directory,
+ owner => 'root',
+ group => 'root',
+ mode => '0755',
+ }
+ -> file { '/opt/satosa/restart.sh':
+ ensure => file,
+ owner => 'root',
+ group => 'root',
+ mode => '0700',
+ content => template('sunetdrive/satosa/restart.erb.sh'),
+ }
+ -> cron { 'restart_satosa':
+ command => '/opt/satosa/restart.sh',
+ user => 'root',
+ minute => '15',
+ hour => '*/8',
+ }
+}
diff --git a/manifests/script.pp b/manifests/script.pp
new file mode 100644
index 0000000..a29642c
--- /dev/null
+++ b/manifests/script.pp
@@ -0,0 +1,414 @@
+#Class for SUNET-Drive-Script
+class sunetdrive::script (
+ $bootstrap = undef,
+ $location = undef
+) {
+ $environment = sunetdrive::get_environment()
+ $customer = sunetdrive::get_customer()
+ $apikey_test = safe_hiera('monitor_apikey_test')
+ $apikey_prod = safe_hiera('monitor_apikey_prod')
+ $full_project_mapping = hiera_hash('project_mapping')
+ $project_mapping = $full_project_mapping[$customer][$environment]
+ $primary_project = $project_mapping['primary_project']
+ $mirror_project = $project_mapping['mirror_project']
+ $assigned_projects = $project_mapping['assigned']
+ $full_backup_retention = hiera('full_backup_retention')
+ $config = hiera_hash($environment)
+ $backup_server = $config['backup_server']
+ $rclone_url = 'https://downloads.rclone.org/rclone-current-linux-amd64.deb'
+ $local_path = '/tmp/rclone-current-linux-amd64.deb'
+ $singlenodes = hiera('singlenodes')
+
+ $ssh_config = "Host *.sunet.se
+ User script
+ IdentityFile /root/.ssh/id_script"
+
+ $s3_key = safe_hiera('s3_key')
+ $s3_secret = safe_hiera('s3_secret')
+ $statistics_secret = safe_hiera('statistics_secret')
+ $s3_key_pilot = hiera('s3_key_pilot', false)
+ $s3_secret_pilot = hiera('s3_secret_pilot', false)
+ # FIXME: This will not work if we start to mess around with the location of multinode customer data
+ $s3_host = $config['s3_host']
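+ # Mirror to the other site: sto4 primaries are backed up to sto3 and vice versa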
+ if $s3_host == 's3.sto4.safedc.net' {
+ $s3_host_mirror = 's3.sto3.safedc.net'
+ $s3_key_mirror = safe_hiera('s3_key_sto3')
+ $s3_secret_mirror = safe_hiera('s3_secret_sto3')
+ } else {
+ $s3_host_mirror = 's3.sto4.safedc.net'
+ $s3_key_mirror = safe_hiera('s3_key_sto4')
+ $s3_secret_mirror = safe_hiera('s3_secret_sto4')
+ }
+ $site_name = $config['site_name']
+ $user_bucket_name = $config['user_bucket_name']
+
+ # A starting point: this covers user buckets and primary buckets
+ $backup_projects = $location
+ exec { 'rclone_deb':
+ command => "/usr/bin/wget -q ${rclone_url} -O ${local_path}",
+ creates => $local_path,
+ }
+ package { 'rclone':
+ ensure => installed,
+ provider => dpkg,
+ source => $local_path,
+ require => Exec['rclone_deb'],
+ }
+ package { 'python3.9':
+ ensure => installed,
+ provider => apt,
+ }
+ -> package { 'python3-pip':
+ ensure => installed,
+ provider => apt,
+ }
+ package { 'duplicity':
+ ensure => installed,
+ provider => apt,
+ }
+ $drive_version = '0.3.1'
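+ # Install drive-utils from SUNET's package index; the unless guard keeps this idempotent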
+ exec { 'drive-utils':
+ command => "python3.9 -m pip install https://pypi.sunet.se/packages/drive-utils-${drive_version}.tar.gz",
+ unless => "python3.9 -m pip list | grep drive-utils | grep ${drive_version}",
+ require => Package['python3.9'],
+ }
+ file { '/root/.ssh/':
+ ensure => directory,
+ mode => '0700',
+ }
+ file { '/root/tasks/':
+ ensure => directory,
+ mode => '0700',
+ }
+ file { '/root/scripts/':
+ ensure => directory,
+ mode => '0700',
+ }
+ file { '/root/.ssh/id_script':
+ ensure => file,
+ content => safe_hiera('ssh_priv_key'),
+ owner => 'root',
+ group => 'root',
+ mode => '0600',
+ }
+ file { '/root/.ssh/config':
+ ensure => file,
+ content => $ssh_config,
+ owner => 'root',
+ group => 'root',
+ mode => '0600',
+ }
+ file { '/root/.rclone.conf':
+ ensure => file,
+ content => template('sunetdrive/script/rclone.conf.erb'),
+ owner => 'root',
+ group => 'root',
+ mode => '0600',
+ }
+ if $s3_key_pilot and $s3_secret_pilot {
+ file { '/root/scripts/migratebuckets.sh':
+ ensure => file,
+ content => template('sunetdrive/script/migratebuckets.erb.sh'),
+ owner => 'root',
+ group => 'root',
+ mode => '0700',
+ }
+ }
+
+ file { '/root/tasks/backupsingleproject.sh':
+ ensure => file,
+ content => template('sunetdrive/script/backupsingleproject.erb.sh'),
+ owner => 'root',
+ group => 'root',
+ mode => '0700',
+ }
+ file { '/root/tasks/backupbuckets.sh':
+ ensure => file,
+ content => template('sunetdrive/script/backup-all-buckets.erb.sh'),
+ owner => 'root',
+ group => 'root',
+ mode => '0700',
+ }
+ file { '/root/tasks/backup-projectbuckets.sh':
+ ensure => absent,
+ }
+ file { '/root/tasks/backupdb.sh':
+ ensure => file,
+ content => template('sunetdrive/script/backupdb.erb.sh'),
+ owner => 'root',
+ group => 'root',
+ mode => '0700',
+ }
+ file { '/root/tasks/switch_redis_master_to.sh':
+ ensure => file,
+ content => template('sunetdrive/script/switch_redis_master_to.erb.sh'),
+ owner => 'root',
+ group => 'root',
+ mode => '0700',
+ }
+ file { '/root/tasks/restart-nextcloud-farm':
+ ensure => file,
+ content => template('sunetdrive/script/restart-nextcloud-farm.erb'),
+ owner => 'root',
+ group => 'root',
+ mode => '0700',
+ }
+ file { '/root/tasks/restart-db-cluster':
+ ensure => file,
+ content => template('sunetdrive/script/restart-db-cluster.erb'),
+ owner => 'root',
+ group => 'root',
+ mode => '0700',
+ }
+ file { '/root/tasks/restart-proxysql.sh':
+ ensure => file,
+ content => template('sunetdrive/script/restart-proxysql.erb.sh'),
+ owner => 'root',
+ group => 'root',
+ mode => '0700',
+ }
+ file { '/root/tasks/usage.sh':
+ ensure => file,
+ content => template('sunetdrive/script/usage.erb.sh'),
+ owner => 'root',
+ group => 'root',
+ mode => '0700',
+ }
+ file { '/root/tasks/maintenance.sh':
+ ensure => file,
+ content => template('sunetdrive/script/maintenance.erb.sh'),
+ owner => 'root',
+ group => 'root',
+ mode => '0700',
+ }
+ file { '/root/tasks/reboot-customer.sh':
+ ensure => file,
+ content => template('sunetdrive/script/reboot-customer.erb.sh'),
+ owner => 'root',
+ group => 'root',
+ mode => '0700',
+ }
+ file { '/usr/local/bin/check_backups':
+ ensure => file,
+ content => template('sunetdrive/script/check_backup.erb.sh'),
+ owner => 'root',
+ group => 'root',
+ mode => '0700',
+ }
+ file { '/root/tasks/collect_backup_data.sh':
+ ensure => file,
+ content => template('sunetdrive/script/collect_backup_data.erb.sh'),
+ owner => 'root',
+ group => 'root',
+ mode => '0700',
+ }
+ file { '/root/tasks/makebuckets.sh':
+ ensure => file,
+ content => template('sunetdrive/script/makebuckets.erb.sh'),
+ owner => 'root',
+ group => 'root',
+ mode => '0700',
+ }
+ file { '/root/tasks/makemanualuserbucket.sh':
+ ensure => file,
+ content => template('sunetdrive/script/makemanualuserbucket.erb.sh'),
+ owner => 'root',
+ group => 'root',
+ mode => '0700',
+ }
+ if $environment == 'test' {
+ sunet::scriptherder::cronjob { 'reboot-customer':
+ cmd => '/root/tasks/reboot-customer.sh',
+ hour => '2',
+ minute => '10',
+ ok_criteria => ['exit_status=0','max_age=21d'],
+ warn_criteria => ['exit_status=1','max_age=31d'],
+ }
+ }
+ # Opt out of userbuckets
+ unless $customer in ['extern', 'gih', 'suni', 'common'] {
+ sunet::scriptherder::cronjob { 'makebuckets':
+ cmd => '/root/tasks/makebuckets.sh',
+ minute => '*/5',
+ ok_criteria => ['exit_status=0','max_age=15m'],
+ warn_criteria => ['exit_status=1','max_age=30m'],
+ }
+ }
+ # Opt in to folder structure for multinode customers
+ if $customer in ['common'] {
+
+ file { '/root/tasks/listusers.sh':
+ ensure => file,
+ content => template('sunetdrive/script/listusers.erb.sh'),
+ owner => 'root',
+ group => 'root',
+ mode => '0700',
+ }
+ file { '/root/tasks/create_folders_in_singlenode_buckets.sh':
+ ensure => file,
+ content => template('sunetdrive/script/create_folders_in_singlenode_buckets.erb.sh'),
+ owner => 'root',
+ group => 'root',
+ mode => '0700',
+ }
+ sunet::scriptherder::cronjob { 'create_folders_in_singlenode_buckets_for_kmh':
+ cmd => '/root/tasks/create_folders_in_singlenode_buckets.sh kmh true',
+ minute => '*/30',
+ ok_criteria => ['exit_status=0','max_age=1h'],
+ warn_criteria => ['exit_status=1','max_age=2h'],
+ }
+ }
+ # Opt in to folder structure in projectbuckets
+ if $customer in ['gih', 'mdu'] {
+ file { '/root/tasks/create_folders_in_project_buckets.sh':
+ ensure => file,
+ content => template('sunetdrive/script/create_folders_in_project_buckets.erb.sh'),
+ owner => 'root',
+ group => 'root',
+ mode => '0700',
+ }
+ }
+ if $customer in ['gih'] {
+ sunet::scriptherder::cronjob { 'create_folders_in_project_buckets':
+ cmd => '/root/tasks/create_folders_in_project_buckets.sh',
+ minute => '*/30',
+ ok_criteria => ['exit_status=0','max_age=1h'],
+ warn_criteria => ['exit_status=1','max_age=2h'],
+ }
+ }
+ if $customer in ['mdu'] {
+ sunet::scriptherder::cronjob { 'create_folders_in_project_buckets':
+ cmd => '/root/tasks/create_folders_in_project_buckets.sh "Arbetsmaterial (work material)" "Bevarande (retention)" "Gallringsbart (disposal)"',
+ minute => '*/30',
+ ok_criteria => ['exit_status=0','max_age=1h'],
+ warn_criteria => ['exit_status=1','max_age=2h'],
+ }
+ }
+ if $customer == 'common' {
+ if $environment == 'prod' {
+ file { '/root/tasks/aggregate.sh':
+ ensure => file,
+ content => template('sunetdrive/script/aggregate.sh'),
+ owner => 'root',
+ group => 'root',
+ mode => '0700',
+ }
+ sunet::scriptherder::cronjob { 'aggregate_billing':
+ cmd => '/root/tasks/aggregate.sh',
+ hour => '4',
+ minute => '10',
+ ok_criteria => ['exit_status=0','max_age=2d'],
+ warn_criteria => ['exit_status=1','max_age=3d'],
+ }
+
+ }
+ file { '/root/tasks/backupsinglenodedb.sh':
+ ensure => file,
+ content => template('sunetdrive/script/backupsinglenodedb.erb.sh'),
+ owner => 'root',
+ group => 'root',
+ mode => '0700',
+ }
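+ # Per-customer cron jobs for every multinode (single node) customer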
+ $singlenodes.each | $singlenode| {
+ $multinode = hiera_hash('multinode_mapping')[$singlenode]['server']
+ $multinodeserver = "${multinode}.${site_name}"
+ $nccontainer = "nextcloud${singlenode}_app_1"
+
+ sunet::scriptherder::cronjob { "listusers_${singlenode}":
+ cmd => "/root/tasks/listusers.sh ${singlenode} ${multinodeserver}",
+ minute => '*/5',
+ ok_criteria => ['exit_status=0','max_age=30m'],
+ warn_criteria => ['exit_status=1', 'max_age=60m'],
+ }
+ sunet::scriptherder::cronjob { "backup${singlenode}db":
+ cmd => "/root/tasks/backupsinglenodedb.sh ${multinodeserver} ${singlenode}",
+ hour => '2',
+ minute => '0',
+ ok_criteria => ['exit_status=0','max_age=2d'],
+ warn_criteria => ['exit_status=1','max_age=3d'],
+ }
+ if $environment == 'prod' {
+ sunet::scriptherder::cronjob { "statistics${singlenode}":
+ cmd => "/root/tasks/usage.sh ${singlenode} ${multinodeserver}",
+ hour => '2',
+ minute => '0',
+ ok_criteria => ['exit_status=0','max_age=2d'],
+ warn_criteria => ['exit_status=1','max_age=3d'],
+ }
+ }
+ unless $singlenode in ['mau'] {
+ sunet::scriptherder::cronjob { "make${singlenode}buckets":
+ cmd => "/root/tasks/makebuckets.sh ${multinodeserver} ${nccontainer} ${singlenode}-${environment}",
+ minute => '*',
+ ok_criteria => ['exit_status=0','max_age=15m'],
+ warn_criteria => ['exit_status=1','max_age=30m'],
+ }
+ }
+ }
+ $gss_backup_server = $config['gss_backup_server']
+ $lookup_backup_server = $config['lookup_backup_server']
+ sunet::scriptherder::cronjob { 'backupgssdb':
+ cmd => "/root/tasks/backupdb.sh ${gss_backup_server}",
+ hour => '2',
+ minute => '0',
+ ok_criteria => ['exit_status=0','max_age=2d'],
+ warn_criteria => ['exit_status=1','max_age=3d'],
+ }
+ sunet::scriptherder::cronjob { 'backuplookupdb':
+ cmd => "/root/tasks/backupdb.sh ${lookup_backup_server}",
+ hour => '2',
+ minute => '0',
+ ok_criteria => ['exit_status=0','max_age=2d'],
+ warn_criteria => ['exit_status=1','max_age=3d'],
+ }
+ } else {
+ sunet::scriptherder::cronjob { 'backupdb':
+ cmd => "/root/tasks/backupdb.sh ${backup_server}",
+ hour => '2',
+ minute => '0',
+ ok_criteria => ['exit_status=0','max_age=2d'],
+ warn_criteria => ['exit_status=1','max_age=3d'],
+ }
+ sunet::scriptherder::cronjob { 'restart_proxysql':
+ ensure => 'absent',
+ cmd => '/bin/true',
+ purge_results => true,
+ }
+ if $environment == 'prod' {
+ sunet::scriptherder::cronjob { 'statistics':
+ cmd => '/root/tasks/usage.sh',
+ hour => '2',
+ minute => '0',
+ ok_criteria => ['exit_status=0','max_age=2d'],
+ warn_criteria => ['exit_status=1','max_age=3d'],
+ }
+ }
+ }
+ sunet::scriptherder::cronjob { 'collect_backup_data':
+ cmd => '/root/tasks/collect_backup_data.sh',
+ hour => '*',
+ minute => '3',
+ ok_criteria => ['exit_status=0','max_age=2d'],
+ warn_criteria => ['exit_status=1','max_age=3d'],
+ }
+ sunet::scriptherder::cronjob { 'backupbuckets':
+ cmd => '/root/tasks/backupbuckets.sh',
+ hour => '2',
+ minute => '0',
+ ok_criteria => ['exit_status=0','max_age=2d'],
+ warn_criteria => ['exit_status=1','max_age=3d'],
+ }
+ # sunet::scriptherder::cronjob { 'scriptherder_daily':
+ # cmd => '/bin/true',
+ # special => 'daily',
+ # ok_criteria => ['exit_status=0','max_age=4d'],
+ # warn_criteria => ['exit_status=1','max_age=8d'],
+ # }
+ # cron { 'example_job':
+ # ensure => 'present',
+ # command => '/bin/true',
+ # hour => ['0'],
+ # target => 'root',
+ # user => 'root',
+ # }
+}
diff --git a/manifests/scriptreceiver.pp b/manifests/scriptreceiver.pp
new file mode 100644
index 0000000..d417440
--- /dev/null
+++ b/manifests/scriptreceiver.pp
@@ -0,0 +1,110 @@
+#Class for SUNET-Drive-Script-receiver
+class sunetdrive::scriptreceiver()
+{
+ sunet::system_user {'script': username => 'script', group => 'script', managehome => true, shell => '/bin/bash' }
+
+ # These tasks correspond to a ${task}.erb.sh template
+ $tasks = ['list_users', 'list_files_for_user', 'create_bucket', 'backup_db', 'purge_backups', 'maintenancemode', 'restart_sunet_service', 'start_sentinel', 'stop_sentinel']
+
+ $environment = sunetdrive::get_environment()
+ $config = hiera_hash($environment)
+ $script_server = $config['script_server']
+ $script_ipv4 = $config['script']
+ $script_ipv6 = $config['script_v6']
+ $script_pub_key = $config['script_pub_key']
+ file { '/etc/sudoers.d/99-script-user':
+ ensure => absent,
+ }
+
+ file { '/home/script/bin':
+ ensure => directory,
+ mode => '0750',
+ owner => 'script',
+ group => 'script',
+ }
+
+ $kano_shell = ['89.46.21.246','2001:6b0:6c::1bc']
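+ # Only the script server addresses and the Kano shell hosts may reach SSH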
+ sunet::misc::ufw_allow { 'script_port':
+ from => $script_ipv4 + $script_ipv6 + $kano_shell,
+ port => 22,
+ }
+
+ ssh_authorized_key { "script@${script_server}":
+ ensure => present,
+ user => 'script',
+ type => 'ssh-ed25519',
+ key => $script_pub_key,
+ }
+
+ file { '/opt/rotate':
+ ensure => directory,
+ mode => '0750',
+ owner => 'root',
+ group => 'root',
+ }
+ -> file { '/opt/rotate/conf.d':
+ ensure => directory,
+ mode => '0750',
+ owner => 'root',
+ group => 'root',
+ }
+ file { '/usr/local/bin/safer_reboot':
+ ensure => file,
+ content => template('sunetdrive/scriptreceiver/safer_reboot.erb'),
+ mode => '0740',
+ owner => 'root',
+ group => 'root',
+ }
+ file { "/etc/sudoers.d/99-safer_reboot":
+ ensure => file,
+ content => "script ALL=(root) NOPASSWD: /usr/local/bin/safer_reboot\n",
+ mode => '0440',
+ owner => 'root',
+ group => 'root',
+ }
+ file { '/usr/local/bin/rotatefiles':
+ ensure => file,
+ content => template('sunetdrive/scriptreceiver/rotatefiles.erb'),
+ mode => '0740',
+ owner => 'root',
+ group => 'root',
+ }
+ file { '/usr/local/bin/ini2json':
+ ensure => file,
+ content => template('sunetdrive/scriptreceiver/ini2json.py'),
+ mode => '0740',
+ owner => 'root',
+ group => 'root',
+ }
+ -> file { '/etc/scriptherder/check/rotatefiles.ini':
+ ensure => file,
+ content => "[check]\nok = exit_status=0, max_age=35m\nwarning = exit_status=0, max_age=1h\n",
+ mode => '0644',
+ owner => 'root',
+ group => 'root',
+ }
+ cron { 'rotate_logs':
+ command => '/usr/local/bin/scriptherder --mode wrap --syslog --name rotatefiles -- /usr/local/bin/rotatefiles',
+ require => File['/usr/local/bin/rotatefiles'],
+ user => 'root',
+ minute => '*',
+ hour => '*',
+ }
+ $tasks.each |String $task| {
+ file { "/home/script/bin/${task}.sh":
+ ensure => file,
+ content => template("sunetdrive/scriptreceiver/${task}.erb.sh"),
+ mode => '0740',
+ owner => 'script',
+ group => 'script',
+ }
+ file { "/etc/sudoers.d/99-${task}":
+ ensure => file,
+ content => "script ALL=(root) NOPASSWD: /home/script/bin/${task}.sh\n",
+ mode => '0440',
+ owner => 'root',
+ group => 'root',
+ }
+ }
+}
+
diff --git a/manifests/sitemonitor.pp b/manifests/sitemonitor.pp
new file mode 100644
index 0000000..29bf48d
--- /dev/null
+++ b/manifests/sitemonitor.pp
@@ -0,0 +1,27 @@
+# Class for site monitor
+class sunetdrive::sitemonitor() {
+
+ $sites = hiera_array('sites')
+ $tls_servers = flatten($sites,hiera_array('tls_servers'))
+ $tls_servers_with_port = hiera_array('tls_servers_with_port')
+ $nextcloud_version_prod = split(hiera('nextcloud_version_prod'),'[-]')[0]
+ $nextcloud_version_test = split(hiera('nextcloud_version_test'),'[-]')[0]
+
+ file { '/etc/nagios4/conf.d/sunetdrive_sites.cfg':
+ ensure => present,
+ content => template('sunetdrive/monitor/sunetdrive_sites.cfg.erb'),
+ mode => '0644',
+ }
+ file { '/etc/nagios4/conf.d/sunetdrive_ssl_checks.cfg':
+ ensure => present,
+ content => template('sunetdrive/monitor/sunetdrive_ssl_checks.cfg.erb'),
+ mode => '0644',
+ }
+ cron { 'restart_socket':
+ command => 'test -S /var/cache/thruk/live.sock || systemctl restart nagios4',
+ user => 'root',
+ minute => '*/5',
+ }
+
+}
+
diff --git a/manifests/sitemonitornaemon.pp b/manifests/sitemonitornaemon.pp
new file mode 100644
index 0000000..0739be6
--- /dev/null
+++ b/manifests/sitemonitornaemon.pp
@@ -0,0 +1,100 @@
+# Class for site monitor
+class sunetdrive::sitemonitornaemon() {
+
+ $sites = hiera_array('sites')
+ $fullnodes = hiera_array('fullnodes')
+ $tls_servers = flatten($sites,hiera_array('tls_servers'))
+ $tls_servers_with_port = hiera_array('tls_servers_with_port')
+ $nextcloud_version_prod = split(hiera('nextcloud_version_prod'),'[-]')[0]
+ $nextcloud_version_test = split(hiera('nextcloud_version_test'),'[-]')[0]
+ $monitorhost = $::fqdn
+ $environment = sunetdrive::get_environment()
+ $influx_passwd = safe_hiera('influx_passwd')
+ $slack_url = safe_hiera('slack_url')
+
+ file { '/usr/local/bin/slack_nagios.sh':
+ ensure => present,
+ content => template('sunetdrive/monitor/notify_slack.erb.sh'),
+ mode => '0755',
+ }
+ file { '/etc/nagios-plugins/config/ping.cfg':
+ ensure => present,
+ owner => 'root',
+ group => 'root',
+ content => template('sunetdrive/monitor/ping.cfg.erb'),
+ mode => '0644',
+ }
+ #definition for check_nrpe_1arg
+ file { '/etc/nagios-plugins/config/check_nrpe.cfg':
+ ensure => file,
+ mode => '0644',
+ content => template('sunetdrive/monitor/check_nrpe.cfg.erb'),
+ }
+ file { '/etc/naemon/conf.d/sunetdrive_sites.cfg':
+ ensure => present,
+ content => template('sunetdrive/monitor/sunetdrive_sites.cfg.erb'),
+ mode => '0644',
+ }
+ file { '/etc/naemon/conf.d/sunetdrive_ssl_checks.cfg':
+ ensure => present,
+ content => template('sunetdrive/monitor/sunetdrive_ssl_checks.cfg.erb'),
+ mode => '0644',
+ }
+ file { '/etc/naemon/conf.d/sunetdrive_thruk_templates.conf':
+ ensure => present,
+ owner => 'naemon',
+ group => 'naemon',
+ content => template('sunetdrive/monitor/sunetdrive_thruk_templates.conf.erb'),
+ mode => '0644',
+ }
+ nagioscfg::service {'check_galera_cluster':
+ hostgroup_name => ['galera_monitor'],
+ check_command => 'check_nrpe_1arg!check_galera_cluster',
+ description => 'Galera Cluster Health',
+ contact_groups => ['alerts']
+ }
+ nagioscfg::service {'check_async_replication':
+ hostgroup_name => ['sunetdrive::mariadb_backup'],
+ check_command => 'check_nrpe_1arg!check_async_replication',
+ description => 'MySQL Replication Health',
+ contact_groups => ['alerts']
+ }
+ nagioscfg::service {'check_backups':
+ action_url => '/grafana/dashboard/script/histou.js?host=$HOSTNAME$&service=$SERVICEDISPLAYNAME$&theme=light&annotations=true',
+ hostgroup_name => ['sunetdrive::script'],
+ check_command => 'check_nrpe_1arg_to600!check_backups',
+ check_interval => '720',
+ retry_interval => '180',
+ description => 'Backup Status',
+ contact_groups => ['alerts']
+ }
+ nagioscfg::service {'check_proxysql_server':
+ hostgroup_name => ['sunetdrive::proxysql'],
+ check_command => 'check_nrpe_1arg!check_proxysql_server',
+ description => 'Number of ProxySQL servers available',
+ contact_groups => ['alerts']
+ }
+ nagioscfg::service {'check_mysql_server_status':
+ action_url => '/grafana/dashboard/script/histou.js?host=$HOSTNAME$&service=$SERVICEDISPLAYNAME$&theme=light&annotations=true',
+ hostgroup_name => ['sunetdrive::proxysql'],
+ check_command => 'check_nrpe_1arg!check_mysql_server_status',
+ description => 'Status of mysql servers',
+ contact_groups => ['alerts']
+ }
+ nagioscfg::service {'check_exabgp_announce':
+ action_url => '/grafana/dashboard/script/histou.js?host=$HOSTNAME$&service=$SERVICEDISPLAYNAME$&theme=light&annotations=true',
+ hostgroup_name => ['sunetdrive::lb'],
+ check_command => 'check_nrpe_1arg!check_exabgp_announce',
+ description => 'Status of exabgp routes',
+ contact_groups => ['alerts']
+ }
+ nagioscfg::service {'check_sarimner':
+ action_url => '/grafana/dashboard/script/histou.js?host=$HOSTNAME$&service=$SERVICEDISPLAYNAME$&theme=light&annotations=true',
+ hostgroup_name => ['sunetdrive::lb'],
+ check_command => 'check_nrpe_1arg_to300!check_sarimner',
+ description => 'Status of sarimner interface',
+ contact_groups => ['alerts']
+ }
+
+}
+
diff --git a/manifests/thruk.pp b/manifests/thruk.pp
new file mode 100644
index 0000000..ba93389
--- /dev/null
+++ b/manifests/thruk.pp
@@ -0,0 +1,81 @@
+# Class for the Thruk monitoring web interface
+include stdlib
+class sunetdrive::thruk($location=undef) {
+
+
+ $thruk_local_config = '# File managed by puppet
+<Component Thruk::Backend>
+    <peer>
+        name = Core
+        type = livestatus
+        <options>
+            peer          = /var/cache/thruk/live.sock
+            resource_file = /etc/nagios4/resource.cfg
+        </options>
+        <configtool>
+            core_conf      = /etc/nagios4/nagios.cfg
+            obj_check_cmd  = /usr/sbin/nagios4 -v /etc/nagios4/nagios.cfg
+            obj_reload_cmd = systemctl reload nagios4.service
+        </configtool>
+    </peer>
+</Component>
+
+cookie_auth_restricted_url = https://monitor.drive.sunet.se/thruk/cgi-bin/restricted.cgi
+'
+
+ file_line {'nagios_livestatus_conf':
+ line => 'broker_module=/usr/local/lib/mk-livestatus/livestatus.o /var/cache/thruk/live.sock',
+ path => '/etc/nagios4/nagios.cfg'
+ }
+ file_line {'nagiosadmin_cgi_conf':
+ line => 'authorized_for_admin=nagiosadmin',
+ match => '^authorized_for_admin=thrukadmin',
+ path => '/etc/thruk/cgi.cfg',
+ require => Package['thruk'],
+ }
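+ # Download, unpack and build mk-livestatus; each exec is a no-op once livestatus.o is installed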
+ exec {'mk-livestatus-src':
+ command => 'curl -s https://download.checkmk.com/checkmk/1.5.0p24/mk-livestatus-1.5.0p24.tar.gz --output /opt/mk-livestatus-1.5.0p24.tar.gz',
+ unless => 'ls /usr/local/lib/mk-livestatus/livestatus.o',
+ }
+ exec {'mk-livestatus-tar':
+ command => 'cd /opt && tar xfv mk-livestatus-1.5.0p24.tar.gz',
+ require => Exec['mk-livestatus-src'],
+ unless => 'ls /usr/local/lib/mk-livestatus/livestatus.o',
+ }
+ exec {'mk-livestatus-build':
+ command => 'apt update && apt install -y make libboost-system1.71.0 clang librrd-dev libboost-dev libasio-dev libboost-system-dev && cd /opt/mk-livestatus-1.5.0p24 && ./configure --with-nagios4 && make && make install && apt -y remove clang librrd-dev libboost-dev libasio-dev libboost-system-dev make && apt autoremove -y',
+ require => [Exec['mk-livestatus-tar'], File_line['nagios_livestatus_conf'], Exec['www-data_in_nagios_group']],
+ unless => 'ls /usr/local/lib/mk-livestatus/livestatus.o',
+ }
+ exec {'www-data_in_nagios_group':
+ command => 'usermod -a -G nagios www-data && usermod -a -G www-data nagios',
+ unless => 'id www-data | grep nagios',
+ }
+ package {'thruk':
+ ensure => 'installed',
+ require => Exec['mk-livestatus-build'],
+ }
+ package {'thruk-plugin-reporting':
+ ensure => 'installed',
+ require => Package['thruk'],
+ }
+ file { 'thruk_repo' :
+ ensure => 'file',
+ name => '/etc/apt/sources.list.d/labs-consol-stable.list',
+ mode => '0644',
+ content => 'deb http://labs.consol.de/repo/stable/ubuntu focal main',
+ require => Exec['thruk_gpg_key'],
+ }
+ file { 'thruk_conf' :
+ ensure => 'file',
+ name => '/etc/thruk/thruk_local.conf',
+ mode => '0640',
+ owner => 'www-data',
+ group => 'www-data',
+ content => $thruk_local_config,
+ require => Package['thruk'],
+ }
+ exec { 'thruk_gpg_key':
+ command => 'curl -s "https://labs.consol.de/repo/stable/RPM-GPG-KEY" | sudo apt-key add -',
+ unless => 'apt-key list 2> /dev/null | grep "F2F9 7737 B59A CCC9 2C23 F8C7 F8C1 CA08 A57B 9ED7"',
+ }
+
+}
diff --git a/templates/application/000-default.conf.erb b/templates/application/000-default.conf.erb
new file mode 100644
index 0000000..fb3ccea
--- /dev/null
+++ b/templates/application/000-default.conf.erb
@@ -0,0 +1,51 @@
+# This file is managed by puppet
+LoadModule remoteip_module /usr/lib/apache2/modules/mod_remoteip.so
+LoadModule headers_module /usr/lib/apache2/modules/mod_headers.so
+<VirtualHost *:80>
+ ServerAdmin webmaster@localhost
+ DocumentRoot /var/www/html
+
+ ErrorLog ${APACHE_LOG_DIR}/error.log
+ CustomLog ${APACHE_LOG_DIR}/access.log combined
+ <Directory /var/www/html>
+  Require all granted
+  AllowOverride All
+  Options FollowSymLinks MultiViews
+
+  <IfModule mod_dav.c>
+   Dav off
+  </IfModule>
+ </Directory>
+</VirtualHost>
+
+<VirtualHost *:443>
+ ServerAdmin webmaster@localhost
+ DocumentRoot /var/www/html
+ # Log format config
+ LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" common
+ SetEnvIf X-Forwarded-For "^.*\..*\..*\..*" forwarded
+
+ # Header config
+ RemoteIPHeader X-Forwarded-For
+<%- @lb_servers.each do |ip| -%>
+ RemoteIPInternalProxy <%= ip %>
+<%- end -%>
+ ErrorDocument 404 /404.html
+ ErrorLog ${APACHE_LOG_DIR}/error.log
+ CustomLog ${APACHE_LOG_DIR}/access.log combined env=forwarded
+ SSLEngine On
+ SSLCertificateFile /etc/ssl/certs/ssl-cert-snakeoil.pem
+ SSLCertificateKeyFile /etc/ssl/private/ssl-cert-snakeoil.key
+ <Directory /var/www/html>
+  LimitRequestBody 0
+  Require all granted
+  AllowOverride All
+  Options FollowSymLinks MultiViews
+
+  <IfModule mod_dav.c>
+   Dav off
+  </IfModule>
+ </Directory>
+</VirtualHost>
+
+# vim: syntax=apache ts=4 sw=4 sts=4 sr noet
diff --git a/templates/application/404.html.erb b/templates/application/404.html.erb
new file mode 100644
index 0000000..485c750
--- /dev/null
+++ b/templates/application/404.html.erb
@@ -0,0 +1,54 @@
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8">
+<title>404</title>
+</head>
+<body>
+<h1>This page does not exist.</h1>
+<p>You must have clicked on a dead link.</p>
+</body>
+</html>
diff --git a/templates/application/MDU-README.md.erb b/templates/application/MDU-README.md.erb
new file mode 100644
index 0000000..365bfdf
--- /dev/null
+++ b/templates/application/MDU-README.md.erb
@@ -0,0 +1,13 @@
+Hello and welcome to your personal space for research data storage,
+
+It is important that research data is managed securely and carefully, so that it is protected from damage and destruction. Mälardalen University offers its researchers and doctoral students a secure and stable IT service for storing research data. It also enables research collaboration, internally and externally, since researchers can easily and securely share their data within a project or with other individual researchers at MDU. This technical infrastructure provides a good collaboration space for researchers, secure handling of data, and the possibility to store data in a way that lets data management at MDU comply with the FAIR principles.
+
+To meet the general need for research data storage, MDU has purchased Sunet Drive's S3 research data storage service. Each researcher will be offered a space of 200 GB per person. To simplify administration, all spaces will be provisioned in advance in the same way, regardless of the research domain or total amount of research data.
+
+200 GB will not cover every MDU researcher's data storage needs; therefore, researchers can purchase additional space via a digital form. Additional purchases are handled through internal billing, and the purchased space becomes visible in the researcher's personal space at the next login.
+
+If there is a need for a specific project space for research data storage and/or sharing, this is also possible. Such a space is created by an administrator after ordering project space through the same form as when purchasing additional storage space. The project space will then be visible to project members in the personal storage space at the next login.
+
+Can an MDU student be included in a research project? Yes: a student can be given time-limited access to specific folders in either the project space or the researcher's personal space. Contact the Data Access Unit (DAU) to enable this.
+
+If you have any questions about the storage space, please contact dau@mdu.se.
diff --git a/templates/application/add_admin_user.erb b/templates/application/add_admin_user.erb
new file mode 100644
index 0000000..13ac704
--- /dev/null
+++ b/templates/application/add_admin_user.erb
@@ -0,0 +1,20 @@
+#!/bin/bash
+user=${1}
+email=${2}
+container=${3}
+
+
+usage() {
+ echo "Usage: ${0} [container]"
+ exit 1
+}
+
+if [[ "x${user}" == "x" || "x${email}" == "x" ]]; then
+ usage
+fi
+
+
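+# Generate a random 40 character password for the new admin user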
+pass=$(< /dev/urandom tr -dc _A-Z-a-z-0-9 | head -c40;echo -n;)
+OC_PASS="${pass}" /usr/local/bin/occ ${container} user:add --password-from-env --group admin "${user}"
+unset OC_PASS
+/usr/local/bin/occ ${container} user:setting ${user} settings email "${email}"
diff --git a/templates/application/apache.php.ini.erb b/templates/application/apache.php.ini.erb
new file mode 100644
index 0000000..ef04c9f
--- /dev/null
+++ b/templates/application/apache.php.ini.erb
@@ -0,0 +1,160 @@
+[PHP]
+allow_url_fopen = On
+allow_url_include = Off
+auto_append_file =
+auto_globals_jit = On
+auto_prepend_file =
+default_charset = "UTF-8"
+default_mimetype = "text/html"
+default_socket_timeout = 60
+disable_classes =
+disable_functions = pcntl_alarm,pcntl_fork,pcntl_waitpid,pcntl_wait,pcntl_wifexited,pcntl_wifstopped,pcntl_wifsignaled,pcntl_wifcontinued,pcntl_wexitstatus,pcntl_wtermsig,pcntl_wstopsig,pcntl_signal,pcntl_signal_get_handler,pcntl_signal_dispatch,pcntl_get_last_error,pcntl_strerror,pcntl_sigprocmask,pcntl_sigwaitinfo,pcntl_sigtimedwait,pcntl_exec,pcntl_getpriority,pcntl_setpriority,pcntl_async_signals,pcntl_unshare,
+display_errors = Off
+display_startup_errors = Off
+doc_root =
+enable_dl = Off
+engine = On
+error_reporting = E_ALL & ~E_DEPRECATED & ~E_STRICT
+expose_php = Off
+file_uploads = On
+ignore_repeated_errors = Off
+ignore_repeated_source = Off
+implicit_flush = Off
+log_errors = On
+log_errors_max_len = 1024
+max_execution_time = 86400
+max_file_uploads = 20
+max_input_time = 86400
+memory_limit = 512M
+output_buffering = Off
+post_max_size = 30G
+precision = 14
+register_argc_argv = Off
+report_memleaks = On
+request_order = "GP"
+serialize_precision = -1
+short_open_tag = Off
+unserialize_callback_func =
+upload_max_filesize = 30G
+user_dir =
+variables_order = "GPCS"
+zend.enable_gc = On
+zend.exception_ignore_args = On
+zlib.output_compression = Off
+<% if @customer == "kau" -%>
+upload_tmp_dir = /opt/tmp/
+<% end %>
+
+[CLI Server]
+cli_server.color = On
+[Date]
+; Nothing here
+[filter]
+; Nothing here
+[iconv]
+; Nothing here
+[imap]
+; Nothing here
+[intl]
+; Nothing here
+[sqlite3]
+; Nothing here
+[Pcre]
+; Nothing here
+[Pdo]
+; Nothing here
+[Pdo_mysql]
+pdo_mysql.default_socket=
+[Phar]
+; Nothing here
+[mail function]
+SMTP = localhost
+smtp_port = 25
+mail.add_x_header = Off
+[ODBC]
+odbc.allow_persistent = On
+odbc.check_persistent = On
+odbc.max_persistent = -1
+odbc.max_links = -1
+odbc.defaultlrl = 4096
+odbc.defaultbinmode = 1
+[MySQLi]
+mysqli.max_persistent = -1
+mysqli.allow_persistent = On
+mysqli.max_links = -1
+mysqli.default_port = 3306
+mysqli.default_socket =
+mysqli.default_host =
+mysqli.default_user =
+mysqli.default_pw =
+mysqli.reconnect = Off
+[mysqlnd]
+mysqlnd.collect_statistics = On
+mysqlnd.collect_memory_statistics = Off
+[OCI8]
+; Nothing here
+[PostgreSQL]
+pgsql.allow_persistent = On
+pgsql.auto_reset_persistent = Off
+pgsql.max_persistent = -1
+pgsql.max_links = -1
+pgsql.ignore_notice = 0
+pgsql.log_notice = 0
+[bcmath]
+bcmath.scale = 0
+[browscap]
+; Nothing here
+[Session]
+session.save_handler = files
+session.use_strict_mode = 0
+session.use_cookies = 1
+session.use_only_cookies = 1
+session.name = PHPSESSID
+session.auto_start = 0
+session.cookie_lifetime = 0
+session.cookie_path = /
+session.cookie_domain =
+session.cookie_httponly =
+session.cookie_samesite =
+session.serialize_handler = php
+session.gc_probability = 0
+session.gc_divisor = 1000
+session.gc_maxlifetime = 1440
+session.referer_check =
+session.cache_limiter = nocache
+session.cache_expire = 180
+session.use_trans_sid = 0
+session.sid_length = 26
+session.trans_sid_tags = "a=href,area=href,frame=src,form="
+session.sid_bits_per_character = 5
+[Assertion]
+zend.assertions = -1
+[COM]
+; Nothing here
+[mbstring]
+; Nothing here
+[gd]
+; Nothing here
+[exif]
+; Nothing here
+[Tidy]
+tidy.clean_output = Off
+[soap]
+soap.wsdl_cache_enabled=1
+soap.wsdl_cache_dir="/tmp"
+soap.wsdl_cache_ttl=86400
+soap.wsdl_cache_limit = 5
+[sysvshm]
+; Nothing here
+[ldap]
+ldap.max_links = -1
+[dba]
+; Nothing here
+[opcache]
+; Nothing here
+[curl]
+; Nothing here
+[openssl]
+; Nothing here
+[ffi]
+; Nothing here
diff --git a/templates/application/apcu.ini.erb b/templates/application/apcu.ini.erb
new file mode 100644
index 0000000..b005655
--- /dev/null
+++ b/templates/application/apcu.ini.erb
@@ -0,0 +1,2 @@
+extension=apcu.so
+apc.enable_cli=1
diff --git a/templates/application/cli.php.ini.erb b/templates/application/cli.php.ini.erb
new file mode 100644
index 0000000..8bc8bd4
--- /dev/null
+++ b/templates/application/cli.php.ini.erb
@@ -0,0 +1,156 @@
+[PHP]
+allow_url_fopen = On
+allow_url_include = Off
+auto_append_file =
+auto_globals_jit = On
+auto_prepend_file =
+default_charset = "UTF-8"
+default_mimetype = "text/html"
+default_socket_timeout = 60
+disable_classes =
+disable_functions =
+display_errors = Off
+display_startup_errors = Off
+doc_root =
+enable_dl = Off
+engine = On
+error_reporting = E_ALL & ~E_DEPRECATED & ~E_STRICT
+expose_php = On
+file_uploads = On
+ignore_repeated_errors = Off
+ignore_repeated_source = Off
+implicit_flush = Off
+log_errors = On
+log_errors_max_len = 1024
+max_execution_time = 86400
+max_file_uploads = 20
+max_input_time = 86400
+memory_limit = -1
+output_buffering = Off
+post_max_size = 16G
+precision = 14
+register_argc_argv = Off
+report_memleaks = On
+request_order = "GP"
+serialize_precision = -1
+short_open_tag = Off
+unserialize_callback_func =
+upload_max_filesize = 16G
+user_dir =
+variables_order = "GPCS"
+zend.enable_gc = On
+zend.exception_ignore_args = On
+zlib.output_compression = Off
+[CLI Server]
+cli_server.color = On
+[Date]
+; Nothing here
+[filter]
+; Nothing here
+[iconv]
+; Nothing here
+[imap]
+; Nothing here
+[intl]
+; Nothing here
+[sqlite3]
+; Nothing here
+[Pcre]
+; Nothing here
+[Pdo]
+; Nothing here
+[Pdo_mysql]
+pdo_mysql.default_socket=
+[Phar]
+; Nothing here
+[mail function]
+SMTP = localhost
+smtp_port = 25
+mail.add_x_header = Off
+[ODBC]
+odbc.allow_persistent = On
+odbc.check_persistent = On
+odbc.max_persistent = -1
+odbc.max_links = -1
+odbc.defaultlrl = 4096
+odbc.defaultbinmode = 1
+[MySQLi]
+mysqli.max_persistent = -1
+mysqli.allow_persistent = On
+mysqli.max_links = -1
+mysqli.default_port = 3306
+mysqli.default_socket =
+mysqli.default_host =
+mysqli.default_user =
+mysqli.default_pw =
+mysqli.reconnect = Off
+[mysqlnd]
+mysqlnd.collect_statistics = On
+mysqlnd.collect_memory_statistics = Off
+[OCI8]
+; Nothing here
+[PostgreSQL]
+pgsql.allow_persistent = On
+pgsql.auto_reset_persistent = Off
+pgsql.max_persistent = -1
+pgsql.max_links = -1
+pgsql.ignore_notice = 0
+pgsql.log_notice = 0
+[bcmath]
+bcmath.scale = 0
+[browscap]
+; Nothing here
+[Session]
+session.save_handler = files
+session.use_strict_mode = 0
+session.use_cookies = 1
+session.use_only_cookies = 1
+session.name = PHPSESSID
+session.auto_start = 0
+session.cookie_lifetime = 0
+session.cookie_path = /
+session.cookie_domain =
+session.cookie_httponly =
+session.cookie_samesite =
+session.serialize_handler = php
+session.gc_probability = 0
+session.gc_divisor = 1000
+session.gc_maxlifetime = 1440
+session.referer_check =
+session.cache_limiter = nocache
+session.cache_expire = 180
+session.use_trans_sid = 0
+session.sid_length = 26
+session.trans_sid_tags = "a=href,area=href,frame=src,form="
+session.sid_bits_per_character = 5
+[Assertion]
+zend.assertions = -1
+[COM]
+; Nothing here
+[mbstring]
+; Nothing here
+[gd]
+; Nothing here
+[exif]
+; Nothing here
+[Tidy]
+tidy.clean_output = Off
+[soap]
+soap.wsdl_cache_enabled=1
+soap.wsdl_cache_dir="/tmp"
+soap.wsdl_cache_ttl=86400
+soap.wsdl_cache_limit = 5
+[sysvshm]
+; Nothing here
+[ldap]
+ldap.max_links = -1
+[dba]
+; Nothing here
+[opcache]
+; Nothing here
+[curl]
+; Nothing here
+[openssl]
+; Nothing here
+[ffi]
+; Nothing here
diff --git a/templates/application/complete_reinstall.erb.sh b/templates/application/complete_reinstall.erb.sh
new file mode 100644
index 0000000..ec43c88
--- /dev/null
+++ b/templates/application/complete_reinstall.erb.sh
@@ -0,0 +1,61 @@
+#!/usr/bin/env bash
+
+config_php='/var/www/html/config/config.php'
+dbhost="<%= @dbhost %>"
+mysql_user_password="<%= @mysql_user_password %>"
+admin_password="<%= @admin_password %>"
+location="<%= @location %>"
+bucket="<%= @s3_bucket %>"
+
+echo "Input 'IKnowWhatIAmDoing' if you are sure you want to delete everything and reinstall Nextcloud: "
+read -r user_input
+
+if [[ "${user_input}" == "IKnowWhatIAmDoing" ]]; then
+ echo "WARNING: This will delete everything in the database and reinstall Nextcloud."
+ echo "You have 10 seconds to abort by hitting CTRL/C"
+ sleep 10s
+ echo "Ok, proceeding."
+ echo "Dropping database in 3 seconds"
+ sleep 3s
+ /usr/bin/mysql -e "drop database nextcloud" -u nextcloud -p"${mysql_user_password}" -h "${dbhost}"
+ /usr/bin/mysql -e "create database nextcloud" -u nextcloud -p"${mysql_user_password}" -h "${dbhost}"
+ if [[ "x${location}" != "x" || "x${bucket}" != "x" ]]; then
+ bucket_content=$(/usr/bin/rclone ls "${location}":"${bucket}" --config /rclone.conf)
+ if [[ "x${bucket_content}" != "x" ]]; then
+ echo "Deleting all files in ${location}:${bucket} in 3 seconds"
+ sleep 3s
+ /usr/bin/rclone purge "${location}:${bucket}" --config /rclone.conf
+ /usr/bin/rclone mkdir "${location}:${bucket}" --config /rclone.conf
+ fi
+ fi
+ : >${config_php}
+ echo "Running maintenance:install"
+ su - www-data -s /bin/bash <<EOF
+php --define apc.enable_cli=1 /var/www/html/occ maintenance:install --database mysql --database-name nextcloud --database-host "${dbhost}" --database-user nextcloud --database-pass "${mysql_user_password}" --admin-user admin --admin-pass "${admin_password}"
+EOF
+fi
diff --git a/templates/application/config.php.erb b/templates/application/config.php.erb
new file mode 100644
--- /dev/null
+++ b/templates/application/config.php.erb
+<?php
+$CONFIG = array (
+ 'memcache.local' => '\\OC\\Memcache\\APCu',
+ 'appstoreenabled' => false,
+ 'apps_paths' =>
+ array (
+ 0 =>
+ array (
+ 'path' => '/var/www/html/apps',
+ 'url' => '/apps',
+ 'writable' => false,
+ ),
+ 1 =>
+ array (
+ 'path' => '/var/www/html/custom_apps',
+ 'url' => '/custom_apps',
+ 'writable' => true,
+ ),
+ ),
+ 'memcache.distributed' => '\\OC\\Memcache\\Redis',
+ 'memcache.locking' => '\\OC\\Memcache\\Redis',
+<% if @location == 'sunet-test' -%>
+ 'redis.cluster' => [
+ 'seeds' => [
+ 'redis1.drive.test.sunet.se:6379',
+ 'redis2.drive.test.sunet.se:6379',
+ 'redis3.drive.test.sunet.se:6379',
+ 'redis1.drive.test.sunet.se:6380',
+ 'redis2.drive.test.sunet.se:6380',
+ 'redis3.drive.test.sunet.se:6380',
+ 'redis1.drive.test.sunet.se:6381',
+ 'redis2.drive.test.sunet.se:6381',
+ 'redis3.drive.test.sunet.se:6381'
+ ],
+ 'timeout' => 1.1,
+ 'read_timeout' => 0.0,
+ 'password' => '<%= @redis_cluster_password %>',
+ 'failover_mode' => \RedisCluster::FAILOVER_ERROR
+ ],
+<% elsif @environment == 'test' && ! @is_multinode && @location != 'gss-test' -%>
+ 'redis.cluster' => [
+ 'seeds' => [
+ 'redis1.<%= @customer %>.drive.test.sunet.se:6379',
+ 'redis2.<%= @customer %>.drive.test.sunet.se:6379',
+ 'redis3.<%= @customer %>.drive.test.sunet.se:6379',
+ 'redis1.<%= @customer %>.drive.test.sunet.se:6380',
+ 'redis2.<%= @customer %>.drive.test.sunet.se:6380',
+ 'redis3.<%= @customer %>.drive.test.sunet.se:6380',
+ 'redis1.<%= @customer %>.drive.test.sunet.se:6381',
+ 'redis2.<%= @customer %>.drive.test.sunet.se:6381',
+ 'redis3.<%= @customer %>.drive.test.sunet.se:6381'
+ ],
+ 'timeout' => 1.1,
+ 'read_timeout' => 0.0,
+ 'password' => '<%= @redis_cluster_password %>',
+ 'failover_mode' => \RedisCluster::FAILOVER_ERROR
+ ],
+<% elsif @environment == 'prod' && ! @is_multinode && @location != 'gss-prod' -%>
+ 'redis.cluster' => [
+ 'seeds' => [
+ 'redis1.<%= @customer %>.drive.sunet.se:6379',
+ 'redis2.<%= @customer %>.drive.sunet.se:6379',
+ 'redis3.<%= @customer %>.drive.sunet.se:6379',
+ 'redis1.<%= @customer %>.drive.sunet.se:6380',
+ 'redis2.<%= @customer %>.drive.sunet.se:6380',
+ 'redis3.<%= @customer %>.drive.sunet.se:6380',
+ 'redis1.<%= @customer %>.drive.sunet.se:6381',
+ 'redis2.<%= @customer %>.drive.sunet.se:6381',
+ 'redis3.<%= @customer %>.drive.sunet.se:6381'
+ ],
+ 'timeout' => 1.1,
+ 'read_timeout' => 0.0,
+ 'password' => '<%= @redis_cluster_password %>',
+ 'failover_mode' => \RedisCluster::FAILOVER_ERROR
+ ],
+<% else -%>
+ 'redis' =>
+ array (
+ 'host' => '<%= @redis_host %>',
+ 'password' => '<%= @redis_host_password %>',
+ 'port' => 6379,
+ ),
+<% end -%>
+ 'forcessl' => true,
+ 'overwriteprotocol' => 'https',
+ 'objectstore' =>
+ array (
+ 'class' => '\\OC\\Files\\ObjectStore\\S3',
+ 'arguments' =>
+ array (
+ 'bucket' => '<%= @s3_bucket %>',
+ 'key' => '<%= @s3_key %>',
+ 'secret' => '<%= @s3_secret %>',
+ 'region' => 'us-east-1',
+ 'hostname' => '<%= @s3_host %>',
+ 'port' => '',
+ 'objectPrefix' => 'urn:oid:',
+ 'autocreate' => false,
+ 'use_ssl' => true,
+ 'use_path_style' => true,
+ 'legacy_auth' => false,
+ ),
+ ),
+ 'csrf.disabled' => true,
+ 'passwordsalt' => '<%= @passwordsalt %>',
+ 'secret' => '<%= @secret %>',
+ 'trusted_domains' =>
+ array (
+ <%- index = 0 -%>
+ <%- @trusted_domains.each do |item| -%>
+ <%= index %> => '<%= item %>',<% index += 1 %>
+ <%- end -%>
+ ),
+ <%- unless @trusted_proxies.empty? -%>
+ 'trusted_proxies' =>
+ array (
+ <%- index = 0 -%>
+ <%- @trusted_proxies.each do |item| -%>
+ <%= index %> => '<%= item %>',
+ <%- index += 1 -%>
+ <%- end -%>
+ ),
+ <%- end -%>
+ 'config_is_read_only' => true,
+ 'datadirectory' => '/var/www/html/data',
+ 'dbhost' => '<%= @dbhost %>',
+ 'dbname' => 'nextcloud',
+ 'dbpassword' => '<%= @mysql_user_password %>',
+ 'dbport' => '3306',
+ 'dbtableprefix' => 'oc_',
+ 'dbtype' => 'mysql',
+ 'dbuser' => 'nextcloud',
+ 'drive_email_template_text_left' => '<%= @drive_email_template_text_left %>',
+ 'drive_email_template_plain_text_left' => '<%= @drive_email_template_plain_text_left %>',
+ 'drive_email_template_url_left' => '<%= @drive_email_template_url_left %>',
+ 'gs.enabled' => '<%= @gs_enabled %>',
+ 'gs.federation' => '<%= @gs_federation %>',
+ 'gs.trustedHosts' => ['*.sunet.se'],
+ 'gss.discovery.manual.mapping.file' => '/var/www/html/mappingfile.json',
+ 'gss.discovery.manual.mapping.parameter' => 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6',
+ 'gss.discovery.manual.mapping.regex' => true,
+ 'gss.jwt.key' => '<%= @gss_jwt_key %>',
+ 'gss.master.admin' =>
+ array (
+ <%- index = 0 -%>
+ <%- @gss_master_admin.each do |item| -%>
+ <%= index %> => '<%= item %>',
+ <%- index += 1 -%>
+ <%- end -%>
+ ),
+ 'gss.master.url' => '<%= @gss_master_url %>',
+ 'gss.mode' => '<%= @gss_mode %>',
+ 'gss.user.discovery.module' => '\\OCA\\GlobalSiteSelector\\UserDiscoveryModules\\ManualUserMapping',
+ 'installed' => true,
+ 'instanceid' => '<%= @instanceid %>',
+ 'log_type' => 'file',
+ 'loglevel' => 0,
+ 'lookup_server' => '<%= @lookup_server %>',
+ 'mail_domain' => '<%= @mail_domain %>',
+ 'mail_from_address' => '<%= @mail_from_address %>',
+ 'mail_sendmailmode' => 'smtp',
+ 'mail_smtpauth' => 1,
+ 'mail_smtpauthtype' => 'LOGIN',
+ 'mail_smtphost' => '<%= @mail_smtphost %>',
+ 'mail_smtpmode' => 'smtp',
+ 'mail_smtpname' => '<%= @smtpuser %>',
+ 'mail_smtppassword' => '<%= @smtppassword %>',
+ 'mail_smtpport' => '587',
+ 'mail_smtpsecure' => 'tls',
+ 'mail_template_class' => 'OCA\DriveEmailTemplate\EMailTemplate',
+ 'mysql.utf8mb4' => true,
+ 'overwritehost' => '<%= @site_name %>',
+ 'overwrite.cli.url' => 'https://<%= @site_name %>',
+ 'templatedirectory' => '',
+ 'skeletondirectory' => '<%= @skeletondirectory %>',
+ 'twofactor_enforced' => 'true',
+ 'twofactor_enforced_groups' =>
+ array (
+ 0 => 'admin',
+ <%- index = 1 -%>
+ <%- @twofactor_enforced_groups.each do |item| -%>
+ <%= index %> => '<%= item %>',
+ <%- index += 1 -%>
+ <%- end -%>
+ ),
+ 'twofactor_enforced_excluded_groups' =>
+ array (
+ <%- index = 0 -%>
+ <%- @twofactor_enforced_excluded_groups.each do |item| -%>
+ <%= index %> => '<%= item %>',
+ <%- index += 1 -%>
+ <%- end -%>
+ ),
+ 'version' => '<%= @nextcloud_version_string %>',
+ 'app_install_overwrite' =>
+ array (
+ 0 => 'globalsiteselector',
+ ),
+
+);
diff --git a/templates/application/cron.erb.sh b/templates/application/cron.erb.sh
new file mode 100755
index 0000000..bc67f0f
--- /dev/null
+++ b/templates/application/cron.erb.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+echo "$(date) - Start executing cron.sh"
+
+# These are configurable with positional args
+container=${1}
+if [[ -z ${container} ]]; then
+ container=nextcloud_app_1
+fi
+
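+# Simple lockfile to keep overlapping cron runs from starting a second instance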
+lock="/tmp/cron-${container}.lock"
+if [[ -f ${lock} ]]; then
+ echo "Lockfile exists, another instance of ${0} is running"
+ exit 0
+else
+ touch ${lock}
+fi
+
+/usr/bin/docker exec -u www-data ${container} php --define apc.enable_cli=1 /var/www/html/cron.php
+
+echo "$(date) - Done executing cron.sh"
+rm ${lock}
+
diff --git a/templates/application/docker-compose_nextcloud.yml.erb b/templates/application/docker-compose_nextcloud.yml.erb
new file mode 100644
index 0000000..4773587
--- /dev/null
+++ b/templates/application/docker-compose_nextcloud.yml.erb
@@ -0,0 +1,42 @@
+version: '3.2'
+
+services:
+
+ app:
+ image: docker.sunet.se/drive/nextcloud-custom:<%= @nextcloud_version %>
+ restart: always
+ volumes:
+ - /opt/nextcloud/000-default.conf:/etc/apache2/sites-enabled/000-default.conf
+ - /opt/nextcloud/mpm_prefork.conf:/etc/apache2/mods-available/mpm_prefork.conf
+ - /opt/nextcloud/404.html:/var/www/html/404.html
+ - /opt/nextcloud/apache.php.ini:/etc/php/8.0/apache2/php.ini
+ - /opt/nextcloud/apcu.ini:/etc/php/8.0/mods-available/apcu.ini
+ - /opt/nextcloud/cli.php.ini:/etc/php/8.0/cli/php.ini
+ - /opt/nextcloud/complete_reinstall.sh:/complete_reinstall.sh
+ - /opt/nextcloud/config.php:/var/www/html/config/config.php
+ - /opt/nextcloud/nextcloud.log:/var/www/html/data/nextcloud.log
+ - /opt/nextcloud/rclone.conf:/rclone.conf
+<%- if @skeletondirectory -%>
+ - /opt/nextcloud/skeleton:<%= @skeletondirectory %>
+<%- end -%>
+<%- if @location =~ /^gss/ -%>
+ - /opt/nextcloud/mappingfile.json:/var/www/html/mappingfile.json
+<%- end -%>
+<% if @location =~ /^kau/ -%>
+ - /mnt:/opt/tmp/
+<%- end -%>
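+ # The external proxysql network lets the app container reach the database proxy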
+ networks:
+ - default
+ - proxysql_proxysql
+ dns:
+ - 89.46.20.75
+ - 89.46.21.29
+ - 89.32.32.32
+ ports:
+ - 443:443
+ command: apachectl -D FOREGROUND
+ tty: true
+
+networks:
+ proxysql_proxysql:
+ external: true
diff --git a/templates/application/get_no_mysql_servers.erb.sh b/templates/application/get_no_mysql_servers.erb.sh
new file mode 100644
index 0000000..a62bd36
--- /dev/null
+++ b/templates/application/get_no_mysql_servers.erb.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
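+# Count the MySQL backends ProxySQL currently has in hostgroup 10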
+docker exec -ti proxysql_proxysql_1 mysql -NB -e "select count(*) FROM main.runtime_mysql_servers where hostgroup_id = 10" | tr -d '\r'
diff --git a/templates/application/mappingfile-prod.json.erb b/templates/application/mappingfile-prod.json.erb
new file mode 100644
index 0000000..2b0b4f4
--- /dev/null
+++ b/templates/application/mappingfile-prod.json.erb
@@ -0,0 +1,57 @@
+{
+ "/antagning.se$/": "antagning.drive.sunet.se",
+ "/bth.se$/": "bth.drive.sunet.se",
+ "/chalmers.se$/": "chalmers.drive.sunet.se",
+ "/du.se$/": "du.drive.sunet.se",
+ "/eduid.se$/": "extern.drive.sunet.se",
+ "/esh.se$/": "esh.drive.sunet.se",
+ "/fhs.se$/": "fhs.drive.sunet.se",
+ "/gih.se$/": "gih.drive.sunet.se",
+ "/gu.se$/": "gu.drive.sunet.se",
+ "/hb.se$/": "hb.drive.sunet.se",
+ "/shh.se$/": "shh.drive.sunet.se",
+ "/hh.se$/": "hh.drive.sunet.se",
+ "/hhs.se$/": "hhs.drive.sunet.se",
+ "/hig.se$/": "hig.drive.sunet.se",
+ "/his.se$/": "his.drive.sunet.se",
+ "/hj.se$/": "hj.drive.sunet.se",
+ "/hkr.se$/": "hkr.drive.sunet.se",
+ "/hv.se$/": "hv.drive.sunet.se",
+ "/irf.se$/": "irf.drive.sunet.se",
+ "/kb.se$/": "kb.drive.sunet.se",
+ "/ki.se$/": "ki.drive.sunet.se",
+ "/kkh.se$/": "kkh.drive.sunet.se",
+ "/kmh.se$/": "kmh.drive.sunet.se",
+ "/konstfack.se$/": "konstfack.drive.sunet.se",
+ "/kth.se$/": "kth.drive.sunet.se",
+ "/kva.se$/": "kva.drive.sunet.se",
+ "/liu.se$/": "liu.drive.sunet.se",
+ "/lnu.se$/": "lnu.drive.sunet.se",
+ "/ltu.se$/": "ltu.drive.sunet.se",
+ "/lu.se$/": "lu.drive.sunet.se",
+ "/mah.se$/": "mau.drive.sunet.se",
+ "/mau.se$/": "mau.drive.sunet.se",
+ "/mdh.se$/": "mdu.drive.sunet.se",
+ "/mdu.se$/": "mdu.drive.sunet.se",
+ "/miun.se$/": "miun.drive.sunet.se",
+ "/nordunet.se$/": "nordunet.drive.sunet.se",
+ "/nrm.se$/": "nrm.drive.sunet.se",
+ "/oru.se$/": "oru.drive.sunet.se",
+ "/rkh.se$/": "rkh.drive.sunet.se",
+ "/sics.se$/": "sics.drive.sunet.se",
+ "/slu.se$/": "slu.drive.sunet.se",
+ "/smhi.se$/": "smhi.drive.sunet.se",
+ "/sp.se$/": "sp.drive.sunet.se",
+ "/su.se$/": "su.drive.sunet.se",
+ "/sunet.se$/": "sunet.drive.sunet.se",
+ "/suni.se$/": "suni.drive.sunet.se",
+ "/swamid.se$/": "swamid.drive.sunet.se",
+ "/ths.se$/": "ths.drive.sunet.se",
+ "/uhr.se$/": "uhr.drive.sunet.se",
+ "/umu.se$/": "umu.drive.sunet.se",
+ "/uniarts.se$/": "uniarts.drive.sunet.se",
+ "/uu.se$/": "uu.drive.sunet.se",
+ "/vinnova.se$/": "vinnova.drive.sunet.se",
+ "/vr.se$/": "vr.drive.sunet.se",
+ "/$/": "extern.drive.sunet.se"
+}
diff --git a/templates/application/mappingfile-test.json.erb b/templates/application/mappingfile-test.json.erb
new file mode 100644
index 0000000..06b3d0d
--- /dev/null
+++ b/templates/application/mappingfile-test.json.erb
@@ -0,0 +1,57 @@
+{
+ "/antagning.se$/": "antagning.drive.test.sunet.se",
+ "/bth.se$/": "bth.drive.test.sunet.se",
+ "/chalmers.se$/": "chalmers.drive.test.sunet.se",
+ "/du.se$/": "du.drive.test.sunet.se",
+ "/eduid.se$/": "extern.drive.test.sunet.se",
+ "/esh.se$/": "esh.drive.test.sunet.se",
+ "/fhs.se$/": "fhs.drive.test.sunet.se",
+ "/gih.se$/": "gih.drive.test.sunet.se",
+ "/gu.se$/": "gu.drive.test.sunet.se",
+ "/hb.se$/": "hb.drive.test.sunet.se",
+ "/shh.se$/": "shh.drive.test.sunet.se",
+ "/hh.se$/": "hh.drive.test.sunet.se",
+ "/hhs.se$/": "hhs.drive.test.sunet.se",
+ "/hig.se$/": "hig.drive.test.sunet.se",
+ "/his.se$/": "his.drive.test.sunet.se",
+ "/hj.se$/": "hj.drive.test.sunet.se",
+ "/hkr.se$/": "hkr.drive.test.sunet.se",
+ "/hv.se$/": "hv.drive.test.sunet.se",
+ "/irf.se$/": "irf.drive.test.sunet.se",
+ "/kb.se$/": "kb.drive.test.sunet.se",
+ "/ki.se$/": "ki.drive.test.sunet.se",
+ "/kkh.se$/": "kkh.drive.test.sunet.se",
+ "/kmh.se$/": "kmh.drive.test.sunet.se",
+ "/konstfack.se$/": "konstfack.drive.test.sunet.se",
+ "/kth.se$/": "kth.drive.test.sunet.se",
+ "/kva.se$/": "kva.drive.test.sunet.se",
+ "/liu.se$/": "liu.drive.test.sunet.se",
+ "/lnu.se$/": "lnu.drive.test.sunet.se",
+ "/ltu.se$/": "ltu.drive.test.sunet.se",
+ "/lu.se$/": "lu.drive.test.sunet.se",
+ "/mah.se$/": "mau.drive.test.sunet.se",
+ "/mau.se$/": "mau.drive.test.sunet.se",
+ "/mdh.se$/": "mdu.drive.test.sunet.se",
+ "/mdu.se$/": "mdu.drive.test.sunet.se",
+ "/miun.se$/": "miun.drive.test.sunet.se",
+ "/nordunet.se$/": "nordunet.drive.test.sunet.se",
+ "/nrm.se$/": "nrm.drive.test.sunet.se",
+ "/oru.se$/": "oru.drive.test.sunet.se",
+ "/rkh.se$/": "rkh.drive.test.sunet.se",
+ "/sics.se$/": "sics.drive.test.sunet.se",
+ "/slu.se$/": "slu.drive.test.sunet.se",
+ "/smhi.se$/": "smhi.drive.test.sunet.se",
+ "/sp.se$/": "sp.drive.test.sunet.se",
+ "/su.se$/": "su.drive.test.sunet.se",
+ "/sunet.se$/": "sunet.drive.test.sunet.se",
+ "/suni.se$/": "suni.drive.test.sunet.se",
+ "/swamid.se$/": "swamid.drive.test.sunet.se",
+ "/ths.se$/": "ths.drive.test.sunet.se",
+ "/uhr.se$/": "uhr.drive.test.sunet.se",
+ "/umu.se$/": "umu.drive.test.sunet.se",
+ "/uniarts.se$/": "uniarts.drive.test.sunet.se",
+ "/uu.se$/": "uu.drive.test.sunet.se",
+ "/vinnova.se$/": "vinnova.drive.test.sunet.se",
+ "/vr.se$/": "vr.drive.test.sunet.se",
+ "/$/": "extern.drive.test.sunet.se"
+}
diff --git a/templates/application/migrate_external_mounts.erb b/templates/application/migrate_external_mounts.erb
new file mode 100644
index 0000000..de4caa6
--- /dev/null
+++ b/templates/application/migrate_external_mounts.erb
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+keymapping=${1}
+
+if ! [[ -f ${keymapping} ]]; then
+ echo "We need a valid keymapping file to proceed"
+ exit 1
+fi
+
+function get_secrets {
+# Expects a space separated file with oldkey newkey newsecret
+ grep ${1} ${keymapping} | awk '{print $2, $3}'
+}
+
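+# Rewrite every external S3 mount to use the new endpoint and its new credentials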
+for line in $(docker exec -u www-data nextcloud_app_1 /var/www/html/occ files_external:list --all --output json | jq -r '.[] | "\(.mount_id);\(.configuration.key)"'); do
+ id=$(echo ${line} | awk -F ';' '{print $1}')
+ key=$(echo ${line} | awk -F ';' '{print $2}')
+ if [[ "x${key}" == "x" ]] || [[ "${id}" == "x" ]]; then
+ echo "Old key or mount id is empty, bailing out."
+ exit 1
+ fi
+ secrets="$(get_secrets ${key})"
+ newkey="$(echo ${secrets} | awk '{print $1}')"
+ secret="$(echo ${secrets} | awk '{print $2}')"
+ if [[ "x${newkey}" == "x" ]] || [[ "x${secret}" == "x" ]]; then
+ echo "New key or secret is empty, skipping mount id ${id}."
+ continue
+ fi
+
+ docker exec -u www-data nextcloud_app_1 /var/www/html/occ files_external:config ${id} region us-east-1
+ docker exec -u www-data nextcloud_app_1 /var/www/html/occ files_external:config ${id} hostname s3.sto4.safedc.net
+ docker exec -u www-data nextcloud_app_1 /var/www/html/occ files_external:config ${id} key ${newkey}
+ docker exec -u www-data nextcloud_app_1 /var/www/html/occ files_external:config ${id} secret ${secret}
+done
diff --git a/templates/application/mpm_prefork.conf.erb b/templates/application/mpm_prefork.conf.erb
new file mode 100644
index 0000000..b5d309b
--- /dev/null
+++ b/templates/application/mpm_prefork.conf.erb
@@ -0,0 +1,17 @@
+# prefork MPM
+# StartServers: number of server processes to start
+# MinSpareServers: minimum number of server processes which are kept spare
+# MaxSpareServers: maximum number of server processes which are kept spare
+# MaxRequestWorkers: maximum number of server processes allowed to start
+# MaxConnectionsPerChild: maximum number of requests a server process serves
+
+<IfModule mpm_prefork_module>
+ StartServers 5
+ MinSpareServers 5
+ MaxSpareServers 10
+ MaxRequestWorkers 2048
+ MaxConnectionsPerChild 0
+ ServerLimit 2048
+</IfModule>
+
+# vim: syntax=apache ts=4 sw=4 sts=4 sr noet
diff --git a/templates/application/occ.erb b/templates/application/occ.erb
new file mode 100755
index 0000000..87eb268
--- /dev/null
+++ b/templates/application/occ.erb
@@ -0,0 +1,17 @@
+#!/bin/bash
+
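+# Wrapper for occ: an optional first argument selects the container, default is nextcloud_app_1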
+if [[ "${1}" =~ ^nextcloud ]]; then
+ container=${1}
+ shift
+else
+ container="nextcloud_app_1"
+fi
+
+oc_list=$(env| grep 'OC_')
+if [[ "x${oc_list}" != "x" ]]; then
+ for row in $(echo "${oc_list}"); do
+ MY_VARS="${MY_VARS} -e ${row}"
+ done
+fi
+
+docker exec -ti ${MY_VARS} -u www-data ${container} php --define apc.enable_cli=1 /var/www/html/occ "$@"
diff --git a/templates/application/rclone.conf.erb b/templates/application/rclone.conf.erb
new file mode 100644
index 0000000..1c1e5b4
--- /dev/null
+++ b/templates/application/rclone.conf.erb
@@ -0,0 +1,8 @@
+[<%= @location %>]
+type = s3
+provider = Ceph
+access_key_id = <%= @s3_key %>
+secret_access_key = <%= @s3_secret %>
+endpoint = <%= @s3_host %>
+acl = private
+
diff --git a/templates/application/redis-cli.erb b/templates/application/redis-cli.erb
new file mode 100644
index 0000000..3a18fd1
--- /dev/null
+++ b/templates/application/redis-cli.erb
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+docker exec -ti redis_redis-server_1 redis-cli -a <%= @redis_host_password %> "$@"
diff --git a/templates/application/upgrade23-25.erb.sh b/templates/application/upgrade23-25.erb.sh
new file mode 100644
index 0000000..c1b8ecb
--- /dev/null
+++ b/templates/application/upgrade23-25.erb.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
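+# Stepwise upgrade 23 -> 24 -> 25; pause automatic cosmos runs while it is in progress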
+touch /etc/no-automatic-cosmos
+for version in 24.0.9.3-1 25.0.3.3-4; do
+ docker pull docker.sunet.se/drive/nextcloud-custom:${version}
+done
+
+function block_for_container {
+ while ! [[ "$( docker container inspect -f '{{.State.Running}}' "${1}" )" == "true" ]]; do
+ echo "Waiting for ${1}"
+ sleep 1s
+ done
+}
+container="nextcloud_app_1"
+sed -i "s/ 'version' => '.*',/ 'version' => '24.0.9.3',/" "/opt/nextcloud/config.php"
+sed -i "s/ 'config_is_read_only' => true,/ 'config_is_read_only' => false,/" "/opt/nextcloud/config.php"
+sed -i 's#docker.sunet.se/drive/nextcloud-custom:.*#docker.sunet.se/drive/nextcloud-custom:24.0.9.3-1#' "/opt/nextcloud/docker-compose.yml"
+systemctl restart "sunet-nextcloud"
+sleep 10s
+block_for_container ${container}
+occ upgrade && occ db:add-missing-columns && occ db:add-missing-indices && occ db:add-missing-primary-keys
+sed -i 's#docker.sunet.se/drive/nextcloud-custom:.*#docker.sunet.se/drive/nextcloud-custom:25.0.3.3-4#' "/opt/nextcloud/docker-compose.yml"
+systemctl restart "sunet-nextcloud"
+sleep 10s
+block_for_container ${container}
+occ upgrade && occ db:add-missing-columns && occ db:add-missing-indices && occ db:add-missing-primary-keys && occ maintenance:repair
+rm /etc/no-automatic-cosmos
diff --git a/templates/application/user-sync.erb.sh b/templates/application/user-sync.erb.sh
new file mode 100644
index 0000000..e2f4ab1
--- /dev/null
+++ b/templates/application/user-sync.erb.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+/usr/local/bin/occ globalsiteselector:users:update
+/usr/local/bin/occ federation:sync-addressbooks
diff --git a/templates/cache/docker-compose_cache.yml.erb b/templates/cache/docker-compose_cache.yml.erb
new file mode 100644
index 0000000..e18dbf4
--- /dev/null
+++ b/templates/cache/docker-compose_cache.yml.erb
@@ -0,0 +1,29 @@
+version: '3.2'
+
+services:
+
+ redis-server:
+ image: redis:alpine
+ dns:
+ - 89.46.20.75
+ - 89.46.21.29
+ - 89.32.32.32
+ ports:
+ - "6379:6379"
+ volumes:
+ - /opt/redis/server:/data
+ command: redis-server /data/server.conf --loglevel verbose
+ restart: always
+
+ redis-sentinel:
+ image: redis:alpine
+ dns:
+ - 89.46.20.75
+ - 89.46.21.29
+ - 89.32.32.32
+ ports:
+ - "26379:26379"
+ volumes:
+ - /opt/redis/sentinel:/data
+ command: redis-server /data/sentinel.conf --sentinel
+ restart: always
diff --git a/templates/cache/sentinel.conf.erb b/templates/cache/sentinel.conf.erb
new file mode 100644
index 0000000..e07ada0
--- /dev/null
+++ b/templates/cache/sentinel.conf.erb
@@ -0,0 +1,20 @@
+port 26379
+daemonize no
+logfile sentinel.log
+sentinel deny-scripts-reconfig yes
+sentinel monitor <%= @leader_name %> <%= @leader_address %> 6379 2
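+# A quorum of two sentinels must agree that the master is down before a
+# failover is triggered.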
+<% if @location == 'sunet-prod' %>
+sentinel down-after-milliseconds <%= @leader_name %> 10000
+sentinel failover-timeout <%= @leader_name %> 60000
+<% else %>
+sentinel down-after-milliseconds <%= @leader_name %> 1000
+sentinel failover-timeout <%= @leader_name %> 1000
+<% end %>
+sentinel auth-pass <%= @leader_name %> <%= @redis_host_password %>
+sentinel config-epoch <%= @leader_name %> 1
+sentinel leader-epoch <%= @leader_name %> 1
+protected-mode yes
+requirepass "<%= @redis_host_password %>"
+sentinel current-epoch 1
+sentinel announce-ip <%= @announce_address %>
+sentinel announce-port 26379
diff --git a/templates/cache/server.conf.erb b/templates/cache/server.conf.erb
new file mode 100644
index 0000000..a064708
--- /dev/null
+++ b/templates/cache/server.conf.erb
@@ -0,0 +1,65 @@
+protected-mode yes
+port 6379
+tcp-backlog 511
+timeout 0
+tcp-keepalive 300
+daemonize no
+logfile server.log
+databases 16
+always-show-logo yes
+save 900 1
+save 300 10
+save 60 10000
+stop-writes-on-bgsave-error yes
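+# Persistence: production trades write speed for durability with AOF
+# enabled, while the other environments rely on compressed RDB snapshots only.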
+<% if @location == 'sunet-prod' %>
+rdbcompression no
+appendonly yes
+<% else %>
+rdbcompression yes
+appendonly no
+<% end %>
+rdbchecksum yes
+dbfilename "dump.rdb"
+masterauth "<%= @redis_host_password %>"
+replica-serve-stale-data yes
+replica-read-only yes
+repl-diskless-sync no
+repl-diskless-sync-delay 5
+repl-disable-tcp-nodelay no
+replica-priority 100
+requirepass "<%= @redis_host_password %>"
+lazyfree-lazy-eviction no
+lazyfree-lazy-expire no
+lazyfree-lazy-server-del no
+replica-lazy-flush no
+appendfilename "appendonly.aof"
+appendfsync everysec
+no-appendfsync-on-rewrite no
+auto-aof-rewrite-percentage 100
+auto-aof-rewrite-min-size 64mb
+aof-load-truncated yes
+aof-use-rdb-preamble yes
+lua-time-limit 5000
+slowlog-log-slower-than 10000
+slowlog-max-len 128
+latency-monitor-threshold 0
+notify-keyspace-events ""
+hash-max-ziplist-entries 512
+hash-max-ziplist-value 64
+list-max-ziplist-size -2
+list-compress-depth 0
+set-max-intset-entries 512
+zset-max-ziplist-entries 128
+zset-max-ziplist-value 64
+hll-sparse-max-bytes 3000
+stream-node-max-bytes 4096
+stream-node-max-entries 100
+activerehashing yes
+client-output-buffer-limit normal 0 0 0
+client-output-buffer-limit replica 256mb 64mb 60
+client-output-buffer-limit pubsub 32mb 8mb 60
+hz 10
+dynamic-hz yes
+aof-rewrite-incremental-fsync yes
+rdb-save-incremental-fsync yes
+<%= @replica_of %>
diff --git a/templates/document/coolwsd.xml.erb b/templates/document/coolwsd.xml.erb
new file mode 100644
index 0000000..8859088
--- /dev/null
+++ b/templates/document/coolwsd.xml.erb
@@ -0,0 +1,253 @@
+
+
+
+
+
+
+
+
+
+ sv_SE de_DE en_GB en_US es_ES fr_FR it nl pt_BR pt_PT ru
+
+
+
+
+
+
+
+
+ false
+
+ 10
+
+ 4
+ 5
+
+ false
+ 96
+ 3600
+
+
+ 30
+ 300
+ false
+ 0
+ 8000
+ 0
+ 0
+ 100
+ 5
+ 100
+
+ 10000
+ 60
+ 300
+ 3072
+ 85
+ 120
+
+
+
+
+ false
+ 120
+ 900
+
+
+
+
+
+ true
+
+ warning
+ notice
+ fatal
+ false
+
+ -INFO-WARN
+
+
+ /var/log/coolwsd.log
+ never
+ timestamp
+ true
+ 10 days
+ 10
+ true
+ false
+
+
+ false
+ 82589933
+
+ false
+
+
+
+
+ /var/log/coolwsd.trace.json
+
+
+ false
+
+
+
+
+
+
+
+ false
+
+
+
+
+
+ all
+ any
+
+
+
+ 192\.168\.[0-9]{1,3}\.[0-9]{1,3}
+ ::ffff:192\.168\.[0-9]{1,3}\.[0-9]{1,3}
+ 127\.0\.0\.1
+ ::ffff:127\.0\.0\.1
+ ::1
+ 172\.1[6789]\.[0-9]{1,3}\.[0-9]{1,3}
+ ::ffff:172\.1[6789]\.[0-9]{1,3}\.[0-9]{1,3}
+ 172\.2[0-9]\.[0-9]{1,3}\.[0-9]{1,3}
+ ::ffff:172\.2[0-9]\.[0-9]{1,3}\.[0-9]{1,3}
+ 172\.3[01]\.[0-9]{1,3}\.[0-9]{1,3}
+ ::ffff:172\.3[01]\.[0-9]{1,3}\.[0-9]{1,3}
+ 10\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}
+ ::ffff:10\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}
+
+ https://*.sunet.se
+
+
+
+
+
+
+
+
+ true
+
+ false
+ /etc/coolwsd/cert.pem
+ /etc/coolwsd/key.pem
+ /etc/coolwsd/ca-chain.cert.pem
+
+
+ 1000
+
+
+
+
+
+
+ false
+ 31536000
+
+
+
+
+ true
+ true
+ 1800
+ false
+ 1
+ false
+
+
+
+
+
+
+
+
+
+
+ false
+
+
+ default
+ true
+
+
+
+
+
+ 0
+
+ 900
+
+
+
+
+ <% @customers.each do |customer| -%>
+ <group>
+ <host desc="hostname to allow or deny." allow="true">https://<%= customer %>.<%= @domain %>:443</host>
+ <alias desc="regex pattern of aliasname">https://node[1-3].<%= customer %>.<%= @domain %>:443</alias>
+ </group>
+ <% end -%>
+ <% @multinode_customers.each do |customer| -%>
+ <group>
+ <host desc="hostname to allow or deny." allow="true">https://<%= customer %>.<%= @domain %>:443</host>
+ <alias desc="regex pattern of aliasname">https://multinode[1-3].<%= @domain %>:443</alias>
+ </group>
+ <% end -%>
+
+
+
+
+ true
+
+
+
+
+
+
+
+
+ true
+
+
+ true
+ false
+ admin
+ <%= @collabora_admin_password %>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/templates/lb/503.http.erb b/templates/lb/503.http.erb
new file mode 100644
index 0000000..2f5f1dd
--- /dev/null
+++ b/templates/lb/503.http.erb
@@ -0,0 +1,59 @@
+HTTP/1.1 503 Service Unavailable
+Cache-Control: no-cache
+Connection: close
+Content-Type: text/html
+
+<html>
+  <head>
+    <title>503 Service Unavailable</title>
+  </head>
+  <body>
+    <h1>503</h1>
+    <p>This service is currently unavailable.</p>
+    <p>Something must have died on us.</p>
+  </body>
+</html>
diff --git a/templates/lb/check_exabgp_announce.erb b/templates/lb/check_exabgp_announce.erb
new file mode 100755
index 0000000..49c1a19
--- /dev/null
+++ b/templates/lb/check_exabgp_announce.erb
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+OK=0
+WARNING=1
+CRITICAL=2
+UNKNOWN=3
+
+
+function show_help {
+  echo "Usage: ${0} -w <warning> -c <critical>"
+  echo "Example: ${0} -w 1 -c 10"
+}
+
+warning=1
+critical=2
+output="OK: "
+
+OPTIND=1
+while getopts "w:c:" opt; do
+ case "$opt" in
+ w) warning=${OPTARG}
+ ;;
+ c) critical=${OPTARG}
+ ;;
+ *)
+ show_help
+ exit 0
+ ;;
+ esac
+done
+
+status=${OK}
+all="$(cat /opt/frontend/monitor/*/announce)"
+withdraw=$(echo "${all}" | grep withdraw)
+num_withdraw=$(echo "${all}" | grep withdraw | wc -l)
+num_total=$(echo "${all}" | wc -l)
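+# Share of withdrawn routes as an integer percentage; sed strips the decimal
+# part of the bc output so it can be compared with -gt below.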
+percent=$( echo "scale=1;( ( ${num_withdraw} + 0.05 ) / ${num_total} ) * 100 " | bc | sed 's/\.[0-9]//')
+
+if [[ ${percent} -gt ${warning} ]]; then
+ status=${WARNING}
+ output="WARNING: "
+fi
+if [[ ${percent} -gt ${critical} ]]; then
+ status=${CRITICAL}
+ output="CRITICAL: "
+fi
+
+output="${output}Total routes: ${num_total}"
+
+if [[ ${status} != 0 ]]; then
+ output="${output}, number of withdrawn: ${num_withdraw}. There is a problem with the following routes "
+ for ip in $(echo "${withdraw}" | awk '{print $3}'); do
+ output="${output} ${ip}"
+ done
+fi
+
+echo "${output} | withdraw=${percent}%;${warning};${critical};"
+
+exit ${status}
diff --git a/templates/lb/check_sarimner.erb b/templates/lb/check_sarimner.erb
new file mode 100644
index 0000000..1f53d9a
--- /dev/null
+++ b/templates/lb/check_sarimner.erb
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+problems=""
+num=0
+status=0
+output="OK: all systems nominal"
+for i in $(ls /etc/systemd/system/frontend*.service | sed -e 's_.*/frontend-__' -e 's/.service//'); do
+ sudo docker logs ${i}_haproxy_1 | grep 'Device "sarimner0" does not exist.' > /dev/null
+ if [[ ${?} -eq 0 ]]; then
+ num=$(( num + 1))
+    problems="${problems}${i} "
+ fi
+done
+
+if [[ ${num} -ne 0 ]]; then
+ output="CRITICAL: problems with: ${problems}"
+ status=2
+fi
+
+echo "${output}|problems=${num};1;1"
+exit ${status}
diff --git a/templates/lookup/config.php.erb b/templates/lookup/config.php.erb
new file mode 100644
index 0000000..ce464dd
--- /dev/null
+++ b/templates/lookup/config.php.erb
@@ -0,0 +1,16 @@
+<?php
+$CONFIG = [
+ 'DB' => [
+ 'host' => "<%= @dbhost %>",
+ 'db' => "lookup" ,
+ 'user' => "lookup",
+ 'pass' => "<%= @mysql_user_password %>",
+ ],
+
+ 'GLOBAL_SCALE' => true,
+
+ 'AUTH_KEY' => "<%= @gss_jwt_key %>",
+];
\ No newline at end of file
diff --git a/templates/lookup/docker-compose_lookup.yml.erb b/templates/lookup/docker-compose_lookup.yml.erb
new file mode 100644
index 0000000..20e0f68
--- /dev/null
+++ b/templates/lookup/docker-compose_lookup.yml.erb
@@ -0,0 +1,24 @@
+version: '3.2'
+
+services:
+
+ app:
+ image: docker.sunet.se/drive/nextcloud-lookup:<%= @lookup_version %>
+ restart: always
+ volumes:
+ - /opt/lookup/config.php:/var/www/html/config/config.php
+ networks:
+ - default
+ - proxysql_proxysql
+ dns:
+ - 89.46.20.75
+ - 89.46.21.29
+ - 89.32.32.32
+ ports:
+ - 443:443
+ command: apache2-foreground
+ tty: true
+
+networks:
+ proxysql_proxysql:
+ external: true
diff --git a/templates/mariadb/02-backup_user.sql.erb b/templates/mariadb/02-backup_user.sql.erb
new file mode 100644
index 0000000..898dd70
--- /dev/null
+++ b/templates/mariadb/02-backup_user.sql.erb
@@ -0,0 +1,2 @@
+CREATE USER 'backup'@'%' IDENTIFIED BY '<%= @backup_password %>';
+GRANT SELECT, INSERT, CREATE, RELOAD, PROCESS, SUPER, LOCK TABLES, REPLICATION SLAVE, REPLICATION CLIENT, SHOW VIEW, EVENT, CREATE TABLESPACE ON *.* TO 'backup'@'%';
diff --git a/templates/mariadb/03-proxysql.sql.erb b/templates/mariadb/03-proxysql.sql.erb
new file mode 100644
index 0000000..e495094
--- /dev/null
+++ b/templates/mariadb/03-proxysql.sql.erb
@@ -0,0 +1,2 @@
+CREATE USER 'proxysql'@'%' IDENTIFIED BY '<%= @proxysql_password %>';
+GRANT USAGE ON *.* TO 'proxysql'@'%';
\ No newline at end of file
diff --git a/templates/mariadb/04-nextcloud.sql.erb b/templates/mariadb/04-nextcloud.sql.erb
new file mode 100644
index 0000000..ec2aca6
--- /dev/null
+++ b/templates/mariadb/04-nextcloud.sql.erb
@@ -0,0 +1,3 @@
+CREATE SCHEMA nextcloud;
+CREATE USER 'nextcloud'@'%' IDENTIFIED BY '<%= @mysql_user_password %>';
+GRANT ALL PRIVILEGES ON nextcloud.* TO 'nextcloud'@'%' IDENTIFIED BY '<%= @mysql_user_password %>';
\ No newline at end of file
diff --git a/templates/mariadb/05-lookup.sql.erb b/templates/mariadb/05-lookup.sql.erb
new file mode 100644
index 0000000..577c0e9
--- /dev/null
+++ b/templates/mariadb/05-lookup.sql.erb
@@ -0,0 +1,62 @@
+SET SQL_MODE = "NO_AUTO_VALUE_ON_ZERO";
+SET time_zone = "+00:00";
+
+/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
+/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
+/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
+/*!40101 SET NAMES utf8mb4 */;
+
+CREATE DATABASE IF NOT EXISTS `lookup` DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
+USE `lookup`;
+
+CREATE USER 'lookup'@'%' IDENTIFIED BY '<%= @mysql_user_password %>';
+GRANT ALL PRIVILEGES ON lookup.* TO 'lookup'@'%' IDENTIFIED BY '<%= @mysql_user_password %>';
+
+
+DROP TABLE IF EXISTS `emailValidation`;
+CREATE TABLE IF NOT EXISTS `emailValidation` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `storeId` int(11) NOT NULL,
+ `token` varchar(16) COLLATE utf8mb4_unicode_ci NOT NULL,
+ PRIMARY KEY (`id`),
+ KEY `token` (`token`),
+ KEY `storeId` (`storeId`)
+) ENGINE=InnoDB AUTO_INCREMENT=16 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
+
+DROP TABLE IF EXISTS `store`;
+CREATE TABLE IF NOT EXISTS `store` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `userId` int(11) NOT NULL,
+ `k` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
+ `v` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
+ `valid` tinyint(1) NOT NULL DEFAULT '0',
+ PRIMARY KEY (`id`),
+ KEY `key` (`k`(191)),
+ KEY `value` (`v`(191)),
+ KEY `userId` (`userId`)
+) ENGINE=InnoDB AUTO_INCREMENT=51 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
+
+DROP TABLE IF EXISTS `users`;
+CREATE TABLE IF NOT EXISTS `users` (
+ `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT,
+ `federationId` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
+ `timestamp` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+ PRIMARY KEY (`id`),
+ KEY `federationId` (`federationId`(191))
+) ENGINE=InnoDB AUTO_INCREMENT=15 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
+
+DROP TABLE IF EXISTS `toVerify`;
+CREATE TABLE IF NOT EXISTS `toVerify` (
+ `id` int(11) NOT NULL AUTO_INCREMENT,
+ `userId` int(11) NOT NULL,
+ `storeId` int(11) NOT NULL,
+ `property` varchar(512) COLLATE utf8mb4_unicode_ci NOT NULL,
+ `location` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
+ `tries` int(11) NOT NULL,
+ PRIMARY KEY (`id`)
+) ENGINE=InnoDB AUTO_INCREMENT=16 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
+
+
+/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
+/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
+/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
diff --git a/templates/mariadb/check_galera_cluster.erb b/templates/mariadb/check_galera_cluster.erb
new file mode 100644
index 0000000..697060c
--- /dev/null
+++ b/templates/mariadb/check_galera_cluster.erb
@@ -0,0 +1,303 @@
+#!/bin/bash
+
+#It is a changed version of the original https://github.com/fridim/nagios-plugin-check_galera_cluster/blob/master/check_galera_cluster
+
+
+PROGNAME=`basename $0`
+VERSION="Version 1.1.5"
+AUTHOR="Guillaume Coré , Ales Nosek , Staf Wagemakers , Claudio Kuenzler "
+
+ST_OK=0
+ST_WR=1
+ST_CR=2
+ST_UK=3
+
+warnAlerts=0
+critAlerts=0
+unknAlerts=0
+
+warnText=""
+critText=""
+
+print_version() {
+ echo "$VERSION $AUTHOR"
+}
+
+print_help() {
+ print_version $PROGNAME $VERSION
+ echo ""
+ echo "$PROGNAME is a monitoring plugin to monitor Galera cluster status."
+ echo ""
+ echo "$PROGNAME [-u USER] [-p PASSWORD] [-H HOST] [-P PORT] [-m file] [-w SIZE] [-c SIZE] [-s statefile] [-0]"
+ echo ""
+ echo "Options:"
+ echo " u)"
+ echo " MySQL user."
+ echo " p)"
+ echo " MySQL password."
+ echo " H)"
+ echo " MySQL host."
+ echo " P)"
+ echo " MySQL port."
+ echo " m)"
+ echo " MySQL extra my.cnf configuration file."
+ echo " w)"
+ echo " Sets minimum number of nodes in the cluster when WARNING is raised. (default is same as critical)."
+ echo " c)"
+ echo " Sets minimum number of nodes in the cluster when CRITICAL is raised. (default is 2)."
+ #echo " f)"
+ #echo " Sets critical value of wsrep_flow_control_paused (default is 0.1)."
+ echo " 0)"
+ echo " Raise CRITICAL if the node is not primary"
+ echo " s)"
+ echo " Create state file, detect disconnected nodes"
+ exit $ST_UK
+}
+
+# default values
+crit=2
+#fcp=0.1
+
+check_executable() {
+ if [ -z "$1" ]; then
+ echo "check_executable: no parameter given!"
+ exit $ST_UK
+ fi
+
+ if ! command -v "$1" &>/dev/null; then
+ echo "UNKNOWN: Cannot find $1"
+ exit $ST_UK
+ fi
+}
+
+check_executable docker
+check_executable bc
+
+while getopts "hvu:p:H:P:w:c:m:s:0" OPTION; do
+ case $OPTION in
+ h)
+ print_help
+ exit $ST_UK
+ ;;
+ v)
+ print_version $PROGNAME $VERSION
+ exit $ST_UK
+ ;;
+ u)
+ mysqluser=$OPTARG
+ ;;
+ p)
+ password=$OPTARG
+ ;;
+ H)
+ mysqlhost=$OPTARG
+ ;;
+ P)
+ port=$OPTARG
+ ;;
+ m)
+ myconfig=$OPTARG
+ ;;
+ w)
+ warn=$OPTARG
+ ;;
+ c)
+ crit=$OPTARG
+ ;;
+ 0)
+ primary='TRUE'
+ ;;
+ s)
+ stateFile=$OPTARG
+ ;;
+ ?)
+ echo "Unknown argument: $1"
+ print_help
+ exit $ST_UK
+ ;;
+ esac
+done
+
+if [ -z "$warn" ]; then
+ warn=$crit
+fi
+
+create_param() {
+ if [ -n "$2" ]; then
+ echo $1$2
+ fi
+}
+
+param_mysqlhost=$(create_param -h "$mysqlhost")
+param_port=$(create_param -P "$port")
+param_mysqluser=$(create_param -u "$mysqluser")
+param_password=$(create_param -p "$password")
+param_configfile=$(create_param --defaults-extra-file= "$myconfig")
+export MYSQL_PWD=$password
+
+param_mysql="$param_mysqlhost $param_port $param_mysqluser $param_password $param_configfile"
+
+#
+# verify the database connection
+#
+
+/usr/bin/docker exec mariadb_db_1 mysql --defaults-group-suffix=_backup -B -N -e '\s;' >/dev/null 2>&1 || {
+ echo "CRITICAL: mysql connection check failed"
+ exit $ST_CR
+}
+
+#
+# retrieve the mysql status
+#
+
+#rMysqlStatus=$(/usr/bin/docker exec mariadb_db_1 mysql --defaults-group-suffix=_backup -B -N -e "show status like 'wsrep_%';")
+
+#
+# verify that the node is part of a cluster
+#
+
+rClusterStateUuid=$(/usr/bin/docker exec mariadb_db_1 mysql --defaults-group-suffix=_backup -B -N -e "select variable_value from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_cluster_state_uuid'")
+
+if [ -z "$rClusterStateUuid" ]; then
+ echo "CRITICAL: node is not part of a cluster."
+ exit $ST_CR
+fi
+
+#rFlowControl=$(echo "$rMysqlStatus" | awk '/wsrep_flow_control_paused\t/ {print $2}') # < 0.1
+#rFlowControl=$(printf "%.14f" $rFlowControl) # issue #4
+
+
+rClusterSize=$(/usr/bin/docker exec mariadb_db_1 mysql --defaults-group-suffix=_backup -B -N -e "select variable_value from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_cluster_size'")
+
+
+rClusterStatus=$(/usr/bin/docker exec mariadb_db_1 mysql --defaults-group-suffix=_backup -B -N -e "select variable_value from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_cluster_status'") # Primary
+
+
+rReady=$(/usr/bin/docker exec mariadb_db_1 mysql --defaults-group-suffix=_backup -B -N -e "select variable_value from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_ready'") # ON
+
+
+rConnected=$(/usr/bin/docker exec mariadb_db_1 mysql --defaults-group-suffix=_backup -B -N -e "select variable_value from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_connected'") # ON
+
+
+rLocalStateComment=$(/usr/bin/docker exec mariadb_db_1 mysql --defaults-group-suffix=_backup -B -N -e "select variable_value from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_local_state_comment'") # Synced
+
+rIncommingAddresses=$(/usr/bin/docker exec mariadb_db_1 mysql --defaults-group-suffix=_backup -B -N -e "select variable_value from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_incoming_addresses'") # used by the state file check below
+
+rSSTMethod=$(/usr/bin/docker exec mariadb_db_1 mysql --defaults-group-suffix=_backup -B -N -e "select variable_value from information_schema.GLOBAL_VARIABLES where VARIABLE_NAME = 'wsrep_sst_method'") # mariabackup
+
+
+#if [ -z "$rFlowControl" ]; then
+# echo "UNKNOWN: wsrep_flow_control_paused is empty"
+# unknAlerts=$(($unknAlerts+1))
+#fi
+
+#if [ $(echo "$rFlowControl > $fcp" | bc) = 1 ]; then
+# echo "CRITICAL: wsrep_flow_control_paused is > $fcp"
+# critAlerts=$(($criticalAlerts+1))
+#fi
+
+if [ "$primary" = 'TRUE' ]; then
+ if [ $rClusterStatus != 'Primary' ]; then
+ critText+="CRITICAL: node is not primary (wsrep_cluster_status). "
+ critAlerts=$(($criticalAlerts+1))
+ fi
+fi
+
+if [ $rReady != 'ON' ]; then
+ critText+="CRITICAL: node is not ready (wsrep_ready). "
+ critAlerts=$(($criticalAlerts+1))
+fi
+
+if [ $rConnected != 'ON' ]; then
+ critText+="CRITICAL: node is not connected (wsrep_connected). "
+ critAlerts=$(($criticalAlerts+1))
+fi
+
+if [ $rLocalStateComment != 'Synced' ]; then
+ critText+="CRITICAL: node is not synced - actual state is: $rLocalStateComment (wsrep_local_state_comment). "
+ critAlerts=$(($criticalAlerts+1))
+fi
+
+if [ $rSSTMethod != 'mariabackup' ]; then
+ critText+="CRITICAL: node is not backed up - actual state is: $rSSTMethod (wsrep_sst_method). "
+ critAlerts=$(($criticalAlerts+1))
+fi
+
+if [ "$rClusterSize" -gt "$warn" ]; then
+  # only display the ok message if the state check is not enabled
+  if [ -z "$stateFile" ]; then
+    echo "OK: number of NODES = $rClusterSize"
+  fi
+elif [ "$rClusterSize" -le "$crit" ]; then
+  critText+="CRITICAL: number of NODES = $rClusterSize. "
+  critAlerts=$(($critAlerts+1))
+elif [ "$rClusterSize" -le "$warn" ]; then
+  warnText+="WARNING: number of NODES = $rClusterSize. "
+  warnAlerts=$(($warnAlerts+1))
+else
+  exit $ST_UK
+fi
+
+#
+# automatically detect whether a node has left the cluster
+#
+
+if [ ! -z "$stateFile" ]; then
+
+ touch $stateFile
+
+ if [ $? != "0" ]; then
+
+ echo "UNKNOWN: stateFile \"$stateFile\" is not writeable"
+ unknAlerts=$(($unknAlerts+1))
+
+ else
+
+ if [ $rConnected = "ON" ]; then
+ # get the current connected Nodes
+      currentNodes=$(echo ${rIncommingAddresses} | tr "," "\n" | sort -u)
+ if [ -f "$stateFile" ]; then
+ # get the nodes added to the cluster
+ newNodes=$(echo $currentNodes | tr " " "\n" | comm -2 -3 - $stateFile)
+ # get the nodes that were removed from the cluster
+ missingNodes=$(echo $currentNodes | tr " " "\n" | comm -1 -3 - $stateFile)
+ if [ ! -z "$newNodes" ]; then
+ # add the new nodes to the cluster to the state file
+ echo $newNodes | tr " " "\n" >> $stateFile
+ fi
+ else
+ # there is no state file yet, creating new one.
+ echo $currentNodes | tr " " "\n" > $stateFile
+ fi # -f stateFile
+      # get the number of nodes that were part of the cluster before
+ maxClusterSize=$(cat $stateFile | wc -l)
+
+ if [ $maxClusterSize -eq $rClusterSize ]; then
+ if [ $maxClusterSize -eq 1 ]; then
+ if [ $crit -eq 0 -a $warn -eq 0 ]; then
+ echo "OK: running single-node database cluster"
+ fi
+ else
+ echo "OK: running redundant $rClusterSize online / $maxClusterSize total"
+ fi
+ else
+ echo "WARNING: redundant $rClusterSize online / $maxClusterSize total, missing peers: $missingNodes"
+ warnAlerts=$(($warnAlerts+1))
+ fi
+
+ fi # rConnected
+
+ fi # -w stateFile
+
+fi # -z stateFile
+
+
+#
+# exit
+#
+
+[ "$critAlerts" -gt "0" ] && echo $critText && exit $ST_CR
+[ "$unknAlerts" -gt "0" ] && exit $ST_UK
+[ "$warnAlerts" -gt "0" ] && echo $warnText && exit $ST_WR
+
+exit 0
diff --git a/templates/mariadb/credentials.cnf.erb b/templates/mariadb/credentials.cnf.erb
new file mode 100644
index 0000000..8bb5db7
--- /dev/null
+++ b/templates/mariadb/credentials.cnf.erb
@@ -0,0 +1,14 @@
+[mysqld]
+wsrep_sst_auth = "backup:<%= @backup_password %>"
+
+[mariabackup]
+user = backup
+password = '<%= @backup_password %>'
+
+[mysqldump]
+user = backup
+password = '<%= @backup_password %>'
+
+[mysql_backup]
+user = backup
+password = '<%= @backup_password %>'
\ No newline at end of file
diff --git a/templates/mariadb/docker-compose_mariadb.yml.erb b/templates/mariadb/docker-compose_mariadb.yml.erb
new file mode 100644
index 0000000..4582608
--- /dev/null
+++ b/templates/mariadb/docker-compose_mariadb.yml.erb
@@ -0,0 +1,30 @@
+version: '3'
+
+services:
+
+ db:
+ image: docker.sunet.se/drive/mariadb:<%= @mariadb_version %>
+ restart: always
+ volumes:
+ - /etc/mariadb/backups:/backups
+ - /etc/mariadb/conf:/etc/mysql/mariadb.conf.d
+ - /etc/mariadb/datadir:/var/lib/mysql
+ - /etc/mariadb/init:/docker-entrypoint-initdb.d
+ - /etc/mariadb/scripts:/scripts
+ network_mode: host
+ dns:
+ - 89.46.20.75
+ - 89.46.21.29
+ - 89.32.32.32
+ environment:
+ - MYSQL_ROOT_PASSWORD=<%= @mysql_root_password %>
+ - BOOTSTRAP=<%= @bootstrap %>
+ - FORCE_BOOTSTRAP=0
+ ports:
+ - 3306:3306
+ - 4444:4444
+ - 4567:4567
+ - 4568:4568
+ command: "--wsrep_cluster_address=gcomm://<%= @db_ip[0] %>,<%= @db_ip[1] %>,<%= @db_ip[2] %>"
+ tty: true
+
diff --git a/templates/mariadb/my.cnf.erb b/templates/mariadb/my.cnf.erb
new file mode 100644
index 0000000..81892bb
--- /dev/null
+++ b/templates/mariadb/my.cnf.erb
@@ -0,0 +1,47 @@
+[mysqld]
+
+# General
+binlog_format = ROW
+character_set_server = utf8mb4
+collation_server = utf8mb4_general_ci
+default_storage_engine = InnoDB
+local_infile = 0
+max_allowed_packet = 256M
+max_connections = 750
+skip_external_locking = ON
+skip_name_resolve = ON
+
+# Binary logging and replication
+expire_logs_days = 7
+gtid_domain_id = 1000
+gtid_ignore_duplicates = ON
+gtid_strict_mode = ON
+log_bin = binlog
+log_slave_updates = ON
+server_id = <%= @server_id %>
+
+# Innodb
+innodb_autoinc_lock_mode = 2
+innodb_buffer_pool_size = 4G # 60-75% of total RAM
+innodb_file_per_table = 1
+innodb_flush_log_at_trx_commit = 0
+innodb_flush_method = O_DIRECT
+innodb_io_capacity = 2000 # IOPS dependent
+innodb_io_capacity_max = 4000 # IOPS dependent
+innodb_log_file_size = 512M
+innodb_read_io_threads = 4 # CPU dependent
+innodb_rollback_on_timeout = 1
+innodb_write_io_threads = 4 # CPU dependent
+transaction_isolation = 'READ-COMMITTED'
+
+# Galera
+wsrep_cluster_name = "Nextcloud_MariaDB_Cluster"
+wsrep_gtid_domain_id = 1000 # same on all Galera nodes in the same segment
+wsrep_gtid_mode = ON
+wsrep_log_conflicts = ON
+wsrep_on = ON
+wsrep_provider = /usr/lib/galera/libgalera_smm.so
+wsrep_provider_options = "gcache.size=2G;gmcast.segment=0" # gmcast.segment same on all Galera nodes in the same segment
+wsrep_slave_threads = 4 # CPU dependent
+wsrep_sst_method = mariabackup
+wsrep_sync_wait = 1
diff --git a/templates/mariadb/quorum.conf.erb b/templates/mariadb/quorum.conf.erb
new file mode 100644
index 0000000..f04a5a4
--- /dev/null
+++ b/templates/mariadb/quorum.conf.erb
@@ -0,0 +1,10 @@
+quorum_alive_command="[[ \"Primary\" == \$(/usr/bin/mysql -p<%= @quorum_password %> -NB -e 'show status like \"wsrep_cluster_status\"' | awk '{print \$2}') ]]"
+quorum_db='quorum'
+quorum_db_command='/usr/bin/mysql'
+quorum_host='localhost'
+quorum_id=<%= @quorum_id %>
+quorum_interval='20'
+quorum_notify_command='/usr/bin/echo alive status:${QUORUM_ALIVE_STATUS}, leader: ${QUORUM_LEADER}, my leader status: ${QUORUM_LEADER_STATUS}'
+quorum_password='<%= @quorum_password %>'
+quorum_table='service_election'
+quorum_user='quorum'
diff --git a/templates/mariadb/quorum.erb.sh b/templates/mariadb/quorum.erb.sh
new file mode 100755
index 0000000..5046c41
--- /dev/null
+++ b/templates/mariadb/quorum.erb.sh
@@ -0,0 +1,99 @@
+#!/usr/bin/env bash
+
+# A bash implementation of http://code.openark.org/blog/mysql/leader-election-using-mysql
+
+# Defaults
+quorum_alive_command='/bin/true'
+quorum_config='/etc/quorum.conf'
+quorum_db='quorum'
+quorum_db_command='/usr/bin/mysql'
+quorum_host='localhost'
+quorum_id=$(hostname -f)
+quorum_interval='20'
+quorum_notify_command='/usr/bin/echo alive status:${QUORUM_ALIVE_STATUS}, leader: ${QUORUM_LEADER}, my leader status: ${QUORUM_LEADER_STATUS}'
+quorum_password='quorum'
+quorum_table='service_election'
+quorum_user='quorum'
+
+# Override default config path from env
+if [[ "x${QUORUM_CONFIG}" != "x" ]]; then
+ quorum_config="${QUORUM_CONFIG}"
+fi
+
+# Override default config with settings from config file
+if [[ -f "${quorum_config}" ]]; then
+ . "${quorum_config}"
+fi
+
+# Override with env
+if [[ "x${QUORUM_ALIVE_COMMAND}" != "x" ]]; then
+ quorum_alive_command=${QUORUM_ALIVE_COMMAND}
+fi
+if [[ "x${QUORUM_DB}" != "x" ]]; then
+ quorum_db=${QUORUM_DB}
+fi
+if [[ "x${QUORUM_DB_COMMAND}" != "x" ]]; then
+ quorum_db_command=${QUORUM_DB_COMMAND}
+fi
+if [[ "x${QUORUM_HOST}" != "x" ]]; then
+ quorum_host=${QUORUM_HOST}
+fi
+if [[ "x${QUORUM_ID}" != "x" ]]; then
+ quorum_id=${QUORUM_ID}
+fi
+if [[ "x${QUORUM_INTERVAL}" != "x" ]]; then
+ quorum_interval=${QUORUM_INTERVAL}
+fi
+if [[ "x${QUORUM_NOTIFY_COMMAND}" != "x" ]]; then
+ quorum_notify_command=${QUORUM_NOTIFY_COMMAND}
+fi
+if [[ "x${QUORUM_PASSWORD}" != "x" ]]; then
+ quorum_password=${QUORUM_PASSWORD}
+fi
+if [[ "x${QUORUM_TABLE}" != "x" ]]; then
+ quorum_table=${QUORUM_TABLE}
+fi
+if [[ "x${QUORUM_USER}" != "x" ]]; then
+ quorum_user=${QUORUM_USER}
+fi
+
+# MySQL command
+quorum_db_command="${quorum_db_command} --database=${quorum_db} --user ${quorum_user} --password ${quorum_password}"
+
+# Queries
+create_table_query="CREATE TABLE ${quorum_table} (
+  anchor tinyint(3) unsigned NOT NULL,
+  service_id varchar(128) NOT NULL,
+  last_seen_active timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
+  PRIMARY KEY (anchor)
+) ENGINE=InnoDB"
+
+table_exists_query="SELECT *
+FROM information_schema.tables
+WHERE table_schema = '${quorum_db}'
+ AND table_name = '${quorum_table}'
+LIMIT 1"
+
+election_query="insert ignore into ${quorum_table} ( anchor, service_id, last_seen_active ) values ( 1, '${quorum_id}', now() ) on duplicate key update service_id = if(last_seen_active < now() - interval ${quorum_interval} second, values(service_id), service_id), last_seen_active = if(service_id = values(service_id), values(last_seen_active), last_seen_active)"
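+# The election: the insert claims the anchor row only if it is free or the
+# current leader has not renewed its last_seen_active timestamp within
+# quorum_interval seconds; otherwise the row is left untouched.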
+
+is_leader_query="select count(*) as is_leader from ${quorum_table} where anchor=1 and service_id='${quorum_id}'"
+
+who_is_leader_query="select max(service_id) as leader from ${quorum_table} where anchor=1"
+
+# Set up table if it does not exist
+table_exists=$(${quorum_db_command} -N -B -e "${table_exists_query}" 2>/dev/null)
+if [[ "x${table_exists}" == "x" ]]; then
+  ${quorum_db_command} -e "${create_table_query}" >/dev/null 2>&1
+fi
+
+# Run the algorithm
+eval "${quorum_alive_command}" >/dev/null 2>&1
+alive_status=${?}
+if [[ ${alive_status} -eq 0 ]]; then
+ ${quorum_db_command} -e "${election_query}" >/dev/null 2>&1
+fi
+leader_status=$(${quorum_db_command} -N -B -e "${is_leader_query}")
+leader=$(${quorum_db_command} -N -B -e "${who_is_leader_query}")
+
+QUORUM_ALIVE_STATUS=${alive_status} QUORUM_LEADER=${leader} QUORUM_LEADER_STATUS=${leader_status} eval ${quorum_notify_command}
+exit ${alive_status}
diff --git a/templates/mariadb/rename-docker.sh b/templates/mariadb/rename-docker.sh
new file mode 100644
index 0000000..5283057
--- /dev/null
+++ b/templates/mariadb/rename-docker.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
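+# One-off migration to the renamed mariadb units: disable the old
+# sunet-docker units, remove stale unit files and compose directories, and
+# let run-cosmos recreate the service.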
+touch /etc/no-automatic-cosmos
+for i in $(systemctl list-unit-files | grep sunet-docker | grep enabled | awk '{print $1}'); do systemctl disable --now ${i}; done
+for i in $(systemctl list-unit-files | grep mariadb | grep disabled | awk '{print $1}'); do rm /etc/systemd/system/${i}; done
+rm -r /opt/docker*mariadb
+run-cosmos -v
+docker ps
+rm /etc/no-automatic-cosmos
diff --git a/templates/mariadb/run_manual_backup_dump.erb.sh b/templates/mariadb/run_manual_backup_dump.erb.sh
new file mode 100644
index 0000000..61a2c71
--- /dev/null
+++ b/templates/mariadb/run_manual_backup_dump.erb.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+dump_name="mariadb-dump-$(date +%Y-%m-%dT%H.%M.%S).sql.gz"
+dump_args="--all-databases --single-transaction --master-data=2 -u root -p${MYSQL_ROOT_PASSWORD}"
+
+# Replication slave priv was not in backup user creation script previously
+mysql -u root -p${MYSQL_ROOT_PASSWORD} -e "GRANT REPLICATION SLAVE ON *.* TO 'backup'@'%'"
+echo "Running backup as root user"
+mysqldump ${dump_args} | gzip >"/backups/${dump_name}"
diff --git a/templates/mariadb/size-test.erb b/templates/mariadb/size-test.erb
new file mode 100644
index 0000000..3516bf3
--- /dev/null
+++ b/templates/mariadb/size-test.erb
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+docker exec mariadb_db_1 mysql -u root -p'<%= @mysql_root_password %>' -N -B -e "show status like 'wsrep_cluster_size'"
diff --git a/templates/mariadb/status-test.erb b/templates/mariadb/status-test.erb
new file mode 100644
index 0000000..7b07ee5
--- /dev/null
+++ b/templates/mariadb/status-test.erb
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+docker exec mariadb_db_1 mysql -u root -p'<%= @mysql_root_password %>' -N -B -e "show status like 'wsrep_cluster_status'"
diff --git a/templates/mariadb_backup/02-backup_user.sql.erb b/templates/mariadb_backup/02-backup_user.sql.erb
new file mode 100644
index 0000000..a00e9c7
--- /dev/null
+++ b/templates/mariadb_backup/02-backup_user.sql.erb
@@ -0,0 +1,2 @@
+CREATE USER 'backup'@'localhost' IDENTIFIED BY '<%= @backup_password %>';
+GRANT SELECT, INSERT, CREATE, RELOAD, PROCESS, SUPER, LOCK TABLES, REPLICATION SLAVE, REPLICATION CLIENT, SHOW VIEW, EVENT, CREATE TABLESPACE ON *.* TO 'backup'@'localhost';
diff --git a/templates/mariadb_backup/check_replication.erb b/templates/mariadb_backup/check_replication.erb
new file mode 100755
index 0000000..8df406f
--- /dev/null
+++ b/templates/mariadb_backup/check_replication.erb
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+result="$(docker exec mariadbbackup_mariadb_backup_1 mysql -p<%= @mysql_root_password %> -BN -e 'show status like "slave_running"')"
+if [[ "${result}" == "Slave_running ON" ]]; then
+ echo "OK: Replica running"
+ exit 0
+else
+ echo "CRITICAL: Replica not running"
+ exit 2
+fi
diff --git a/templates/mariadb_backup/credentials.cnf.erb b/templates/mariadb_backup/credentials.cnf.erb
new file mode 100644
index 0000000..8bb5db7
--- /dev/null
+++ b/templates/mariadb_backup/credentials.cnf.erb
@@ -0,0 +1,14 @@
+[mysqld]
+wsrep_sst_auth = "backup:<%= @backup_password %>"
+
+[mariabackup]
+user = backup
+password = '<%= @backup_password %>'
+
+[mysqldump]
+user = backup
+password = '<%= @backup_password %>'
+
+[mysql_backup]
+user = backup
+password = '<%= @backup_password %>'
\ No newline at end of file
diff --git a/templates/mariadb_backup/do_backup.erb.sh b/templates/mariadb_backup/do_backup.erb.sh
new file mode 100644
index 0000000..9ba16b5
--- /dev/null
+++ b/templates/mariadb_backup/do_backup.erb.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+customer=${1}
+stream_name="${customer}mariadb-stream-$(date +%Y-%m-%dT%H.%M.%S).gz"
+dump_name="${customer}mariadb-dump-$(date +%Y-%m-%dT%H.%M.%S).sql.gz"
+backup_dir="/backups/$(date +%Y/%m/%d)"
+mkdir -p "${backup_dir}"
+
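+# Without a customer argument this runs on the dedicated replica: pause
+# replication so both the xbstream backup and the dump are consistent, and
+# record the replication coordinates in the dump.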
+if [[ -z ${customer} ]]; then
+ buopts="--slave-info --safe-slave-backup"
+ dumpopts="--dump-slave"
+ mysql -p${MYSQL_ROOT_PASSWORD} -e "stop slave"
+fi
+mariadb-backup --backup ${buopts} -u root -p${MYSQL_ROOT_PASSWORD} --stream=xbstream | gzip >"${backup_dir}/${stream_name}"
+mysqldump --all-databases --single-transaction ${dumpopts} -u root -p${MYSQL_ROOT_PASSWORD} | gzip >"${backup_dir}/${dump_name}"
+if [[ -z ${customer} ]]; then
+ mysql -p${MYSQL_ROOT_PASSWORD} -e "start slave"
+fi
diff --git a/templates/mariadb_backup/docker-compose_mariadb_backup.yml.erb b/templates/mariadb_backup/docker-compose_mariadb_backup.yml.erb
new file mode 100644
index 0000000..22173d2
--- /dev/null
+++ b/templates/mariadb_backup/docker-compose_mariadb_backup.yml.erb
@@ -0,0 +1,20 @@
+version: '3.2'
+
+services:
+
+ mariadb_backup:
+ image: docker.sunet.se/drive/mariadb
+ dns:
+ - 89.46.20.75
+ - 89.46.21.29
+ - 89.32.32.32
+ environment:
+ - MYSQL_ROOT_PASSWORD=<%= @mysql_root_password %>
+ volumes:
+ - /opt/mariadb_backup/backups:/backups
+ - /opt/mariadb_backup/conf:/etc/mysql/mariadb.conf.d
+ - /opt/mariadb_backup/datadir:/var/lib/mysql
+ - /opt/mariadb_backup/init:/docker-entrypoint-initdb.d
+ - /opt/mariadb_backup/start_replica_from_init.sh:/start_replica_from_init.sh
+ - /opt/mariadb_backup/do_backup.sh:/do_backup.sh
+ restart: always
diff --git a/templates/mariadb_backup/find_disabled_sharing.erb.sh b/templates/mariadb_backup/find_disabled_sharing.erb.sh
new file mode 100644
index 0000000..df6acfb
--- /dev/null
+++ b/templates/mariadb_backup/find_disabled_sharing.erb.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+container=${1}
+customer=${2}
+if [[ -z ${container} ]]; then
+ container='mariadbbackup_mariadb_backup_1'
+fi
+if [[ -z ${customer} ]]; then
+    location='<%= @location %>'
+else
+    location=${customer}
+fi
+
+dexec="docker exec ${container}"
+
+password=$(${dexec} env | grep MYSQL_ROOT_PASSWORD | awk -F '=' '{print $2}')
+
+mysql="${dexec} mysql -p${password}"
+
+project="statistics"
+bucket="drive-server-coms"
+base_dir="${project}:${bucket}"
+mountpoint="/opt/statistics"
+customer_dir="${mountpoint}/${location}"
+mkdir -p "${customer_dir}"
+rclone mkdir "${base_dir}/${location}"
+
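+# Export every external mount with sharing explicitly disabled as a JSON
+# array; the result is shipped to the statistics bucket below.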
+${mysql} -NB -e \
+ "select JSON_OBJECT('option_id',oc_external_options.option_id,'mount_id',oc_external_options.mount_id,'sharing_enabled',oc_external_options.value,'mount_point',oc_external_mounts.mount_point,'storage_backend',oc_external_mounts.storage_backend,'auth_backend',oc_external_mounts.auth_backend,'priority',oc_external_mounts.priority,'type',oc_external_mounts.type,'bucket',oc_external_config.value) as data from oc_external_options join oc_external_mounts on oc_external_options.mount_id=oc_external_mounts.mount_id and oc_external_options.key = 'enable_sharing' and oc_external_options.value = 'false' join oc_external_config on oc_external_config.mount_id=oc_external_mounts.mount_id where oc_external_config.key='bucket'" \
+ nextcloud | jq -s . >"${customer_dir}/sharing_disabled.json"
+status=${?}
+if [[ ${status} == 0 ]]; then
+ rclone move "${customer_dir}/sharing_disabled.json" "${base_dir}/${location}/"
+fi
+exit ${status}
diff --git a/templates/mariadb_backup/listusers.erb.sh b/templates/mariadb_backup/listusers.erb.sh
new file mode 100644
index 0000000..6cd5993
--- /dev/null
+++ b/templates/mariadb_backup/listusers.erb.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+
+container=${1}
+customer=${2}
+if [[ -z ${container} ]]; then
+ container='mariadbbackup_mariadb_backup_1'
+fi
+if [[ -z ${customer} ]]; then
+    location='<%= @location %>'
+else
+    location=${customer}
+fi
+
+dexec="docker exec ${container}"
+
+password=$(${dexec} env | grep MYSQL_ROOT_PASSWORD | awk -F '=' '{print $2}')
+
+mysql="${dexec} mysql -p${password}"
+
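+# Merge global scale users and locally provisioned users into one list.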
+users="$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_global_scale_users')"
+users="${users}
+$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_users')"
+
+project="statistics"
+bucket="drive-server-coms"
+base_dir="${project}:${bucket}"
+mountpoint="/opt/statistics"
+customer_dir="${mountpoint}/${location}"
+mkdir -p "${customer_dir}"
+rclone mkdir "${base_dir}/${location}"
+
+echo "${users}" | awk 'BEGIN{print "{"} {print t "\""$1"\": \""$2"\""} {t=","} END{print "}"}' | jq . >"${customer_dir}/users.json"
+status=0
+if ! jq . "${customer_dir}/users.json" &>/dev/null; then
+ status=1
+fi
+if [[ ${status} -eq 0 ]]; then
+    # something is wrong if we can't copy the file in 30 seconds, so we should note that
+ if ! timeout 30s rclone copy --use-cookies "${customer_dir}/users.json" "${base_dir}/${location}/"; then
+ status=1
+ fi
+fi
+exit ${status}
diff --git a/templates/mariadb_backup/my.cnf.erb b/templates/mariadb_backup/my.cnf.erb
new file mode 100644
index 0000000..b5d09ac
--- /dev/null
+++ b/templates/mariadb_backup/my.cnf.erb
@@ -0,0 +1,37 @@
+[mysqld]
+
+# General
+binlog_format = ROW
+character_set_server = utf8mb4
+collation_server = utf8mb4_general_ci
+default_storage_engine = InnoDB
+local_infile = 0
+max_allowed_packet = 256M
+max_connections = 750
+skip_external_locking = ON
+skip_name_resolve = ON
+skip_slave_start = OFF
+
+# Binary logging and replication
+expire_logs_days = 7
+gtid_domain_id = 1000
+gtid_ignore_duplicates = ON
+gtid_strict_mode = OFF
+log_bin = binlog
+log_slave_updates = ON
+relay_log = 'relay-log'
+server_id = 2000
+
+# Innodb
+innodb_autoinc_lock_mode = 2
+innodb_buffer_pool_size = 4G # 60-75% of total RAM
+innodb_file_per_table = 1
+innodb_flush_log_at_trx_commit = 0
+innodb_flush_method = O_DIRECT
+innodb_io_capacity = 2000 # IOPS dependent
+innodb_io_capacity_max = 4000 # IOPS dependent
+innodb_log_file_size = 512M
+innodb_read_io_threads = 4 # CPU dependent
+innodb_rollback_on_timeout = 1
+innodb_write_io_threads = 4 # CPU dependent
+transaction_isolation = 'READ-COMMITTED'
diff --git a/templates/mariadb_backup/rclone.conf.erb b/templates/mariadb_backup/rclone.conf.erb
new file mode 100644
index 0000000..97d75df
--- /dev/null
+++ b/templates/mariadb_backup/rclone.conf.erb
@@ -0,0 +1,6 @@
+[statistics]
+type = webdav
+url = https://sunet.drive.sunet.se/remote.php/dav/files/_script/
+vendor = nextcloud
+user = _script
+pass = <%= @statistics_secret %>
diff --git a/templates/mariadb_backup/start_replica_from_init.erb.sh b/templates/mariadb_backup/start_replica_from_init.erb.sh
new file mode 100644
index 0000000..4548cab
--- /dev/null
+++ b/templates/mariadb_backup/start_replica_from_init.erb.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+mysql="mysql -u root -p${MYSQL_ROOT_PASSWORD}"
+init_file='/backups/init.sql.gz'
+if [[ -f ${init_file} ]]; then
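+    # The dump was taken with --master-data, so the CHANGE MASTER statement
+    # carrying the binlog coordinates is embedded as a comment in its header.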
+ ${mysql} -e "STOP SLAVE;RESET SLAVE;"
+ master_command=$(zgrep 'CHANGE MASTER TO MASTER_LOG_FILE' ${init_file} | sed -e 's/^-- //' -e 's/;$//')
+ master_command="${master_command}, MASTER_HOST='<%= @first_db %>', MASTER_USER='backup'"
+ master_command="${master_command}, MASTER_PASSWORD='<%= @backup_password%>', MASTER_SSL=1"
+ master_command="${master_command}, MASTER_CONNECT_RETRY=20"
+ zcat ${init_file} | ${mysql}
+ ${mysql} -e "${master_command}"
+ ${mysql} -e "START SLAVE"
+ sleep 3s
+ ${mysql} -e "SHOW SLAVE STATUS\G"
+fi
+
+exit 0
diff --git a/templates/mariadb_backup/status-test.erb b/templates/mariadb_backup/status-test.erb
new file mode 100644
index 0000000..c90da90
--- /dev/null
+++ b/templates/mariadb_backup/status-test.erb
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+docker exec mariadbbackup_mariadb_backup_1 mysql -u root -p'<%= @mysql_root_password %>' -N -B -e "show status like 'Slave_running'"
diff --git a/templates/monitor/000-default.conf.erb b/templates/monitor/000-default.conf.erb
new file mode 100644
index 0000000..1c1ae1c
--- /dev/null
+++ b/templates/monitor/000-default.conf.erb
@@ -0,0 +1,24 @@
+<VirtualHost *:80>
+ RewriteEngine On
+ RewriteCond %{HTTPS} off
+ RewriteCond %{REQUEST_URI} !^\/\.well-known\/.*$
+ RewriteRule ^/?(.*) https://%{SERVER_NAME}/$1 [R,L]
+ ProxyPass /.well-known/acme-challenge http://acme-c.sunet.se/.well-known/acme-challenge/
+ ProxyPassReverse /.well-known/acme-challenge http://acme-c.sunet.se/.well-known/acme-challenge/
+
+ Header always set Strict-Transport-Security "max-age=15768000; includeSubDomains; preload"
+ Header always set X-Frame-Options "SAMEORIGIN"
+ Header always set X-XSS-Protection "1; mode=block"
+ Header always set X-Content-Type-Options "nosniff"
+ Header always set Content-Security-Policy "default-src https://<%= @certname %>"
+
+
+ ServerAdmin webmaster@localhost
+ DocumentRoot /var/www/html
+
+ ErrorLog ${APACHE_LOG_DIR}/error.log
+ CustomLog ${APACHE_LOG_DIR}/access.log combined
+
+</VirtualHost>
+
+# vim: syntax=apache ts=4 sw=4 sts=4 sr noet
diff --git a/templates/monitor/check_nrpe.cfg.erb b/templates/monitor/check_nrpe.cfg.erb
new file mode 100644
index 0000000..623f1dd
--- /dev/null
+++ b/templates/monitor/check_nrpe.cfg.erb
@@ -0,0 +1,22 @@
+# This file is managed with puppet.
+# These commands run the NRPE plugin $ARG1$ on the remote host; the _toNN
+# variants only differ in their timeout.
+define command {
+ command_name check_nrpe_1arg
+ command_line /usr/lib/nagios/plugins/check_nrpe -H $HOSTADDRESS$ -c $ARG1$
+}
+
+# this command runs a program $ARG1$ with no arguments
+define command {
+ command_name check_nrpe_1arg_to30
+ command_line /usr/lib/nagios/plugins/check_nrpe -t 30 -H $HOSTADDRESS$ -c $ARG1$
+}
+# this command runs a program $ARG1$ with no arguments
+define command {
+ command_name check_nrpe_1arg_to300
+ command_line /usr/lib/nagios/plugins/check_nrpe -t 300 -H $HOSTADDRESS$ -c $ARG1$
+}
+define command {
+ command_name check_nrpe_1arg_to600
+ command_line /usr/lib/nagios/plugins/check_nrpe -t 600 -H $HOSTADDRESS$ -c $ARG1$
+}
diff --git a/templates/monitor/commands.cfg.erb b/templates/monitor/commands.cfg.erb
new file mode 100644
index 0000000..f2ba152
--- /dev/null
+++ b/templates/monitor/commands.cfg.erb
@@ -0,0 +1,229 @@
+###############################################################################
+# COMMANDS.CFG - SAMPLE COMMAND DEFINITIONS FOR NAEMON 1.3.0
+#
+#
+# NOTES: This config file provides you with some example command definitions
+# that you can reference in host, service, and contact definitions.
+#
+# You don't need to keep commands in a separate file from your other
+# object definitions. This has been done just to make things easier to
+# understand.
+#
+###############################################################################
+###############################
+# Slack notifications
+###############################
+
+# 'notify-service-by-slack' command definition
+define command {
+ command_name notify-service-by-slack
+ command_line SERVICESTATE="$SERVICESTATE$" \
+                 HOSTNAME="$HOSTNAME$" \
+                 SERVICEDISPLAYNAME="$SERVICEDISPLAYNAME$" \
+                 SERVICEOUTPUT="$SERVICEOUTPUT$" \
+ /usr/local/bin/slack_nagios.sh > /tmp/slack.log 2>&1
+}
+
+# 'notify-host-by-slack' command definition
+define command {
+ command_name notify-host-by-slack
+ command_line SERVICESTATE="$SERVICESTATE$" \
+                 HOSTNAME="$HOSTNAME$" \
+                 SERVICEDISPLAYNAME="$SERVICEDISPLAYNAME$" \
+                 SERVICEOUTPUT="$SERVICEOUTPUT$" \
+ /usr/local/bin/slack_nagios.sh > /tmp/slack.log 2>&1
+}
+################################################################################
+#
+# SAMPLE NOTIFICATION COMMANDS
+#
+# These are some example notification commands. They may or may not work on
+# your system without modification. As an example, some systems will require
+# you to use "/usr/bin/mailx" instead of "/usr/bin/mail" in the commands below.
+#
+################################################################################
+# 'notify-host-by-email' command definition
+define command {
+ command_name notify-host-by-email
+ command_line /usr/bin/printf "%b" "***** Naemon *****\n\nNotification Type: $NOTIFICATIONTYPE$\nHost: $HOSTNAME$\nState: $HOSTSTATE$\nAddress: $HOSTADDRESS$\nInfo: $HOSTOUTPUT$\n\nDate/Time: $LONGDATETIME$\n" \
+ | /usr/bin/mail \
+ -s "** $NOTIFICATIONTYPE$ Host Alert: $HOSTNAME$ is $HOSTSTATE$ **" $CONTACTEMAIL$
+}
+
+# 'notify-service-by-email' command definition
+define command {
+ command_name notify-service-by-email
+ command_line /usr/bin/printf "%b" "***** Naemon *****\n\nNotification Type: $NOTIFICATIONTYPE$\n\nService: $SERVICEDESC$\nHost: $HOSTALIAS$\nAddress: $HOSTADDRESS$\nState: $SERVICESTATE$\n\nDate/Time: $LONGDATETIME$\n\nAdditional Info:\n\n$SERVICEOUTPUT$\n" \
+ | /usr/bin/mail \
+ -s "** $NOTIFICATIONTYPE$ Service Alert: $HOSTALIAS$/$SERVICEDESC$ is $SERVICESTATE$ **" $CONTACTEMAIL$
+}
+
+################################################################################
+#
+# SAMPLE HOST CHECK COMMANDS
+#
+################################################################################
+# This command checks to see if a host is "alive" by pinging it
+# The check must result in a 100% packet loss or 5 second (5000ms) round trip
+# average time to produce a critical error.
+# Note: Five ICMP echo packets are sent (determined by the '-p 5' argument)
+# 'check-host-alive' command definition
+#define command {
+# command_name check-host-alive
+# command_line $USER1$/check_ping -H $HOSTADDRESS$ -w 3000.0,80% -c 5000.0,100% -p 5
+#}
+
+################################################################################
+#
+# SAMPLE SERVICE CHECK COMMANDS
+#
+# These are some example service check commands. They may or may not work on
+# your system, as they must be modified for your plugins. See the HTML
+# documentation on the plugins for examples of how to configure command definitions.
+#
+# NOTE: The following 'check_local_...' functions are designed to monitor
+# various metrics on the host that Naemon is running on (i.e. this one).
+################################################################################
+# 'check_local_disk' command definition
+define command {
+ command_name check_local_disk
+ command_line $USER1$/check_disk -w $ARG1$ -c $ARG2$ -p $ARG3$
+}
+
+# 'check_local_load' command definition
+define command {
+ command_name check_local_load
+ command_line $USER1$/check_load -w $ARG1$ -c $ARG2$
+}
+
+# 'check_local_procs' command definition
+define command {
+ command_name check_local_procs
+ command_line $USER1$/check_procs -w $ARG1$ -c $ARG2$ -s $ARG3$
+}
+
+# 'check_local_users' command definition
+define command {
+ command_name check_local_users
+ command_line $USER1$/check_users -w $ARG1$ -c $ARG2$
+}
+
+# 'check_local_swap' command definition
+define command {
+ command_name check_local_swap
+ command_line $USER1$/check_swap -w $ARG1$ -c $ARG2$
+}
+
+# 'check_local_mrtgtraf' command definition
+define command {
+ command_name check_local_mrtgtraf
+ command_line $USER1$/check_mrtgtraf -F $ARG1$ -a $ARG2$ -w $ARG3$ -c $ARG4$ -e $ARG5$
+}
+#
+#################################################################################
+## NOTE: The following 'check_...' commands are used to monitor services on
+## both local and remote hosts.
+#################################################################################
+## 'check_ftp' command definition
+#define command {
+# command_name check_ftp
+# command_line $USER1$/check_ftp -H $HOSTADDRESS$ $ARG1$
+#}
+#
+## 'check_hpjd' command definition
+#define command {
+# command_name check_hpjd
+# command_line $USER1$/check_hpjd -H $HOSTADDRESS$ $ARG1$
+#}
+#
+# 'check_snmp' command definition
+define command {
+ command_name check_snmp
+ command_line $USER1$/check_snmp -H $HOSTADDRESS$ $ARG1$
+}
+#
+## 'check_http' command definition
+#define command {
+# command_name check_http
+# command_line $USER1$/check_http -I $HOSTADDRESS$ $ARG1$
+#}
+#
+## 'check_ssh' command definition
+#define command {
+# command_name check_ssh
+# command_line $USER1$/check_ssh $ARG1$ $HOSTADDRESS$
+#}
+#
+## 'check_dhcp' command definition
+#define command {
+# command_name check_dhcp
+# command_line $USER1$/check_dhcp $ARG1$
+#}
+#
+## 'check_ping' command definition
+#define command {
+# command_name check_ping
+# command_line $USER1$/check_ping -H $HOSTADDRESS$ -w $ARG1$ -c $ARG2$ -p 5
+#}
+#
+## 'check_pop' command definition
+#define command {
+# command_name check_pop
+# command_line $USER1$/check_pop -H $HOSTADDRESS$ $ARG1$
+#}
+#
+## 'check_imap' command definition
+#define command {
+# command_name check_imap
+# command_line $USER1$/check_imap -H $HOSTADDRESS$ $ARG1$
+#}
+#
+## 'check_smtp' command definition
+#define command {
+# command_name check_smtp
+# command_line $USER1$/check_smtp -H $HOSTADDRESS$ $ARG1$
+#}
+#
+## 'check_tcp' command definition
+#define command {
+# command_name check_tcp
+# command_line $USER1$/check_tcp -H $HOSTADDRESS$ -p $ARG1$ $ARG2$
+#}
+#
+## 'check_udp' command definition
+#define command {
+# command_name check_udp
+# command_line $USER1$/check_udp -H $HOSTADDRESS$ -p $ARG1$ $ARG2$
+#}
+#
+## 'check_nt' command definition
+#define command {
+# command_name check_nt
+# command_line $USER1$/check_nt -H $HOSTADDRESS$ -p 12489 -v $ARG1$ $ARG2$
+#}
+
+################################################################################
+#
+# SAMPLE PERFORMANCE DATA COMMANDS
+#
+# These are sample performance data commands that can be used to send performance
+# data output to two text files (one for hosts, another for services). If you
+# plan on simply writing performance data out to a file, consider using the
+# host_perfdata_file and service_perfdata_file options in the main config file.
+#
+################################################################################
+# 'process-host-perfdata' command definition
+define command {
+ command_name process-host-perfdata
+ command_line /usr/bin/printf "%b" "$LASTHOSTCHECK$\t$HOSTNAME$\t$HOSTSTATE$\t$HOSTATTEMPT$\t$HOSTSTATETYPE$\t$HOSTEXECUTIONTIME$\t$HOSTOUTPUT$\t$HOSTPERFDATA$\n" \
+ >> /var/lib/naemon/host-perfdata.out
+}
+
+# 'process-service-perfdata' command definition
+define command {
+ command_name process-service-perfdata
+ command_line /usr/bin/printf "%b" "$LASTSERVICECHECK$\t$HOSTNAME$\t$SERVICEDESC$\t$SERVICESTATE$\t$SERVICEATTEMPT$\t$SERVICESTATETYPE$\t$SERVICEEXECUTIONTIME$\t$SERVICELATENCY$\t$SERVICEOUTPUT$\t$SERVICEPERFDATA$\n" \
+ >> /var/lib/naemon/service-perfdata.out
+}
diff --git a/templates/monitor/contacts.cfg.erb b/templates/monitor/contacts.cfg.erb
new file mode 100644
index 0000000..531d805
--- /dev/null
+++ b/templates/monitor/contacts.cfg.erb
@@ -0,0 +1,54 @@
+###############################################################################
+# CONTACTS.CFG - SAMPLE CONTACT/CONTACTGROUP DEFINITIONS
+#
+#
+# NOTES: This config file provides you with some example contact and contact
+# group definitions that you can reference in host and service
+# definitions.
+#
+# You don't need to keep these definitions in a separate file from your
+# other object definitions. This has been done just to make things
+# easier to understand.
+#
+###############################################################################
+###############################################################################
+###############################################################################
+#
+# CONTACTS
+#
+###############################################################################
+###############################################################################
+# Just one contact defined by default - the Naemon admin (that's you)
+# This contact definition inherits a lot of default values from the 'generic-contact'
+# template which is defined elsewhere.
+define contact {
+ contact_name nagiosadmin ; Short name of user
+ alias Naemon Admin ; Full name of user
+ use generic-contact ; Inherit default values from generic-contact template (defined above)
+ email naemon@localhost ; <<***** CHANGE THIS TO YOUR EMAIL ADDRESS ******
+}
+
+###############################################################################
+###############################################################################
+#
+# CONTACT GROUPS
+#
+###############################################################################
+###############################################################################
+# We only have one contact in this simple configuration file, so there is
+# no need to create more than one contact group.
+define contactgroup {
+ contactgroup_name admins
+ alias Naemon Administrators
+ members nagiosadmin
+}
+define contact {
+ contact_name slack
+ alias Slack
+ service_notification_period 24x7
+ host_notification_period 24x7
+ service_notification_options w,u,c,r
+ host_notification_options d,r
+ service_notification_commands notify-service-by-slack
+ host_notification_commands notify-host-by-slack
+}
diff --git a/templates/monitor/grafana.conf.erb b/templates/monitor/grafana.conf.erb
new file mode 100644
index 0000000..11ba203
--- /dev/null
+++ b/templates/monitor/grafana.conf.erb
@@ -0,0 +1,14 @@
+<Location /grafana>
+ ProxyPass http://127.0.0.1:3000 retry=0 disablereuse=On
+ ProxyPassReverse http://127.0.0.1:3000/grafana
+ RewriteEngine On
+ RewriteRule .* - [E=PROXY_USER:%{LA-U:REMOTE_USER},NS]
+ SetEnvIf Request_Protocol ^HTTPS.* IS_HTTPS=1
+ SetEnvIf Authorization "^.+$" IS_BASIC_AUTH=1
+ # without thruk cookie auth, use the proxy user from the rewrite rule above
+ RequestHeader set X-WEBAUTH-USER "%{PROXY_USER}s" env=IS_HTTPS
+ RequestHeader set X-WEBAUTH-USER "%{PROXY_USER}e" env=!IS_HTTPS
+ # when thruk cookie auth is used, fallback to remote user directly
+ RequestHeader set X-WEBAUTH-USER "%{REMOTE_USER}e" env=!IS_BASIC_AUTH
+ RequestHeader unset Authorization
+</Location>
diff --git a/templates/monitor/grafana.ini.erb b/templates/monitor/grafana.ini.erb
new file mode 100644
index 0000000..a3509a4
--- /dev/null
+++ b/templates/monitor/grafana.ini.erb
@@ -0,0 +1,42 @@
+# This file is managed by puppet
+[paths]
+logs = /var/log/grafana
+
+[log]
+mode = file
+
+[log.file]
+level = Info
+daily_rotate = true
+
+[server]
+root_url = %(protocol)s://%(domain)s:%(http_port)s/grafana/
+serve_from_sub_path = true
+
+[snapshots]
+external_enabled = false
+
+[security]
+disable_gravatar = true
+
+[users]
+allow_sign_up = false
+default_theme = light
+
+[auth.basic]
+enabled = false
+
+[auth.proxy]
+enabled = true
+header_name = X-WEBAUTH-USER
+header_property = username
+auto_sign_up = true
+sync_ttl = 60
+whitelist = #89.46.21.220, 2001:6b0:6c::2de
+headers = Email:X-User-Email, Name:X-User-Name
+headers_encoded = false
+enable_login_token = false
+
+[alerting]
+enabled = true
+execute_alerts = true
diff --git a/templates/monitor/histou.cfg.erb b/templates/monitor/histou.cfg.erb
new file mode 100644
index 0000000..dc3de46
--- /dev/null
+++ b/templates/monitor/histou.cfg.erb
@@ -0,0 +1,22 @@
+define command {
+ command_name process-host-perfdata-file-nagflux
+ command_line /bin/mv /var/naemon/host-perfdata /var/nagflux/perfdata/$TIMET$.perfdata.host
+ }
+
+define command {
+ command_name process-service-perfdata-file-nagflux
+ command_line /bin/mv /var/naemon/service-perfdata /var/nagflux/perfdata/$TIMET$.perfdata.service
+ }
+
+define host {
+ name host-grafana
+ action_url https:<%= @monitorhost %>/grafana/dashboard/script/histou.js?host=$HOSTNAME$&theme=light&annotations=true
+ notes_url https:<%= @monitorhost %>/dokuwiki/doku.php?id=inventory:$HOSTNAME$
+ register 0
+}
+
+define service {
+ name service-grafana
+ action_url https:<%= @monitorhost %>/grafana/dashboard/script/histou.js?host=$HOSTNAME$&service=$SERVICEDESC$&theme=light&annotations=true
+ register 0
+}
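+
+# Usage sketch (hypothetical host name, for illustration only): any object
+# that inherits these templates picks up the Grafana/histou action_url
+# automatically.
+#define host {
+#    host_name  node1.drive.example.com
+#    address    node1.drive.example.com
+#    use        host-grafana
+#}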
diff --git a/templates/monitor/histou.ini.erb b/templates/monitor/histou.ini.erb
new file mode 100644
index 0000000..e62251d
--- /dev/null
+++ b/templates/monitor/histou.ini.erb
@@ -0,0 +1,24 @@
+[general]
+socketTimeout = 10
+phpCommand = "php"
+;leave tmpFolder empty to use default php folder
+tmpFolder = ""
+specialChar = "&"
+; set the backend: influxdb, elasticsearch
+databaseType = "influxdb"
+forecastDatasourceName = "nagflux_forecast"
+; disable the panel title globally
+disablePanelTitle = false
+
+[folder]
+defaultTemplateFolder = "templates/default/"
+customTemplateFolder = "templates/custom/"
+forecastTemplateFolder = "forecasts/"
+
+[influxdb]
+url = "http://127.0.0.1:8086/query?db=nagflux"
+hostcheckAlias = "hostcheck"
+
+[elasticsearch]
+url = "http://127.0.0.1:8086/nagflux"
+hostcheckAlias = "hostcheck"
\ No newline at end of file
diff --git a/templates/monitor/histou.js.erb b/templates/monitor/histou.js.erb
new file mode 100644
index 0000000..f91ba12
--- /dev/null
+++ b/templates/monitor/histou.js.erb
@@ -0,0 +1,223 @@
+/* global _ */
+
+// accessible variables in this scope
+var window, document, ARGS, $, jQuery, moment, kbn;
+
+//parse arguments
+parseArgs()
+
+
+return function (callback) {
+ if (window.location.href.search('/dashboard-solo/') != -1) {
+ document.documentElement.style.background = '#FFF';
+ }
+
+ var url = 'https://<%= @monitorhost%>/histou/';
+ var configUrl = url+'index.php?host='+host+'&service='+service+'&height='+height+'&legend='+legend+debug+disablePanelTitle+disablePerfdataLookup+specificTemplate+'&annotations='+annotations;
+
+ var flotAddons = url + 'flotAddons.js';
+ $.getScript(flotAddons, function (){});
+ if (!_.isUndefined(ARGS.customCSSFile)) {
+        $('head').append('<link rel="stylesheet" type="text/css" href="' + ARGS.customCSSFile + '">');
+ }
+ cssLoaded = false;
+ jQuery('body').on('DOMNodeInserted', 'DIV.drop-popover', function (e) {
+ var cssUrl = url+'lightbox/css/light.css'
+ if (!cssLoaded) {
+            $('head').append('<link rel="stylesheet" type="text/css" href="' + cssUrl + '">');
+ $.getScript(url+'lightbox/js/light.js', function(){});
+ cssLoaded = true;
+ }
+
+ var box = $( e.currentTarget ).find( "DIV.sakuli-popup" );
+ if (box.length > 0 ){
+ $(box[0]).attr('class', 'sakuli-image');
+ var sakuliUrl = site[1] + box[0].innerHTML;
+ var svcoutput;
+ var imagename;
+ jQuery.when(
+ // fetch Sakuli serviceoutput file
+ $.get( sakuliUrl + "output.txt").always(function(data ,state) {
+ if (state != "success" ) {
+ data = "Could not find Sakuli service outputfile at " + sakuliUrl + "output.txt !"
+ }
+ console.log(data);
+                    svcoutput = $("<div>").text(data).html().replace(/['"]+/g, '');
+ console.log("Sakuli service output: " + svcoutput);
+ }) &&
+ // fetch Sakuli screenshot (jpg/png)
+ $.get( sakuliUrl ).always(function(imgdata ,state) {
+ if (state != "success" ) {
+ imgdata = "Could not access screenshot list page at " + sakuliUrl + "!"
+ }
+ // the 3rd href on the apache index page contains the img name
+ imagename = $(imgdata).find('a')[2].text.trim();
+ console.log("Sakuli screenshot image name: " + imagename);
+ })
+ ).then ( function() {
+                box[0].innerHTML = '<a href="' + sakuliUrl + imagename + '" data-featherlight="image"><img class="sakuli-screenshot" src="' + sakuliUrl + imagename + '"/></a>';
+ });
+ }
+ });
+
+
+ $.ajax(
+ {
+ method: 'GET',
+ url: configUrl,
+ dataType: "jsonp",
+ }
+ ).done(
+ function (result) {
+ console.log(result);
+ callback(result);
+ }
+ ).fail(
+ function (result) {
+ console.log(result);
+ console.log(configUrl);
+ if (result.status == 200) {
+ callback(createErrorDashboard('# HTTP code: '+result.status+'\n# Message: '+result.statusText+'\n# Url: '+configUrl+'\n# Probably the output is not valid json, because the returncode is 200!'));
+ } else {
+ callback(createErrorDashboard('# HTTP code: '+result.status+'\n# Message: '+result.statusText+'\n# Url: '+configUrl));
+ }
+ }
+ );
+}
+
+function createErrorDashboard(message)
+{
+ return {
+ rows : [{
+ title: 'Chart',
+ height: '300px',
+ panels : [{
+ title: 'Error Message below',
+ type: 'text',
+ span: 12,
+ fill: 1,
+ content: message,
+ }]
+ }],
+ services : {},
+ title : 'JS Error / HTTP Error'
+ };
+}
+
+function parseArgs()
+{
+ if (!_.isUndefined(ARGS.reduce)) {
+ $('head').append('');
+
+ //change ui to our needs
+ clearUi();
+ }
+
+ if (!_.isUndefined(ARGS.dynUnit)) {
+ dynUnit = true;
+ } else {
+ dynUnit = false;
+ }
+
+ if (!_.isUndefined(ARGS.host)) {
+ host = ARGS.host;
+ } else {
+ host = "";
+ }
+
+ if (!_.isUndefined(ARGS.service)) {
+ service = ARGS.service;
+ } else {
+ service = "";
+ }
+
+ if (!_.isUndefined(ARGS.command)) {
+ command = ARGS.command;
+ } else {
+ command = "";
+ }
+
+ if (!_.isUndefined(ARGS.perf)) {
+ perf = ARGS.perf;
+ } else {
+ perf = "";
+ }
+
+ if (!_.isUndefined(ARGS.height)) {
+ height = ARGS.height;
+ } else {
+ height = "";
+ }
+
+ if (_.isUndefined(ARGS.debug)) {
+ debug = '';
+ } else {
+ debug = "&debug";
+ }
+
+ if (!_.isUndefined(ARGS.legend)) {
+ legend = ARGS.legend;
+ } else {
+ legend = true;
+ }
+
+ if (!_.isUndefined(ARGS.annotations)) {
+ annotations = ARGS.annotations;
+ } else {
+ annotations = false;
+ }
+
+ if(_.isUndefined(ARGS.disablePanelTitle)) {
+ disablePanelTitle = '';
+ }else{
+ disablePanelTitle = "&disablePanelTitle";
+ }
+
+ if(_.isUndefined(ARGS.disablePerfdataLookup)) {
+ disablePerfdataLookup = '';
+ }else{
+ disablePerfdataLookup = "&disablePerfdataLookup";
+ }
+
+ if(_.isUndefined(ARGS.specificTemplate)) {
+ specificTemplate = '';
+ }else{
+ specificTemplate = "&specificTemplate="+ARGS.specificTemplate;
+ }
+}
+
+function clearUi()
+{
+ //removes white space
+ var checkExist = setInterval(
+ function () {
+ if ($('.panel-content').length) {
+ clearInterval(checkExist);
+ document.getElementsByClassName("panel-content")[0].style.paddingBottom = '0px';
+ }
+ },
+ 100
+ );
+ /*
+ .panel-header removes the headline of the graphs
+ .navbar-static-top removes the menubar on the top
+ .row-control-inner removes the row controll button on the left
+ .span12 removes the add new row button on the bottom
+ */
+ divs = ['.panel-header','.navbar-static-top','.row-control-inner','.span12']
+ for (index = 0; index < divs.length; index++) {
+ waitForDivAndDeleteIt(divs[index]);
+ }
+ function waitForDivAndDeleteIt(div)
+ {
+ var checkExist = setInterval(
+ function () {
+ if ($(div).length) {
+ clearInterval(checkExist);
+ $(div).remove();
+ }
+ },
+ 100
+ );
+ }
+}
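+
+// Example of how this script is reached (hypothetical host/service names):
+//   /grafana/dashboard/script/histou.js?host=node1&service=PING&theme=light&annotations=true
+// Grafana exposes the query string as ARGS, parseArgs() copies the values
+// into globals, and the function above forwards them to histou's index.php,
+// which returns the dashboard JSON handed to callback().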
diff --git a/templates/monitor/hostgroups_nagios4.cfg.erb b/templates/monitor/hostgroups_nagios4.cfg.erb
new file mode 100644
index 0000000..9d27977
--- /dev/null
+++ b/templates/monitor/hostgroups_nagios4.cfg.erb
@@ -0,0 +1,8 @@
+# Some generic hostgroup definitions
+
+# A simple wildcard hostgroup
+define hostgroup {
+ hostgroup_name all
+ alias All Servers
+ members *
+ }
diff --git a/templates/monitor/localhost.cfg.erb b/templates/monitor/localhost.cfg.erb
new file mode 100644
index 0000000..e38f35b
--- /dev/null
+++ b/templates/monitor/localhost.cfg.erb
@@ -0,0 +1,122 @@
+###############################################################################
+# LOCALHOST.CFG - SAMPLE OBJECT CONFIG FILE FOR MONITORING THIS MACHINE
+#
+#
+# NOTE: This config file is intended to serve as an *extremely* simple
+# example of how you can create configuration entries to monitor
+# the local (Linux) machine.
+#
+###############################################################################
+###############################################################################
+###############################################################################
+#
+# HOST DEFINITION
+#
+###############################################################################
+###############################################################################
+# Define a host for the local machine
+# This host definition will inherit all variables that are defined
+# in (or inherited by) the linux-server host template definition.
+#define host {
+# host_name localhost
+# alias localhost
+# address 127.0.0.1
+# use linux-server ; Name of host template to use
+#}
+#
+################################################################################
+################################################################################
+##
+## HOST GROUP DEFINITION
+##
+################################################################################
+################################################################################
+## Define an optional hostgroup for Linux machines
+define hostgroup {
+ hostgroup_name linux-servers ; The name of the hostgroup
+ alias Linux Servers ; Long name of the group
+# members localhost ; Comma separated list of hosts that belong to this group
+}
+#
+################################################################################
+################################################################################
+##
+## SERVICE DEFINITIONS
+##
+################################################################################
+################################################################################
+## Define a service to "ping" the local machine
+#define service {
+# service_description PING
+# host_name localhost
+# use local-service ; Name of service template to use
+# check_command check_ping!100.0,20%!500.0,60%
+#}
+#
+## Define a service to check the disk space of the root partition
+## on the local machine. Warning if < 20% free, critical if
+## < 10% free space on partition.
+#define service {
+# service_description Root Partition
+# host_name localhost
+# use local-service ; Name of service template to use
+# check_command check_local_disk!20%!10%!/
+#}
+#
+## Define a service to check the number of currently logged in
+## users on the local machine. Warning if > 20 users, critical
+## if > 50 users.
+#define service {
+# service_description Current Users
+# host_name localhost
+# use local-service ; Name of service template to use
+# check_command check_local_users!20!50
+#}
+#
+## Define a service to check the number of currently running procs
+## on the local machine. Warning if > 250 processes, critical if
+## > 400 users.
+#define service {
+# service_description Total Processes
+# host_name localhost
+# use local-service ; Name of service template to use
+# check_command check_local_procs!250!400!RSZDT
+#}
+#
+## Define a service to check the load on the local machine.
+#define service {
+# service_description Current Load
+# host_name localhost
+# use local-service ; Name of service template to use
+# check_command check_local_load!5.0,4.0,3.0!10.0,6.0,4.0
+#}
+#
+## Define a service to check the swap usage the local machine.
+## Critical if less than 10% of swap is free, warning if less than 20% is free
+#define service {
+# service_description Swap Usage
+# host_name localhost
+# use local-service ; Name of service template to use
+# check_command check_local_swap!20!10
+#}
+#
+## Define a service to check SSH on the local machine.
+## Disable notifications for this service by default, as not all users may have SSH enabled.
+#define service {
+# service_description SSH
+# host_name localhost
+# use local-service ; Name of service template to use
+# check_command check_ssh
+# notifications_enabled 0
+#}
+#
+## Define a service to check HTTP on the local machine.
+## Disable notifications for this service by default, as not all users may have HTTP enabled.
+#define service {
+# service_description HTTP
+# host_name localhost
+# use local-service ; Name of service template to use
+# check_command check_http!-u /naemon/
+# notifications_enabled 0
+#}
+#
diff --git a/templates/monitor/monitor-host_nagios4.cfg.erb b/templates/monitor/monitor-host_nagios4.cfg.erb
new file mode 100644
index 0000000..dcffdb8
--- /dev/null
+++ b/templates/monitor/monitor-host_nagios4.cfg.erb
@@ -0,0 +1,19 @@
+# Generic host definition template - This is NOT a real host, just a template!
+
+define host{
+ name monitor-host ; The name of this host template
+ action_url /grafana/dashboard/script/histou.js?host=$HOSTNAME$&theme=light&annotations=true
+ notifications_enabled 1 ; Host notifications are enabled
+ event_handler_enabled 1 ; Host event handler is enabled
+ flap_detection_enabled 1 ; Flap detection is enabled
+ process_perf_data 1 ; Process performance data
+ retain_status_information 1 ; Retain status information across program restarts
+ retain_nonstatus_information 1 ; Retain non-status information across program restarts
+ check_command check-host-alive
+ max_check_attempts 10
+ notification_interval 0
+ notification_period 24x7
+ notification_options d,u,r
+ contact_groups admins
+ register 0 ; DONT REGISTER THIS DEFINITION - ITS NOT A REAL HOST, JUST A TEMPLATE!
+ }
diff --git a/templates/monitor/monitor-ssl.conf.erb b/templates/monitor/monitor-ssl.conf.erb
new file mode 100644
index 0000000..e86476f
--- /dev/null
+++ b/templates/monitor/monitor-ssl.conf.erb
@@ -0,0 +1,50 @@
+<IfModule mod_ssl.c>
+<VirtualHost _default_:443>
+ ServerName <%= @certname %>
+ SSLEngine on
+ SSLCertificateFile /etc/dehydrated/certs/<%= @certname %>/fullchain.pem
+ SSLCertificateKeyFile /etc/dehydrated/certs/<%= @certname %>/privkey.pem
+
+ Header always set Strict-Transport-Security "max-age=63072000"
+
+ SSLProtocol all -SSLv3 -TLSv1 -TLSv1.1
+ SSLCipherSuite ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384
+ SSLHonorCipherOrder off
+ SSLSessionTickets off
+
+ SSLUseStapling On
+ SSLCompression off
+
+ SSLOptions +StrictRequire
+
+ # Add vhost name to log entries:
+ LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\"" vhost_combined
+ LogFormat "%v %h %l %u %t \"%r\" %>s %b" vhost_common
+ BrowserMatch "MSIE [2-6]" \
+ nokeepalive ssl-unclean-shutdown \
+ downgrade-1.0 force-response-1.0
+ BrowserMatch "MSIE [17-9]" ssl-unclean-shutdown
+ RedirectMatch ^/$ /thruk/
+
+<Location /grafana>
+ AuthName "Thruk Monitoring"
+ AuthType Basic
+ AuthUserFile /etc/thruk/htpasswd
+ Require valid-user
+ ProxyPass http://127.0.0.1:3000 retry=0 disablereuse=On
+ ProxyPassReverse http://127.0.0.1:3000/grafana
+ RewriteEngine On
+ RewriteRule .* - [E=PROXY_USER:%{LA-U:REMOTE_USER},NS]
+ SetEnvIf Request_Protocol ^HTTPS.* IS_HTTPS=1
+ SetEnvIf Authorization "^.+$" IS_BASIC_AUTH=1
+ # without thruk cookie auth, use the proxy user from the rewrite rule above
+ RequestHeader set X-WEBAUTH-USER "%{PROXY_USER}s" env=IS_HTTPS
+ RequestHeader set X-WEBAUTH-USER "%{PROXY_USER}e" env=!IS_HTTPS
+ # when thruk cookie auth is used, fallback to remote user directly
+ RequestHeader set X-WEBAUTH-USER "%{REMOTE_USER}e" env=!IS_BASIC_AUTH
+ RequestHeader unset Authorization
+</Location>
+</VirtualHost>
+
+ SSLStaplingCache "shmcb:logs/ssl_stapling(32768)"
+</IfModule>
diff --git a/templates/monitor/nagflux.service.erb b/templates/monitor/nagflux.service.erb
new file mode 100644
index 0000000..36778c2
--- /dev/null
+++ b/templates/monitor/nagflux.service.erb
@@ -0,0 +1,14 @@
+[Unit]
+Description=A connector which transforms performance data from Nagios/Icinga(2)/Naemon to InfluxDB/Elasticsearch
+Documentation=https://github.com/Griesbacher/nagflux
+After=network-online.target
+
+[Service]
+User=root
+Group=root
+ExecStart=/usr/local/bin/nagflux -configPath /etc/nagflux/config.gcfg
+Restart=on-failure
+
+[Install]
+WantedBy=multi-user.target
+Alias=nagflux.service
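+
+# Typical activation once this unit file is installed (standard systemd
+# commands, shown here as a hint):
+#   systemctl daemon-reload
+#   systemctl enable --now nagflux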
diff --git a/templates/monitor/nagflux.yaml.erb b/templates/monitor/nagflux.yaml.erb
new file mode 100644
index 0000000..4ec4ccb
--- /dev/null
+++ b/templates/monitor/nagflux.yaml.erb
@@ -0,0 +1,14 @@
+apiVersion: 1
+
+deleteDatasources:
+ - name: nagflux
+
+datasources:
+- name: nagflux
+ type: influxdb
+ url: http://localhost:8086
+ access: proxy
+ database: nagflux
+ isDefault: true
+ version: 1
+ editable: true
diff --git a/templates/monitor/nagflux_config.gcfg.erb b/templates/monitor/nagflux_config.gcfg.erb
new file mode 100644
index 0000000..0699e8d
--- /dev/null
+++ b/templates/monitor/nagflux_config.gcfg.erb
@@ -0,0 +1,36 @@
+[main]
+NagiosSpoolfileFolder = "/var/nagflux/perfdata"
+NagiosSpoolfileWorker = 1
+InfluxWorker = 2
+MaxInfluxWorker = 5
+DumpFile = "/var/log/nagflux/nagflux.dump"
+NagfluxSpoolfileFolder = "/var/nagflux/spool"
+FieldSeparator = "&"
+BufferSize = 1000
+FileBufferSize = 65536
+DefaultTarget = "Influxdb"
+
+[Log]
+LogFile = "/var/log/nagflux/nagflux.log"
+MinSeverity = "INFO"
+
+[InfluxDBGlobal]
+CreateDatabaseIfNotExists = true
+NastyString = ""
+NastyStringToReplace = ""
+HostcheckAlias = "hostcheck"
+
+[InfluxDB "nagflux"]
+Enabled = true
+Version = 1.0
+Address = "http://localhost:8086"
+Arguments = "precision=ms&db=nagflux&u=admin&p=<%= @influx_passwd%>"
+StopPullingDataIfDown = true
+
+[Livestatus]
+#tcp or file
+Type = "file"
+#tcp: 127.0.0.1:6557 or file /var/run/live
+Address = "/var/cache/naemon/live"
+MinutesToWait = 3
+Version = ""
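+
+# Quick end-to-end check once naemon and nagflux are both running (queries
+# the local InfluxDB configured above; assumes the influx HTTP API on 8086):
+#   curl -G 'http://localhost:8086/query?db=nagflux' --data-urlencode 'q=SHOW MEASUREMENTS'
+# An empty result list means no perfdata has been spooled yet.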
diff --git a/templates/monitor/nagios4-cgi.conf.erb b/templates/monitor/nagios4-cgi.conf.erb
new file mode 100644
index 0000000..1988ad9
--- /dev/null
+++ b/templates/monitor/nagios4-cgi.conf.erb
@@ -0,0 +1,48 @@
+# apache configuration for nagios 4.x
+
+ScriptAlias /cgi-bin/nagios4 /usr/lib/cgi-bin/nagios4
+ScriptAlias /nagios4/cgi-bin /usr/lib/cgi-bin/nagios4
+
+# Where the stylesheets (config files) reside
+Alias /nagios4/stylesheets /etc/nagios4/stylesheets
+
+# Where the HTML pages live
+Alias /nagios4 /usr/share/nagios4/htdocs
+
+<DirectoryMatch (/usr/share/nagios4/htdocs|/usr/lib/cgi-bin/nagios4|/etc/nagios4/stylesheets)>
+ Options FollowSymLinks
+ DirectoryIndex index.php index.html
+ AllowOverride AuthConfig
+ #
+ # The default Debian nagios4 install sets use_authentication=0 in
+	# /etc/nagios4/cgi.cfg, which turns off nagios's internal authentication.
+ # This is insecure. As a compromise this default apache2 configuration
+ # only allows private IP addresses access.
+ #
+	# The <Files> ... </Files> below shows how you can secure the nagios4
+ # web site so anybody can view it, but only authenticated users can issue
+ # commands (such as silence notifications). To do that replace the
+ # "Require all granted" with "Require valid-user", and use htdigest
+ # program from the apache2-utils package to add users to
+ # /etc/nagios4/htdigest.users.
+ #
+ # A step up is to insist all users validate themselves by moving
+	# the stanzas in the <Files>..</Files> into the <DirectoryMatch>.
+ # Then by setting use_authentication=1 in /etc/nagios4/cgi.cfg you
+ # can configure which people get to see a particular service from
+ # within the nagios configuration.
+ #
+ #Require ip ::1/128 fc00::/7 fe80::/10 10.0.0.0/8 127.0.0.0/8 169.254.0.0/16 172.16.0.0/12 192.168.0.0/16
+ AuthDigestDomain "Nagios4"
+ AuthDigestProvider file
+ AuthUserFile "/etc/nagios4/htdigest.users"
+ AuthGroupFile "/etc/group"
+ AuthName "Nagios4"
+ AuthType Digest
+ #Require all granted
+ Require valid-user
+</DirectoryMatch>
+
+<Directory /usr/lib/cgi-bin/nagios4>
+ Options +ExecCGI
+</Directory>
\ No newline at end of file
diff --git a/templates/monitor/nagios_nagflux.cfg.erb b/templates/monitor/nagios_nagflux.cfg.erb
new file mode 100644
index 0000000..be28164
--- /dev/null
+++ b/templates/monitor/nagios_nagflux.cfg.erb
@@ -0,0 +1,13 @@
+process_performance_data=1
+
+host_perfdata_file=/var/naemon/host-perfdata
+host_perfdata_file_template=DATATYPE::HOSTPERFDATA\tTIMET::$TIMET$\tHOSTNAME::$HOSTNAME$\tHOSTPERFDATA::$HOSTPERFDATA$\tHOSTCHECKCOMMAND::$HOSTCHECKCOMMAND$
+host_perfdata_file_mode=a
+host_perfdata_file_processing_interval=15
+host_perfdata_file_processing_command=process-host-perfdata-file-nagflux
+
+service_perfdata_file=/var/naemon/service-perfdata
+service_perfdata_file_template=DATATYPE::SERVICEPERFDATA\tTIMET::$TIMET$\tHOSTNAME::$HOSTNAME$\tSERVICEDESC::$SERVICEDESC$\tSERVICEPERFDATA::$SERVICEPERFDATA$\tSERVICECHECKCOMMAND::$SERVICECHECKCOMMAND$
+service_perfdata_file_mode=a
+service_perfdata_file_processing_interval=15
+service_perfdata_file_processing_command=process-service-perfdata-file-nagflux
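+
+# Naemon appends one line per check to the files above and every 15 s runs
+# the process-*-perfdata-file-nagflux commands (defined alongside the histou
+# templates), which mv the files into /var/nagflux/perfdata for nagflux to
+# consume. A spooled host line looks roughly like this (tab-separated,
+# values illustrative):
+#   DATATYPE::HOSTPERFDATA  TIMET::1700000000  HOSTNAME::node1  HOSTPERFDATA::rta=0.5ms;;;;  HOSTCHECKCOMMAND::check-host-alive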
diff --git a/templates/monitor/notify_slack.erb.sh b/templates/monitor/notify_slack.erb.sh
new file mode 100644
index 0000000..c7b7d01
--- /dev/null
+++ b/templates/monitor/notify_slack.erb.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+# This script is used by Nagios to post alerts into a Slack channel
+# using the Incoming WebHooks integration. Create the channel, botname
+# and integration first and then add this notification script in your
+# Nagios configuration.
+#
+# More info on Slack
+# Website: https://slack.com/
+# Twitter: @slackhq, @slackapi
+#
+# My info
+# Website: http://matthewcmcmillan.blogspot.com/
+# Twitter: @matthewmcmillan
+
+#Modify these variables for your environment
+MY_NAEMON_HOSTNAME="monitor.drive.sunet.se"
+SLACK_URL="https://hooks.slack.com/services/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"
+
+#Set the message icon based on Nagios service state
+if [ "$SERVICESTATE" = "CRITICAL" ]; then
+ ICON=":exclamation:"
+elif [ "$SERVICESTATE" = "WARNING" ]; then
+ ICON=":warning:"
+elif [ "$SERVICESTATE" = "OK" ]; then
+ ICON=":white_check_mark:"
+elif [ "$SERVICESTATE" = "UNKNOWN" ]; then
+ ICON=":question:"
+else
+ ICON=":white_medium_square:"
+fi
+
+#Send message to Slack
+payload='{"text": "'${ICON}' HOST: '${HOSTNAME}', SERVICE: '${SERVICEDISPLAYNAME}', MESSAGE: '${SERVICEOUTPUT}', (<https://'${MY_NAEMON_HOSTNAME}'/thruk/|See Thruk>)"}'
+curl -X POST --data "${payload}" "${SLACK_URL}"
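+
+# Manual test sketch (hypothetical values; in production Naemon exports these
+# variables via the notify-*-by-slack command definitions, and this script is
+# assumed to be installed as /usr/local/bin/slack_nagios.sh):
+#   SERVICESTATE=CRITICAL HOSTNAME=node1 SERVICEDISPLAYNAME=PING \
+#     SERVICEOUTPUT='PING CRITICAL - 100% packet loss' /usr/local/bin/slack_nagios.sh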
diff --git a/templates/monitor/ping.cfg.erb b/templates/monitor/ping.cfg.erb
new file mode 100644
index 0000000..11abcbe
--- /dev/null
+++ b/templates/monitor/ping.cfg.erb
@@ -0,0 +1,73 @@
+# This file is managed by puppet
+# 'check_ping' command definition
+define command{
+ command_name check_ping
+ command_line /usr/lib/nagios/plugins/check_ping -H '$HOSTADDRESS$' -w '$ARG1$' -c '$ARG2$' -4
+ }
+
+
+# 'check-host-alive' command definition
+define command{
+ command_name check-host-alive
+ command_line /usr/lib/nagios/plugins/check_ping -H '$HOSTADDRESS$' -w 5000,100% -c 5000,100% -p 1 -4
+ }
+
+
+# 'check-printer-alive' command definition
+define command{
+ command_name check-printer-alive
+ command_line /usr/lib/nagios/plugins/check_ping -H '$HOSTADDRESS$' -w 5000,100% -c 5000,100% -p 1
+ }
+
+
+# 'check-switch-alive' command definition
+define command{
+ command_name check-switch-alive
+ command_line /usr/lib/nagios/plugins/check_ping -H '$HOSTADDRESS$' -w 5000,100% -c 5000,100% -p 1
+ }
+
+
+# 'check-router-alive' command definition
+define command{
+ command_name check-router-alive
+ command_line /usr/lib/nagios/plugins/check_ping -H '$HOSTADDRESS$' -w 5000,100% -c 5000,100% -p 1
+ }
+
+####
+# Use these checks if you want to test IPv4 connectivity on IPv6-enabled systems
+####
+
+# 'check_ping_4' command definition
+define command{
+ command_name check_ping_4
+ command_line /usr/lib/nagios/plugins/check_ping -4 -H '$HOSTADDRESS$' -w '$ARG1$' -c '$ARG2$'
+ }
+
+
+# 'check-host-alive_4' command definition
+define command{
+ command_name check-host-alive_4
+ command_line /usr/lib/nagios/plugins/check_ping -H '$HOSTADDRESS$' -w 5000,100% -c 5000,100% -p 1 -4
+ }
+
+
+# 'check-printer-alive_4' command definition
+define command{
+ command_name check-printer-alive_4
+ command_line /usr/lib/nagios/plugins/check_ping -H '$HOSTADDRESS$' -w 5000,100% -c 5000,100% -p 1 -4
+ }
+
+
+# 'check-switch-alive_4' command definition
+define command{
+ command_name check-switch-alive_4
+ command_line /usr/lib/nagios/plugins/check_ping -H '$HOSTADDRESS$' -w 5000,100% -c 5000,100% -p 1 -4
+ }
+
+
+# 'check-router-alive_4' command definition
+define command{
+ command_name check-router-alive_4
+ command_line /usr/lib/nagios/plugins/check_ping -H '$HOSTADDRESS$' -w 5000,100% -c 5000,100% -p 1 -4
+ }
+
diff --git a/templates/monitor/printer.cfg.erb b/templates/monitor/printer.cfg.erb
new file mode 100644
index 0000000..ec5f61d
--- /dev/null
+++ b/templates/monitor/printer.cfg.erb
@@ -0,0 +1,67 @@
+###############################################################################
+# PRINTER.CFG - SAMPLE CONFIG FILE FOR MONITORING A NETWORK PRINTER
+#
+#
+# NOTES: This config file assumes that you are using the sample configuration
+# files that get installed with the Naemon quickstart guide.
+#
+###############################################################################
+###############################################################################
+###############################################################################
+#
+# HOST DEFINITIONS
+#
+###############################################################################
+###############################################################################
+# Define a host for the printer we'll be monitoring
+# Change the host_name, alias, and address to fit your situation
+#define host {
+# host_name hplj2605dn ; The name we're giving to this printer
+# alias HP LaserJet 2605dn ; A longer name associated with the printer
+# address 192.168.1.30 ; IP address of the printer
+# use generic-printer ; Inherit default values from a template
+# hostgroups network-printers ; Host groups this printer is associated with
+#}
+
+###############################################################################
+###############################################################################
+#
+# HOST GROUP DEFINITIONS
+#
+###############################################################################
+###############################################################################
+# A hostgroup for network printers
+#define hostgroup {
+# hostgroup_name network-printers ; The name of the hostgroup
+# alias Network Printers ; Long name of the group
+#}
+
+###############################################################################
+###############################################################################
+#
+# SERVICE DEFINITIONS
+#
+###############################################################################
+###############################################################################
+# Create a service for monitoring the status of the printer
+# Change the host_name to match the name of the host you defined above
+# If the printer has an SNMP community string other than "public", change the check_command directive to reflect that
+#define service {
+# service_description Printer Status ; The service description
+# host_name hplj2605dn ; The name of the host the service is associated with
+# use generic-service ; Inherit values from a template
+# check_command check_hpjd!-C public ; The command used to monitor the service
+# check_interval 10 ; Check the service every 10 minutes under normal conditions
+# retry_interval 1 ; Re-check the service every minute until its final/hard state is determined
+#}
+#
+## Create a service for "pinging" the printer occassionally. Useful for monitoring RTA, packet loss, etc.
+#define service {
+# service_description PING
+# host_name hplj2605dn
+# use generic-service
+# check_command check_ping!3000.0,80%!5000.0,100%
+# check_interval 10
+# retry_interval 1
+#}
+#
diff --git a/templates/monitor/sunetdrive_sites.cfg.erb b/templates/monitor/sunetdrive_sites.cfg.erb
new file mode 100644
index 0000000..dd89e3e
--- /dev/null
+++ b/templates/monitor/sunetdrive_sites.cfg.erb
@@ -0,0 +1,209 @@
+define contact {
+ contact_name slack
+ alias Slack
+ service_notification_period 24x7
+ host_notification_period 24x7
+ service_notification_options w,u,c,r
+ host_notification_options d,r
+ service_notification_commands notify-service-by-slack
+ host_notification_commands notify-host-by-slack
+}
+# 'notify-service-by-slack' command definition
+define command {
+ command_name notify-service-by-slack
+ command_line SERVICESTATE="$SERVICESTATE$" \
+ HOSTNAME="$HOSTNAME$" \
+ SERVICEDISPLAYNAME="$SERVICEDISPLAYNAME$" \
+ SERVICEOUTPUT="$SERVICEOUTPUT$" \
+                 /usr/local/bin/slack_nagios.sh > /tmp/slack.log 2>&1
+}
+
+# 'notify-host-by-slack' command definition
+# Host notifications reuse the environment variable names the slack script
+# expects, but fill them from host macros (service macros are empty in a
+# host-notification context).
+define command {
+    command_name notify-host-by-slack
+    command_line SERVICESTATE="$HOSTSTATE$" \
+                 HOSTNAME="$HOSTNAME$" \
+                 SERVICEDISPLAYNAME="host check" \
+                 SERVICEOUTPUT="$HOSTOUTPUT$" \
+                 /usr/local/bin/slack_nagios.sh > /tmp/slack.log 2>&1
+}
+define host {
+ name monitor-site ; The name of this host template
+ notifications_enabled 1 ; Host notifications are enabled
+ event_handler_enabled 1 ; Host event handler is enabled
+ flap_detection_enabled 1 ; Flap detection is enabled
+ process_perf_data 1 ; Process performance data
+ retain_status_information 1 ; Retain status information across program restarts
+ retain_nonstatus_information 1 ; Retain non-status information across program restarts
+    check_period                    24x7
+    check_command                   check_ping_4!'5000,100%'!'5000,100%'
+    max_check_attempts              10
+    notification_interval           0
+    notification_period             24x7
+ notification_options d,u,r
+ contact_groups admins
+ register 0 ; DONT REGISTER THIS DEFINITION - ITS NOT A REAL HOST, JUST A TEMPLATE!
+}
+define command {
+ command_name check_nextcloud
+ command_line /usr/lib/nagios/plugins/check_http -H '$HOSTADDRESS$' --ssl -u '/status.php' -s '"installed":true' -E
+}
+define command {
+ command_name check_nextcloud_version
+ command_line /usr/lib/nagios/plugins/check_http -H '$HOSTADDRESS$' --ssl -u '/status.php' -s '"version":"$ARG1$"'
+}
+define command {
+ command_name check_satosa
+ command_line /usr/lib/nagios/plugins/check_http -H '$HOSTADDRESS$' --ssl -u '/healthcheck' -s '"status": "ok"' -E
+}
+define hostgroup {
+ alias SUNET Drive Nextcloud sites
+ hostgroup_name prod-sites
+}
+define servicegroup {
+ alias SUNET Drive Nextcloud sites
+ servicegroup_name prod-sites
+}
+define hostgroup {
+ alias SUNET Drive Nextcloud sites test
+ hostgroup_name test-sites
+}
+define servicegroup {
+    alias                           SUNET Drive Nextcloud sites test
+ servicegroup_name test-sites
+}
+<% proxies = ['idp-proxy.drive.test.sunet.se', 'drive-idp-proxy.sunet.se'] %>
+<% proxies.each do | proxy | -%>
+# <%= proxy %>
+define host {
+ action_url /grafana/dashboard/script/histou.js?host=$HOSTNAME$&theme=light&annotations=true
+ notes_url https://<%= proxy %>/healthcheck
+ address <%= proxy %>
+ alias <%= proxy %>
+ host_name <%= proxy %>
+<% if proxy.match('test') -%>
+ hostgroups test-sites
+<% else -%>
+ hostgroups prod-sites
+<% end -%>
+ use monitor-site
+}
+define service {
+ notes_url https://<%= proxy %>/healthcheck
+ action_url /grafana/dashboard/script/histou.js?host=$HOSTNAME$&service=$SERVICEDISPLAYNAME$&theme=light&annotations=true
+ check_command check_satosa
+ check_interval 5
+ check_period 24x7
+<% if @environment == 'prod' and not proxy.match('test') %>
+ contacts slack
+<% else -%>
+ contact_groups naemon-admins
+<% end -%>
+ host_name <%= proxy %>
+ max_check_attempts 3
+ notification_interval 60
+ notification_period 24x7
+ retry_interval 1
+ service_description Satosa healthcheck
+<% if proxy.match('test') -%>
+ servicegroups test-sites
+<% else -%>
+ servicegroups prod-sites
+<% end -%>
+}
+<% end %>
+<% @sites.each do | site | -%>
+# <%= site %>
+define host {
+ action_url /grafana/dashboard/script/histou.js?host=$HOSTNAME$&theme=light&annotations=true
+ notes_url https://<%= site %>
+ address <%= site %>
+ alias <%= site %>
+ host_name <%= site %>
+<% if site.match('test') -%>
+ hostgroups test-sites
+<% else -%>
+ hostgroups prod-sites
+<% end -%>
+ use monitor-site
+}
+<% if site.match('lookup') %>
+define service {
+ notes_url https://<%= site %>
+ action_url /grafana/dashboard/script/histou.js?host=$HOSTNAME$&service=$SERVICEDISPLAYNAME$&theme=light&annotations=true
+ check_command check_https
+ check_interval 5
+ check_period 24x7
+<% if @environment == 'prod' %>
+ contacts slack
+<% else -%>
+ contact_groups naemon-admins
+<% end -%>
+ host_name <%= site %>
+ max_check_attempts 3
+ notification_interval 60
+ notification_period 24x7
+ retry_interval 1
+ service_description HTTPS
+<% if site.match('test') -%>
+ servicegroups test-sites
+<% else -%>
+ servicegroups prod-sites
+<% end -%>
+}
+<% end -%>
+<% unless site.match('lookup') %>
+define service {
+ notes_url https://<%= site %>/status.php
+ action_url /grafana/dashboard/script/histou.js?host=$HOSTNAME$&service=$SERVICEDISPLAYNAME$&theme=light&annotations=true
+ check_command check_nextcloud
+ check_interval 5
+ check_period 24x7
+<% cur_cust = site.sub(/\.drive.*/, '') %>
+<% if @environment == 'prod' and not site.match('test') and @fullnodes.include?(cur_cust) %>
+ contacts slack
+<% else -%>
+ contact_groups naemon-admins
+<% end -%>
+ host_name <%= site %>
+ max_check_attempts 3
+ notification_interval 60
+ notification_period 24x7
+ retry_interval 1
+ service_description Nextcloud status
+<% if site.match('test') -%>
+ servicegroups test-sites
+<% else -%>
+ servicegroups prod-sites
+<% end -%>
+}
+define service {
+ notes_url https://<%= site %>/status.php
+ action_url /grafana/dashboard/script/histou.js?host=$HOSTNAME$&service=$SERVICEDISPLAYNAME$&theme=light&annotations=true
+<% if site.match('test') -%>
+ check_command check_nextcloud_version!'<%= @nextcloud_version_test %>'
+<% else -%>
+ check_command check_nextcloud_version!'<%= @nextcloud_version_prod %>'
+<% end -%>
+ check_interval 5
+ check_period 24x7
+ contact_groups naemon-admins
+ host_name <%= site %>
+ max_check_attempts 3
+ notification_interval 60
+ notification_period 24x7
+ retry_interval 1
+ service_description Nextcloud version
+<% if site.match('test') -%>
+ servicegroups test-sites
+<% else -%>
+ servicegroups prod-sites
+<% end -%>
+}
+<% end -%>
+<% end -%>
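+# Rendered-output sketch: for a hypothetical prod site "foo.drive.sunet.se"
+# the loops above emit one host in hostgroup prod-sites plus two services,
+# "Nextcloud status" (escalated to the slack contact when "foo" is a full
+# node) and "Nextcloud version" (always naemon-admins); lookup and idp-proxy
+# hosts get an HTTPS or Satosa healthcheck service instead.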
diff --git a/templates/monitor/sunetdrive_ssl_checks.cfg.erb b/templates/monitor/sunetdrive_ssl_checks.cfg.erb
new file mode 100644
index 0000000..3b63892
--- /dev/null
+++ b/templates/monitor/sunetdrive_ssl_checks.cfg.erb
@@ -0,0 +1,67 @@
+# 'check_tls_cert' command definition
+define command {
+ command_name check_tls_cert
+ command_line /usr/lib/nagios/plugins/check_http -H '$HOSTADDRESS$' -C 30,14
+}
+define command {
+ command_name check_tls_cert_with_port
+ command_line /usr/lib/nagios/plugins/check_http -H '$HOSTADDRESS$' --sni '$HOSTADDRESS$' -C '$ARG1$','$ARG2$' -p '$ARG3$'
+}
+define hostgroup {
+ alias SUNET Drive TLS Servers
+ hostgroup_name tls-servers
+}
+define hostgroup {
+ alias External TLS Servers
+ hostgroup_name external-tls-servers
+}
+define servicegroup {
+ alias SUNET Drive TLS Servers
+ servicegroup_name tls-servers
+}
+define servicegroup {
+ alias External Drive TLS Servers
+ servicegroup_name external-tls-servers
+}
+<% @tls_servers.each do | server | -%>
+# <%= server %>
+define service {
+ check_command check_tls_cert
+ check_interval 5
+ check_period 24x7
+ contact_groups naemon-admins
+ host_name <%= server %>
+ max_check_attempts 3
+ notification_interval 60
+ notification_period 24x7
+ retry_interval 1
+ service_description TLS
+ servicegroups tls-servers
+}
+<% end -%>
+<% @tls_servers_with_port.each do | server | -%>
+# <%= server['name'] %>
+define host {
+ address <%= server['name'] %>
+ alias <%= server['name'] %>
+ host_name <%= server['name'] %>
+ use monitor-site
+ hostgroups external-tls-servers
+}
+<% server['ports'].each do |port|%>
+define service {
+ action_url https://<%= server['name'] %>:<%= port %>
+ check_command check_tls_cert_with_port!<%= server['warn']%>!<%= server['crit']%>!<%= port %>
+ check_interval 5
+ check_period 24x7
+ contact_groups naemon-admins
+ host_name <%= server['name'] %>
+ max_check_attempts 3
+ notification_interval 60
+ notification_period 24x7
+ retry_interval 1
+ service_description TLS <%= port %>
+ servicegroups external-tls-servers
+}
+<% end -%>
+<% end -%>
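+# The ERB above assumes hiera data shaped roughly like this (hypothetical
+# values, shown only to document the expected structure):
+#   tls_servers: ['monitor.drive.sunet.se']
+#   tls_servers_with_port:
+#     - name: 'mail.example.se'
+#       warn: 30
+#       crit: 14
+#       ports: [25, 587]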
diff --git a/templates/monitor/sunetdrive_thruk_templates.conf.erb b/templates/monitor/sunetdrive_thruk_templates.conf.erb
new file mode 100644
index 0000000..4d0e2f6
--- /dev/null
+++ b/templates/monitor/sunetdrive_thruk_templates.conf.erb
@@ -0,0 +1,65 @@
+define host {
+ name thruk-bp-template
+ address Business Process
+ active_checks_enabled 0
+ check_command check_thruk_bp
+ check_interval 1
+ check_period thruk_24x7
+ event_handler_enabled 1
+ flap_detection_enabled 1
+ max_check_attempts 3
+ notification_interval 120
+ notification_options d,u,r
+ notification_period thruk_24x7
+ notifications_enabled 1
+ process_perf_data 1
+ register 0
+ retain_nonstatus_information 1
+ retain_status_information 1
+ retry_interval 1
+ _WORKER local
+}
+
+define service {
+ name thruk-bp-node-template
+ active_checks_enabled 0
+ check_command check_thruk_bp
+ check_freshness 0
+ check_interval 1
+ check_period thruk_24x7
+ event_handler_enabled 1
+ flap_detection_enabled 1
+ is_volatile 0
+ max_check_attempts 3
+ notification_interval 120
+ notification_options w,u,c,r
+ notification_period thruk_24x7
+ notifications_enabled 1
+ obsess_over_service 1
+ passive_checks_enabled 1
+ process_perf_data 1
+ register 0
+ retain_nonstatus_information 1
+ retain_status_information 1
+ retry_interval 1
+ _WORKER local
+}
+
+define command {
+ command_name check_thruk_bp
+ command_line thruk -a bp $_HOSTTHRUK_BP_ID$
+}
+
+define timeperiod {
+ timeperiod_name thruk_24x7
+ alias 24 Hours A Day, 7 Days A Week
+ monday 00:00-24:00
+ tuesday 00:00-24:00
+ wednesday 00:00-24:00
+ thursday 00:00-24:00
+ friday 00:00-24:00
+ saturday 00:00-24:00
+ sunday 00:00-24:00
+}
+
+
diff --git a/templates/monitor/switch.cfg.erb b/templates/monitor/switch.cfg.erb
new file mode 100644
index 0000000..83a6d2d
--- /dev/null
+++ b/templates/monitor/switch.cfg.erb
@@ -0,0 +1,77 @@
+###############################################################################
+# SWITCH.CFG - SAMPLE CONFIG FILE FOR MONITORING A SWITCH
+#
+#
+# NOTES: This config file assumes that you are using the sample configuration
+# files that get installed with the Naemon quickstart guide.
+#
+###############################################################################
+###############################################################################
+###############################################################################
+#
+# HOST DEFINITIONS
+#
+###############################################################################
+###############################################################################
+# Define the switch that we'll be monitoring
+#define host {
+# host_name linksys-srw224p ; The name we're giving to this switch
+# alias Linksys SRW224P Switch ; A longer name associated with the switch
+# address 192.168.1.253 ; IP address of the switch
+# use generic-switch ; Inherit default values from a template
+# hostgroups switches ; Host groups this switch is associated with
+#}
+#
+################################################################################
+################################################################################
+##
+## HOST GROUP DEFINITIONS
+##
+################################################################################
+################################################################################
+## Create a new hostgroup for switches
+#define hostgroup {
+# hostgroup_name switches ; The name of the hostgroup
+# alias Network Switches ; Long name of the group
+#}
+#
+################################################################################
+################################################################################
+##
+## SERVICE DEFINITIONS
+##
+################################################################################
+################################################################################
+## Create a service to PING to switch
+#define service {
+# service_description PING ; The service description
+# host_name linksys-srw224p ; The name of the host the service is associated with
+# use generic-service ; Inherit values from a template
+# check_command check_ping!200.0,20%!600.0,60% ; The command used to monitor the service
+# check_interval 5 ; Check the service every 5 minutes under normal conditions
+# retry_interval 1 ; Re-check the service every minute until its final/hard state is determined
+#}
+#
+## Monitor uptime via SNMP
+#define service {
+# service_description Uptime
+# host_name linksys-srw224p
+# use generic-service ; Inherit values from a template
+# check_command check_snmp!-C public -o sysUpTime.0
+#}
+#
+## Monitor Port 1 status via SNMP
+#define service {
+# service_description Port 1 Link Status
+# host_name linksys-srw224p
+# use generic-service ; Inherit values from a template
+# check_command check_snmp!-C public -o ifOperStatus.1 -r 1 -m RFC1213-MIB
+#}
+#
+## Monitor bandwidth via MRTG logs
+#define service {
+# service_description Port 1 Bandwidth Usage
+# host_name linksys-srw224p
+# use generic-service ; Inherit values from a template
+# check_command check_local_mrtgtraf!/var/lib/mrtg/192.168.1.253_1.log!AVG!1000000,1000000!5000000,5000000!10
+#}
diff --git a/templates/monitor/thruk_local.conf.erb b/templates/monitor/thruk_local.conf.erb
new file mode 100644
index 0000000..c442847
--- /dev/null
+++ b/templates/monitor/thruk_local.conf.erb
@@ -0,0 +1,10 @@
+############################################
+# put your own settings into this file
+# settings from this file will override
+# those from the thruk.conf
+# Note: This file is managed by puppet
+############################################
+cookie_auth_verbose=1
+cookie_auth_login_url = thruk/cgi-bin/login.cgi
+cookie_auth_restricted_url = https://<%= @monitorhost %>/thruk/cgi-bin/restricted.cgi
+api_keys_enabled = 1
diff --git a/templates/monitor/timeperiods.cfg.erb b/templates/monitor/timeperiods.cfg.erb
new file mode 100644
index 0000000..3fc6d71
--- /dev/null
+++ b/templates/monitor/timeperiods.cfg.erb
@@ -0,0 +1,81 @@
+###############################################################################
+# TIMEPERIODS.CFG - SAMPLE TIMEPERIOD DEFINITIONS
+#
+#
+# NOTES: This config file provides you with some example timeperiod definitions
+# that you can reference in host, service, contact, and dependency
+# definitions.
+#
+# You don't need to keep timeperiods in a separate file from your other
+# object definitions. This has been done just to make things easier to
+# understand.
+#
+###############################################################################
+###############################################################################
+###############################################################################
+#
+# TIME PERIODS
+#
+###############################################################################
+###############################################################################
+# This defines a timeperiod where all times are valid for checks,
+# notifications, etc. The classic "24x7" support nightmare. :-)
+define timeperiod {
+ timeperiod_name 24x7
+ alias 24 Hours A Day, 7 Days A Week
+ monday 00:00-24:00
+ tuesday 00:00-24:00
+ wednesday 00:00-24:00
+ thursday 00:00-24:00
+ friday 00:00-24:00
+ saturday 00:00-24:00
+ sunday 00:00-24:00
+}
+
+# 'workhours' timeperiod definition
+define timeperiod {
+ timeperiod_name workhours
+ alias Normal Work Hours
+ monday 09:00-17:00
+ tuesday 09:00-17:00
+ wednesday 09:00-17:00
+ thursday 09:00-17:00
+ friday 09:00-17:00
+}
+
+# 'none' timeperiod definition
+define timeperiod {
+ timeperiod_name none
+ alias No Time Is A Good Time
+}
+
+# Some U.S. holidays
+# Note: The timeranges for each holiday are meant to *exclude* the holidays from being
+# treated as a valid time for notifications, etc. You probably don't want your pager
+# going off on New Year's. Although your employer might... :-)
+define timeperiod {
+ name us-holidays
+ timeperiod_name us-holidays
+ alias U.S. Holidays
+ december 25 00:00-00:00 ; Christmas
+ january 1 00:00-00:00 ; New Years
+ july 4 00:00-00:00 ; Independence Day
+ monday -1 may 00:00-00:00 ; Memorial Day (last Monday in May)
+ monday 1 september 00:00-00:00 ; Labor Day (first Monday in September)
+ thursday 4 november 00:00-00:00 ; Thanksgiving (4th Thursday in November)
+}
+
+# This defines a modified "24x7" timeperiod that covers every day of the
+# year, except for U.S. holidays (defined in the timeperiod above).
+define timeperiod {
+ timeperiod_name 24x7_sans_holidays
+ alias 24x7 Sans Holidays
+ use us-holidays ; Get holiday exceptions from other timeperiod
+ monday 00:00-24:00
+ tuesday 00:00-24:00
+ wednesday 00:00-24:00
+ thursday 00:00-24:00
+ friday 00:00-24:00
+ saturday 00:00-24:00
+ sunday 00:00-24:00
+}
diff --git a/templates/monitor/windows.cfg.erb b/templates/monitor/windows.cfg.erb
new file mode 100644
index 0000000..e97250a
--- /dev/null
+++ b/templates/monitor/windows.cfg.erb
@@ -0,0 +1,114 @@
+###############################################################################
+# WINDOWS.CFG - SAMPLE CONFIG FILE FOR MONITORING A WINDOWS MACHINE
+#
+#
+# NOTES: This config file assumes that you are using the sample configuration
+# files that get installed with the Naemon quickstart guide.
+#
+###############################################################################
+###############################################################################
+###############################################################################
+#
+# HOST DEFINITIONS
+#
+###############################################################################
+###############################################################################
+# Define a host for the Windows machine we'll be monitoring
+# Change the host_name, alias, and address to fit your situation
+#define host {
+# host_name winserver ; The name we're giving to this host
+# alias My Windows Server ; A longer name associated with the host
+# address 192.168.1.2 ; IP address of the host
+# use windows-server ; Inherit default values from a template
+#}
+#
+################################################################################
+################################################################################
+##
+## HOST GROUP DEFINITIONS
+##
+################################################################################
+################################################################################
+## Define a hostgroup for Windows machines
+## All hosts that use the windows-server template will automatically be a member of this group
+#define hostgroup {
+# hostgroup_name windows-servers ; The name of the hostgroup
+# alias Windows Servers ; Long name of the group
+#}
+#
+################################################################################
+################################################################################
+##
+## SERVICE DEFINITIONS
+##
+################################################################################
+################################################################################
+## Create a service for monitoring the version of NSClient++ that is installed
+## Change the host_name to match the name of the host you defined above
+#define service {
+# service_description NSClient++ Version
+# host_name winserver
+# use generic-service
+# check_command check_nt!CLIENTVERSION
+#}
+#
+## Create a service for monitoring the uptime of the server
+## Change the host_name to match the name of the host you defined above
+#define service {
+# service_description Uptime
+# host_name winserver
+# use generic-service
+# check_command check_nt!UPTIME
+#}
+#
+## Create a service for monitoring CPU load
+## Change the host_name to match the name of the host you defined above
+#define service {
+# service_description CPU Load
+# host_name winserver
+# use generic-service
+# check_command check_nt!CPULOAD!-l 5,80,90
+#}
+#
+## Create a service for monitoring memory usage
+## Change the host_name to match the name of the host you defined above
+#define service {
+# service_description Memory Usage
+# host_name winserver
+# use generic-service
+# check_command check_nt!MEMUSE!-w 80 -c 90
+#}
+#
+## Create a service for monitoring C:\ disk usage
+## Change the host_name to match the name of the host you defined above
+#define service {
+# service_description C:\ Drive Space
+# host_name winserver
+# use generic-service
+# check_command check_nt!USEDDISKSPACE!-l c -w 80 -c 90
+#}
+#
+## Create a service for monitoring the W3SVC service
+## Change the host_name to match the name of the host you defined above
+#define service {
+# service_description W3SVC
+# host_name winserver
+# use generic-service
+# check_command check_nt!SERVICESTATE!-d SHOWALL -l W3SVC
+#}
+#
+## Create a service for monitoring the Explorer.exe process
+## Change the host_name to match the name of the host you defined above
+#define service {
+# service_description Explorer
+# host_name winserver
+# use generic-service
+# check_command check_nt!PROCSTATE!-d SHOWALL -l Explorer.exe
+#}
+#
+## example service group
+#define servicegroup {
+# servicegroup_name windows
+# alias Some Windows Checks
+# members winserver,CPU Load,winserver,Memory Usage
+#}
diff --git a/templates/multinode/complete_reinstall.erb.sh b/templates/multinode/complete_reinstall.erb.sh
new file mode 100644
index 0000000..e448a0f
--- /dev/null
+++ b/templates/multinode/complete_reinstall.erb.sh
@@ -0,0 +1,42 @@
+#!/usr/bin/env bash
+
+config_php='/var/www/html/config/config.php'
+dbhost="<%= @dbhost %>"
+mysql_user_password="<%= @mysql_user_password %>"
+admin_password="<%= @admin_password %>"
+location="<%= @location %>"
+bucket="<%= @s3_bucket %>"
+customer="<%= @customer %>"
+
+/usr/bin/mysql -e "drop database nextcloud" -u nextcloud -p"${mysql_user_password}" -h "${dbhost}" >/dev/null 2>&1
+/usr/bin/mysql -e "create database nextcloud" -u nextcloud -p"${mysql_user_password}" -h "${dbhost}" >/dev/null 2>&1
+if [[ "x${location}" != "x" || "x${bucket}" != "x" ]]; then
+ bucket_content=$(/usr/bin/rclone ls "${location}":"${bucket}" --config /rclone.conf 2>/dev/null)
+ if [[ "x${bucket_content}" != "x" ]]; then
+ /usr/bin/rclone purge "${location}:${bucket}" --config /rclone.conf >/dev/null 2>&1
+ /usr/bin/rclone mkdir "${location}:${bucket}" --config /rclone.conf >/dev/null 2>&1
+ fi
+fi
+: >${config_php}
+su - www-data -s /bin/bash <<EOF
+cd /var/www/html
+php --define apc.enable_cli=1 ./occ maintenance:install --database mysql --database-host "${dbhost}" --database-name nextcloud --database-user nextcloud --database-pass "${mysql_user_password}" --admin-user admin --admin-pass "${admin_password}" > /dev/null 2>&1 && \
+php --define apc.enable_cli=1 ./occ files:recommendations:recommend admin > /dev/null 2>&1 && \
+php --define apc.enable_cli=1 ./occ app:enable globalsiteselector > /dev/null 2>&1 && \
+php --define apc.enable_cli=1 ./occ app:list > /dev/null 2>&1 && \
+php --define apc.enable_cli=1 ./occ app:enable files_external > /dev/null 2>&1 && \
+php --define apc.enable_cli=1 ./occ app:enable twofactor_totp > /dev/null 2>&1 && \
+php --define apc.enable_cli=1 ./occ app:enable twofactor_u2f > /dev/null 2>&1 && \
+php --define apc.enable_cli=1 ./occ app:enable admin_audit > /dev/null 2>&1
+EOF
+/usr/bin/wget --no-check-certificate -q https://localhost/index.php -O /dev/null
+instanceid=$(grep -E "^ 'instanceid'" ${config_php} | awk -F "'" '{print $4}')
+secret=$(grep -E "^ 'secret'" ${config_php} | awk -F "'" '{print $4}')
+passwordsalt=$(grep -E "^ 'passwordsalt'" ${config_php} | awk -F "'" '{print $4}')
+echo "${customer}_instanceid: DEC::PKCS7[${instanceid}]!"
+echo "${customer}_secret: DEC::PKCS7[${secret}]!"
+echo "${customer}_passwordsalt: DEC::PKCS7[${passwordsalt}]!"
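+# The three lines above are hiera-eyaml edit syntax: DEC::PKCS7[...]! marks a
+# value to be (re-)encrypted, so the output can be pasted straight into an
+# "eyaml edit" session for this customer's secrets.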
+
+exit 0
diff --git a/templates/multinode/compress-logs.erb.sh b/templates/multinode/compress-logs.erb.sh
new file mode 100644
index 0000000..9468c85
--- /dev/null
+++ b/templates/multinode/compress-logs.erb.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+no_files=30 # Keep this many files as an archive; the script is run once a week
+# We sleep a deterministic amount of time, between 0 and 128 minutes: always the same
+# on a specific host, but different between hosts
+sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk '{print $1}') / 2))m
+
+for logfile in $(ls /opt/multinode/*/{nextcloud.log,server/server.log}); do
+ if [[ -f ${logfile}.gz.${no_files} ]]; then
+ rm ${logfile}.gz.${no_files}
+ fi
+ for i in $(seq 1 $((no_files - 1)) | sort -nr); do
+ if [[ -f ${logfile}.gz.${i} ]]; then
+ mv ${logfile}.gz.${i} ${logfile}.gz.$((i + 1))
+ fi
+ done
+ if [[ -f ${logfile}.gz ]]; then
+ mv ${logfile}.gz ${logfile}.gz.1
+ fi
+ cat ${logfile} | gzip >${logfile}.gz && echo '' >${logfile}
+done
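+
+# Net effect per run: each logfile is gzipped to <logfile>.gz and truncated,
+# with older archives shifted .gz.1 -> .gz.2 ... up to .gz.30, after which
+# the oldest is deleted.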
diff --git a/templates/multinode/cron.erb.sh b/templates/multinode/cron.erb.sh
new file mode 100755
index 0000000..c4d06dd
--- /dev/null
+++ b/templates/multinode/cron.erb.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+for container in $(docker ps | grep docker.sunet.se/drive/nextcloud-custom | grep -v cron | awk '{print $NF}'); do
+ /usr/bin/docker exec -u www-data ${container} php --define apc.enable_cli=1 /var/www/html/cron.php
+done
diff --git a/templates/multinode/docker-compose_cache.yml.erb b/templates/multinode/docker-compose_cache.yml.erb
new file mode 100644
index 0000000..d3043a1
--- /dev/null
+++ b/templates/multinode/docker-compose_cache.yml.erb
@@ -0,0 +1,21 @@
+version: '3.2'
+
+services:
+
+ redis-server:
+ image: redis:alpine
+ dns:
+ - 89.46.20.75
+ - 89.46.21.29
+ - 89.32.32.32
+ volumes:
+ - <%= @redis_conf_dir %>:/data
+ command: redis-server /data/redis.conf --loglevel verbose
+ restart: always
+ networks:
+ - mariadb<%= @customer %>_<%= @customer %>
+
+networks:
+ mariadb<%= @customer %>_<%= @customer %>:
+ external: true
+
diff --git a/templates/multinode/docker-compose_mariadb.yml.erb b/templates/multinode/docker-compose_mariadb.yml.erb
new file mode 100644
index 0000000..cd7e6fc
--- /dev/null
+++ b/templates/multinode/docker-compose_mariadb.yml.erb
@@ -0,0 +1,27 @@
+version: '3.2'
+
+services:
+
+ db:
+ image: docker.sunet.se/drive/mariadb
+ restart: always
+ volumes:
+ - /opt/multinode/<%= @customer %>/mariadb-<%= @customer %>/datadir:/var/lib/mysql
+ - /opt/multinode/<%= @customer %>/mariadb-<%= @customer %>/init:/docker-entrypoint-initdb.d
+ - /opt/multinode/<%= @customer %>/mariadb-<%= @customer %>/conf:/etc/mysql/mariadb.conf.d
+ - /opt/multinode/<%= @customer %>/mariadb-<%= @customer %>/backups:/backups
+ - /opt/multinode/<%= @customer %>/mariadb-<%= @customer %>/do_backup.sh:/do_backup.sh
+ networks:
+ - <%= @customer %>
+ dns:
+ - 89.46.20.75
+ - 89.46.21.29
+ - 89.32.32.32
+ environment:
+ - MYSQL_ROOT_PASSWORD=<%= @mysql_root_password %>
+ - BOOTSTRAP=<%= @bootstrap %>
+ - FORCE_BOOTSTRAP=0
+ tty: true
+
+networks:
+ <%= @customer %>:
diff --git a/templates/multinode/docker-compose_nextcloud.yml.erb b/templates/multinode/docker-compose_nextcloud.yml.erb
new file mode 100644
index 0000000..70ea3f1
--- /dev/null
+++ b/templates/multinode/docker-compose_nextcloud.yml.erb
@@ -0,0 +1,31 @@
+version: '3.2'
+
+services:
+
+ app:
+ image: docker.sunet.se/drive/nextcloud-custom:<%= @nextcloud_version %>
+ restart: always
+ volumes:
+ - /opt/multinode/<%= @customer %>/complete_reinstall.sh:/complete_reinstall.sh
+ - /opt/nextcloud/000-default.conf:/etc/apache2/sites-enabled/000-default.conf
+ - /opt/nextcloud/404.html:/var/www/html/404.html
+ - /opt/nextcloud/apache.php.ini:/etc/php/8.0/apache2/php.ini
+ - /opt/nextcloud/cli.php.ini:/etc/php/8.0/cli/php.ini
+ - <%= @config_php_path %>:/var/www/html/config/config.php
+ - <%= @nextcloud_log_path %>:/var/www/html/data/nextcloud.log
+ - <%= @rclone_conf_path %>:/rclone.conf
+ networks:
+ - default
+ - mariadb<%= @customer %>_<%= @customer %>
+ dns:
+ - 89.46.20.75
+ - 89.46.21.29
+ - 89.32.32.32
+ ports:
+ - <%= @https_port %>:443
+ command: apachectl -D FOREGROUND
+ tty: true
+
+networks:
+ mariadb<%= @customer %>_<%= @customer %>:
+ external: true
diff --git a/templates/multinode/get_containers b/templates/multinode/get_containers
new file mode 100644
index 0000000..b84c68a
--- /dev/null
+++ b/templates/multinode/get_containers
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+type=${1}
+
+if [[ -z ${type} ]]; then
+ type="nextcloud-custom"
+fi
+for container in $(docker ps | grep "${type}" | awk '{print $NF}' | grep -E -v '^$'); do
+ echo "${container}"
+done
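+
+# Usage (the container type defaults to the Nextcloud app image):
+#   ./get_containers            # all nextcloud-custom containers
+#   ./get_containers mariadb    # all mariadb containers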
diff --git a/templates/multinode/my.cnf.erb b/templates/multinode/my.cnf.erb
new file mode 100644
index 0000000..807a2a6
--- /dev/null
+++ b/templates/multinode/my.cnf.erb
@@ -0,0 +1,36 @@
+[mysqld]
+
+# General
+default_storage_engine = InnoDB
+binlog_format = ROW
+character_set_server = utf8mb4
+collation_server = utf8mb4_general_ci
+skip_name_resolve = ON
+skip_external_locking = ON
+local_infile = 0
+max_allowed_packet = 128M
+max_connections = 750
+
+# Binary logging and replication
+log_bin = binlog
+log_slave_updates = ON
+server_id = 1000 # same on all Galera nodes in the same segment
+expire_logs_days = 7
+gtid_domain_id = 1000
+gtid_strict_mode = ON
+gtid_ignore_duplicates = ON
+
+# Innodb
+innodb_buffer_pool_size = 4G # 60-75% of total RAM
+innodb_flush_log_at_trx_commit = 0
+innodb_log_file_size = 512M
+innodb_flush_method = O_DIRECT
+innodb_file_per_table = 1
+innodb_autoinc_lock_mode = 2
+innodb_rollback_on_timeout = 1
+transaction_isolation = 'READ-COMMITTED'
+innodb_io_capacity = 2000 # IOPS dependent
+innodb_io_capacity_max = 4000 # IOPS dependent
+innodb_read_io_threads = 4 # CPU dependent
+innodb_write_io_threads = 4 # CPU dependent
+
diff --git a/templates/multinode/prune.erb.sh b/templates/multinode/prune.erb.sh
new file mode 100755
index 0000000..6a891d3
--- /dev/null
+++ b/templates/multinode/prune.erb.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+if [[ $(df --output=ipcent /var/lib/docker/ | tail -1 | sed 's/[ %]//g') -gt 50 ]]; then
+ touch /etc/no-automatic-cosmos
+ systemctl stop docker sunet-*.service
+ #docker system prune -af --volumes
+ rm -rf /var/lib/docker
+ rm /etc/no-automatic-cosmos
+ safer_reboot
+fi
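Note: the prune guard above keys off inode usage rather than disk usage. A rough sketch of
what it evaluates (sample output is illustrative, not from a real host):

  $ df --output=ipcent /var/lib/docker/
  IUse%
    62%

  # tail -1 | sed 's/[ %]//g' reduces this to "62"; the wipe-and-reboot branch
  # only runs once more than half of the filesystem's inodes are in use.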
diff --git a/templates/multinode/rclone.conf.erb b/templates/multinode/rclone.conf.erb
new file mode 100644
index 0000000..2c52bbd
--- /dev/null
+++ b/templates/multinode/rclone.conf.erb
@@ -0,0 +1,7 @@
+[<%= @location %>]
+type = s3
+provider = Ceph
+access_key_id = <%= @s3_key %>
+secret_access_key = <%= @s3_secret %>
+endpoint = <%= @s3_host %>
+acl = private
\ No newline at end of file
diff --git a/templates/multinode/redis.conf.erb b/templates/multinode/redis.conf.erb
new file mode 100644
index 0000000..e241753
--- /dev/null
+++ b/templates/multinode/redis.conf.erb
@@ -0,0 +1,2 @@
+requirepass "<%= @redis_host_password %>"
+logfile server.log
diff --git a/templates/multinode/upgrade23-25.erb.sh b/templates/multinode/upgrade23-25.erb.sh
new file mode 100644
index 0000000..ef3a865
--- /dev/null
+++ b/templates/multinode/upgrade23-25.erb.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+touch /etc/no-automatic-cosmos
+for version in 24.0.9.3-1 25.0.3.3-4; do
+ docker pull docker.sunet.se/drive/nextcloud-custom:${version}
+done
+
+function block_for_container {
+ while ! [[ "$( docker container inspect -f '{{.State.Running}}' "${1}" )" == "true" ]]; do
+ echo "Waiting for ${1}"
+ sleep 1s
+ done
+}
+for container in $(get_containers); do
+ customer=$(echo "${container}" | sed -e 's/nextcloud//' -e 's/_app_1//')
+ sed -i "s/ 'version' => '.*',/ 'version' => '24.0.9.3',/" "/opt/multinode/${customer}/config.php"
+ sed -i "s/ 'config_is_read_only' => true,/ 'config_is_read_only' => false,/" "/opt/multinode/${customer}/config.php"
+ sed -i 's#docker.sunet.se/drive/nextcloud-custom:.*-1#docker.sunet.se/drive/nextcloud-custom:24.0.9.3-1#' "/opt/multinode/${customer}/nextcloud-${customer}/docker-compose.yml"
+ systemctl restart "sunet-nextcloud-${customer}"
+ sleep 10s
+ block_for_container "${container}"
+ occ "${container}" upgrade && occ "${container}" db:add-missing-columns && occ "${container}" db:add-missing-indices && occ "${container}" db:add-missing-primary-keys
+ sed -i 's#docker.sunet.se/drive/nextcloud-custom:.*-1#docker.sunet.se/drive/nextcloud-custom:25.0.3.3-4#' "/opt/multinode/${customer}/nextcloud-${customer}/docker-compose.yml"
+ systemctl restart "sunet-nextcloud-${customer}"
+ sleep 10s
+ block_for_container "${container}"
+ occ "${container}" upgrade && occ "${container}" db:add-missing-columns && occ "${container}" db:add-missing-indices && occ "${container}" db:add-missing-primary-keys && occ "${container}" maintenance:repair
+done
+rm /etc/no-automatic-cosmos
diff --git a/templates/ni/docker-compose.yml.erb b/templates/ni/docker-compose.yml.erb
new file mode 100644
index 0000000..800721d
--- /dev/null
+++ b/templates/ni/docker-compose.yml.erb
@@ -0,0 +1,71 @@
+---
+version: '3.4'
+services:
+
+ postgres:
+ image: docker.sunet.se/library/postgres-11:stable
+ expose:
+ - 5432
+ volumes:
+ - /opt/sri/postgresql/data:/var/lib/postgresql/data
+ - /opt/sri/postgresql/init/init-noclook-db.sh:/docker-entrypoint-initdb.d/init-noclook-db.sh
+ - /var/sri/backup/postgres:/var/lib/postgresql/backup
+ environment:
+ - "POSTGRES_USER=postgres"
+ - "POSTGRES_PASSWORD=<% @ni_db_password %>"
+
+ neo4j:
+ image: neo4j:3.5
+ expose:
+ - 7474
+ - 7687
+ volumes:
+ - /opt/sri/neo4j/data:/data
+
+ ni:
+ image: docker.sunet.se/sunet/docker-ni:stable
+ expose:
+ - 8080
+ volumes:
+ - /opt/sri/ni/etc/dotenv:/opt/ni/src/niweb/.env
+ - /opt/sri/ni/log:/var/log/ni
+ - /opt/sri/backup/neo4j:/opt/ni/backup
+ - /opt/sri/staticfiles:/opt/ni/src/niweb/niweb/static
+
+ depends_on:
+ - postgres
+ - neo4j
+
+ sri-front:
+ image: docker.sunet.se/sunet/sri-front:stable
+ environment:
+ - NODE_ENV=production
+ - REACT_APP_API_HOST=<%= @domain %>/api
+ - REACT_APP_COOKIE_DOMAIN=<%= @domain %>
+ - SCHEMA_VERSION=sunet
+ volumes:
+ - /opt/sri/srifrontfiles:/bundle
+
+ nginx:
+ image: docker.sunet.se/eduid/nginx:stable
+ expose:
+ - 443
+ ports:
+ - 443:443
+ volumes:
+ - /opt/sri/nginx/etc/ni.http:/etc/nginx/sites-enabled/ni.conf
+ - /opt/sri/nginx/etc/dhparams.pem:/etc/ssl/dhparams.pem
+ - /etc/dehydrated/certs/<%= @domain %>/fullchain.pem:/etc/dehydrated/certs/<%= @domain %>/fullchain.pem
+ - /etc/dehydrated/certs/<%= @domain %>/privkey.pem:/etc/dehydrated/certs/<%= @domain %>/privkey.pem
+ - /opt/sri/nginx/log:/var/log/nginx
+ - /opt/sri/staticfiles:/var/www/staticfiles
+ - /opt/sri/srifrontfiles:/var/www/srifrontfiles
+ depends_on:
+ - ni
+
+networks:
+ default:
+ driver_opts:
+ com.docker.network.bridge.name: br-sri
+
+
diff --git a/templates/proxysql/check_mysql_server_status.erb b/templates/proxysql/check_mysql_server_status.erb
new file mode 100644
index 0000000..d45d3d3
--- /dev/null
+++ b/templates/proxysql/check_mysql_server_status.erb
@@ -0,0 +1,56 @@
+#!/bin/bash
+
+# This is a modified version of the original https://github.com/fridim/nagios-plugin-check_galera_cluster/blob/master/check_galera_cluster
+
+ST_OK=0
+ST_WA=1
+ST_CR=2
+ST_UK=3
+
+PROGNAME=`basename $0`
+
+print_help() {
+ echo "It is a monitoring plugin to monitor ProxySQL hosts."
+ echo ""
+ echo "It does not take any parameter"
+ exit ${ST_UK}
+}
+
+while getopts "h" OPTION; do
+ case $OPTION in
+ h)
+ print_help
+ exit ${ST_UK}
+ ;;
+ ?)
+ echo "Unknown argument: $1"
+ print_help
+ exit ${ST_UK}
+ ;;
+ esac
+done
+
+
+/usr/bin/docker exec <%= @proxysql_container_name %> mysql -B -N -e '\s;' >/dev/null 2>&1 || {
+ echo "CRITICAL: mysql connection check failed"
+ exit ${ST_CR}
+}
+
+mysql_server_status=$(/usr/bin/docker exec <%= @proxysql_container_name %> mysql -B -N -e "select count(distinct hostname) from main.runtime_mysql_servers where status='ONLINE' and hostgroup_id != 9999")
+mysql_writer_status=$(/usr/bin/docker exec <%= @proxysql_container_name %> mysql -B -N -e "select count(*) FROM main.runtime_mysql_servers where status = 'ONLINE' and hostgroup_id = 10")
+mysql_reader_status=$(/usr/bin/docker exec <%= @proxysql_container_name %> mysql -B -N -e "select count(*) FROM main.runtime_mysql_servers where status = 'ONLINE' and hostgroup_id = 30")
+
+exit_status=${ST_UK}
+output="UNKNOWN"
+if [[ ${mysql_server_status} -eq 3 ]] && [[ ${mysql_writer_status} -eq 1 ]] && [[ ${mysql_reader_status} -gt 0 ]]; then
+ exit_status=${ST_OK}
+ output="OK"
+elif [[ ${mysql_server_status} -lt 3 ]] && [[ ${mysql_writer_status} -eq 1 ]] && [[ ${mysql_reader_status} -gt 0 ]]; then
+ exit_status=${ST_WA}
+ output="WARNING"
+else
+ exit_status=${ST_CR}
+ output="CRITICAL"
+fi
+echo "${output}: number of servers online: ${mysql_server_status}, writers: ${mysql_writer_status}, readers: ${mysql_reader_status}|total=${mysql_server_status};2;1;0;3 writers=${mysql_writer_status};@1:1;@1:1;0;1 readers=${mysql_reader_status};1;0;0;3;"
+exit ${exit_status}
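Note: everything after the "|" in the echo above is standard Nagios/Naemon performance data in
label=value;warn;crit;min;max form. On a healthy three-node cluster the plugin would print
something like this (illustrative, not captured from a live system):

  OK: number of servers online: 3, writers: 1, readers: 3|total=3;2;1;0;3 writers=1;@1:1;@1:1;0;1 readers=3;1;0;0;3;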
diff --git a/templates/proxysql/check_proxysql_server.erb b/templates/proxysql/check_proxysql_server.erb
new file mode 100644
index 0000000..3550fbc
--- /dev/null
+++ b/templates/proxysql/check_proxysql_server.erb
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+# This is a modified version of the original https://github.com/fridim/nagios-plugin-check_galera_cluster/blob/master/check_galera_cluster
+
+ST_OK=0
+ST_WR=1
+ST_CR=2
+ST_UK=3
+
+PROGNAME=`basename $0`
+
+print_help() {
+ echo "It is a monitoring plugin to monitor ProxySQL hosts."
+ echo ""
+ echo "It does not take any parameter"
+ exit $ST_UK
+}
+
+while getopts "h" OPTION; do
+ case $OPTION in
+ h)
+ print_help
+ exit $ST_UK
+ ;;
+ ?)
+ echo "Unknown argument: $1"
+ print_help
+ exit $ST_UK
+ ;;
+ esac
+done
+
+
+/usr/bin/docker exec <%= @proxysql_container_name %> mysql -B -N -e '\s;' >/dev/null 2>&1 || {
+ echo "CRITICAL: mysql connection check failed"
+ exit $ST_CR
+}
+
+proxysql_server_number=$(/usr/bin/docker exec <%= @proxysql_container_name %> mysql -B -N -e "select count(*) from proxysql_servers")
+
+if [ "$proxysql_server_number" == "<%= @proxysql_ok_num %>" ]; then
+ echo "OK: number of SERVERS = $proxysql_server_number"
+ exit $ST_OK
+elif [ "$proxysql_server_number" == "<%= @proxysql_warn_num %>" ]; then
+ echo "WARNING: number of SERVERS = $proxysql_server_number"
+ exit $ST_WR
+else
+ echo "CRITICAL: number of SERVERS = $proxysql_server_number"
+ exit $ST_CR
+fi
diff --git a/templates/proxysql/docker-compose_proxysql.yml.erb b/templates/proxysql/docker-compose_proxysql.yml.erb
new file mode 100644
index 0000000..b3c5213
--- /dev/null
+++ b/templates/proxysql/docker-compose_proxysql.yml.erb
@@ -0,0 +1,30 @@
+version: '3.2'
+
+services:
+
+ proxysql:
+ image: docker.sunet.se/drive/proxysql:<%= @proxysql_version %>
+ ports:
+ - 3306:3306
+ - 6032:6032
+ - 6080:6080
+ environment:
+ INITIALIZE: 1
+ dns:
+ - 89.46.20.75
+ - 89.46.21.29
+ - 89.32.32.32
+ networks:
+ - proxysql
+ restart: unless-stopped
+ volumes:
+ - proxysql:/var/lib/proxysql
+ - /opt/proxysql/proxysql.cnf:/etc/proxysql.cnf
+ - /opt/proxysql/my.cnf:/etc/mysql/conf.d/my.cnf
+
+volumes:
+ proxysql:
+
+networks:
+ proxysql:
+ driver: bridge
diff --git a/templates/proxysql/insert_server_in_proxysql.erb.sh b/templates/proxysql/insert_server_in_proxysql.erb.sh
new file mode 100644
index 0000000..a819ab2
--- /dev/null
+++ b/templates/proxysql/insert_server_in_proxysql.erb.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+PATH="${PATH}:/usr/local/bin"
+restarted="false"
+for index in 1 2 3; do
+ db_ip=$(host "intern-db${index}.$(hostname -d)" | awk '/has address/ {print $NF}')
+ result=$(proxysql "select * from main.mysql_servers where hostname = '${db_ip}' and hostgroup_id = 10")
+ if [[ -z ${result} ]]; then
+ query="INSERT INTO main.mysql_servers (hostgroup_id, hostname, max_connections, comment) VALUES( 10, '${db_ip}', 100, 'Inserted by script at $(date)')"
+ proxysql "${query}"
+ restarted="true"
+ fi
+done
+if [[ "${restarted}" == "true" ]]; then
+ systemctl restart sunet-proxysql.service
+fi
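Note: the bare "proxysql" command used above is assumed to be the small wrapper installed from
templates/proxysql/proxysql.erb.sh further down in this diff, i.e. roughly:

  proxysql "select * from main.mysql_servers where hostgroup_id = 10"
  # expands to: docker exec proxysql_proxysql_1 mysql -B -N -e "select ..."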
diff --git a/templates/proxysql/my.cnf.erb b/templates/proxysql/my.cnf.erb
new file mode 100644
index 0000000..a6473f0
--- /dev/null
+++ b/templates/proxysql/my.cnf.erb
@@ -0,0 +1,6 @@
+[mysql]
+user=admin
+password='<%= @admin_password %>'
+host=127.0.0.1
+port=6032
+prompt="ProxySQL> "
\ No newline at end of file
diff --git a/templates/proxysql/proxysql.cnf.erb b/templates/proxysql/proxysql.cnf.erb
new file mode 100644
index 0000000..8e01a14
--- /dev/null
+++ b/templates/proxysql/proxysql.cnf.erb
@@ -0,0 +1,122 @@
+datadir="/var/lib/proxysql"
+
+# ProxySQL admin configuration section
+admin_variables=
+{
+ admin_credentials="admin:<%= @admin_password%>;cluster_admin:<%= @cluster_admin_password %>"
+ mysql_ifaces="0.0.0.0:6032"
+ refresh_interval=2000
+ web_enabled=true
+ web_port=6080
+ stats_credentials="stats:<%= @admin_password %>"
+ cluster_username="cluster_admin"
+ cluster_password="<%= @cluster_admin_password %>"
+ cluster_check_interval_ms=200
+ cluster_check_status_frequency=100
+ cluster_mysql_query_rules_save_to_disk=true
+ cluster_mysql_servers_save_to_disk=true
+ cluster_mysql_users_save_to_disk=true
+ cluster_proxysql_servers_save_to_disk=true
+ cluster_mysql_query_rules_diffs_before_sync=3
+ cluster_mysql_servers_diffs_before_sync=3
+ cluster_mysql_users_diffs_before_sync=3
+ cluster_proxysql_servers_diffs_before_sync=3
+}
+
+# MySQL/MariaDB related section
+mysql_variables=
+{
+ threads=4
+ max_connections=2048
+ default_query_delay=0
+ default_query_timeout=36000000
+ have_compress=true
+ poll_timeout=2000
+ interfaces="0.0.0.0:3306;/tmp/proxysql.sock"
+ default_schema="information_schema"
+ stacksize=1048576
+ server_version="10.5.5"
+ connect_timeout_server=10000
+ monitor_history=60000
+ monitor_connect_interval=2000
+ monitor_ping_interval=2000
+ ping_interval_server_msec=10000
+ ping_timeout_server=200
+ commands_stats=true
+ sessions_sort=true
+ monitor_username="proxysql"
+ monitor_password="<%= @monitor_password %>"
+ monitor_galera_healthcheck_interval=2000
+ monitor_galera_healthcheck_timeout=800
+}
+
+
+# Specify all ProxySQL hosts here
+proxysql_servers =
+(
+<%- index = 0 -%>
+<%- @nextcloud_ip.each do |appserver| -%>
+ <%- index += 1 -%>
+ { hostname="<%= appserver %>" , port=6032 , comment="proxysql<%= index %>" },
+<%- end -%>
+)
+
+# HG10 - single writer
+# HG30 - readers
+mysql_galera_hostgroups =
+(
+ {
+
+ writer_hostgroup=10
+ backup_writer_hostgroup=20
+ reader_hostgroup=30
+ offline_hostgroup=9999
+ max_writers=1
+ writer_is_also_reader=1
+ max_transactions_behind=0
+ active=1
+ }
+)
+
+# List all MariaDB Galera nodes here
+mysql_servers =
+(
+<%- @db_ip.each do |db| -%>
+ { address="<%= db %>" , port=3306 , hostgroup=10, max_connections=100 },
+<%- end -%>
+)
+
+# Default query rules:
+# - All writes -> HG10 (single-writer)
+# - All reads -> HG30 (readers)
+mysql_query_rules =
+(
+ {
+ rule_id=100
+ active=1
+ match_pattern="^SELECT .* FOR UPDATE"
+ destination_hostgroup=10
+ apply=1
+ },
+ {
+ rule_id=200
+ active=1
+ match_pattern="^SELECT .*"
+ destination_hostgroup=30
+ apply=1
+ },
+ {
+ rule_id=300
+ active=1
+ match_pattern=".*"
+ destination_hostgroup=10
+ apply=1
+ }
+)
+
+# All MySQL users that should be passed through this instance
+# - The MySQL user must first be created in the DB server and granted access from this ProxySQL host
+mysql_users =
+(
+ { username = "<%= @mysql_user %>", password = "<%= @mysql_user_password %>", default_hostgroup = 10, transaction_persistent = <%= @transaction_persistent %>, active = 1 }
+)
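Note: ProxySQL evaluates mysql_query_rules in ascending rule_id order and stops at the first
matching rule with apply=1, so the ordering above is significant. A sketch of how typical
queries would be routed under these rules:

  SELECT ... FOR UPDATE;     -- rule 100 -> hostgroup 10 (writer)
  SELECT ...;                -- rule 200 -> hostgroup 30 (readers)
  UPDATE/INSERT/DELETE ...;  -- falls through to rule 300 -> hostgroup 10 (writer)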
diff --git a/templates/proxysql/proxysql.erb.sh b/templates/proxysql/proxysql.erb.sh
new file mode 100644
index 0000000..764b487
--- /dev/null
+++ b/templates/proxysql/proxysql.erb.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+#docker exec proxysql_proxysql_1 mysql -B -N -e "UPDATE main.mysql_servers SET status = 'ONLINE' WHERE hostname = ''"
+docker exec proxysql_proxysql_1 mysql -B -N -e "${@}"
+
diff --git a/templates/rabbitmq/rabbitmq.conf.erb b/templates/rabbitmq/rabbitmq.conf.erb
new file mode 100644
index 0000000..a000e8d
--- /dev/null
+++ b/templates/rabbitmq/rabbitmq.conf.erb
@@ -0,0 +1,6 @@
+loopback_users.guest = false
+listeners.tcp.default = 5672
+cluster_formation.peer_discovery_backend = rabbit_peer_discovery_classic_config
+cluster_formation.classic_config.nodes.1 = rabbit@document-be1.<%= @domain %>
+cluster_formation.classic_config.nodes.2 = rabbit@document-be2.<%= @domain %>
+cluster_formation.classic_config.nodes.3 = rabbit@document-be3.<%= @domain %>
diff --git a/templates/redis_cluster/bashrc.erb b/templates/redis_cluster/bashrc.erb
new file mode 100644
index 0000000..dcedc9b
--- /dev/null
+++ b/templates/redis_cluster/bashrc.erb
@@ -0,0 +1,58 @@
+# ~/.bashrc: executed by bash(1) for non-login shells.
+# see /usr/share/doc/bash/examples/startup-files (in the package bash-doc)
+# for examples
+
+# If not running interactively, don't do anything
+[ -z "$PS1" ] && return
+
+# don't put duplicate lines in the history. See bash(1) for more options
+# ... or force ignoredups and ignorespace
+HISTCONTROL=ignoredups:ignorespace
+
+# append to the history file, don't overwrite it
+shopt -s histappend
+
+# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
+HISTSIZE=1000
+HISTFILESIZE=2000
+
+# check the window size after each command and, if necessary,
+# update the values of LINES and COLUMNS.
+shopt -s checkwinsize
+
+# make less more friendly for non-text input files, see lesspipe(1)
+[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"
+
+# set variable identifying the chroot you work in (used in the prompt below)
+if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
+ debian_chroot=$(cat /etc/debian_chroot)
+fi
+
+# set a fancy prompt (non-color, unless we know we "want" color)
+case "$TERM" in
+ xterm-color) color_prompt=yes;;
+esac
+
+# uncomment for a colored prompt, if the terminal has the capability; turned
+# off by default to not distract the user: the focus in a terminal window
+# should be on the output of commands, not on the prompt
+#force_color_prompt=yes
+
+if [ -n "$force_color_prompt" ]; then
+ if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then
+ # We have color support; assume it's compliant with Ecma-48
+ # (ISO/IEC-6429). (Lack of such support is extremely rare, and such
+ # a case would tend to support setf rather than setaf.)
+ color_prompt=yes
+ else
+ color_prompt=
+ fi
+fi
+
+if [ "$color_prompt" = yes ]; then
+ PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
+else
+ PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '
+fi
+unset color_prompt force_color_prompt
+alias redis-cli="redis-cli -a $(grep requirepass /opt/redis/node-0/server.conf | awk '{print $2}' | sed 's/"//g') --no-auth-warning"
diff --git a/templates/redis_cluster/bootstrap_cluster.erb.sh b/templates/redis_cluster/bootstrap_cluster.erb.sh
new file mode 100644
index 0000000..2e92cd3
--- /dev/null
+++ b/templates/redis_cluster/bootstrap_cluster.erb.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+my_host=$(hostname -f)
+hosts=""
+for index in 1 2 3; do
+ cur_host="redis${index}.$(hostname -d)"
+ if [[ "${my_host}" == "${cur_host}" ]]; then
+ ip="$(hostname -I | awk '{print $1}')"
+ else
+ ip="$(host "${cur_host}" | grep "has address" | awk '{print $NF}')"
+ fi
+ for port in 6379 6380 6381; do
+ hosts="${hosts} ${ip}:${port}"
+ done
+done
+
+redis-cli --no-auth-warning -a <%= @redis_password %> --cluster create ${hosts} --cluster-replicas 2
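Note: with three hosts and three ports each, the loop builds nine ip:port pairs, so the final
command expands to roughly the following (addresses are made up for illustration):

  redis-cli --no-auth-warning -a <password> --cluster create \
    192.0.2.1:6379 192.0.2.1:6380 192.0.2.1:6381 \
    192.0.2.2:6379 192.0.2.2:6380 192.0.2.2:6381 \
    192.0.2.3:6379 192.0.2.3:6380 192.0.2.3:6381 \
    --cluster-replicas 2
  # nine nodes with --cluster-replicas 2 gives three masters with two replicas each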
diff --git a/templates/redis_cluster/reset_cluster.erb.sh b/templates/redis_cluster/reset_cluster.erb.sh
new file mode 100644
index 0000000..634a1ff
--- /dev/null
+++ b/templates/redis_cluster/reset_cluster.erb.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+my_host=$(hostname -f)
+hosts=""
+redis_password="<%= @redis_password %>"
+for index in 1 2 3; do
+ cur_host="redis${index}.$(hostname -d)"
+ if [[ "${my_host}" == "${cur_host}" ]]; then
+ ip="$(hostname -I | awk '{print $1}')"
+ else
+ ip="$(host "${cur_host}" | grep "has address" | awk '{print $NF}')"
+ fi
+ for port in 6379 6380 6381; do
+ hosts="${hosts} ${ip}:${port}"
+ done
+done
+
+for host in ${hosts}; do
+ redis-cli --no-auth-warning -a "${redis_password}" -u "redis://${host}" flushall
+ redis-cli --no-auth-warning -a "${redis_password}" -u "redis://${host}" cluster reset hard
+done
+redis-cli --no-auth-warning -a "${redis_password}" --cluster create ${hosts} --cluster-replicas 2
diff --git a/templates/reva/docker-compose.yml.erb b/templates/reva/docker-compose.yml.erb
new file mode 100644
index 0000000..375f8c5
--- /dev/null
+++ b/templates/reva/docker-compose.yml.erb
@@ -0,0 +1,19 @@
+version: '3.2'
+
+services:
+
+ reva-server:
+# image: docker.sunet.se/drive/sciencemesh-reva:v2.12.0
+ image: michielbdejong/reva:mentix-fixes
+ network_mode: host
+ dns:
+ - 89.46.20.75
+ - 89.46.21.29
+ - 89.32.32.32
+ volumes:
+ - /opt/reva/revad.toml:/etc/revad/revad.toml
+ - /opt/reva/data:/var/tmp/reva
+ - /opt/reva/ocm-providers.json:/etc/revad/ocm-providers.json
+ - /opt/reva/test/drive.test.sunet.se.crt:/etc/revad/tls/drive.test.sunet.se.crt
+ - /opt/reva/test/drive.test.sunet.se.key:/etc/revad/tls/drive.test.sunet.se.key
+ restart: always
diff --git a/templates/reva/ocm-providers.json.erb b/templates/reva/ocm-providers.json.erb
new file mode 100644
index 0000000..0284bab
--- /dev/null
+++ b/templates/reva/ocm-providers.json.erb
@@ -0,0 +1,14 @@
+[
+ { "domain": "mesh.pondersource.org", "services": [
+ { "endpoint": { "type": { "name": "OCM" }, "path": "https://mesh.pondersource.org/ocm/" }, "host": "https://mesh.pondersource.org" },
+ { "endpoint": { "type": { "name": "Webdav" }, "path": "https://cloud.pondersource.org/remote.php/webdav/" }, "host": "https://cloud.pondersource.org" }
+ ] },
+ { "domain": "cs3mesh-iop.apps.dcw1.paas.psnc.pl", "services": [
+ { "endpoint": { "type": { "name": "OCM" }, "path": "https://cs3mesh-iop.apps.dcw1.paas.psnc.pl/ocm/" }, "host": "https://cs3mesh-iop.apps.dcw1.paas.psnc.pl" },
+ { "endpoint": { "type": { "name": "Webdav" }, "path": "https://cs3mesh-drive.apps.dcw1.paas.psnc.pl/remote.php/webdav/" }, "host": "https://cs3mesh-drive.apps.dcw1.paas.psnc.pl" }
+ ] },
+ { "domain": "<%= @reva_domain %>", "services": [
+ { "endpoint": { "type": { "name": "OCM" }, "path": "https://<%= @reva_domain%>/ocm/" }, "host": "https://<%= @reva_domain %>" },
+ { "endpoint": { "type": { "name": "Webdav" }, "path": "https://<%= @domain %>/remote.php/webdav/" }, "host": "https://<%= @domain %>" }
+ ] }
+]
diff --git a/templates/reva/revad.toml.erb b/templates/reva/revad.toml.erb
new file mode 100644
index 0000000..27af616
--- /dev/null
+++ b/templates/reva/revad.toml.erb
@@ -0,0 +1,181 @@
+[log]
+level = "debug"
+
+[shared]
+gatewaysvc = "<%= @reva_domain %>:19000"
+# gatewaycertfile = "/etc/revad/tls/revanc1.crt"
+
+# [registry]
+# driver = "static"
+#
+# [registry.static]
+# services = ["authprovider","userprovider"]
+#
+# [registry.static.authprovider]
+# bearer = ["localhost:0123"]
+# basic = ["localhost:1234"]
+# publiclink = ["localhost:9876"]
+
+[grpc]
+address = "0.0.0.0:19000"
+# certfile = "/etc/revad/tls/revanc1.crt"
+# keyfile = "/etc/revad/tls/revanc1.key"
+
+[grpc.services.gateway]
+authregistrysvc = "<%= @reva_domain %>:19000"
+appprovidersvc = "<%= @reva_domain %>:19000"
+appregistry = "<%= @reva_domain %>:19000"
+storageregistrysvc = "<%= @reva_domain %>:19000"
+preferencessvc = "<%= @reva_domain %>:19000"
+userprovidersvc = "<%= @reva_domain %>:19000"
+usershareprovidersvc = "<%= @reva_domain %>:19000"
+publicshareprovidersvc = "<%= @reva_domain %>:19000"
+ocmcoresvc = "<%= @reva_domain %>:19000"
+ocmshareprovidersvc = "<%= @reva_domain %>:19000"
+ocminvitemanagersvc = "<%= @reva_domain %>:19000"
+ocmproviderauthorizersvc = "<%= @reva_domain %>:19000"
+commit_share_to_storage_grant = false
+datagateway = "https://<%= @reva_domain %>/data"
+transfer_expires = 6 # give it a moment
+
+[grpc.services.authregistry]
+driver = "static"
+
+[grpc.services.authregistry.drivers.static.rules]
+basic = "<%= @reva_domain %>:19000"
+
+[grpc.services.storageregistry]
+driver = "static"
+
+[grpc.services.storageregistry.drivers.static]
+home_provider = "/home"
+
+[grpc.services.storageregistry.drivers.static.rules]
+"/home" = {"address" = "<%= @reva_domain %>:19000"}
+"123e4567-e89b-12d3-a456-426655440000" = {"address" = "<%= @reva_domain %>:19000"}
+
+[grpc.services.usershareprovider]
+driver = "memory"
+
+[grpc.services.ocmcore]
+driver = "nextcloud"
+
+[grpc.services.ocmcore.drivers.nextcloud]
+webdav_host = "https://<%= @domain %>/"
+endpoint = "https://<%= @domain %>/index.php/apps/sciencemesh/"
+shared_secret = "<%= @shared_secret %>"
+mock_http = false
+
+[grpc.services.ocminvitemanager]
+driver = "json"
+
+[grpc.services.ocmshareprovider]
+driver = "nextcloud"
+
+[grpc.services.ocmshareprovider.drivers.nextcloud]
+webdav_host = "https://<%= @domain %>/"
+endpoint = "https://<%= @domain %>/index.php/apps/sciencemesh/"
+shared_secret = "<%= @shared_secret %>"
+mock_http = false
+
+[grpc.services.ocmproviderauthorizer]
+#driver = "mentix"
+driver = "open"
+
+[grpc.services.ocmproviderauthorizer.drivers.mentix]
+url = "https://iop.sciencemesh.uni-muenster.de/iop/mentix/cs3"
+verify_request_hostname = false
+insecure = false
+timeout = 10
+refresh = 900
+
+[grpc.services.publicshareprovider]
+driver = "memory"
+
+[grpc.services.appprovider]
+driver = "demo"
+iopsecret = "<%= @iopsecret %>"
+wopiurl = "http://0.0.0.0:8880/"
+wopibridgeurl = "http://localhost:8000/wopib"
+
+[grpc.services.appregistry]
+driver = "static"
+
+[grpc.services.appregistry.static.rules]
+"text/plain" = "<%= @reva_domain %>:19000"
+"text/markdown" = "<%= @reva_domain %>:19000"
+"application/compressed-markdown" = "<%= @reva_domain %>:19000"
+"application/vnd.oasis.opendocument.text" = "<%= @reva_domain %>:19000"
+"application/vnd.oasis.opendocument.spreadsheet" = "<%= @reva_domain %>:19000"
+"application/vnd.oasis.opendocument.presentation" = "<%= @reva_domain %>:19000"
+
+[grpc.services.storageprovider]
+driver = "nextcloud"
+expose_data_server = true
+data_server_url = "https://<%= @reva_domain %>/data"
+enable_home_creation = true
+
+[grpc.services.storageprovider.drivers.nextcloud]
+endpoint = "https://<%= @domain %>/index.php/apps/sciencemesh/"
+shared_secret = "<%= @shared_secret %>"
+mock_http = false
+
+[grpc.services.authprovider]
+auth_manager = "nextcloud"
+
+[grpc.services.authprovider.auth_managers.nextcloud]
+endpoint = "https://<%= @domain %>/index.php/apps/sciencemesh/"
+shared_secret = "<%= @shared_secret %>"
+mock_http = false
+
+[grpc.services.userprovider]
+driver = "nextcloud"
+
+[grpc.services.userprovider.drivers.nextcloud]
+endpoint = "https://<%= @domain %>/index.php/apps/sciencemesh/"
+shared_secret = "<%= @shared_secret %>"
+mock_http = false
+
+[http]
+enabled_services = ["ocmd"]
+enabled_middlewares = ["providerauthorizer", "cors"]
+address = "0.0.0.0:443"
+certfile = "/etc/revad/tls/drive.test.sunet.se.crt"
+keyfile = "/etc/revad/tls/drive.test.sunet.se.key"
+
+[http.services.dataprovider]
+driver = "nextcloud"
+
+[http.services.prometheus]
+[http.services.sysinfo]
+
+[http.services.dataprovider.drivers.nextcloud]
+endpoint = "https://<%= @domain %>/index.php/apps/sciencemesh/"
+shared_secret = "<%= @shared_secret %>"
+mock_http = false
+
+[http.services.ocmd]
+prefix = "ocm"
+
+[http.services.ocmd.config]
+host = "<%= @reva_domain %>"
+provider = "test-revanc1"
+
+[http.middlewares.providerauthorizer]
+#driver = "mentix"
+driver = "open"
+
+[http.middlewares.providerauthorizer.drivers.mentix]
+url = "https://iop.sciencemesh.uni-muenster.de/iop/mentix/cs3"
+verify_request_hostname = false
+insecure = false
+timeout = 10
+refresh = 900
+
+[http.services.ocs]
+prefix = "ocs"
+
+[http.services.ocdav]
+prefix = "ocdav"
+
+[http.middlewares.cors]
diff --git a/templates/satosa/restart.erb.sh b/templates/satosa/restart.erb.sh
new file mode 100755
index 0000000..1b1c8a3
--- /dev/null
+++ b/templates/satosa/restart.erb.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+# We sleep a deterministic amount of time, which will be between 0 and 128 minutes and always the same within a specific host, but will differ between hosts, making sure we don't restart at the same time
+sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk '{print $1}') / 2))m
+
+# Restart satosa
+/usr/bin/systemctl restart docker-satosa.service
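Note: the sleep expression derives its delay from the host's MAC address: the last octet of the
first link/ether line is read as hex (16#xx, so 0-255) and halved, giving 0-127 minutes. Worked
through for a made-up MAC:

  $ ip a | grep "link/ether" | head -1
      link/ether 52:54:00:ab:cd:7e brd ff:ff:ff:ff:ff:ff
  # awk -F ':' '{print $6}' | awk '{print $1}'  ->  7e
  # $((16#7e / 2))                              ->  63, so this host always sleeps 63 minutes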
diff --git a/templates/script/aggregate.sh b/templates/script/aggregate.sh
new file mode 100644
index 0000000..0087b92
--- /dev/null
+++ b/templates/script/aggregate.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+# Datafile with customers
+commonyaml="/etc/hiera/data/common.yaml"
+billingbucket="statistics:drive-storage-report"
+aggregatedir="${billingbucket}/billing/daily"
+latestdir="${billingbucket}/billing"
+aggregatefile="billing-$(date +%Y%m%d).csv"
+tempdir=$(mktemp -d)
+
+# Install yq if needed
+yq="/usr/local/bin/yq"
+if ! [[ -x ${yq} ]]; then
+ pip3 install yq
+fi
+
+olddir=${PWD}
+cd "${tempdir}" || (echo "could not move to tempdir" && exit 1)
+
+# Make sure we have dir
+rclone mkdir "${aggregatedir}"
+# Output headers
+csv="DATE:$(date +%F) TIME:$(date +%H:%M)
+Customer;Total GB;Users;Product"
+
+# Aggregate data
+for customer in $(${yq} -r '.fullnodes | .[]' ${commonyaml}); do
+ product=1 # Prisplan 1
+ csv="${csv}
+$(rclone cat "${billingbucket}/${customer}-usage/${customer}-latest.csv" |
+ grep -E -v '^DATE|^Customer' |
+ sed 's/$/;1/')"
+done
+for customer in $(${yq} -r '.singlenodes | .[]' ${commonyaml}); do
+ product=2 # Prisplan 2
+ csv="${csv}
+$(rclone cat "${billingbucket}/${customer}-usage/${customer}-latest.csv" |
+ grep -E -v '^DATE|^Customer' |
+ sed 's/$/;'${product}'/')"
+done
+echo "${csv}" >"${aggregatefile}"
+
+rclone copy "${aggregatefile}" "${aggregatedir}/"
+mv "${aggregatefile}" "latest.csv"
+rclone move "latest.csv" "${latestdir}/"
+cd "${olddir}" || (echo "Could not switch back to old dir" && exit 1)
+rmdir "${tempdir}"
diff --git a/templates/script/backup-all-buckets.erb.sh b/templates/script/backup-all-buckets.erb.sh
new file mode 100644
index 0000000..9ded804
--- /dev/null
+++ b/templates/script/backup-all-buckets.erb.sh
@@ -0,0 +1,68 @@
+#!/bin/bash
+# Backup all buckets
+# We sleep a deterministic amount of time, which will be between 0 and 128 minutes and always the same
+# within a specific host, but will differ between hosts
+sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk '{print $1}') / 2))m
+number_of_full_to_keep='<%= @full_backup_retention %>'
+fork_limit=30 # in GB; if a bucket is bigger than this, we fork the backup into its own process
+customer="<%= @customer %>"
+#<% if @location.start_with?('common') %>
+declare -a sixmonths=('mau')
+if [[ " ${sixmonths[*]} " =~ " ${customer} " ]]; then
+ number_of_full_to_keep=6
+fi
+declare -a projects
+#<% @singlenodes.each do |singlenode| %>
+projects+=("<%= @full_project_mapping[singlenode][@environment]['primary_project'] %> <%= @full_project_mapping[singlenode][@environment]['mirror_project'] %>")
+#<% @full_project_mapping[singlenode][@environment]['assigned'].each do |project| %>
+projects+=("<%= project['project'] %> <%= project['mirror_project'] %>")
+#<% end %>
+#<% end %>
+#<% else %>
+declare -a projects=("<%= @primary_project %> <%= @mirror_project %>")
+#<% @assigned_projects.each do |project| %>
+projects+=("<%= project['project'] %> <%= project['mirror_project'] %>")
+#<% end %>
+#<% end %>
+
+if [[ ${customer} == 'common' ]]; then
+ projects+=("<%= @location %> <%= @location %>-mirror")
+fi
+
+
+function do_backup {
+ local project="${1}"
+ local mirror="${2}"
+ local bucket="${3}"
+ local mirrorbucket="${bucket}-mirror"
+ local mountpoint="/opt/backupmounts/${bucket}"
+ mkdir -p ${mountpoint}
+ rclone mount ${project}:${bucket} ${mountpoint}/ --daemon --allow-other --dir-cache-time 24h
+ rclone mkdir ${mirror}:${mirrorbucket}
+ duplicity --full-if-older-than 1M --asynchronous-upload --tempdir /mnt --archive-dir /mnt \
+ --no-encryption ${mountpoint} rclone://${mirror}:/${mirrorbucket}
+ umount ${mountpoint}
+ rmdir ${mountpoint}
+ # Clean up
+ duplicity remove-all-but-n-full ${number_of_full_to_keep} --tempdir /mnt --archive-dir /mnt \
+ --force rclone://${mirror}:/${mirrorbucket}
+}
+
+for entry in "${projects[@]}"; do
+ project=$(echo ${entry} | awk '{print $1}')
+ mirror=$(echo ${entry} | awk '{print $2}')
+ for bucket in $(rclone lsd ${project}:/ | awk '{print $5}'); do
+ maybesize=$(timeout 30s rclone size --json ${project}:${bucket})
+ if [[ ${?} -eq 124 ]]; then
+ size=$((${fork_limit} * 1000000001))
+ else
+ size=$(echo ${maybesize} | jq -r '.bytes' )
+ fi
+ # If the bucket is above ${fork_limit} GB we fork
+ if [[ ${size} -gt $((${fork_limit} * 1000000000)) ]]; then
+ do_backup ${project} ${mirror} ${bucket} &
+ else
+ do_backup ${project} ${mirror} ${bucket}
+ fi
+ done
+done
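Note: the sizing guard relies on timeout(1) exiting with status 124 when it kills a command. If
"rclone size" on a bucket takes longer than 30 seconds, the bucket is assumed to be large and is
given a fake size just over the fork threshold, so slow or huge buckets always get their own
background process:

  # with fork_limit=30: 30*1000000001 = 30000000030 > 30*1000000000, so the fork branch is taken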
diff --git a/templates/script/backup-buckets-v2.erb.sh b/templates/script/backup-buckets-v2.erb.sh
new file mode 100644
index 0000000..1979b12
--- /dev/null
+++ b/templates/script/backup-buckets-v2.erb.sh
@@ -0,0 +1,86 @@
+#!/bin/bash
+# Backup all buckets
+# We sleep a deterministic amount of time, which will be between 0 and 128 minutes and always the same
+# within a specific host, but will differ between hosts
+sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk '{print $1}') / 2))m
+number_of_full_to_keep="<%= @full_backup_retention %>"
+fork_limit=30 # in GB; if a bucket is bigger than this, we fork the backup into its own process
+split_limit=1000 # in GB; if a bucket is bigger than this, we fork the backup of each directory into its own process
+
+declare -a projects=("<%= @primary_project %> <%= @mirror_project %>")
+#<% @assigned_projects.each do |project| %>
+projects+=("<%= project['project'] %> <%= project['mirror_project'] %>")
+#<% end %>
+
+function do_huge_backup {
+ local project="${1}"
+ local mirror="${2}"
+ local bucket="${3}"
+ declare -a directories
+ declare -a empty
+ for dir in $(rclone lsd ${project}:${bucket} | awk '{print $NF}'); do
+ directories+=("${dir}")
+ mountpoint="/opt/backupmounts/${bucket}-${dir}"
+ do_backup ${project} ${mirror} ${bucket} ${mountpoint} ${dir} ${empty} &
+ done
+ mountpoint="/opt/backupmounts/${bucket}"
+ do_backup ${project} ${mirror} ${bucket} ${mountpoint} none ${directories[@]} &
+
+}
+
+function do_backup {
+ local project="${1}"
+ shift
+ local mirror="${1}"
+ shift
+ local bucket="${1}"
+ shift
+ local mountpoint="${1}"
+ shift
+ local dire="${1}"
+ shift
+ declare -a exclude
+ exclude=( "${@}" )
+ suffix=""
+ opts=""
+ if [[ "${dire}" != "none" ]]; then
+ suffix="/${dire}"
+ fi
+ if ((${#exclude[@]})); then
+ for dir in "${exclude[@]}"; do
+ opts="${opts} --exclude /${dir}"
+ done
+ fi
+ local mirrorbucket="${bucket}-mirror"
+ mkdir -p ${mountpoint}
+ rclone mount ${project}:${bucket}${suffix} ${mountpoint}/ --daemon --allow-other
+ rclone mkdir ${mirror}:${mirrorbucket}${suffix}
+ duplicity --full-if-older-than 1M --asynchronous-upload --tempdir /mnt --archive-dir /mnt ${opts} \
+ --no-encryption ${mountpoint} rclone://${mirror}:/${mirrorbucket}${suffix}
+ umount ${mountpoint}
+ rmdir ${mountpoint}
+ # Clean up
+ duplicity remove-all-but-n-full ${number_of_full_to_keep} --tempdir /mnt --archive-dir /mnt \
+ --force rclone://${mirror}:/${mirrorbucket}${suffix}
+}
+
+for entry in "${projects[@]}"; do
+ project=$(echo ${entry} | awk '{print $1}')
+ mirror=$(echo ${entry} | awk '{print $2}')
+ declare -a empty
+ for bucket in $(rclone lsd ${project}:/ | awk '{print $5}'); do
+ size=$(rclone size --json ${project}:${bucket} | jq -r '.bytes')
+ mirrorbucket="${bucket}-mirror"
+ mountpoint="/opt/backupmounts/${bucket}"
+ # If bucket is above ${split_limit} we fork and do backup per directory
+ if [[ ${size} -gt $((${split_limit} * 1000000000)) ]]; then
+ do_huge_backup ${project} ${mirror} ${bucket} &
+ # If bucket is above ${fork_limit} we fork and do backup for bucket
+ elif [[ ${size} -gt $((${fork_limit} * 1000000000)) ]]; then
+ do_backup ${project} ${mirror} ${bucket} ${mountpoint} none ${empty} &
+ else
+ # If bucket is below ${fork_limit} we do not fork and do backup for bucket
+ do_backup ${project} ${mirror} ${bucket} ${mountpoint} none ${empty}
+ fi
+ done
+done
diff --git a/templates/script/backupdb.erb.sh b/templates/script/backupdb.erb.sh
new file mode 100644
index 0000000..b35acbc
--- /dev/null
+++ b/templates/script/backupdb.erb.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+# Backup all databases
+# We sleep a deterministic amount of time, which will be between 0 and 128 minutes and always the same
+# within a specific host, but will differ between hosts
+sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk '{print $1}') / 2))m
+number_of_full_to_keep="<%= @full_backup_retention %>"
+
+backup="${1}"
+if ! [[ ${backup} =~ backup1.*sunet.se$ ]]; then
+ echo "Usage: ${0} "
+ echo "Example: ${0} backup1.sunet.drive.sunet.se"
+fi
+backup_dir="/opt/backups"
+bucket="db-backups"
+mirror="<%= @customer %>-<%= @environment %>-mirror"
+if [[ ${mirror} =~ common-(test|prod)-mirror ]]; then
+ suffix=$(echo ${backup} | sed 's/backup1.*//')
+ bucket="${bucket}-${suffix}"
+ backup_dir="${backup_dir}-${suffix}"
+fi
+echo "Backing up database for ${backup}"
+ssh ${backup} "sudo /home/script/bin/backup_db.sh"
+echo "Cleaning up old backups for ${backup}"
+ssh ${backup} "sudo /home/script/bin/purge_backups.sh /opt/mariadb_backup/backups/"
+echo "Copying backups here"
+mkdir -p ${backup_dir}
+scp script@${backup}:/opt/mariadb_backup/backups/$(date +%Y/%m/%d)/*.gz ${backup_dir}
+echo "Copying backups to remote bucket"
+rclone mkdir ${mirror}:${bucket}
+duplicity --full-if-older-than 1M --tempdir /mnt --archive-dir /mnt --no-encryption ${backup_dir} rclone://${mirror}:/${bucket}
+duplicity remove-all-but-n-full ${number_of_full_to_keep} --tempdir /mnt --archive-dir /mnt --force rclone://${mirror}:/${bucket}
+echo "cleaning up"
+rm -r ${backup_dir}
diff --git a/templates/script/backupsinglenodedb.erb.sh b/templates/script/backupsinglenodedb.erb.sh
new file mode 100644
index 0000000..344d1d0
--- /dev/null
+++ b/templates/script/backupsinglenodedb.erb.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+# Backup all databases
+# We sleep a deterministic amount of time, which will be between 0 and 128 minutes and always the same
+# within a specific host, but will differ between hosts
+sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk '{print $1}') / 2))m
+backup="${1}"
+customer=${2}
+
+declare -a sixmonths=('mau')
+
+function usage {
+ echo "Usage: ${0} "
+ echo "Example: ${0} multinode2.sunet.drive.sunet.se mau"
+ exit 1
+}
+
+if ! [[ ${backup} =~ multinode.*sunet.se$ ]]; then
+ usage
+fi
+if [[ -z ${customer} ]]; then
+ usage
+fi
+if [[ " ${sixmonths[*]} " =~ " ${customer} " ]]; then
+ number_of_full_to_keep=6
+else
+ number_of_full_to_keep=1
+fi
+container="mariadb${customer}_db_1"
+backup_dir="/opt/backups"
+bucket="db-backups"
+mirror="${customer}-<%= @environment %>-mirror"
+bucket="${bucket}-${customer}"
+backup_dir="${backup_dir}-${customer}"
+echo "Backing up database for ${customer} on ${backup}"
+ssh ${backup} "sudo /home/script/bin/backup_db.sh ${container} ${customer}"
+echo "Cleaning up old backups for ${backup}"
+ssh ${backup} "sudo /home/script/bin/purge_backups.sh /opt/multinode/${customer}/mariadb-${customer}/backups/"
+echo "Copying backups here"
+mkdir -p ${backup_dir}
+scp script@${backup}:/opt/multinode/${customer}/mariadb-${customer}/backups/$(date +%Y/%m/%d)/*.gz ${backup_dir}
+echo "Copying backups to remote bucket"
+rclone mkdir ${mirror}:${bucket}
+duplicity --full-if-older-than 1M --tempdir /mnt --archive-dir /mnt --no-encryption ${backup_dir} rclone://${mirror}:/${bucket}
+duplicity remove-all-but-n-full ${number_of_full_to_keep} --tempdir /mnt --archive-dir /mnt --force rclone://${mirror}:/${bucket}
+echo "cleaning up"
+rm -r ${backup_dir}
diff --git a/templates/script/backupsingleproject.erb.sh b/templates/script/backupsingleproject.erb.sh
new file mode 100755
index 0000000..f87c41d
--- /dev/null
+++ b/templates/script/backupsingleproject.erb.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+# Backup primary bucket or all buckets in a single project
+proj=${1}
+primary_only=${2}
+if [[ -z ${proj} ]]; then
+ echo "Usage: ${0} [primary only]"
+ echo "Example: ${0} 34 yes"
+ exit 1
+fi
+function do_backup {
+ local project="${1}"
+ local mirror="${2}"
+ local bucket="${3}"
+ local mirrorbucket="${bucket}-mirror"
+ local mountpoint="/opt/backupmounts/${bucket}"
+ mkdir -p "${mountpoint}"
+ rclone mount "${project}:${bucket}" "${mountpoint}/" --daemon --allow-other --dir-cache-time 24h --timeout 0
+ rclone mkdir "${mirror}:${mirrorbucket}"
+ duplicity full --asynchronous-upload --tempdir /mnt --archive-dir /mnt --timeout 3600 \
+ --verbosity debug --no-encryption "${mountpoint}" "rclone://${mirror}:/${mirrorbucket}"
+ umount "${mountpoint}"
+ rmdir "${mountpoint}"
+}
+
+project="sto4-${proj}"
+mirror="sto3-${proj}"
+for bucket in $(rclone lsd "${project}:/" | awk '{print $5}'); do
+ if [[ -n ${primary_only} ]] && ! [[ $bucket =~ 'primary-' ]]; then
+ continue
+ else
+ do_backup "${project}" "${mirror}" "${bucket}"
+ fi
+done
diff --git a/templates/script/check_backup.erb.sh b/templates/script/check_backup.erb.sh
new file mode 100644
index 0000000..d2dff74
--- /dev/null
+++ b/templates/script/check_backup.erb.sh
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+project="${1}"
+bucket="${2}"
+number_of_full_to_keep='<%= @full_backup_retention %>'
+max_num_inc=$((32 * number_of_full_to_keep))
+max_num_full=$((2 * number_of_full_to_keep))
+output_status="OK"
+exit_status=0
+problems=""
+num_problems=0
+data_dir='/opt/backups/data'
+for project in $(ls ${data_dir}); do
+ for bucket in $(ls ${data_dir}/${project}/ | sed 's/\.dat$//'); do
+ tabular_data=$(cat "${data_dir}/${project}/${bucket}.dat")
+ # We warn if there are too many old backups
+ num_full=$(echo "${tabular_data}" | grep -c full)
+ num_inc=$(echo "${tabular_data}" | grep -c inc)
+ if [[ ${num_inc} -gt ${max_num_inc} ]] || [[ ${num_full} -gt ${max_num_full} ]]; then
+ problems="${problems} Too many backups: ${project}:${bucket}"
+ num_problems=$((num_problems + 1))
+ if [[ ${exit_status} -ne 2 ]]; then
+ output_status="WARNING"
+ exit_status=1
+ fi
+ fi
+
+ latest_full=$(echo "${tabular_data}" | grep full | sort | tail -1)
+ latest_inc=$(echo "${tabular_data}" | grep inc | sort | tail -1)
+
+ latest_full_date=$(date -d "$(echo "${latest_full}" | awk '{print $2}' | sed 's/T/ /' | sed -e 's/\([0-9][0-9]\)\([0-9][0-9]\)Z/:\1:\2/' -e 's/\(20[0-9][0-9]\)\([0-9][0-9]\)/\1-\2-/')" +%s)
+ latest_inc_date=$(date -d "$(echo "${latest_inc}" | awk '{print $2}' | sed 's/T/ /' | sed -e 's/\([0-9][0-9]\)\([0-9][0-9]\)Z/:\1:\2/' -e 's/\(20[0-9][0-9]\)\([0-9][0-9]\)/\1-\2-/')" +%s)
+
+ now=$(date +%s)
+ thirtytwodaysinseconds=$((32 * 24 * 60 * 60))
+ twodaysinseconds=$((2 * 24 * 60 * 60))
+
+ seconds_since_full=$((now - latest_full_date))
+ seconds_since_inc=$((now - latest_inc_date))
+
+ # We say that it is critical if backups are too old
+ if [[ ${seconds_since_full} -gt ${thirtytwodaysinseconds} ]] || [[ ${seconds_since_inc} -gt ${twodaysinseconds} ]]; then
+ if [[ ${seconds_since_full} -gt ${twodaysinseconds} ]]; then
+ num_problems=$((num_problems + 1))
+ problems="${problems} Too old backups: ${project}:${bucket}"
+ output_status="CRITICAL"
+ exit_status=2
+ fi
+ fi
+
+ done
+done
+if [[ -z ${problems} ]]; then
+ problems="No problems detected"
+fi
+
+output="${output_status}: ${problems} | num_problems=${num_problems};1;1;;"
+echo "${output}"
+exit ${exit_status}
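Note: the sed pipelines above assume the .dat files hold duplicity collection-status lines of
the form " full 20230504T031502Z 3 ..." (they are generated by collect_backup_data in the next
file). The transformation into something date(1) can parse goes roughly:

  20230504T031502Z          # second field of the raw line
  -> "20230504 031502Z"     # sed 's/T/ /'
  -> "20230504 03:15:02"    # insert colons before the trailing minutes/seconds
  -> "2023-05-04 03:15:02"  # dash-separate the year and month
  # then date -d ... +%s turns it into seconds since the epoch for the age checks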
diff --git a/templates/script/collect_backup_data.erb.sh b/templates/script/collect_backup_data.erb.sh
new file mode 100644
index 0000000..38297ec
--- /dev/null
+++ b/templates/script/collect_backup_data.erb.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+project="${1}"
+bucket="${2}"
+data_dir='/opt/backups/data'
+for project in $(rclone listremotes | grep -v 'mirror'); do
+ for bucket in $(rclone lsd "${project}" | awk '{print $NF}' | grep -E '\-mirror|db-backups'); do
+ mkdir -p "${data_dir}/${project}"
+ duplicity collection-status --log-file /dev/stdout --no-encryption "rclone://${project}${bucket}" | grep -E '^ inc|^ full' > "${data_dir}/${project}/${bucket}.dat"
+ done
+done
diff --git a/templates/script/create_folders_in_project_buckets.erb.sh b/templates/script/create_folders_in_project_buckets.erb.sh
new file mode 100644
index 0000000..66e44a8
--- /dev/null
+++ b/templates/script/create_folders_in_project_buckets.erb.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+customer="<%= @customer %>"
+environment="<%= @environment %>"
+container="nextcloud_app_1"
+yq="/usr/local/bin/yq"
+if ! [[ -x ${yq} ]]; then
+ pip install yq
+fi
+
+declare -a directories
+if [[ -n ${1} ]]; then
+ directories=("${@}")
+else
+ directories+=("Arbetsmaterial")
+ directories+=("Bevarande")
+ directories+=("Gallringsbart")
+fi
+olddir="${PWD}"
+tempdir=$(mktemp -d)
+dirty=0
+cd "${tempdir}" || echo "Could not cd to tempdir"
+for project in $(${yq} -r '.project_mapping.'"${customer}"'.'"${environment}"'.assigned | "\(.[].project)"' /etc/hiera/data/common.yaml); do
+ for bucket in $(rclone lsd "${project}:" | awk '{print $NF}'); do
+ count=$(rclone size --json "${project}:${bucket}" | jq -r .count)
+ if [[ ${count} -gt 0 ]]; then
+ echo "Skipping ${project}:${bucket} because it has stuff in it already"
+ continue
+ fi
+ for directory in "${directories[@]}"; do
+ dirty=1
+ echo "Creating ${project}:${bucket}/${directory} because it looks nice and empty"
+ temp="README.md"
+ echo "**${directory}**" >"${temp}"
+ echo "Var god lämna kvar denna fil/Please leave this file" >>"${temp}"
+ rclone --no-traverse move "${temp}" "${project}:${bucket}/${directory}"
+ done
+ done
+done
+cd "${olddir}" || echo "could not cd to home dir"
+rmdir "${tempdir}"
+if [[ ${dirty} -gt 0 ]]; then
+ ssh -t "node3.$(hostname -d)" -l script -i .ssh/id_script "sudo /usr/local/bin/occ ${container} files:scan admin"
+fi
diff --git a/templates/script/create_folders_in_singlenode_buckets.erb.sh b/templates/script/create_folders_in_singlenode_buckets.erb.sh
new file mode 100644
index 0000000..0bedfd4
--- /dev/null
+++ b/templates/script/create_folders_in_singlenode_buckets.erb.sh
@@ -0,0 +1,62 @@
+#!/bin/bash
+
+customer="${1}"
+shift
+include_userbuckets="${1}"
+shift
+environment="<%= @environment %>"
+container="nextcloud${customer}_app_1"
+
+yq="/usr/local/bin/yq"
+if ! [[ -x ${yq} ]]; then
+ pip install yq
+fi
+
+node=$(${yq} -r ".multinode_mapping | .${customer}.server" /etc/hiera/data/common.yaml)
+
+declare -a directories
+if [[ -n ${1} ]]; then
+ directories=("${@}")
+else
+ directories+=("Arbetsmaterial")
+ directories+=("Bevarande")
+ directories+=("Gallringsbart")
+fi
+olddir="${PWD}"
+tempdir=$(mktemp -d)
+dirty=0
+cd "${tempdir}" || echo "Could not cd to tempdir"
+declare -a projects=( $(${yq} -r '.project_mapping.'"${customer}"'.'"${environment}"'.assigned | "\(.[].project)"' /etc/hiera/data/common.yaml) )
+if [[ "${include_userbuckets}" == "true" ]]; then
+ projects+=( $(${yq} -r '.project_mapping.'"${customer}"'.'"${environment}"'.primary_project' /etc/hiera/data/common.yaml) )
+fi
+declare -a users=("admin")
+for project in "${projects[@]}"; do
+ for bucket in $(rclone lsd "${project}:" | awk '{print $NF}' | grep -E -v '^primary'); do
+ count=$(rclone size --json "${project}:${bucket}" | jq -r .count)
+ if [[ ${count} -gt 0 ]]; then
+ echo "Skipping ${project}:${bucket} because it has stuff in it already"
+ continue
+ fi
+ dirty=1
+ # Check if this is a userbucket
+ if [[ ${bucket} =~ sunet.se ]] && [[ "${include_userbuckets}" == "true" ]]; then
+ user=$(echo "${bucket}" | awk '{print $NF}' | cut -d '-' -f 1)
+ users+=("${user}@${customer}.se")
+ fi
+ for directory in "${directories[@]}"; do
+ echo "Creating ${project}:${bucket}/${directory} because it looks nice and empty"
+ temp="README.md"
+ echo "**${directory}**" >"${temp}"
+ echo "Var god lämna kvar denna fil/Please leave this file" >>"${temp}"
+ rclone --no-traverse move "${temp}" "${project}:${bucket}/${directory}"
+ done
+ done
+done
+cd "${olddir}" || echo "could not cd to home dir"
+rmdir "${tempdir}"
+if [[ ${dirty} -gt 0 ]]; then
+ for user in "${users[@]}"; do
+ ssh -t "${node}.$(hostname -d)" -l script -i .ssh/id_script "sudo /usr/local/bin/occ ${container} files:scan ${user}"
+ done
+fi
diff --git a/templates/script/listusers.erb.sh b/templates/script/listusers.erb.sh
new file mode 100644
index 0000000..7d49a07
--- /dev/null
+++ b/templates/script/listusers.erb.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+customer="${1}"
+multinode="${2}"
+environment="<%= @environment %>"
+location="${customer}-${environment}"
+userjson=$(ssh "script@${multinode}" "sudo /home/script/bin/list_users.sh nextcloud${customer}_app_1")
+project="statistics"
+bucket="drive-server-coms"
+base_dir="${project}:${bucket}"
+stat_dir="/opt/statistics"
+customer_dir="${stat_dir}/${location}"
+mkdir -p "${customer_dir}"
+rclone mkdir "${base_dir}/${location}"
+echo "${userjson}" | jq . >"${customer_dir}/users.json"
+status=${?}
+if [[ ${status} -eq 0 ]]; then
+ # something is wrong if we can't copy the file in 30 seconds, so we should note that
+ if ! timeout 30s rclone copy --use-cookies "${customer_dir}/users.json" "${base_dir}/${location}/"; then
+ status=1
+ fi
+fi
+exit ${status}
diff --git a/templates/script/maintenance.erb.sh b/templates/script/maintenance.erb.sh
new file mode 100644
index 0000000..f37cdc7
--- /dev/null
+++ b/templates/script/maintenance.erb.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+state=${1}
+
+if ! [[ ${state} =~ ^(on|off)$ ]]; then
+ echo "Usage: ${0} "
+ exit 1
+fi
+
+CUSTOMER='<%= @customer %>'
+ENVIRONMENT='<%= @environment %>'
+types="node"
+env="test."
+customer="${CUSTOMER}."
+if [[ "${ENVIRONMENT}" == "prod" ]]; then
+ env=""
+fi
+if [[ "${CUSTOMER}" == "common" ]]; then
+ customer=""
+ types="multinode gss"
+fi
+
+domain="${customer}drive.${env}sunet.se"
+
+for prefix in ${types}; do
+ if [[ "${prefix}" == "multinode" ]]; then
+ range=4
+ else
+ range=3
+ fi
+ for i in $(seq ${range}); do
+ host="${prefix}${i}.${domain}"
+ ssh -t -o StrictHostKeyChecking=off ${host} "sudo /home/script/bin/maintenancemode.sh ${state}"
+ done
+done
diff --git a/templates/script/makebuckets.erb.sh b/templates/script/makebuckets.erb.sh
new file mode 100755
index 0000000..0e1e4d7
--- /dev/null
+++ b/templates/script/makebuckets.erb.sh
@@ -0,0 +1,57 @@
+#!/bin/bash
+# Lists all users from the container
+# Creates and attaches buckets for users whose id contains an @
+# The name of the bucket is the transformed user id
+# E.g. user tene3253@su.se will receive the bucket tene3253-su-drive-sunet-se
+echo "$(date) - Start executing makebuckets.sh"
+
+# These are configurable with positional args
+node=${1}
+container=${2}
+rcp=${3}
+if [[ -z ${node} ]]; then
+ num=$(seq 1 3 | shuf -n 1)
+ # shellcheck disable=SC2086
+ node=$(hostname --fqdn | sed 's/script1/node'${num}'/')
+fi
+if [[ -z ${container} ]]; then
+ container=nextcloud_app_1
+fi
+if [[ -z ${rcp} ]]; then
+ rcp="<%= @location %>"
+fi
+
+lock="/tmp/mkbucket-${rcp}-${node}.lock"
+if [[ -f ${lock} ]]; then
+ echo "Lockfile exists, another instance of ${0} is running"
+ exit 0
+else
+ touch "${lock}"
+fi
+# These only have defaults
+user_bucket_name="<%= @user_bucket_name %>"
+if [[ -z ${user_bucket_name} ]]; then
+ user_bucket_name="userdata"
+fi
+site_name="<%= @site_name %>"
+rclone="rclone --config /root/.rclone.conf"
+
+# These are dynamic
+buckets="$(${rclone} lsd "${rcp}:" | awk '{print $NF}')"
+users=$(${rclone} cat "statistics:drive-server-coms/${rcp}/users.json" | jq '. | with_entries( select(.key | match("@") ) )')
+for eppn in $(echo "${users}" | jq -r keys[]); do
+ user=${eppn%@*}
+
+ echo "$(date) - Check bucket status for ${eppn}"
+ bucketname="${user}-${site_name//./-}"
+ if ! echo "${buckets}" | grep "${bucketname}" &> /dev/null; then
+ echo "$(date) - ${eppn} has no mounts configured, adding bucket and mounts..."
+ ${rclone} mkdir "${rcp}:${bucketname}"
+ # shellcheck disable=SC2029
+ ssh-keygen -f "/root/.ssh/known_hosts" -R ${node}
+ ssh -o StrictHostKeyChecking=no "${node}" "sudo /home/script/bin/create_bucket.sh ${container} ${eppn} ${bucketname} ${user_bucket_name}"
+ fi
+done
+echo "$(date) - Done executing makebuckets.sh"
+rm "${lock}"
+
diff --git a/templates/script/makemanualuserbucket.erb.sh b/templates/script/makemanualuserbucket.erb.sh
new file mode 100755
index 0000000..488663c
--- /dev/null
+++ b/templates/script/makemanualuserbucket.erb.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+# The name of the bucket is the transformed user id
+# E.g. user tene3253@su.se will receive the bucket tene3253-su-drive-sunet-se
+echo "$(date) - Start executing ${0}"
+
+# These are configurable with positional args
+eppn=${1}
+node1=${2}
+container=${3}
+rcp=${4}
+if [[ -z ${node1} ]]; then
+ num=$(seq 1 3 | shuf -n 1)
+ node1=$(hostname --fqdn | sed 's/script1/node'${num}'/')
+fi
+if [[ -z ${container} ]]; then
+ container=nextcloud_app_1
+fi
+if [[ -z ${rcp} ]]; then
+ rcp="<%= @location %>"
+fi
+
+# These only have defaults
+user_bucket_name="<%= @user_bucket_name %>"
+if [[ -z ${user_bucket_name} ]]; then
+ user_bucket_name="userdata"
+fi
+site_name="<%= @site_name %>"
+rclone="rclone --config /root/.rclone.conf"
+
+user=${eppn%@*}
+
+bucketname="${user}-${site_name//./-}"
+
+echo "$(date) - ${eppn} adding bucket and mounts..."
+${rclone} mkdir ${rcp}:${bucketname}
+ssh ${node1} "sudo /home/script/bin/create_bucket.sh ${container} ${eppn} ${bucketname} ${user_bucket_name}"
+
diff --git a/templates/script/migratebuckets.erb.sh b/templates/script/migratebuckets.erb.sh
new file mode 100644
index 0000000..5042221
--- /dev/null
+++ b/templates/script/migratebuckets.erb.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+# Sync all buckets from the pilot location to the current location
+src="<%= @location %>-pilot"
+dest="<%= @location %>"
+
+buckets=$(rclone lsjson ${src}: | jq -r '.[].Path')
+
+for bucket in ${buckets}; do
+ if [[ "${bucket}" =~ -clone$ ]]; then
+ echo "Skipping clone bucket"
+ else
+ echo "Backing up bucket ${bucket}"
+ rclone --config /root/.rclone.conf -c sync ${src}:${bucket} ${dest}:${bucket} --s3-upload-cutoff 0 --checkers 32 --low-level-retries 16 --transfers 8 -P
+ fi
+done
diff --git a/templates/script/rclone.conf.erb b/templates/script/rclone.conf.erb
new file mode 100644
index 0000000..980cacc
--- /dev/null
+++ b/templates/script/rclone.conf.erb
@@ -0,0 +1,108 @@
+[<%= @location %>]
+type = s3
+provider = Ceph
+access_key_id = <%= @s3_key %>
+secret_access_key = <%= @s3_secret %>
+endpoint = <%= @s3_host %>
+acl = private
+[<%= @location %>-mirror]
+type = s3
+provider = Ceph
+access_key_id = <%= @s3_key_mirror %>
+secret_access_key = <%= @s3_secret_mirror %>
+endpoint = <%= @s3_host_mirror %>
+acl = private
+[statistics]
+type = webdav
+url = https://sunet.drive.sunet.se/remote.php/dav/files/_script/
+vendor = nextcloud
+user = _script
+pass = <%= @statistics_secret %>
+[<%= @primary_project %>]
+type = s3
+provider = Ceph
+access_key_id = <%= @s3_key %>
+secret_access_key = <%= @s3_secret %>
+endpoint = <%= @s3_host %>
+acl = private
+[<%= @mirror_project %>]
+type = s3
+provider = Ceph
+access_key_id = <%= @s3_key_mirror %>
+secret_access_key = <%= @s3_secret_mirror %>
+endpoint = <%= @s3_host_mirror %>
+acl = private
+<% if @s3_key_pilot and @s3_secret_pilot -%>
+[<%= @location %>-pilot]
+type = s3
+provider = Ceph
+access_key_id = <%= @s3_key_pilot %>
+secret_access_key = <%= @s3_secret_pilot %>
+endpoint = <%= @s3_host %>
+acl = private
+<% end -%>
+<% if @location.start_with?('common') -%>
+<% @singlenodes.each do |singlenode| -%>
+[<%= singlenode %>-<%= @environment %>]
+type = s3
+provider = Ceph
+access_key_id = <%= scope.call_function('safe_hiera', [['s3_key', @full_project_mapping[singlenode][@environment]['primary_project'].sub('-', '_')].join('_')] ) %>
+secret_access_key = <%= scope.call_function('safe_hiera', [['s3_secret', @full_project_mapping[singlenode][@environment]['primary_project'].sub('-', '_')].join('_')] ) %>
+endpoint = <%= @s3_host %>
+acl = private
+[<%= singlenode %>-<%= @environment %>-mirror]
+type = s3
+provider = Ceph
+access_key_id = <%= scope.call_function('safe_hiera',[['s3_key', @full_project_mapping[singlenode][@environment]['mirror_project'].sub('-', '_')].join('_')] ) %>
+secret_access_key = <%= scope.call_function('safe_hiera', [['s3_secret', @full_project_mapping[singlenode][@environment]['mirror_project'].sub('-', '_')].join('_')] ) %>
+endpoint = <%= @s3_host_mirror %>
+acl = private
+[<%= @full_project_mapping[singlenode][@environment]['primary_project'] %>]
+type = s3
+provider = Ceph
+access_key_id = <%= scope.call_function('safe_hiera', [['s3_key', @full_project_mapping[singlenode][@environment]['primary_project'].sub('-', '_')].join('_')] ) %>
+secret_access_key = <%= scope.call_function('safe_hiera', [['s3_secret', @full_project_mapping[singlenode][@environment]['primary_project'].sub('-', '_')].join('_')] ) %>
+endpoint = <%= @s3_host %>
+acl = private
+[<%= @full_project_mapping[singlenode][@environment]['mirror_project'] %>]
+type = s3
+provider = Ceph
+access_key_id = <%= scope.call_function('safe_hiera',[['s3_key', @full_project_mapping[singlenode][@environment]['mirror_project'].sub('-', '_')].join('_')] ) %>
+secret_access_key = <%= scope.call_function('safe_hiera', [['s3_secret', @full_project_mapping[singlenode][@environment]['mirror_project'].sub('-', '_')].join('_')] ) %>
+endpoint = <%= @s3_host_mirror %>
+acl = private
+<% @full_project_mapping[singlenode][@environment]['assigned'].each do |project| -%>
+[<%= project['project'] %>]
+type = s3
+provider = Ceph
+access_key_id = <%= scope.call_function('safe_hiera', [['s3_key', project['project'].sub('-', '_')].join('_')] ) %>
+secret_access_key = <%= scope.call_function('safe_hiera', [['s3_secret', project['project'].sub('-', '_')].join('_')] ) %>
+endpoint = <%= @s3_host %>
+acl = private
+[<%= project['mirror_project'] %>]
+type = s3
+provider = Ceph
+access_key_id = <%= scope.call_function('safe_hiera',[['s3_key', project['mirror_project'].sub('-', '_')].join('_')] ) %>
+secret_access_key = <%= scope.call_function('safe_hiera', [['s3_secret', project['mirror_project'].sub('-', '_')].join('_')] ) %>
+endpoint = <%= @s3_host_mirror %>
+acl = private
+<% end -%>
+<% end -%>
+<% else %>
+<% @assigned_projects.each do |project| -%>
+[<%= project['project'] %>]
+type = s3
+provider = Ceph
+access_key_id = <%= scope.call_function('safe_hiera', [['s3_key', project['project'].sub('-', '_')].join('_')] ) %>
+secret_access_key = <%= scope.call_function('safe_hiera', [['s3_secret', project['project'].sub('-', '_')].join('_')] ) %>
+endpoint = <%= @s3_host %>
+acl = private
+[<%= project['mirror_project'] %>]
+type = s3
+provider = Ceph
+access_key_id = <%= scope.call_function('safe_hiera',[['s3_key', project['mirror_project'].sub('-', '_')].join('_')] ) %>
+secret_access_key = <%= scope.call_function('safe_hiera', [['s3_secret', project['mirror_project'].sub('-', '_')].join('_')] ) %>
+endpoint = <%= @s3_host_mirror %>
+acl = private
+<% end -%>
+<% end -%>
diff --git a/templates/script/reboot-customer.erb.sh b/templates/script/reboot-customer.erb.sh
new file mode 100644
index 0000000..487beae
--- /dev/null
+++ b/templates/script/reboot-customer.erb.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+# We sleep a deterministic amount of time, which will be between 0 and 127 minutes and always the same
+# on a specific host, but will differ between hosts
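+# The last octet of the first MAC address (hex, 0-255) divided by two gives the number of minutes.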
+sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk '{print $1}') / 2))m
+
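+# Ask the monitoring API whether any host in this hostgroup has a 'Reboot Needed' service in a non-OK state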
+needs_reboot="$(curl -s -H 'X-Thruk-Auth-User: thrukadmin' -H "X-Thruk-Auth-Key: <%= @apikey_prod %>" 'https://monitor.drive.sunet.se/thruk/r/services?display_name=Reboot+Needed&columns=state' -d 'q=host_groups>=<%= @location %>' | jq '. | any(.state != 0)')"
+status=0
+if [[ "${needs_reboot}" != "false" ]]; then
+ /root/tasks/restart-db-cluster
+ status=$((status + ${?}))
+ /root/tasks/restart-nextcloud-farm
+ status=$((status + ${?}))
+ ssh "<%= @backup_server %>" "sudo /usr/local/bin/safer_reboot"
+fi
+exit ${status}
diff --git a/templates/script/restart-db-cluster.erb b/templates/script/restart-db-cluster.erb
new file mode 100755
index 0000000..2631a2f
--- /dev/null
+++ b/templates/script/restart-db-cluster.erb
@@ -0,0 +1,99 @@
+#!/usr/bin/env python3
+# vim: set filetype=python:
+
+import sys
+import time
+import urllib.parse
+
+import requests
+
+from drive_utils import (build_fqdn, get_ips_for_hostname, run_remote_command,
+ smoketest_db_node)
+
+
+def add_downtime(fqdn: str,
+ apikey: str,
+ monitor_host: str = 'monitor.drive.test.sunet.se') -> None:
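+ """Schedule 10 minutes of downtime for all services on fqdn via the Thruk REST API."""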
+ if not apikey:
+ return
+ print("\tAdding downtime for: {}".format(fqdn))
+ action = 'schedule_host_svc_downtime'
+
+ start_time = int(time.time())
+ end_time = start_time + (10 * 60) # 10 minutes
+ data = {
+ 'comment_data': 'Reboot from script',
+ 'start_time': start_time,
+ 'end_time': end_time
+ }
+
+ post_url = 'https://{}/thruk/r/hosts/{}/cmd/{}'.format(
+ monitor_host, fqdn, action)
+ headers = {'X-Thruk-Auth-Key': apikey}
+ requests.post(post_url, data=data, headers=headers)
+
+
+def remove_downtime(fqdn: str,
+ apikey: str,
+ monitor_host: str = 'monitor.drive.test.sunet.se') -> None:
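+ """Remove all active service downtimes for fqdn via the Thruk REST API."""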
+ if not apikey:
+ return
+ print("\tRemoving downtime for: {}".format(fqdn))
+ get_url = 'https://{}/thruk/r/hosts?name={}&columns=services'.format(
+ monitor_host, fqdn)
+ headers = {'X-Thruk-Auth-Key': apikey}
+ req = requests.get(get_url, headers=headers)
+ action = 'del_active_service_downtimes'
+ for service in req.json()[0]['services']:
+ post_url = 'https://{}/thruk/r/services/{}/{}/cmd/{}'.format(
+ monitor_host, fqdn, urllib.parse.quote(service), action)
+ requests.post(post_url, headers=headers)
+
+
+def main() -> int:
+ customer = "<%= @customer %>"
+ environment = "<%= @environment %>"
+ apikey_test = "<%= @apikey_test %>"
+ apikey_prod = "<%= @apikey_prod %>"
+ user = "script"
+
+ reboot_command = ['sudo /usr/local/bin/safer_reboot']
+
+ for number in reversed(range(1, 4)):
+ fqdn = build_fqdn(customer, environment, number)
+ ipv4, _ = get_ips_for_hostname(fqdn)
+ ip = ipv4[0]
+ print("Upgrading: {} with ip: {}".format(fqdn, ip))
+ add_downtime(fqdn, apikey_test)
+ add_downtime(fqdn, apikey_prod, monitor_host="monitor.drive.sunet.se")
+
+ run_remote_command(fqdn, reboot_command, user=user)
+ success = False
+
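+ # Retry the smoketest with decreasing sleeps (31, 29, ..., 1 seconds) while the node comes back up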
+ for testnumber in reversed(range(1, 32, 2)):
+ print("\tSleeping for {} seconds before smoketest on {}".format(
+ testnumber, fqdn))
+ time.sleep(testnumber)
+
+ if smoketest_db_node(fqdn, user=user):
+ success = True
+
+ break
+
+ remove_downtime(fqdn, apikey_test)
+ remove_downtime(fqdn, apikey_prod, monitor_host="monitor.drive.sunet.se")
+ if success:
+ print("Upgrade cycle succeeded on {} ".format(fqdn))
+ else:
+ print("Smoketest failed on {} after server reboot command".format(
+ fqdn))
+
+ return 5
+ print("All {}-servers successfully upgraded for {}".format(
+ environment, customer))
+
+ return 0
+
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/templates/script/restart-nextcloud-farm.erb b/templates/script/restart-nextcloud-farm.erb
new file mode 100755
index 0000000..c24346a
--- /dev/null
+++ b/templates/script/restart-nextcloud-farm.erb
@@ -0,0 +1,141 @@
+#!/usr/bin/env python3.9
+# vim: set filetype=python:
+
+import subprocess
+import sys
+import time
+import urllib.parse
+
+import requests
+from drive_utils import (build_fqdn, run_remote_command,
+ smoketest_nextcloud_node)
+
+
+def add_downtime(fqdn: str,
+ apikey: str,
+ monitor_host: str = 'monitor.drive.test.sunet.se') -> None:
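+ """Schedule 10 minutes of downtime for all services on fqdn via the Thruk REST API."""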
+ if not apikey:
+ return
+ print("Adding downtime for: {}".format(fqdn))
+ action = 'schedule_host_svc_downtime'
+
+ start_time = int(time.time())
+ end_time = start_time + (10 * 60) # 10 minutes
+ data = {
+ 'comment_data': 'Reboot from script',
+ 'start_time': start_time,
+ 'end_time': end_time
+ }
+
+ post_url = 'https://{}/thruk/r/hosts/{}/cmd/{}'.format(
+ monitor_host, fqdn, action)
+ headers = {'X-Thruk-Auth-Key': apikey}
+ requests.post(post_url, data=data, headers=headers)
+
+
+def remove_downtime(fqdn: str,
+ apikey: str,
+ monitor_host: str = 'monitor.drive.test.sunet.se') -> None:
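+ """Remove all active service downtimes for fqdn via the Thruk REST API."""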
+ if not apikey:
+ return
+ print("Removing downtime for: {}".format(fqdn))
+ get_url = 'https://{}/thruk/r/hosts?name={}&columns=services'.format(
+ monitor_host, fqdn)
+ headers = {'X-Thruk-Auth-Key': apikey}
+ req = requests.get(get_url, headers=headers)
+ action = 'del_active_service_downtimes'
+ for service in req.json()[0]['services']:
+ post_url = 'https://{}/thruk/r/services/{}/{}/cmd/{}'.format(
+ monitor_host, fqdn, urllib.parse.quote(service), action)
+ requests.post(post_url, headers=headers)
+
+
+def run_command(command: list[str]) -> tuple:
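+ """Run a local command and return a (stdout, stderr) tuple."""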
+ with subprocess.Popen(command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE) as proc:
+ outs, errs = proc.communicate()
+ try:
+ reply = outs.decode().strip('\n')
+ except AttributeError:
+ reply = str()
+ return (reply, errs)
+
+
+def main() -> int:
+ customer = "<%= @customer %>"
+ environment = "<%= @environment %>"
+ apikey_test = "<%= @apikey_test %>"
+ apikey_prod = "<%= @apikey_prod %>"
+
+ first_fqdn = build_fqdn(customer, environment, 1, "node")
+ last_fqdn = build_fqdn(customer, environment, 3, "node")
+
+ def set_redis_master_to(node: str = "last"):
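+ """Move the redis master role to the first or last node via /root/tasks/switch_redis_master_to.sh."""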
+ new_master = last_fqdn
+ if node == "first":
+ new_master = first_fqdn
+ run_command(["/root/tasks/switch_redis_master_to.sh", new_master])
+
+ cosmos_command = ['sudo run-cosmos']
+ nc_upgrade_command = 'sudo /usr/local/bin/occ upgrade'
+ repair_command = 'sudo /usr/local/bin/occ maintenance:repair'
+ reboot_command = ['sudo /usr/local/bin/safer_reboot']
+ server_type = "node"
+ if customer == "common":
+ customer = "gss"
+ server_type = "gss"
+
+ for number in reversed(range(1, 4)):
+ fqdn = build_fqdn(customer, environment, number, server_type)
+ if number == 1:
+ set_redis_master_to("last")
+ add_downtime(fqdn, apikey_test)
+ add_downtime(fqdn, apikey_prod, monitor_host="monitor.drive.sunet.se")
+ print("Upgrading: {}".format(fqdn))
+
+ print("\tRunning cosmos command at {}".format(fqdn))
+ run_remote_command(fqdn,
+ cosmos_command,
+ user="script",
+ output=subprocess.DEVNULL)
+ print("\tRunning reboot command at {}".format(fqdn))
+ run_remote_command(fqdn, reboot_command, user="script")
+ success = False
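+ # Retry the smoketest with decreasing sleeps (31, 29, ..., 1 seconds) while the node comes back up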
+ for testnumber in reversed(range(1, 32, 2)):
+ print("\tSleeping for {} seconds before smoketest on {}".format(
+ testnumber, fqdn))
+ time.sleep(testnumber)
+
+ if smoketest_nextcloud_node(fqdn):
+ success = True
+
+ break
+
+ remove_downtime(fqdn, apikey_test)
+ remove_downtime(fqdn,
+ apikey_prod,
+ monitor_host="monitor.drive.sunet.se")
+ if success:
+ if number == 1:
+ set_redis_master_to("first")
+ print("Upgrade cycle succeeded on {} ".format(fqdn))
+ else:
+ print("Smoketest failed on {} after server reboot command".format(
+ fqdn))
+
+ return 6
+ fqdn = build_fqdn(customer, environment, 3, server_type)
+ print("Running nextcloud upgrade command at {}".format(fqdn))
+ run_remote_command(fqdn, [nc_upgrade_command], user="script")
+ print("Running repair command on {}".format(fqdn))
+ run_remote_command(fqdn, [repair_command], user="script")
+ print("All {}-servers successfully upgraded for {}".format(
+ environment, customer))
+
+ return 0
+
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/templates/script/restart-proxysql.erb.sh b/templates/script/restart-proxysql.erb.sh
new file mode 100644
index 0000000..c8eab53
--- /dev/null
+++ b/templates/script/restart-proxysql.erb.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+domain="$(hostname -d)"
+ssh_command="ssh -q -tt -o StrictHostKeyChecking=off -l script -i /root/.ssh/id_script"
+
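+# If a node sees fewer than two MySQL servers, its proxysql has lost backends, so we restart the service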
+for index in 1 2 3; do
+ no_mysql_servers=$(${ssh_command} "node${index}.${domain}" 'sudo /home/script/bin/get_no_mysql_servers.sh' | tr -d '\n')
+ if [[ -n ${no_mysql_servers} ]] && [[ ${no_mysql_servers} -lt 2 ]]; then
+ echo "Number of sql servers is ${no_mysql_servers}, so restarting proxysql"
+ ${ssh_command} "node${index}.${domain}" "sudo /home/script/bin/restart_sunet_service.sh proxysql"
+ else
+ echo "Number of sql servers is ${no_mysql_servers}, so doing nothing"
+ fi
+done
+
+exit 0
diff --git a/templates/script/restore-files.sh b/templates/script/restore-files.sh
new file mode 100644
index 0000000..b9d5759
--- /dev/null
+++ b/templates/script/restore-files.sh
@@ -0,0 +1,62 @@
+#!/bin/bash
+
+function usage {
+ echo "Usage: ${0} --user --bucket --project [--time ] [--file-to-restore ]"
+ exit 1
+}
+
+while [[ "$#" -gt 0 ]]; do
+ case $1 in
+ -t | --time)
+ time="${2}"
+ shift
+ ;;
+ -u | --user)
+ user="${2}"
+ shift
+ ;;
+ -b | --bucket)
+ bucket="${2}"
+ shift
+ ;;
+ -f | --file-to-restore)
+ files="${2}"
+ shift
+ ;;
+ -p | --project)
+ project="${2}"
+ shift
+ ;;
+ *)
+ usage
+ ;;
+ esac
+ shift
+done
+
+if [[ -z ${bucket} ]] || [[ -z ${project} ]] || [[ -z ${user} ]]; then
+ usage
+fi
+
+duplicity_opts="--no-encryption"
+if [[ -n ${files} ]]; then
+ duplicity_opts="${duplicity_opts} --file-to-restore ${files}"
+fi
+if [[ -n ${time} ]]; then
+ duplicity_opts="${duplicity_opts} --time ${time}"
+fi
+node1=$(hostname --fqdn | sed 's/script/node/')
+restoredir="/opt/restoremounts/${bucket}"
+restorepath="${project}:${bucket}-restore"
+backuppath="${project}:${bucket}-mirror"
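+# Mount the restore bucket with rclone and restore the duplicity backup from the mirror bucket into it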
+mkdir -p "${restoredir}"
+rclone mkdir "${restorepath}"
+rclone mount "${restorepath}" "${restoredir}" --daemon --allow-other
+# shellcheck disable=SC2086
+duplicity restore ${duplicity_opts} "rclone://${backuppath}" "${restoredir}"
+key=""
+secret=""
+endpoint=""
+
+# shellcheck disable=SC2029
+ssh "script@${node1}" "/home/script/bin/create_bucket_without_question.sh ${key} ${secret} ${endpoint} ${bucket}-mirror ${user}"
diff --git a/templates/script/switch_redis_master_to.erb.sh b/templates/script/switch_redis_master_to.erb.sh
new file mode 100644
index 0000000..3462a13
--- /dev/null
+++ b/templates/script/switch_redis_master_to.erb.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+new_master=${1}
+
+if ! [[ ${new_master} =~ sunet\.se$ ]]; then
+ new_master="${new_master}.$(hostname -d)"
+fi
+
+new_master_ip=$(host -t A "${new_master}" | awk '{print $NF}')
+
+if [[ ${?} -ne 0 ]] || [[ "x${new_master_ip}" == "x" ]]; then
+ echo "usage: ${0} "
+ echo "example: ${0} node3.sunet.drive.test.sunet.se"
+ exit 1
+fi
+ssh_command="ssh -q -t -o StrictHostKeyChecking=off -l script -i /root/.ssh/id_script"
+declare -a all_nodes
+#<% @config['app'].each do |node| %>
+all_nodes+=('<%= node %>')
+#<% end %>
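+# Every node except the new master will be reconfigured as a replica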
+declare -a slave_nodes
+for node in "${all_nodes[@]}"; do
+ if [[ "${node}" != "${new_master_ip}" ]]; then
+ slave_nodes+=("${node}")
+ fi
+done
+
+for node in "${slave_nodes[@]}" ${new_master}; do
+ ${ssh_command} "${node}" "sudo /home/script/bin/stop_sentinel.sh stop_cosmos"
+done
+
+${ssh_command} "${new_master}" "sudo /usr/local/bin/redis-cli slaveof no one"
+
+for node in "${slave_nodes[@]}"; do
+ ${ssh_command} "${node}" "sudo /usr/local/bin/redis-cli slaveof ${new_master_ip} 6379"
+done
+
+for node in "${slave_nodes[@]}" ${new_master}; do
+ ${ssh_command} "${node}" "sudo /home/script/bin/start_sentinel.sh clear_cosmos"
+done
diff --git a/templates/script/usage.erb.sh b/templates/script/usage.erb.sh
new file mode 100644
index 0000000..f358a2b
--- /dev/null
+++ b/templates/script/usage.erb.sh
@@ -0,0 +1,70 @@
+#!/bin/bash
+# List usage for all buckets
+date=$(date "+%Y%m%d")
+hdate=$(date "+%Y-%m-%d")
+htime=$(date "+%H:%M")
+remotes=$(rclone listremotes | grep -E '^sto[3-4]')
+location="<%= @location %>"
+if [[ -n "${1}" ]]; then
+ customer="${1}"
+ location="${customer}-<%= @environment %>"
+ allowmixedcustomers=yes
+else
+ customer="<%= @customer %>"
+ allowmixedcustomers=no
+fi
+userjson=$(rclone cat "statistics:drive-server-coms/${location}/users.json")
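+# Count users, excluding admin and any account whose name starts with an underscore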
+users=$(echo "${userjson}" | jq -r '.| keys | .[] | test("^((?!(^admin|^[_])).)*$")' | grep true | wc -l)
+outfile1="${customer}-${date}-detailed.csv"
+outfile2="${customer}-${date}.csv"
+header="DATE:${hdate} TIME:${htime}"$'\n'
+result1="${header}Project:Bucket;Files;MB;GB"$'\n'
+result2="${header}Customer;Total GB;Users"$'\n'
+foundwrongcustomer=0
+totalMB=0
+totalFiles=0
+
+for remote in ${remotes}; do
+ project=${remote}
+ buckets=$(rclone --config /root/.rclone.conf lsjson "${project}" | jq -r '.[].Name' | grep -E -v '^statistics')
+
+ for bucket in ${buckets}; do
+ echo "${bucket}" | grep -E "${customer}|db-backups$" &>/dev/null
+ status=${?}
+ if [[ ${status} -ne 0 ]] && [[ "${allowmixedcustomers}" == "no" ]]; then
+ ((foundwrongcustomer += 1))
+ echo "Found ${project}${bucket} not maching ${customer}"
+ continue
+ elif [[ ${status} -ne 0 ]] && [[ "${allowmixedcustomers}" == "yes" ]]; then
+ echo "Found ${project}${bucket} not maching ${customer}"
+ continue
+ fi
+ bucketinfo=$(rclone --config /root/.rclone.conf size "${project}${bucket}" --json)
+ numfiles=$(jq '.count' <<<"${bucketinfo}")
+ ((totalFiles += numfiles))
+ bytes=$(jq '.bytes' <<<"${bucketinfo}")
+ KB=$((bytes / 1024))
+ MB=$((KB / 1024))
+ ((totalMB += MB))
+ GB=$((MB / 1024))
+ result1="${result1}${project}${bucket};${numfiles};${MB};${GB}"$'\n'
+ #printf '%s:%s \t Files: %s \t S3: %s MB \t %s GB\n' "${project}" "${bucket}" "${numfiles}" "${MB}" "${GB}"| expand -t 45
+ done
+done
+
+totalGB=$((totalMB / 1024))
+result2="${result2}${customer};${totalGB};${users}"
+
+rclone mkdir "${location}:drive-${location}-share"
+echo -n "${result1}" >"${outfile1}"
+echo -n "${result2}" >"${outfile2}"
+
+rclone copyto --use-cookies "${outfile1}" "statistics:drive-storage-report/${customer}-usage/daily/${outfile1}"
+rclone copyto --use-cookies "${outfile2}" "statistics:drive-storage-report/${customer}-usage/daily/${outfile2}"
+
+rclone copyto --use-cookies "${outfile1}" "statistics:drive-storage-report/${customer}-usage/${customer}-latest-detailed.csv"
+rclone copyto --use-cookies "${outfile2}" "statistics:drive-storage-report/${customer}-usage/${customer}-latest.csv"
+
+rm "${outfile1}"
+rm "${outfile2}"
+exit ${foundwrongcustomer}
diff --git a/templates/scriptreceiver/backup_db.erb.sh b/templates/scriptreceiver/backup_db.erb.sh
new file mode 100644
index 0000000..7267843
--- /dev/null
+++ b/templates/scriptreceiver/backup_db.erb.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+container="${1}"
+customer="${2}"
+if [[ -z ${container} ]]; then
+ container="mariadbbackup_mariadb_backup_1"
+fi
+if [[ -z ${customer} ]]; then
+ backupdir="/opt/mariadb_backup/backups/"
+else
+ backupdir="/opt/multinode/${customer}/mariadb-${customer}/backups/"
+fi
+
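+# ${customer} is deliberately left unquoted: when it is empty, no argument is passed to do_backup.sh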
+docker exec "${container}" /do_backup.sh ${customer}
+chmod 755 "${backupdir}"
+chown -R script:root "${backupdir}"
diff --git a/templates/scriptreceiver/check_inodes b/templates/scriptreceiver/check_inodes
new file mode 100755
index 0000000..bd260ad
--- /dev/null
+++ b/templates/scriptreceiver/check_inodes
@@ -0,0 +1,37 @@
+#!/usr/bin/env python3
+import os
+import argparse
+import sys
+
+parser = argparse.ArgumentParser()
+parser.add_argument('-p','--path',
+ help='Path to work on',
+ required=True)
+parser.add_argument('-w', '--warning',
+ help='Warning threshold',
+ required=True)
+parser.add_argument('-c', '--critical',
+ help='Critical threshold',
+ required=True)
+
+args = parser.parse_args()
+path = args.path
+warning = float(args.warning)
+critical = float(args.critical)
+
+st = os.statvfs(path)
+total_inode = st.f_files # total inodes
+free_inode = st.f_ffree # free inodes
+percent = (free_inode / total_inode) * 100
+
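+# Nagios-style plugin output: exit 0 = OK, 1 = WARNING, 2 = CRITICAL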
+status = "OK"
+exit = 0
+if percent < warning:
+ status = "WARNING"
+ exit = 1
+if percent < critical:
+ status = "critical"
+ exit = 2
+
+print("{0}: {1} {2:.2f}% free | free_inodes_percent={2:.2f}%;{3};{4};".format(status, path, percent,warning,critical))
+sys.exit(exit)
diff --git a/templates/scriptreceiver/create_bucket.erb.sh b/templates/scriptreceiver/create_bucket.erb.sh
new file mode 100755
index 0000000..a64a8f8
--- /dev/null
+++ b/templates/scriptreceiver/create_bucket.erb.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+container=${1}
+user=${2}
+bucket=${3}
+
+user_bucket_name="userdata"
+
+function usage {
+ echo "Usage: ${0} "
+ echo "Example : ${0} nextcloud_app_1 kano@sunet.se kano"
+ exit 1
+}
+
+if ! [[ ${container} =~ ^nextcloud[a-z]*_app_1$ ]]; then
+ usage
+fi
+if ! [[ ${bucket} =~ ^[a-zA-Z0-9]+ ]]; then
+ usage
+fi
+
+if [[ "x${4}" != "x" ]]; then
+ user_bucket_name=${4}
+fi
+
+echo "$(date): Start executing create_bucket.sh ${1} ${2} ${3}"
+
+key=$(grep access_key_id /opt/nextcloud/rclone.conf | awk '{print $3}')
+secret=$(grep secret_access_key /opt/nextcloud/rclone.conf | awk '{print $3}')
+endpoint=$(grep endpoint /opt/nextcloud/rclone.conf | awk '{print $3}')
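+# Only create the external mount if the user does not already have one for this bucket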
+preexisting="$(docker exec -u www-data -i ${container} php --define apc.enable_cli=1 /var/www/html/occ files_external:list --output json ${user} | jq -r '.[] | .configuration.bucket' | grep ${bucket})"
+
+if [[ "x${preexisting}" == "x" ]]; then
+ docker exec -u www-data -i ${container} php --define apc.enable_cli=1 /var/www/html/occ files_external:create ${user_bucket_name} \
+ amazons3 -c bucket=${bucket} -c key=${key} -c secret=${secret} -c hostname=${endpoint} -c use_ssl=true -c use_path_style=true -c region=us-east-1 \
+ amazons3::accesskey --user ${user}
+ for shareid in $(docker exec -u www-data -i ${container} php --define apc.enable_cli=1 /var/www/html/occ files_external:export ${user} | jq -r '.[].mount_id'); do
+ docker exec -u www-data -i ${container} php --define apc.enable_cli=1 /var/www/html/occ files_external:option ${shareid} enable_sharing true
+ done
+else
+ echo "$(date): Preexisting: ${preexisting}"
+fi
+echo "$(date): Done executing create_bucket.sh"
diff --git a/templates/scriptreceiver/create_bucket_without_question.sh b/templates/scriptreceiver/create_bucket_without_question.sh
new file mode 100644
index 0000000..12c53f3
--- /dev/null
+++ b/templates/scriptreceiver/create_bucket_without_question.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+key=${1}
+secret=${2}
+endpoint=${3}
+bucket=${4}
+user=${5}
+/usr/local/bin/occ files_external:create "${bucket}" \
+ amazons3 -c bucket="${bucket}" -c key="${key}" -c secret="${secret}" -c hostname="${endpoint}" -c use_ssl=true -c use_path_style=true -c region=us-east-1 \
+ amazons3::accesskey --user "${user}"
+for shareid in $(/usr/local/bin/occ files_external:export "${user}" | jq -r '.[].mount_id'); do
+ /usr/local/bin/occ files_external:option "${shareid}" enable_sharing true
+done
diff --git a/templates/scriptreceiver/ini2json.py b/templates/scriptreceiver/ini2json.py
new file mode 100644
index 0000000..284f3be
--- /dev/null
+++ b/templates/scriptreceiver/ini2json.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python3
+import configparser
+import sys
+import json
+
+config = configparser.ConfigParser()
+str_conf = ""
+for line in sys.stdin.read().splitlines(keepends=True):
+ if not (line.startswith("-") or line.startswith("#")):
+ str_conf += line
+
+config.read_string(str_conf)
+dictionary = {}
+for section in config.sections():
+ dictionary[section] = {}
+ for option in config.options(section):
+ dictionary[section][option] = config.get(section, option)
+print(json.dumps(dictionary))
diff --git a/templates/scriptreceiver/list_files_for_user.erb.sh b/templates/scriptreceiver/list_files_for_user.erb.sh
new file mode 100644
index 0000000..5c23c29
--- /dev/null
+++ b/templates/scriptreceiver/list_files_for_user.erb.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+container=${1}
+user=${2}
+format=${3}
+
+function usage {
+ echo "Usage: ${0} "
+ echo "Example : ${0} nextcloud_app_1 kano@sunet.se"
+ exit 1
+}
+
+if ! [[ ${container} =~ ^nextcloud[a-z]*_app_1$ ]]; then
+ usage
+fi
+
+if [[ "x${user}" == "x" ]]; then
+ usage
+fi
+
+if [[ "x${format}" != "x" ]]; then
+ format="--output ${format}"
+fi
+
+docker exec -u www-data -i ${container} php --define apc.enable_cli=1 /var/www/html/occ files_external:list ${format} ${user}
diff --git a/templates/scriptreceiver/list_users.erb.sh b/templates/scriptreceiver/list_users.erb.sh
new file mode 100644
index 0000000..c470b30
--- /dev/null
+++ b/templates/scriptreceiver/list_users.erb.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+container=${1}
+
+if ! [[ ${container} =~ ^nextcloud[a-z]*_app_1$ ]]; then
+ echo "Usage: ${0} "
+ echo "Example : ${0} nextcloud_app_1"
+ exit 1
+fi
+
+docker exec -u www-data -i ${container} php --define apc.enable_cli=1 /var/www/html/occ user:list --limit 10000 --output=json_pretty
diff --git a/templates/scriptreceiver/maintenancemode.erb.sh b/templates/scriptreceiver/maintenancemode.erb.sh
new file mode 100755
index 0000000..1e26bf5
--- /dev/null
+++ b/templates/scriptreceiver/maintenancemode.erb.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+state=${1}
+
+if ! [[ ${state} =~ ^(on|off)$ ]]; then
+ echo "Usage: ${0} "
+ exit 1
+fi
+
+for container in $(docker ps | grep docker.sunet.se/drive/nextcloud-custom | grep -v cron | awk '{print $NF}'); do
+ docker exec -ti ${container} su - www-data -s /bin/bash -c "php --define apc.enable_cli=1 /var/www/html/occ maintenance:mode --${state}"
+done
diff --git a/templates/scriptreceiver/purge_backups.erb.sh b/templates/scriptreceiver/purge_backups.erb.sh
new file mode 100755
index 0000000..9c42211
--- /dev/null
+++ b/templates/scriptreceiver/purge_backups.erb.sh
@@ -0,0 +1,56 @@
+#!/bin/bash
+
+thisyear=$(date +%Y)
+lastyear=$(date +%Y -d "last year")
+thismonth=$(date +%m)
+lastmonth=$(date +%m -d "last month")
+
+backupdir=${1}
+
+if ! [[ -d ${backupdir} ]]; then
+ echo "Usage: ${0} "
+ echo "Example: ${0} /opt/mariadb_backup/backups/"
+ exit 1
+fi
+
+cd "${backupdir}"
+# We want to keep:
+# * one backup per year for a year that is not this
+# * one backup per month of this year that is not current month
+# * one backup per day of current month
+for year in $(ls -d 20*/ | sed 's_/$__'); do
+ # If it is not this year
+ if [[ "${year}" != "${thisyear}" ]]; then
+ # We loop jan - nov
+ for month in $(seq -w 1 11); do
+ if [[ -d "${year}/${month}" ]]; then
+ rm -r "${year}/${month}"
+ fi
+ done
+ # If the current month is January and we are dealing with last year, we skip pruning December for now
+ if ! ([[ "${thismonth}" == "01" ]] && [[ "${year}" == "${lastyear}" ]]); then
+ for day in $(seq -w 1 30); do
+ if [[ -d "${year}/12/${day}" ]]; then
+ rm -r "${year}/12/$day"
+ fi
+ done
+ fi
+ else
+ # This means it is this year
+ # so we loop each month of this year
+ for month in $(ls -d ${year}/* | sed "s_${year}/__"); do
+ nexttolastdayoflastmonth=$(date -d "${year}-${month}-01 + 1 month - 2 day" +"%d")
+ # If it is not the current month, we delete all days except the last,
+ # unless it is January, because then we keep the days around until February
+ if [[ "${month}" != "${thismonth}" ]]; then
+ for day in $(seq -w 1 ${nexttolastdayoflastmonth}); do
+ if [[ -d "${year}/${month}/${day}" ]]; then
+ rm -r "${year}/${month}/${day}/"
+ fi
+ done
+ fi
+ done
+ fi
+done
+# Finally we remove any empty directories
+find "${backupdir}" -type d -empty -delete
diff --git a/templates/scriptreceiver/restart_sunet_service.erb.sh b/templates/scriptreceiver/restart_sunet_service.erb.sh
new file mode 100644
index 0000000..43d151b
--- /dev/null
+++ b/templates/scriptreceiver/restart_sunet_service.erb.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+service="${1}"
+
+if [[ "x${service}" == "x" ]]; then
+ echo "usage: ${0} "
+ echo "example: ${0} proxysql"
+ exit 1
+fi
+
+systemctl restart sunet-"${service}".service
diff --git a/templates/scriptreceiver/rotatefiles.erb b/templates/scriptreceiver/rotatefiles.erb
new file mode 100644
index 0000000..07174e4
--- /dev/null
+++ b/templates/scriptreceiver/rotatefiles.erb
@@ -0,0 +1,57 @@
+#!/bin/bash
+config="/opt/rotate/files.conf"
+config_dir="/opt/rotate/conf.d"
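+# Each config row has the form <path>:<retention_days>:<max_size_MB>; the last two fields are optional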
+found_config="false"
+for file in $(ls ${config} ${config_dir}/*.conf 2>/dev/null); do
+ if [[ -f ${file} ]]; then
+ found_config="true"
+ fi
+done
+
+# Silently quit if there is no config
+if [[ "${found_config}" == "false" ]]; then
+ exit 0
+fi
+
+for row in $(cat ${config} ${config_dir}/*.conf 2>/dev/null | grep -v '^#'); do
+ in_file=$(echo ${row} | awk -F ':' '{print $1}')
+ if ! [[ -f ${in_file} ]]; then
+ echo "ERROR - No such file: ${in_file}"
+ continue
+ fi
+ retention=$(echo ${row} | awk -F ':' '{print $2}')
+ max_size=$(echo ${row} | awk -F ':' '{print $3}')
+ # Retention time in days
+ if [[ "x${retention}" == "x" ]]; then
+ retention=30
+ fi
+ # max_size is the maximum file size in megabytes before we rotate
+ if [[ "x${max_size}" == "x" ]]; then
+ # Default 256 MB
+ max_size=256
+ fi
+ echo "Retention is: ${retention} days"
+ # Now retention is in seconds
+ retention=$(( retention * 60 * 60 * 24 ))
+ echo "Max size is ${max_size} mb"
+ # Now max_size is in bytes
+ max_size=$(( max_size * 1024 * 1024 ))
+ if [[ $(stat -c %s ${in_file}) -gt ${max_size} ]]; then
+ out_file="${in_file}-$(date +%Y%m%d%H%M%S).gz"
+ echo "Compressing and truncating ${in_file} to ${out_file}"
+ gzip < "${in_file}" > "${out_file}"
+ : > "${in_file}"
+ fi
+ now=$(date +%s)
+ for saved_file in $(ls ${in_file}-*.gz 2>/dev/null); do
+ birth_time=$(stat -c %Y ${saved_file})
+ if [[ ${birth_time} -eq 0 ]]; then
+ birth_time=${now}
+ fi
+ keep_until=$(( birth_time + retention))
+ if [[ ${now} -gt ${keep_until} ]]; then
+ echo "removing ${saved_file} since it was created at $(date -d +%Y%m%d%H%M%S @${birth_time}) and should only be saved untill $(date -d +%Y%m%d%H%M%S @${keep_until})"
+ rm ${saved_file}
+ fi
+ done
+done
diff --git a/templates/scriptreceiver/safer_reboot.erb b/templates/scriptreceiver/safer_reboot.erb
new file mode 100644
index 0000000..4a722f2
--- /dev/null
+++ b/templates/scriptreceiver/safer_reboot.erb
@@ -0,0 +1,70 @@
+#!/bin/bash
+
+# Check for running backups:
+ls -d /opt/backupmounts/* > /dev/null 2>&1
+
+if [[ "${?}" == 0 ]]; then
+ echo "Backup in progress, bailing out"
+ exit 0
+fi
+
+# Run docker pull if needed
+[[ $(which docker) ]] && for image in $(docker ps | awk '{print $2}' | grep : | sort -u); do docker pull ${image}; done
+
+# Get sunet specific units
+enabled_units_of_interest="$(systemctl list-unit-files | grep -E 'sunet|frontend' | grep -v disabled | awk '{print $1}')"
+
+# Group them so we can handle them in order
+frontend_units=$(echo "${enabled_units_of_interest}" | grep "frontend")
+mariadb_units=$(echo "${enabled_units_of_interest}" | grep "mariadb")
+nextcloud_units=$(echo "${enabled_units_of_interest}" | grep "nextcloud")
+proxysql_units=$(echo "${enabled_units_of_interest}" | grep "proxysql")
+redis_units=$(echo "${enabled_units_of_interest}" | grep "redis")
+
+# Now get the rest of the units that we can do in no particular order
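+# The sed pipeline below joins the grouped unit names into a single alternation pattern (unit1|unit2|...)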
+negative_match_pattern=$(echo -e "${frontend_units}\n${mariadb_units}\n${nextcloud_units}\n${proxysql_units}\n${redis_units}" | sed -z 's/\n/|/g;s/|$/\n/' | sed -e 's/^|//' -e 's/||\+/|/g')
+misc_units=$(echo "${enabled_units_of_interest}" | grep -E -v "${negative_match_pattern}")
+
+# This is where we stop the services in the preferred order
+if [[ "x" != "x${frontend_units}" ]]; then
+ for unit in $(echo ${frontend_units}); do
+ systemctl stop ${unit}
+ done
+fi
+if [[ "x" != "x${nextcloud_units}" ]]; then
+ for unit in $(echo ${nextcloud_units}); do
+ systemctl stop ${unit}
+ done
+fi
+if [[ "x" != "x${mariadb_units}" ]]; then
+ for unit in $(echo ${mariadb_units}); do
+ systemctl stop ${unit}
+ done
+fi
+if [[ "x" != "x${proxysql_units}" ]]; then
+ for unit in $(echo ${proxysql_units}); do
+ systemctl stop ${unit}
+ done
+fi
+if [[ "x" != "x${redis_units}" ]]; then
+ for unit in $(echo ${redis_units}); do
+ systemctl stop ${unit}
+ done
+fi
+if [[ "x" != "x${misc_units}" ]]; then
+ for unit in $(echo ${misc_units}); do
+ systemctl stop ${unit}
+ done
+fi
+
+# Install updates
+[[ $(which docker) ]] && apt-mark unhold containerd.io
+apt update
+apt upgrade -y
+[[ $(which docker) ]] && apt-mark hold containerd.io
+
+# Remove lockfiles
+rm /tmp/mkbucket-*.lock &>/dev/null
+rm /tmp/cron-*.lock &>/dev/null
+# Now do the real reboot
+/lib/molly-guard/reboot
diff --git a/templates/scriptreceiver/start_sentinel.erb.sh b/templates/scriptreceiver/start_sentinel.erb.sh
new file mode 100644
index 0000000..9a67067
--- /dev/null
+++ b/templates/scriptreceiver/start_sentinel.erb.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+docker start redis_redis-sentinel_1
+if [[ -z ${1} ]]; then
+ rm /etc/no-automatic-cosmos
+fi
diff --git a/templates/scriptreceiver/stop_sentinel.erb.sh b/templates/scriptreceiver/stop_sentinel.erb.sh
new file mode 100644
index 0000000..29c2532
--- /dev/null
+++ b/templates/scriptreceiver/stop_sentinel.erb.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+if [[ -z ${1} ]]; then
+ touch /etc/no-automatic-cosmos
+fi
+docker stop redis_redis-sentinel_1