Compare commits

..

No commits in common. "main" and "mandersson-nmhost" have entirely different histories.

66 changed files with 490 additions and 901 deletions

View file

@ -1,9 +1,5 @@
#!/bin/bash #!/bin/bash
if ! [[ $(hostname) =~ monitor ]]; then
exit 0
fi
repo="/var/cache/cosmos/repo" repo="/var/cache/cosmos/repo"
common="${repo}/global/overlay/etc/hiera/data/common.yaml" common="${repo}/global/overlay/etc/hiera/data/common.yaml"
@ -25,7 +21,7 @@ for environment in test prod; do
version=$(yq -r ".${key}" "${group}") version=$(yq -r ".${key}" "${group}")
print_fact "${customer}" "${environment}" "${version}" print_fact "${customer}" "${environment}" "${version}"
done done
for customer in $(yq -r '.fullnodes[]' "${common}"); do for customer in $(yq -r '.fullnodes[]' "${common}") gss; do
group="${repo}/${customer}-common/overlay/etc/hiera/data/group.yaml" group="${repo}/${customer}-common/overlay/etc/hiera/data/group.yaml"
version=$(yq -r ".${key}" "${group}") version=$(yq -r ".${key}" "${group}")
print_fact "${customer}" "${environment}" "${version}" print_fact "${customer}" "${environment}" "${version}"

View file

@ -1,8 +1,14 @@
# Lets determin who the customer is by looking at the hostname # Lets determin who the customer is by looking at the hostname
function sunetdrive::get_customer() >> String { function sunetdrive::get_customer() >> String {
$hostnameparts = split($facts['networking']['fqdn'],'\.') $hostnameparts = split($facts['fqdn'],'\.')
if $hostnameparts[1] == 'drive' { if $hostnameparts[1] == 'drive' {
return 'common' if $hostnameparts[0] =~ /^gss/ {
return 'gss'
} elsif $hostnameparts[0] =~ /^lookup/ {
return 'lookup'
} else {
return 'common'
}
} elsif $hostnameparts[0] =~ /idp-proxy/ { } elsif $hostnameparts[0] =~ /idp-proxy/ {
return 'common' return 'common'
} }

View file

@ -1,6 +1,6 @@
# Lets determin where we are by looking at the hostname # Lets determin where we are by looking at the hostname
function sunetdrive::get_environment() >> String { function sunetdrive::get_environment() >> String {
$hostname = $facts['networking']['fqdn'] $hostname = $facts['fqdn']
if $hostname =~ /^.*\.drive\.sunet\.se$/ { if $hostname =~ /^.*\.drive\.sunet\.se$/ {
if $hostname =~ /^.*\.pilot\.drive\.sunet\.se$/ { if $hostname =~ /^.*\.pilot\.drive\.sunet\.se$/ {
return 'pilot' return 'pilot'

View file

@ -1,4 +1,4 @@
# Lets determin where we are by looking at the hostname # Lets determin where we are by looking at the hostname
function sunetdrive::get_node_number() >> Integer { function sunetdrive::get_node_number() >> Integer {
Integer(regsubst($facts['networking']['fqdn'], /^[a-zA-Z\-]+(\d).*$/, '\\1')) Integer(regsubst($::fqdn, /^[a-zA-Z\-]+(\d).*$/, '\\1'))
} }

View file

@ -5,7 +5,6 @@ define sunetdrive::app_type (
$override_config = undef, $override_config = undef,
$override_compose = undef $override_compose = undef
) { ) {
include sunet::packages::netcat_openbsd
# Config from group.yaml and customer specific conf # Config from group.yaml and customer specific conf
$environment = sunetdrive::get_environment() $environment = sunetdrive::get_environment()
$customer = sunetdrive::get_customer() $customer = sunetdrive::get_customer()
@ -15,6 +14,7 @@ define sunetdrive::app_type (
# The config used # The config used
$config = $override_config $config = $override_config
# Other settings # Other settings
$admin_password = $config[ 'admin_password' ]
$dbhost = $config[ 'dbhost' ] $dbhost = $config[ 'dbhost' ]
$dbname = $config[ 'dbname' ] $dbname = $config[ 'dbname' ]
$dbuser = $config[ 'dbuser' ] $dbuser = $config[ 'dbuser' ]
@ -30,10 +30,10 @@ define sunetdrive::app_type (
$config = hiera_hash($environment) $config = hiera_hash($environment)
$skeletondirectory = $config['skeletondirectory'] $skeletondirectory = $config['skeletondirectory']
# Other settings # Other settings
$admin_password = safe_hiera('admin_password')
$dbhost = 'proxysql_proxysql_1' $dbhost = 'proxysql_proxysql_1'
$dbname = 'nextcloud' $dbname = 'nextcloud'
$dbuser = 'nextcloud' $dbuser = 'nextcloud'
$full_backup_retention = hiera('full_backup_retention')
$instanceid = safe_hiera('instanceid') $instanceid = safe_hiera('instanceid')
$mysql_user_password = safe_hiera('mysql_user_password') $mysql_user_password = safe_hiera('mysql_user_password')
$passwordsalt = safe_hiera('passwordsalt') $passwordsalt = safe_hiera('passwordsalt')
@ -43,8 +43,8 @@ define sunetdrive::app_type (
$s3_secret = safe_hiera('s3_secret') $s3_secret = safe_hiera('s3_secret')
$secret = safe_hiera('secret') $secret = safe_hiera('secret')
} }
$twofactor_enforced_groups = $config['twofactor_enforced_groups'] $twofactor_enforced_groups = hiera_array('twofactor_enforced_groups')
$twofactor_enforced_excluded_groups = $config['twofactor_enforced_excluded_groups'] $twofactor_enforced_excluded_groups = hiera_array('twofactor_enforced_excluded_groups')
$nextcloud_version = hiera("nextcloud_version_${environment}") $nextcloud_version = hiera("nextcloud_version_${environment}")
$nextcloud_version_string = split($nextcloud_version, '[-]')[0] $nextcloud_version_string = split($nextcloud_version, '[-]')[0]
# Common settings for multinode and full nodes # Common settings for multinode and full nodes
@ -61,16 +61,20 @@ define sunetdrive::app_type (
$php_memory_limit_mb = 512 $php_memory_limit_mb = 512
} }
if $::facts['dockerhost2'] == 'yes' { if $::facts['dockerhost2'] == 'yes' {
$hostnet = true $hostnet = 'yes'
} }
# These are encrypted values from local.eyaml # These are encrypted values from local.eyaml
$gss_jwt_key = safe_hiera('gss_jwt_key')
$smtppassword = safe_hiera('smtp_password') $smtppassword = safe_hiera('smtp_password')
#These are global values from common.yaml #These are global values from common.yaml
$gs_enabled = hiera('gs_enabled') $gs_enabled = hiera('gs_enabled')
$gs_federation = hiera('gs_federation') $gs_federation = hiera('gs_federation')
$gss_master_admin = hiera_array('gss_master_admin')
$gss_master_url = hiera("gss_master_url_${environment}")
$lookup_server = hiera("lookup_server_${environment}")
$mail_domain = hiera("mail_domain_${environment}") $mail_domain = hiera("mail_domain_${environment}")
$mail_smtphost = hiera("mail_smtphost_${environment}") $mail_smtphost = hiera("mail_smtphost_${environment}")
$mail_from_address = hiera("mail_from_address_${environment}") $mail_from_address = hiera("mail_from_address_${environment}")
@ -78,6 +82,9 @@ define sunetdrive::app_type (
$smtpuser = hiera("smtp_user_${environment}") $smtpuser = hiera("smtp_user_${environment}")
$tug_office = hiera_array('tug_office') $tug_office = hiera_array('tug_office')
# This is a global value from common.yaml but overridden in the gss-servers local.yaml
$gss_mode = hiera('gss_mode')
# These are global values from common.yaml but can be overridden in group.yaml # These are global values from common.yaml but can be overridden in group.yaml
$drive_email_template_text_left = $config['drive_email_template_text_left'] $drive_email_template_text_left = $config['drive_email_template_text_left']
$drive_email_template_plain_text_left = $config['drive_email_template_plain_text_left'] $drive_email_template_plain_text_left = $config['drive_email_template_plain_text_left']
@ -85,39 +92,9 @@ define sunetdrive::app_type (
$lb_servers = hiera_hash($environment)['lb_servers'] $lb_servers = hiera_hash($environment)['lb_servers']
$document_servers = hiera_hash($environment)['document_servers'] $document_servers = hiera_hash($environment)['document_servers']
# Calculate some values
$expiration_months = max(12, $full_backup_retention)
$expiration_days_min = $expiration_months * 31
$expiration_days_max = $expiration_months * 31 + 93
unless $is_multinode{ unless $is_multinode{
user { 'www-data': ensure => present, system => true } user { 'www-data': ensure => present, system => true }
file { '/usr/local/bin/get_containers':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/get_containers'),
mode => '0744',
}
if ($nodenumber == 3) {
file { '/usr/local/bin/scan_external_mounts':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/scan_external_mounts.sh'),
mode => '0744',
}
sunet::scriptherder::cronjob { 'scriptherder_scan_external_mounts':
cmd => '/usr/local/bin/scan_external_mounts',
hour => '1',
minute => '20',
ok_criteria => ['exit_status=0','max_age=2d'],
warn_criteria => ['exit_status=1','max_age=3d'],
}
}
file { '/opt/nextcloud/cron.sh': file { '/opt/nextcloud/cron.sh':
ensure => file, ensure => file,
owner => 'root', owner => 'root',
@ -131,22 +108,16 @@ define sunetdrive::app_type (
minute => '*/5', minute => '*/5',
} }
file { '/opt/nextcloud/user-sync.sh': file { '/opt/nextcloud/user-sync.sh':
ensure => absent,
}
file { '/usr/local/bin/nocc':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/nocc.erb'),
mode => '0740',
}
file { '/etc/sudoers.d/99-nocc':
ensure => file, ensure => file,
content => "script ALL=(root) NOPASSWD: /usr/local/bin/nocc\n",
mode => '0440',
owner => 'root', owner => 'root',
group => 'root', group => 'root',
mode => '0700',
content => template('sunetdrive/application/user-sync.erb.sh'),
}
-> cron { 'gss_user_sync':
command => '/opt/nextcloud/user-sync.sh',
user => 'root',
minute => '*/5',
} }
file { '/usr/local/bin/occ': file { '/usr/local/bin/occ':
ensure => present, ensure => present,
@ -171,21 +142,12 @@ define sunetdrive::app_type (
content => template('sunetdrive/application/upgrade23-25.erb.sh'), content => template('sunetdrive/application/upgrade23-25.erb.sh'),
mode => '0744', mode => '0744',
} }
file { '/usr/local/bin/remount_user_bucket_as_project.sh':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/remount_user_bucket_as_project.sh'),
mode => '0744',
}
file { '/opt/rotate/conf.d/nextcloud.conf': file { '/opt/rotate/conf.d/nextcloud.conf':
ensure => file, ensure => file,
force => true, force => true,
owner => 'root', owner => 'root',
group => 'root', group => 'root',
content => "#This file is managed by puppet content => "#This file is managed by puppet\n#filename:retention days:maxsize mb\n/opt/nextcloud/nextcloud.log:180:256\n",
#filename:retention days:maxsize mb\n/opt/nextcloud/nextcloud.log:180:256\n/opt/nextcloud/audit.log:180:256\n",
mode => '0644', mode => '0644',
} }
file { '/opt/rotate/conf.d/redis.conf': file { '/opt/rotate/conf.d/redis.conf':
@ -234,14 +196,7 @@ define sunetdrive::app_type (
force => true, force => true,
owner => 'www-data', owner => 'www-data',
group => 'root', group => 'root',
mode => '0640', mode => '0644',
}
file { '/opt/nextcloud/audit.log':
ensure => file,
force => true,
owner => 'www-data',
group => 'root',
mode => '0640',
} }
file { '/opt/nextcloud/rclone.conf': file { '/opt/nextcloud/rclone.conf':
ensure => file, ensure => file,
@ -338,7 +293,23 @@ define sunetdrive::app_type (
mode => '0744', mode => '0744',
} }
} }
if $location =~ /^kau/ { if $location =~ /^gss-test/ {
file { '/opt/nextcloud/mappingfile.json':
ensure => present,
owner => 'www-data',
group => 'root',
content => template('sunetdrive/application/mappingfile-test.json.erb'),
mode => '0644',
}
} elsif $location =~ /^gss/ {
file { '/opt/nextcloud/mappingfile.json':
ensure => present,
owner => 'www-data',
group => 'root',
content => template('sunetdrive/application/mappingfile-prod.json.erb'),
mode => '0644',
}
} elsif $location =~ /^kau/ {
file { '/mnt': file { '/mnt':
ensure => directory, ensure => directory,
owner => 'www-data', owner => 'www-data',

View file

@ -38,8 +38,8 @@ define sunetdrive::cloudimage(
resolver => ['130.242.80.14', '130.242.80.99'], resolver => ['130.242.80.14', '130.242.80.99'],
search => $search, search => $search,
# #
repo => $facts['cosmos_repo_origin_url'], repo => $::cosmos_repo_origin_url,
tagpattern => $facts['cosmos_tag_pattern'], tagpattern => $::cosmos_tag_pattern,
# #
cpus => $cpus, cpus => $cpus,
memory => $memory, memory => $memory,

View file

@ -1,53 +0,0 @@
# Common class
class sunetdrive::common {
include sunet::tools
include sunet::motd
include apt
include apparmor
include sunet::packages::jq
if $::facts['sunet_nftables_enabled'] != 'yes' {
warning('Enabling UFW')
include ufw
} else {
if $facts['networking']['hostname'] =~ /^kube[wc]/ {
warning('Setting nftables to installed but disabled')
ensure_resource ('class','sunet::nftables::init', { enabled => false })
} else {
warning('Enabling nftables')
ensure_resource ('class','sunet::nftables::init', { })
}
}
package {'sysstat': ensure => 'latest'}
package {'needrestart': ensure => installed}
service {'sysstat': provider => 'systemd'}
file_line { 'enable_sa':
ensure => 'present',
line => 'ENABLED="true"',
path => '/etc/default/sysstat',
match => 'ENABLED="false"',
require => Package['sysstat'],
}
file_line { 'sa_cron_comment':
ensure => 'present',
line => '# Activity reports every 2 minutes everyday',
path => '/etc/cron.d/sysstat',
match => '^#\ Activity\ reports\ every\ 10\ minutes\ everyday',
require => Package['sysstat'],
notify => Service['sysstat'],
}
file_line { 'sa_cron':
ensure => 'present',
line => '*/2 * * * * root command -v debian-sa1 > /dev/null && debian-sa1 1 1',
path => '/etc/cron.d/sysstat',
match => '^5-55/10',
require => Package['sysstat'],
notify => Service['sysstat'],
}
if $::facts['os']['distro']['id'] == 'Debian' {
exec { 'sysstat_systemd_timer':
command => 'systemctl enable --now sysstat-collect.timer',
unless => 'systemctl is-enabled --quiet sysstat-collect.timer',
}
}
}

View file

@ -18,7 +18,7 @@ define sunetdrive::db_type(
$mariadb_dir = '/etc/mariadb' $mariadb_dir = '/etc/mariadb'
$mycnf_path = 'sunetdrive/mariadb/my.cnf.erb' $mycnf_path = 'sunetdrive/mariadb/my.cnf.erb'
$server_id = 1000 + Integer($facts['networking']['hostname'][-1]) $server_id = 1000 + Integer($facts['networking']['hostname'][-1])
ensure_resource('file',$mariadb_dir, { ensure => directory } ) ensure_resource('file',$mariadb_dir, { ensure => directory, recurse => true } )
$dirs = ['datadir', 'init', 'conf', 'backups', 'scripts' ] $dirs = ['datadir', 'init', 'conf', 'backups', 'scripts' ]
$dirs.each |$dir| { $dirs.each |$dir| {
ensure_resource('file',"${mariadb_dir}/${dir}", { ensure => directory, recurse => true } ) ensure_resource('file',"${mariadb_dir}/${dir}", { ensure => directory, recurse => true } )
@ -33,7 +33,7 @@ define sunetdrive::db_type(
$ports = [3306, 4444, 4567, 4568] $ports = [3306, 4444, 4567, 4568]
if $location =~ /^multinode/ { if $location =~ /^multinode/ {
$from = $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6 + $config['kube'] + $config['kube_v6'] $from = $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6 + $config['kube'] + $config['kube_v6']
} elsif $location == 'sunet-test' or $location == 'sunet-prod' { } elsif $location == 'sunet-test' {
$from = $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6 + $config['imap'] + $config['imap_v6'] + $config['smtp'] + $config['smtp_v6'] $from = $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6 + $config['imap'] + $config['imap_v6'] + $config['smtp'] + $config['smtp_v6']
} else { } else {
$from = $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6 $from = $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6
@ -46,7 +46,11 @@ define sunetdrive::db_type(
sunet::system_user {'mysql': username => 'mysql', group => 'mysql' } sunet::system_user {'mysql': username => 'mysql', group => 'mysql' }
$sql_files = ['02-backup_user.sql', '03-proxysql.sql', '04-nextcloud.sql'] if $location =~ /^lookup/ {
$sql_files = ['02-backup_user.sql', '03-proxysql.sql', '05-lookup.sql']
} else {
$sql_files = ['02-backup_user.sql', '03-proxysql.sql', '04-nextcloud.sql']
}
$sql_files.each |$sql_file|{ $sql_files.each |$sql_file|{
file { "${mariadb_dir}/init/${sql_file}": file { "${mariadb_dir}/init/${sql_file}":
ensure => present, ensure => present,
@ -86,11 +90,6 @@ define sunetdrive::db_type(
ok_criteria => ['exit_status=0','max_age=2d'], ok_criteria => ['exit_status=0','max_age=2d'],
warn_criteria => ['exit_status=1','max_age=3d'], warn_criteria => ['exit_status=1','max_age=3d'],
} }
file { '/usr/local/bin/mysql':
ensure => present,
content => template('sunetdrive/mariadb/mysql.erb.sh'),
mode => '0744',
}
file { '/usr/local/bin/size-test': file { '/usr/local/bin/size-test':
ensure => present, ensure => present,
content => template('sunetdrive/mariadb/size-test.erb'), content => template('sunetdrive/mariadb/size-test.erb'),

View file

@ -1,3 +1,4 @@
include apt
# Wrapper for sunet::dockerhost to do thiss specific things # Wrapper for sunet::dockerhost to do thiss specific things
class sunetdrive::dockerhost( class sunetdrive::dockerhost(
String $version = safe_hiera('docker_version'), String $version = safe_hiera('docker_version'),

View file

@ -4,8 +4,10 @@ class sunetdrive::infra_script (
$location = undef $location = undef
) { ) {
$environment = sunetdrive::get_environment() $environment = sunetdrive::get_environment()
$customer = 'common' $customer = "common"
$config = hiera_hash($environment) $config = hiera_hash($environment)
$gss_backup_server = $config['gss_backup_server']
$lookup_backup_server = $config['lookup_backup_server']
$ssh_config = "Host *.sunet.se $ssh_config = "Host *.sunet.se
User script User script
IdentityFile /root/.ssh/id_script" IdentityFile /root/.ssh/id_script"
@ -57,16 +59,14 @@ class sunetdrive::infra_script (
mode => '0700', mode => '0700',
} }
sunet::scriptherder::cronjob { 'backupgssdb': sunet::scriptherder::cronjob { 'backupgssdb':
ensure => 'absent', cmd => "/root/tasks/backupdb.sh ${gss_backup_server}",
cmd => 'true',
hour => '2', hour => '2',
minute => '0', minute => '0',
ok_criteria => ['exit_status=0','max_age=2d'], ok_criteria => ['exit_status=0','max_age=2d'],
warn_criteria => ['exit_status=1','max_age=3d'], warn_criteria => ['exit_status=1','max_age=3d'],
} }
sunet::scriptherder::cronjob { 'backuplookupdb': sunet::scriptherder::cronjob { 'backuplookupdb':
ensure => 'absent', cmd => "/root/tasks/backupdb.sh ${lookup_backup_server}",
cmd => 'true',
hour => '2', hour => '2',
minute => '0', minute => '0',
ok_criteria => ['exit_status=0','max_age=2d'], ok_criteria => ['exit_status=0','max_age=2d'],

42
manifests/lookup.pp Normal file
View file

@ -0,0 +1,42 @@
#Class for SUNET-Drive-Lookup-Server
class sunetdrive::lookup (
$bootstrap = undef,
$location = undef
) {
$environment = sunetdrive::get_environment()
# Firewall settings
$nextcloud_ip = hiera_array("${location}_app", [])
$tug_office = hiera_array('tug_office')
$dbhost = 'proxysql_proxysql_1'
$gss_jwt_key = safe_hiera('gss_jwt_key')
$mysql_user_password = safe_hiera('mysql_user_password')
$lookup_version = hiera("lookup_version_${environment}")
#Create users
user { 'www-data': ensure => present, system => true }
file { '/opt/lookup/config.php':
ensure => present,
owner => 'www-data',
group => 'root',
content => template('sunetdrive/lookup/config.php.erb'),
mode => '0644',
}
sunet::docker_compose { 'drive_lookup_docker_compose':
content => template('sunetdrive/lookup/docker-compose_lookup.yml.erb'),
service_name => 'lookup',
compose_dir => '/opt/',
compose_filename => 'docker-compose.yml',
description => 'Lookup server',
}
sunet::misc::ufw_allow { 'https':
from => '0.0.0.0/0',
port => 443,
}
}

View file

@ -10,9 +10,9 @@ class sunetdrive::mariadb (
$quorum_id = $facts['networking']['fqdn'] $quorum_id = $facts['networking']['fqdn']
$quorum_password = safe_hiera('quorum_password') $quorum_password = safe_hiera('quorum_password')
$db = sunetdrive::db_type { 'base_db': $db = sunetdrive::db_type { 'base_db':
bootstrap => $bootstrap, bootstrap => $bootstrap,
tag_mariadb => $tag_mariadb, tag_mariadb => $tag_mariadb,
location => $location, location => $location,
} }
file { '/etc/quorum.conf': file { '/etc/quorum.conf':
ensure => file, ensure => file,

View file

@ -1,13 +1,11 @@
# This is a asyncronous replica of the Maria DB Cluster for SUNET Drive # This is a asyncronous replica of the Maria DB Cluster for SUNET Drive
class sunetdrive::mariadb_backup($tag_mariadb=undef, $location=undef) { class sunetdrive::mariadb_backup($tag_mariadb=undef, $location=undef) {
include sunet::packages::netcat_openbsd
$dirs = [ 'datadir', 'init', 'conf', 'backups' ] $dirs = [ 'datadir', 'init', 'conf', 'backups' ]
$dirs.each | $dir | { $dirs.each | $dir | {
ensure_resource('file',"/opt/mariadb_backup/${dir}", { ensure => directory, recurse => true } ) ensure_resource('file',"/opt/mariadb_backup/${dir}", { ensure => directory, recurse => true } )
} }
# Config from group.yaml # Config from group.yaml
$environment = sunetdrive::get_environment() $environment = sunetdrive::get_environment()
$mariadb_version = hiera("mariadb_version_${environment}")
$config = hiera_hash($environment) $config = hiera_hash($environment)
$first_db = $config['first_db'] $first_db = $config['first_db']

View file

@ -5,7 +5,7 @@ class sunetdrive::multinode (
) )
{ {
include sunet::packages::yq include sunet::packages::yq
$myname = $facts['networking']['hostname'] $myname = $facts['hostname']
$is_multinode = true; $is_multinode = true;
$environment = sunetdrive::get_environment() $environment = sunetdrive::get_environment()
$lb_servers = hiera_hash($environment)['lb_servers'] $lb_servers = hiera_hash($environment)['lb_servers']
@ -14,6 +14,8 @@ class sunetdrive::multinode (
$db_ip = hiera_hash($environment)['db'] $db_ip = hiera_hash($environment)['db']
$admin_password = hiera('admin_password') $admin_password = hiera('admin_password')
$cluster_admin_password = hiera('cluster_admin_password') $cluster_admin_password = hiera('cluster_admin_password')
# This is a global value from common.yaml but overridden in the gss-servers local.yaml
$gss_mode = hiera('gss_mode')
$twofactor_enforced_groups = [] $twofactor_enforced_groups = []
$twofactor_enforced_excluded_groups = [] $twofactor_enforced_excluded_groups = []
@ -38,44 +40,6 @@ class sunetdrive::multinode (
user { 'www-data': ensure => present, system => true } user { 'www-data': ensure => present, system => true }
sunet::system_user {'mysql': username => 'mysql', group => 'mysql' } sunet::system_user {'mysql': username => 'mysql', group => 'mysql' }
ensure_resource('file', '/opt/nextcloud' , { ensure => directory, recurse => true } ) ensure_resource('file', '/opt/nextcloud' , { ensure => directory, recurse => true } )
file { '/usr/local/bin/get_containers':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/get_containers'),
mode => '0744',
}
file { '/usr/local/bin/scan_external_mounts':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/scan_external_mounts.sh'),
mode => '0744',
}
sunet::scriptherder::cronjob { 'scriptherder_scan_external_mounts':
cmd => '/usr/local/bin/scan_external_mounts',
hour => '1',
minute => '20',
ok_criteria => ['exit_status=0','max_age=2d'],
warn_criteria => ['exit_status=1','max_age=3d'],
}
file { '/usr/local/bin/nocc':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/nocc.erb'),
mode => '0740',
}
file { '/etc/sudoers.d/99-nocc':
ensure => file,
content => "script ALL=(root) NOPASSWD: /usr/local/bin/nocc\n",
mode => '0440',
owner => 'root',
group => 'root',
}
file { '/usr/local/bin/occ': file { '/usr/local/bin/occ':
ensure => present, ensure => present,
force => true, force => true,
@ -110,6 +74,14 @@ class sunetdrive::multinode (
content => template('sunetdrive/multinode/get_non_paying_customers.erb.sh'), content => template('sunetdrive/multinode/get_non_paying_customers.erb.sh'),
mode => '0744', mode => '0744',
} }
file { '/usr/local/bin/get_containers':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/multinode/get_containers'),
mode => '0744',
}
file { '/usr/local/bin/restart_and_prune': file { '/usr/local/bin/restart_and_prune':
ensure => present, ensure => present,
force => true, force => true,
@ -239,36 +211,26 @@ MACAddressPolicy=none'
hour => '0', hour => '0',
weekday => '0', weekday => '0',
} }
# if $nodenumber == '2' { if $nodenumber == '2' {
# cron { 'add_back_bucket_for_karin_nordgren': cron { 'add_back_bucket_for_karin_nordgren':
# command => '(/usr/local/bin/occ nextcloud-kmh-app-1 files_external:list karin_nordgren@kmh.se && /home/script/bin/create_bucket.sh nextcloud-kmh-app-1 karin_nordgren@kmh.se karin-nordgren-drive-sunet-se) || /bin/true', command => '(/usr/local/bin/occ nextcloud-kmh_app_1 files_external:list karin_nordgren@kmh.se && /home/script/bin/create_bucket.sh nextcloud-kmh_app_1 karin_nordgren@kmh.se karin-nordgren-drive-sunet-se) || /bin/true',
# user => 'root', user => 'root',
# minute => '*/10', minute => '*/10',
# } }
# } }
$customers.each | $index, $customer | { $customers.each | $index, $customer | {
$customer_config_full = hiera_hash($customer)
$customer_config = $customer_config_full[$environment]
cron { "multinode_cron_${customer}": cron { "multinode_cron_${customer}":
command => "/opt/nextcloud/cron.sh nextcloud-${customer}-app-1", command => "/opt/nextcloud/cron.sh nextcloud-${customer}_app_1",
require => File['/opt/nextcloud/cron.sh'], require => File['/opt/nextcloud/cron.sh'],
user => 'root', user => 'root',
minute => '*/10', minute => '*/10',
} }
if $environment == 'prod' { if $environment == 'prod' {
if 'primary_bucket' in $customer_config.keys() { $s3_bucket = "primary-${customer}-drive.sunet.se"
$s3_bucket = $customer_config['primary_bucket']
} else {
$s3_bucket = "primary-${customer}-drive.sunet.se"
}
$site_name = "${customer}.drive.sunet.se" $site_name = "${customer}.drive.sunet.se"
$trusted_proxies = ['lb1.drive.sunet.se','lb2.drive.sunet.se', 'lb3.drive.sunet.se', 'lb4.drive.sunet.se'] $trusted_proxies = ['lb1.drive.sunet.se','lb2.drive.sunet.se', 'lb3.drive.sunet.se', 'lb4.drive.sunet.se']
} else { } else {
if 'primary_bucket' in $customer_config.keys() { $s3_bucket = "primary-${customer}-${environment}.sunet.se"
$s3_bucket = $customer_config['primary_bucket']
} else {
$s3_bucket = "primary-${customer}-${environment}.sunet.se"
}
$site_name = "${customer}.drive.${environment}.sunet.se" $site_name = "${customer}.drive.${environment}.sunet.se"
$trusted_proxies = ["lb1.drive.${environment}.sunet.se","lb2.drive.${environment}.sunet.se", $trusted_proxies = ["lb1.drive.${environment}.sunet.se","lb2.drive.${environment}.sunet.se",
"lb3.drive.${environment}.sunet.se","lb4.drive.${environment}.sunet.se"] "lb3.drive.${environment}.sunet.se","lb4.drive.${environment}.sunet.se"]
@ -277,6 +239,8 @@ MACAddressPolicy=none'
$apache_error_path = "/opt/multinode/${customer}/404.html" $apache_error_path = "/opt/multinode/${customer}/404.html"
$config_php_path = "/opt/multinode/${customer}/config.php" $config_php_path = "/opt/multinode/${customer}/config.php"
$cron_log_path ="/opt/multinode/${customer}/cron.log" $cron_log_path ="/opt/multinode/${customer}/cron.log"
$customer_config_full = hiera_hash($customer)
$customer_config = $customer_config_full[$environment]
$dbhost = 'proxysql_proxysql_1' $dbhost = 'proxysql_proxysql_1'
$dbname = "nextcloud_${customer}" $dbname = "nextcloud_${customer}"
@ -284,12 +248,14 @@ MACAddressPolicy=none'
$gs_enabled = hiera('gs_enabled') $gs_enabled = hiera('gs_enabled')
$gs_federation = hiera('gs_federation') $gs_federation = hiera('gs_federation')
$gss_master_admin = hiera_array('gss_master_admin')
$gss_master_url = hiera("gss_master_url_${environment}")
$https_port = hiera_hash('multinode_mapping')[$customer]['port'] $https_port = hiera_hash('multinode_mapping')[$customer]['port']
$lookup_server = hiera("lookup_server_${environment}")
$mail_domain = hiera("mail_domain_${environment}") $mail_domain = hiera("mail_domain_${environment}")
$mail_from_address = hiera("mail_from_address_${environment}") $mail_from_address = hiera("mail_from_address_${environment}")
$mail_smtphost = hiera("mail_smtphost_${environment}") $mail_smtphost = hiera("mail_smtphost_${environment}")
$nextcloud_log_path ="/opt/multinode/${customer}/nextcloud.log" $nextcloud_log_path ="/opt/multinode/${customer}/nextcloud.log"
$audit_log_path ="/opt/multinode/${customer}/audit.log"
if $customer_config['nextcloud_version'] { if $customer_config['nextcloud_version'] {
$nextcloud_version = $customer_config['nextcloud_version'] $nextcloud_version = $customer_config['nextcloud_version']
} else { } else {
@ -299,16 +265,11 @@ MACAddressPolicy=none'
$rclone_conf_path = "/opt/multinode/${customer}/rclone.conf" $rclone_conf_path = "/opt/multinode/${customer}/rclone.conf"
$redis_conf_dir = "/opt/multinode/${customer}/server" $redis_conf_dir = "/opt/multinode/${customer}/server"
$redis_conf_path = "${redis_conf_dir}/redis.conf" $redis_conf_path = "${redis_conf_dir}/redis.conf"
if $::facts['sunet_nftables_enabled'] == 'yes' { $redis_host= "redis-${customer}_redis-server_1"
$redis_host= "redis-${customer}-redis-server-1"
} else {
$redis_host= "redis-${customer}_redis-server_1"
}
$s3_host = $customer_config['s3_host'] $s3_host = $customer_config['s3_host']
$s3_usepath = hiera('s3_usepath') $s3_usepath = hiera('s3_usepath')
$smtpuser = hiera("smtp_user_${environment}") $smtpuser = hiera("smtp_user_${environment}")
$trusted_domains = [$site_name, $facts['networking']['fqdn'], 'localhost'] $trusted_domains = [$site_name, $facts['fqdn'], 'localhost']
$tug_office = hiera_array('tug_office') $tug_office = hiera_array('tug_office')
if $customer_config['twofactor_enforced_groups'] { if $customer_config['twofactor_enforced_groups'] {
$twofactor_enforced_groups = $customer_config['twofactor_enforced_groups'] $twofactor_enforced_groups = $customer_config['twofactor_enforced_groups']
@ -316,15 +277,6 @@ MACAddressPolicy=none'
if $customer_config['twofactor_enforced_excluded_groups'] { if $customer_config['twofactor_enforced_excluded_groups'] {
$twofactor_enforced_excluded_groups = $customer_config['twofactor_enforced_excluded_groups'] $twofactor_enforced_excluded_groups = $customer_config['twofactor_enforced_excluded_groups']
} }
if $customer_config['full_backup_retention'] {
$full_backup_retention = $customer_config['full_backup_retention']
} else {
$full_backup_retention = hiera('full_backup_retention')
}
# Calculate some values
$expiration_months = max(12, $full_backup_retention)
$expiration_days_min = $expiration_months * 31
$expiration_days_max = $expiration_months * 31 + 93
# Secrets from local.eyaml # Secrets from local.eyaml
$admin_password = safe_hiera("${customer}_admin_password") $admin_password = safe_hiera("${customer}_admin_password")
@ -337,6 +289,7 @@ MACAddressPolicy=none'
$secret = safe_hiera("${customer}_secret") $secret = safe_hiera("${customer}_secret")
$passwordsalt= safe_hiera("${customer}_passwordsalt") $passwordsalt= safe_hiera("${customer}_passwordsalt")
$redis_host_password = safe_hiera("${customer}_redis_host_password") $redis_host_password = safe_hiera("${customer}_redis_host_password")
$gss_jwt_key = safe_hiera('gss_jwt_key')
$smtppassword = safe_hiera('smtp_password') $smtppassword = safe_hiera('smtp_password')
$extra_config = { $extra_config = {
@ -423,13 +376,6 @@ MACAddressPolicy=none'
group => 'root', group => 'root',
mode => '0644', mode => '0644',
} }
file { $audit_log_path:
ensure => file,
force => true,
owner => 'www-data',
group => 'root',
mode => '0644',
}
file { $rclone_conf_path: file { $rclone_conf_path:
ensure => present, ensure => present,
owner => 'www-data', owner => 'www-data',
@ -445,18 +391,10 @@ MACAddressPolicy=none'
content => template('sunetdrive/multinode/complete_reinstall.erb.sh'), content => template('sunetdrive/multinode/complete_reinstall.erb.sh'),
mode => '0744', mode => '0744',
} }
if $::facts['sunet_nftables_enabled'] == 'yes' { # Open ports
$name = "https_port_${customer}" sunet::misc::ufw_allow { "https_port_${customer}":
ensure_resource('sunet::nftables::ufw_allow_compat', $name, { from => '0.0.0.0',
from => ['0.0.0.0/0', '::/0'], port => $https_port,
port => $https_port,
})
} else {
# Open ports
sunet::misc::ufw_allow { "https_port_${customer}":
from => '0.0.0.0',
port => $https_port,
}
} }
} }
} }

View file

@ -14,61 +14,4 @@ class sunetdrive::multinode_db(){
mode => '0744', mode => '0744',
} }
} }
if $facts["networking"]["fqdn"] =~ /^multinode-db1\.drive\.(test\.){1}sunet\.se$/ {
$statistics_secret = safe_hiera('statistics_secret')
notify { 'hostmessage':
message => 'We are on multinode-db1. Set up statistics environment.',
}
$custdata=$customers.reduce({}) |$memo, $value| {
$memo + {$value => lookup($value)}
}
$rclone_url = 'https://downloads.rclone.org/rclone-current-linux-amd64.deb'
$local_path = '/tmp/rclone-current-linux-amd64.deb'
exec { 'rclone_deb':
command => "/usr/bin/wget -q ${rclone_url} -O ${local_path}",
creates => $local_path,
}
package { 'rclone':
ensure => installed,
provider => dpkg,
source => $local_path,
require => Exec['rclone_deb'],
}
file { '/root/.rclone.conf':
ensure => file,
content => template('sunetdrive/mariadb_backup/rclone.conf.erb'),
owner => 'root',
group => 'root',
mode => '0600',
}
file { '/root/tasks/listusersbydep.sh':
ensure => file,
content => template('sunetdrive/mariadb/listusersdep.sh.erb'),
owner => 'root',
group => 'root',
mode => '0700',
}
file { '/root/tasks/genusersondepartmentlists.sh':
ensure => file,
content => template('sunetdrive/mariadb/genuserdeplists.sh.erb'),
owner => 'root',
group => 'root',
mode => '0700',
}
file {'/opt/mariadb/statistics/custdata.json':
ensure => file,
content => template('sunetdrive/mariadb/custconfig.json.erb'),
owner => 'root',
group => 'root',
mode => '0600',
}
sunet::scriptherder::cronjob { 'genuserdeplists':
cmd => '/root/tasks/genusersondepartmentlists.sh',
hour => '2',
minute => '5',
ok_criteria => ['exit_status=0','max_age=30h'],
warn_criteria => ['exit_status=1', 'max_age=60h'],
}
}
} }

View file

@ -1,79 +0,0 @@
# NRPE class
#
# Configures the NRPE (Nagios Remote Plugin Executor) agent for a SUNET
# Drive host: the shared sunet::nagios base class with tunable load and
# process-count thresholds, a set of extra NRPE commands, and the sudo
# rules required by the checks that run privileged tools.
class sunetdrive::nrpe(
  $loadw = '15,10,5',   # load warning thresholds (1, 5 and 15 minute averages)
  $loadc = '30,25,20',  # load critical thresholds
  $procsw = 150,        # number-of-processes warning threshold
  $procsc = 200,        # number-of-processes critical threshold
) {
  require apt
  class { 'sunet::nagios':
    command_timeout => 600,
    loadw           => $loadw,
    loadc           => $loadc,
    procsw          => $procsw,
    procsc          => $procsc,
  }
  package {'nagios-plugins-contrib': ensure => latest}
  # Memory-check thresholds. NOTE(review): the numbers flip direction at
  # Ubuntu 22.04 (90/95 vs 10/5) — presumably the plugin switched from
  # free-percent to used-percent semantics there; confirm against the
  # nrpe_check_memory implementation.
  if ($facts['os']['name'] == 'Ubuntu' and versioncmp($facts['os']['release']['full'], '22.04') >= 0 ){
    $mem_w = '90'
    $mem_c = '95'
  } else {
    $mem_w = '10'
    $mem_c = '5'
  }
  # Declare each listed check resource once; ensure_resource avoids
  # duplicate-declaration failures if another class declares the same check.
  $checks = ['nrpe_check_memory']
  $checks.each |$check| {
    ensure_resource("sunet::nagios::${check}", "nagios-nrpe-${check}")
  }
  sunet::nagios::nrpe_command {'check_entropy':
    command_line => '/usr/lib/nagios/plugins/check_entropy -w 256'
  }
  sunet::nagios::nrpe_command {'check_ntp_time':
    command_line => '/usr/lib/nagios/plugins/check_ntp_time -H ntp.se'
  }
  sunet::nagios::nrpe_command {'check_scriptherder':
    command_line => '/usr/local/bin/scriptherder --mode check'
  }
  # Each privileged check below comes as a pair: a sudoers entry letting the
  # nagios user run the tool as root, and the NRPE command invoking it via sudo.
  sunet::sudoer {'nagios_run_needrestart_command':
    user_name    => 'nagios',
    collection   => 'nrpe_needrestart_check',
    command_line => '/usr/sbin/needrestart -p -l'
  }
  sunet::sudoer {'nagios_run_galera_command':
    user_name    => 'nagios',
    collection   => 'nrpe_galera_check',
    command_line => '/usr/lib/nagios/plugins/check_galera_cluster -w 2 -c 1 -0'
  }
  sunet::nagios::nrpe_command {'check_galera_cluster':
    command_line => '/usr/bin/sudo /usr/lib/nagios/plugins/check_galera_cluster -w 2 -c 1 -0'
  }
  sunet::sudoer {'nagios_run_proxysql_command':
    user_name    => 'nagios',
    collection   => 'nrpe_proxysql_check',
    command_line => '/usr/lib/nagios/plugins/check_proxysql_server, /usr/lib/nagios/plugins/check_mysql_server_status'
  }
  sunet::nagios::nrpe_command {'check_proxysql_server':
    command_line => '/usr/bin/sudo /usr/lib/nagios/plugins/check_proxysql_server'
  }
  sunet::sudoer {'nagios_run_replication_command':
    user_name    => 'nagios',
    collection   => 'nrpe_replication_check',
    command_line => '/usr/local/bin/check_replication'
  }
  sunet::nagios::nrpe_command {'check_async_replication':
    command_line => '/usr/bin/sudo /usr/local/bin/check_replication'
  }
  sunet::sudoer {'nagios_run_backup_command':
    user_name    => 'nagios',
    collection   => 'nrpe_backup_check',
    command_line => '/usr/local/bin/check_backups'
  }
  sunet::nagios::nrpe_command {'check_backups':
    command_line => '/usr/bin/sudo /usr/local/bin/check_backups'
  }
  sunet::nagios::nrpe_command {'check_mysql_server_status':
    command_line => '/usr/bin/sudo /usr/lib/nagios/plugins/check_mysql_server_status'
  }
}

View file

@ -16,14 +16,14 @@ class sunetdrive::onlyoffice () {
command => "docker login registry.gitlab.collabora.com -u ${token_name} -p ${access_token}", command => "docker login registry.gitlab.collabora.com -u ${token_name} -p ${access_token}",
} }
sunet::collabora::docs { 'sunet-onlyoffice': sunet::collabora::docs { 'sunet-onlyoffice':
dns => [ '89.32.32.32' ], dns => [ '89.32.32.32' ],
extra_hosts => $extra_hosts, extra_hosts => $extra_hosts,
extra_volumes => ['/opt/collabora/coolwsd.xml:/etc/coolwsd/coolwsd.xml'], extra_volumes => ['/opt/collabora/coolwsd.xml:/etc/coolwsd/coolwsd.xml'],
docker_tag => $docker_tag, docker_tag => $docker_tag,
docker_image => 'registry.gitlab.collabora.com/productivity/collabora-online-nc', docker_image => 'registry.gitlab.collabora.com/productivity/collabora-online-nc',
} }
file {'/opt/collabora/coolwsd.xml': file {'/opt/collabora/coolwsd.xml':
ensure => present, ensure => present,
content => template('sunetdrive/document/coolwsd.xml.erb'), content => template('sunetdrive/document/coolwsd.xml.erb'),
} }
} }

View file

@ -4,7 +4,6 @@ class sunetdrive::proxysql (
$location = undef, $location = undef,
$proxysql_container_name = 'proxysql_proxysql_1', $proxysql_container_name = 'proxysql_proxysql_1',
$manage_config = true, $manage_config = true,
$manage_network = true,
) { ) {
# Config from group.yaml # Config from group.yaml
@ -12,8 +11,6 @@ class sunetdrive::proxysql (
$config = hiera_hash($environment) $config = hiera_hash($environment)
$db_ip = $config['db'] $db_ip = $config['db']
$nextcloud_ip = $config['app'] $nextcloud_ip = $config['app']
$nextcloud_ipv6 = $config['app_v6']
$nextcloud_ip_all = $nextcloud_ip + $nextcloud_ipv6
$proxysql_ok_num = length($nextcloud_ip) $proxysql_ok_num = length($nextcloud_ip)
$proxysql_warn_num = $proxysql_ok_num - 1 $proxysql_warn_num = $proxysql_ok_num - 1
@ -29,8 +26,8 @@ class sunetdrive::proxysql (
$mysql_user = safe_hiera('mysql_user') $mysql_user = safe_hiera('mysql_user')
$transaction_persistent = 1 $transaction_persistent = 1
if $::facts['dockerhost2'] == 'yes' and $manage_network { if $::facts['dockerhost2'] == 'yes' {
$hostnet = true $hostnet = 'yes'
} }
file { '/usr/local/bin/proxysql': file { '/usr/local/bin/proxysql':
@ -81,7 +78,7 @@ class sunetdrive::proxysql (
iif => 'ens3', iif => 'ens3',
} }
sunet::nftables::docker_expose { 'proxysql': sunet::nftables::docker_expose { 'proxysql':
allow_clients => $nextcloud_ip_all, allow_clients => ['any'],
port => 6032, port => 6032,
iif => 'ens3', iif => 'ens3',
} }

View file

@ -17,10 +17,4 @@ class sunetdrive::redis_cluster (
content => template('sunetdrive/redis_cluster/reset_cluster.erb.sh'), content => template('sunetdrive/redis_cluster/reset_cluster.erb.sh'),
mode => '0700', mode => '0700',
} }
exec { 'set_permissions_directory':
command => 'chown -R 999:root /opt/redis/node-*'
}
exec { 'set_permissions_files':
command => 'chown -R 999:999 /opt/redis/node-*/*'
}
} }

View file

@ -29,23 +29,17 @@ class sunetdrive::satosa($dehydrated_name=undef,$image='docker.sunet.se/satosa',
} }
} }
} }
$dehydrated_status = $dehydrated_name ? { sunet::docker_run {'satosa':
undef => 'absent', image => $image,
default => 'present' imagetag => $tag,
} dns => ['89.32.32.32'],
file {'/opt/docker_run': volumes => ['/etc/satosa:/etc/satosa','/etc/dehydrated:/etc/dehydrated'],
ensure => 'absent', ports => ['443:8000'],
} env => ['METADATA_DIR=/etc/satosa/metadata', 'WORKER_TIMEOUT=120']
sunet::docker_compose { 'satosa':
content => template('sunetdrive/satosa/docker-compose.yml.erb'),
service_name => 'satosa',
compose_dir => '/opt/',
compose_filename => 'docker-compose.yml',
description => 'Satosa',
} }
file {'/etc/satosa/proxy_conf.yaml': file {'/etc/satosa/proxy_conf.yaml':
content => inline_template("<%= @merged_conf.to_yaml %>\n"), content => inline_template("<%= @merged_conf.to_yaml %>\n"),
notify => Sunet::Docker_compose['satosa'] notify => Sunet::Docker_run['satosa']
} }
$plugins = hiera('satosa_config') $plugins = hiera('satosa_config')
sort(keys($plugins)).each |$n| { sort(keys($plugins)).each |$n| {
@ -53,13 +47,23 @@ class sunetdrive::satosa($dehydrated_name=undef,$image='docker.sunet.se/satosa',
$fn = $plugins[$n] $fn = $plugins[$n]
file { $fn: file { $fn:
content => inline_template("<%= @conf.to_yaml %>\n"), content => inline_template("<%= @conf.to_yaml %>\n"),
notify => Sunet::Docker_compose['satosa'] notify => Sunet::Docker_run['satosa']
} }
} }
sunet::misc::ufw_allow { 'satosa-allow-https': sunet::misc::ufw_allow { 'satosa-allow-https':
from => 'any', from => 'any',
port => '443' port => '443'
} }
$dehydrated_status = $dehydrated_name ? {
undef => 'absent',
default => 'present'
}
sunet::docker_run {'alwayshttps':
ensure => $dehydrated_status,
image => 'docker.sunet.se/always-https',
ports => ['80:80'],
env => ['ACME_URL=http://acme-c.sunet.se']
}
sunet::misc::ufw_allow { 'satosa-allow-http': sunet::misc::ufw_allow { 'satosa-allow-http':
ensure => $dehydrated_status, ensure => $dehydrated_status,
from => 'any', from => 'any',
@ -74,6 +78,12 @@ class sunetdrive::satosa($dehydrated_name=undef,$image='docker.sunet.se/satosa',
cert_file => '/etc/satosa/https.crt' cert_file => '/etc/satosa/https.crt'
} }
} }
file { '/opt/satosa':
ensure => directory,
owner => 'root',
group => 'root',
mode => '0755',
}
-> file { '/opt/satosa/restart.sh': -> file { '/opt/satosa/restart.sh':
ensure => file, ensure => file,
owner => 'root', owner => 'root',

View file

@ -4,7 +4,6 @@ class sunetdrive::script (
$location = undef $location = undef
) { ) {
include sunet::packages::python3_pip include sunet::packages::python3_pip
include sunet::packages::kopia
$environment = sunetdrive::get_environment() $environment = sunetdrive::get_environment()
$customer = sunetdrive::get_customer() $customer = sunetdrive::get_customer()
$apikey_test = safe_hiera('monitor_apikey_test') $apikey_test = safe_hiera('monitor_apikey_test')
@ -21,7 +20,6 @@ class sunetdrive::script (
$local_path = '/tmp/rclone-current-linux-amd64.deb' $local_path = '/tmp/rclone-current-linux-amd64.deb'
$singlenodes = lookup('singlenodes') $singlenodes = lookup('singlenodes')
$multinodes = keys(lookup('multinode_mapping')) $multinodes = keys(lookup('multinode_mapping'))
$extra_backup_jobs = pick($config['extra_backup_jobs'], {})
if $customer == 'mdu' { if $customer == 'mdu' {
$eppn_suffix = 'mdh.se' $eppn_suffix = 'mdh.se'
@ -57,17 +55,6 @@ class sunetdrive::script (
} }
$site_name = $config['site_name'] $site_name = $config['site_name']
$user_bucket_name = $config['user_bucket_name'] $user_bucket_name = $config['user_bucket_name']
if $config['user_scans'] {
$config['user_scans'].each |$job| {
sunet::scriptherder::cronjob { $job['name']:
cmd => "ssh -t -l script ${job['server']} /usr/bin/sudo /usr/local/bin/occ ${job['container']} files:scan ${job['user']}",
hour => $job['hour'],
minute => $job['minute'],
ok_criteria => ['exit_status=0','max_age=1d'],
warn_criteria => ['exit_status=1','max_age=2d'],
}
}
}
# It is a start that will get us user buckets and primary buckets # It is a start that will get us user buckets and primary buckets
$backup_projects = $location $backup_projects = $location
@ -109,14 +96,6 @@ class sunetdrive::script (
unless => "python3 -m pip list | grep drive-utils | grep ${drive_version}", unless => "python3 -m pip list | grep drive-utils | grep ${drive_version}",
require => Package['python3'], require => Package['python3'],
} }
file { '/opt/backups':
ensure => directory,
mode => '0700'
}
file { '/opt/backups/scripts':
ensure => directory,
mode => '0700'
}
file { '/root/.ssh/': file { '/root/.ssh/':
ensure => directory, ensure => directory,
mode => '0700', mode => '0700',
@ -254,11 +233,8 @@ class sunetdrive::script (
group => 'root', group => 'root',
mode => '0700', mode => '0700',
} }
file_line { 'FIXME_remove_when_s3_migration_done_in_sto3': file { '/root/tasks/backupsinglenodedb.sh':
ensure => 'present', ensure => absent,
line => '37.156.195.53 s3.sto3.safedc.net',
path => '/etc/hosts',
match => '^37.156.195.53',
} }
if $environment == 'test' { if $environment == 'test' {
sunet::scriptherder::cronjob { 'reboot-customer': sunet::scriptherder::cronjob { 'reboot-customer':
@ -273,7 +249,7 @@ class sunetdrive::script (
if $customer in ['extern', 'gih', 'hkr', 'suni', 'common', 'su', 'lnu'] { if $customer in ['extern', 'gih', 'hkr', 'suni', 'common', 'su', 'lnu'] {
sunet::scriptherder::cronjob { 'makebuckets': sunet::scriptherder::cronjob { 'makebuckets':
ensure => absent, ensure => absent,
cmd => 'bin/true', cmd => 'bin/true',
} }
} else { } else {
sunet::scriptherder::cronjob { 'makebuckets': sunet::scriptherder::cronjob { 'makebuckets':
@ -358,13 +334,6 @@ class sunetdrive::script (
group => 'root', group => 'root',
mode => '0700', mode => '0700',
} }
file { '/opt/backups/scripts/hb.sh':
ensure => file,
content => template('sunetdrive/script/backup-hb.erb.sh'),
owner => 'root',
group => 'root',
mode => '0700',
}
sunet::scriptherder::cronjob { 'backupmultinodedb': sunet::scriptherder::cronjob { 'backupmultinodedb':
cmd => '/root/tasks/backupmultinodedb.sh', cmd => '/root/tasks/backupmultinodedb.sh',
hour => '2', hour => '2',
@ -391,7 +360,7 @@ class sunetdrive::script (
$singlenodes.each | $singlenode| { $singlenodes.each | $singlenode| {
$multinode = hiera_hash('multinode_mapping')[$singlenode]['server'] $multinode = hiera_hash('multinode_mapping')[$singlenode]['server']
$multinodeserver = "${multinode}.${site_name}" $multinodeserver = "${multinode}.${site_name}"
$nccontainer = "nextcloud-${singlenode}-app-1" $nccontainer = "nextcloud-${singlenode}_app_1"
sunet::scriptherder::cronjob { "backup${singlenode}db": sunet::scriptherder::cronjob { "backup${singlenode}db":
ensure => absent, ensure => absent,
@ -421,17 +390,17 @@ class sunetdrive::script (
} }
} }
} }
$gss_backup_server = $config['gss_backup_server']
$lookup_backup_server = $config['lookup_backup_server']
sunet::scriptherder::cronjob { 'backupgssdb': sunet::scriptherder::cronjob { 'backupgssdb':
ensure => 'absent', cmd => "/root/tasks/backupdb.sh ${gss_backup_server}",
cmd => 'true',
hour => '2', hour => '2',
minute => '0', minute => '0',
ok_criteria => ['exit_status=0','max_age=2d'], ok_criteria => ['exit_status=0','max_age=2d'],
warn_criteria => ['exit_status=1','max_age=3d'], warn_criteria => ['exit_status=1','max_age=3d'],
} }
sunet::scriptherder::cronjob { 'backuplookupdb': sunet::scriptherder::cronjob { 'backuplookupdb':
ensure => 'absent', cmd => "/root/tasks/backupdb.sh ${lookup_backup_server}",
cmd => 'true',
hour => '2', hour => '2',
minute => '0', minute => '0',
ok_criteria => ['exit_status=0','max_age=2d'], ok_criteria => ['exit_status=0','max_age=2d'],

View file

@ -64,12 +64,12 @@ class sunetdrive::scriptreceiver()
} }
file { '/root/.bashrc': file { '/root/.bashrc':
ensure => file, ensure => file,
content => template('sunetdrive/scriptreceiver/bashrc.erb.sh'), content => template('sunetdrive/scriptreceiver/baschrc.erb.sh'),
mode => '0644', mode => '0644',
owner => 'root', owner => 'root',
group => 'root', group => 'root',
} }
file { '/etc/sudoers.d/99-safer_reboot': file { "/etc/sudoers.d/99-safer_reboot":
ensure => file, ensure => file,
content => "script ALL=(root) NOPASSWD: /usr/local/bin/safer_reboot\n", content => "script ALL=(root) NOPASSWD: /usr/local/bin/safer_reboot\n",
mode => '0440', mode => '0440',

View file

@ -7,7 +7,7 @@ class sunetdrive::sitemonitornaemon() {
$tls_servers_with_port = hiera_array('tls_servers_with_port') $tls_servers_with_port = hiera_array('tls_servers_with_port')
$nextcloud_version_prod = split(hiera('nextcloud_version_prod'),'[-]')[0] $nextcloud_version_prod = split(hiera('nextcloud_version_prod'),'[-]')[0]
$nextcloud_version_test = split(hiera('nextcloud_version_test'),'[-]')[0] $nextcloud_version_test = split(hiera('nextcloud_version_test'),'[-]')[0]
$monitorhost = $facts['networking']['fqdn'] $monitorhost = $::fqdn
$environment = sunetdrive::get_environment() $environment = sunetdrive::get_environment()
$influx_passwd = safe_hiera('influx_passwd') $influx_passwd = safe_hiera('influx_passwd')
$slack_url = safe_hiera('slack_url') $slack_url = safe_hiera('slack_url')
@ -42,15 +42,11 @@ class sunetdrive::sitemonitornaemon() {
} }
file { '/etc/naemon/conf.d/sunetdrive_thruk_templates.conf': file { '/etc/naemon/conf.d/sunetdrive_thruk_templates.conf':
ensure => present, ensure => present,
owner => 'naemon',
group => 'naemon',
content => template('sunetdrive/monitor/sunetdrive_thruk_templates.conf.erb'), content => template('sunetdrive/monitor/sunetdrive_thruk_templates.conf.erb'),
mode => '0644', mode => '0644',
} }
nagioscfg::service {'check_scriptherder':
hostgroup_name => ['sunetdrive::nrpe'],
check_command => 'check_nrpe_1arg_to300!check_scriptherder',
description => 'Scriptherder Status',
contact_groups => ['naemon-admins'],
}
nagioscfg::service {'check_galera_cluster': nagioscfg::service {'check_galera_cluster':
hostgroup_name => ['galera_monitor'], hostgroup_name => ['galera_monitor'],
check_command => 'check_nrpe_1arg!check_galera_cluster', check_command => 'check_nrpe_1arg!check_galera_cluster',

View file

@ -3,6 +3,7 @@
config_php='/var/www/html/config/config.php' config_php='/var/www/html/config/config.php'
dbhost="<%= @dbhost %>" dbhost="<%= @dbhost %>"
mysql_user_password="<%= @mysql_user_password %>" mysql_user_password="<%= @mysql_user_password %>"
admin_password="<%= @admin_password %>"
location="<%= @location %>" location="<%= @location %>"
bucket="<%= @s3_bucket %>" bucket="<%= @s3_bucket %>"
@ -13,9 +14,6 @@ if [[ "${user_input}" == "IKnowWhatIAmDoing" ]]; then
echo "WARNING: This will delete everything in the database and reinstall Nextcloud." echo "WARNING: This will delete everything in the database and reinstall Nextcloud."
echo "You have 10 seconds to abort by hitting CTRL/C" echo "You have 10 seconds to abort by hitting CTRL/C"
sleep 10s sleep 10s
echo "Setting temp admin password"
apt update && apt install -y apg
admin_password="$(apg -m 40 | head -1)"
echo "Ok, proceeding." echo "Ok, proceeding."
echo "Dropping database in 3 seconds" echo "Dropping database in 3 seconds"
sleep 3s sleep 3s
@ -50,11 +48,6 @@ EOF
instanceid=$(grep -E "^ 'instanceid'" ${config_php} | awk -F "'" '{print $4}') instanceid=$(grep -E "^ 'instanceid'" ${config_php} | awk -F "'" '{print $4}')
secret=$(grep -E "^ 'secret'" ${config_php} | awk -F "'" '{print $4}') secret=$(grep -E "^ 'secret'" ${config_php} | awk -F "'" '{print $4}')
passwordsalt=$(grep -E "^ 'passwordsalt'" ${config_php} | awk -F "'" '{print $4}') passwordsalt=$(grep -E "^ 'passwordsalt'" ${config_php} | awk -F "'" '{print $4}')
echo "Now delete the admin user:"
echo " occ user:delete admin"
echo "and then create a new admin user:"
echo " /usr/local/bin/add_admin_user <username> <email address>"
echo ""
echo "Please use edit-secrets to add these variables to all Nextcloud servers:" echo "Please use edit-secrets to add these variables to all Nextcloud servers:"
echo "instanceid: DEC::PKCS7[${instanceid}]!" echo "instanceid: DEC::PKCS7[${instanceid}]!"
echo "secret: DEC::PKCS7[${secret}]!" echo "secret: DEC::PKCS7[${secret}]!"

View file

@ -1,5 +1,10 @@
<?php <?php
$CONFIG = array ( $CONFIG = array (
'app_install_overwrite' =>
array (
0 => 'globalsiteselector',
),
'apps_paths' => 'apps_paths' =>
array ( array (
0 => 0 =>
@ -43,12 +48,28 @@ $CONFIG = array (
'gs.enabled' => '<%= @gs_enabled %>', 'gs.enabled' => '<%= @gs_enabled %>',
'gs.federation' => '<%= @gs_federation %>', 'gs.federation' => '<%= @gs_federation %>',
'gs.trustedHosts' => ['*.sunet.se'], 'gs.trustedHosts' => ['*.sunet.se'],
'htaccess.RewriteBase' => '/', 'gss.discovery.manual.mapping.file' => '/var/www/html/mappingfile.json',
'gss.discovery.manual.mapping.parameter' => 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6',
'gss.discovery.manual.mapping.regex' => true,
'gss.jwt.key' => '<%= @gss_jwt_key %>',
'gss.master.admin' =>
array (
<%- index = 0 -%>
<%- @gss_master_admin.each do |item| -%>
<%= index %> => '<%= item %>',
<%- index += 1 -%>
<%- end -%>
),
'gss.master.url' => '<%= @gss_master_url %>',
'gss.mode' => '<%= @gss_mode %>',
'gss.user.discovery.module' => '\\OCA\\GlobalSiteSelector\\UserDiscoveryModules\\ManualUserMapping',
'gss.username_format' => 'sanitize',
'installed' => true, 'installed' => true,
'instanceid' => '<%= @instanceid %>', 'instanceid' => '<%= @instanceid %>',
'integrity.check.disabled' => true, 'integrity.check.disabled' => true,
'log_type' => 'file', 'log_type' => 'file',
'loglevel' => 1, 'loglevel' => 0,
'lookup_server' => '<%= @lookup_server %>',
'mail_domain' => '<%= @mail_domain %>', 'mail_domain' => '<%= @mail_domain %>',
'mail_from_address' => '<%= @mail_from_address %>', 'mail_from_address' => '<%= @mail_from_address %>',
'mail_sendmailmode' => 'smtp', 'mail_sendmailmode' => 'smtp',
@ -76,7 +97,6 @@ $CONFIG = array (
'region' => 'us-east-1', 'region' => 'us-east-1',
'hostname' => '<%= @s3_host %>', 'hostname' => '<%= @s3_host %>',
'port' => '', 'port' => '',
'useMultipartCopy' => false,
'objectPrefix' => 'urn:oid:', 'objectPrefix' => 'urn:oid:',
'autocreate' => false, 'autocreate' => false,
'use_ssl' => true, 'use_ssl' => true,
@ -84,11 +104,29 @@ $CONFIG = array (
'legacy_auth' => false, 'legacy_auth' => false,
), ),
), ),
'overwrite.cli.url' => 'https://<%= @site_name %>/', 'overwrite.cli.url' => 'https://<%= @site_name %>',
'overwritehost' => '<%= @site_name %>', 'overwritehost' => '<%= @site_name %>',
'overwriteprotocol' => 'https', 'overwriteprotocol' => 'https',
'passwordsalt' => '<%= @passwordsalt %>', 'passwordsalt' => '<%= @passwordsalt %>',
<% if @environment == 'test' && ! @is_multinode -%> <% if @location == 'gss-test' -%>
'redis.cluster' => [
'failover_mode' => \RedisCluster::FAILOVER_ERROR,
'password' => '<%= @redis_cluster_password %>',
'read_timeout' => 0.0,
'seeds' => [
'redis1.drive.test.sunet.se:6379',
'redis2.drive.test.sunet.se:6379',
'redis3.drive.test.sunet.se:6379',
'redis1.drive.test.sunet.se:6380',
'redis2.drive.test.sunet.se:6380',
'redis3.drive.test.sunet.se:6380',
'redis1.drive.test.sunet.se:6381',
'redis2.drive.test.sunet.se:6381',
'redis3.drive.test.sunet.se:6381'
],
'timeout' => 1.1
],
<% elsif @environment == 'test' && ! @is_multinode -%>
'redis.cluster' => [ 'redis.cluster' => [
'failover_mode' => \RedisCluster::FAILOVER_ERROR, 'failover_mode' => \RedisCluster::FAILOVER_ERROR,
'password' => '<%= @redis_cluster_password %>', 'password' => '<%= @redis_cluster_password %>',
@ -106,7 +144,7 @@ $CONFIG = array (
], ],
'timeout' => 1.1 'timeout' => 1.1
], ],
<% elsif @environment == 'prod' && ! @is_multinode -%> <% elsif @environment == 'prod' && ! @is_multinode && @location != 'gss-prod' -%>
'redis.cluster' => [ 'redis.cluster' => [
'failover_mode' => \RedisCluster::FAILOVER_ERROR, 'failover_mode' => \RedisCluster::FAILOVER_ERROR,
'password' => '<%= @redis_cluster_password %>', 'password' => '<%= @redis_cluster_password %>',
@ -135,9 +173,6 @@ $CONFIG = array (
'secret' => '<%= @secret %>', 'secret' => '<%= @secret %>',
'skeletondirectory' => '<%= @skeletondirectory %>', 'skeletondirectory' => '<%= @skeletondirectory %>',
'templatedirectory' => '', 'templatedirectory' => '',
<% if @environment == 'test' -%>
'trashbin_retention_obligation' => 'auto, 30',
<% end -%>
'trusted_domains' => 'trusted_domains' =>
array ( array (
<%- index = 0 -%> <%- index = 0 -%>
@ -160,27 +195,20 @@ $CONFIG = array (
array ( array (
0 => 'admin', 0 => 'admin',
1 => 'forcemfa', 1 => 'forcemfa',
<%- if @twofactor_enforced_groups -%> <%- index = 2 -%>
<%- index = 2 -%> <%- @twofactor_enforced_groups.each do |item| -%>
<%- @twofactor_enforced_groups.each do |item| -%>
<%= index %> => '<%= item %>', <%= index %> => '<%= item %>',
<%- index += 1 -%> <%- index += 1 -%>
<%- end -%>
<%- end -%> <%- end -%>
), ),
'twofactor_enforced_excluded_groups' => 'twofactor_enforced_excluded_groups' =>
array ( array (
<%- if @twofactor_enforced_excluded_groups -%> <%- index = 0 -%>
<%- index = 0 -%> <%- @twofactor_enforced_excluded_groups.each do |item| -%>
<%- @twofactor_enforced_excluded_groups.each do |item| -%>
<%= index %> => '<%= item %>', <%= index %> => '<%= item %>',
<%- index += 1 -%> <%- index += 1 -%>
<%- end -%>
<%- end -%> <%- end -%>
), ),
'updatechecker' => false, 'updatechecker' => false,
'version' => '<%= @nextcloud_version_string %>', 'version' => '<%= @nextcloud_version_string %>',
<% if @environment == 'test' -%>
'versions_retention_obligation' => '<%= @expiration_days_min %>, <%= @expiration_days_max %>',
<% end -%>
); );

View file

@ -9,6 +9,8 @@ services:
<%- if @hostnet -%> <%- if @hostnet -%>
network_mode: host network_mode: host
<%- end -%> <%- end -%>
environment:
- NC_PASS=<%= @admin_password%>
volumes: volumes:
- /opt/nextcloud/000-default.conf:/etc/apache2/sites-enabled/000-default.conf - /opt/nextcloud/000-default.conf:/etc/apache2/sites-enabled/000-default.conf
- /opt/nextcloud/mpm_prefork.conf:/etc/apache2/mods-available/mpm_prefork.conf - /opt/nextcloud/mpm_prefork.conf:/etc/apache2/mods-available/mpm_prefork.conf
@ -19,11 +21,13 @@ services:
- /opt/nextcloud/complete_reinstall.sh:/complete_reinstall.sh - /opt/nextcloud/complete_reinstall.sh:/complete_reinstall.sh
- /opt/nextcloud/config.php:/var/www/html/config/config.php - /opt/nextcloud/config.php:/var/www/html/config/config.php
- /opt/nextcloud/nextcloud.log:/var/www/html/data/nextcloud.log - /opt/nextcloud/nextcloud.log:/var/www/html/data/nextcloud.log
- /opt/nextcloud/audit.log:/var/www/html/data/audit.log
- /opt/nextcloud/rclone.conf:/rclone.conf - /opt/nextcloud/rclone.conf:/rclone.conf
<%- if @skeletondirectory -%> <%- if @skeletondirectory -%>
- /opt/nextcloud/skeleton:<%= @skeletondirectory %> - /opt/nextcloud/skeleton:<%= @skeletondirectory %>
<%- end -%> <%- end -%>
<%- if @location =~ /^gss/ -%>
- /opt/nextcloud/mappingfile.json:/var/www/html/mappingfile.json
<%- end -%>
<% if @location =~ /^kau/ -%> <% if @location =~ /^kau/ -%>
- /mnt:/opt/tmp/ - /mnt:/opt/tmp/
<%- end -%> <%- end -%>
@ -33,12 +37,14 @@ services:
- proxysql_proxysql - proxysql_proxysql
<%- end -%> <%- end -%>
dns: dns:
- 89.46.20.75
- 89.46.21.29
- 89.32.32.32 - 89.32.32.32
<%- if !@hostnet -%> <%- if !@hostnet -%>
ports: ports:
- 443:443 - 443:443
<%- end -%> <%- end -%>
command: sh -c 'tail -F /var/www/html/data/nextcloud.log /var/www/html/data/audit.log| tee -a /proc/1/fd/2 & apachectl -D FOREGROUND' command: sh -c 'tail -f /var/www/html/data/nextcloud.log | tee -a /proc/1/fd/2 & apachectl -D FOREGROUND'
tty: true tty: true
<%- if !@hostnet -%> <%- if !@hostnet -%>

View file

@ -1,21 +0,0 @@
#!/bin/bash
# Run the Nextcloud "occ" console inside an app container.
#
# Usage: occ [container] <occ arguments...>
#   If the first argument starts with "nextcloud" it is taken as the
#   container name; otherwise the default nextcloud_app_1 is used.
# All OC_* environment variables are forwarded into the container.
if [[ "${1}" =~ ^nextcloud ]]; then
  container="${1}"
  shift
else
  container="nextcloud_app_1"
fi
# Build the -e arguments as an array so OC_* values containing spaces (or
# glob characters) survive intact; the old string-concatenation approach
# word-split each value.
my_vars=()
while IFS= read -r row; do
  my_vars+=("-e" "${row}")
done < <(env | grep '^OC_')
# exec so the caller sees docker's (and thereby occ's) real exit status;
# the previous unconditional "exit 0" hid every failure from scriptherder
# and other callers.
exec docker exec -ti "${my_vars[@]}" -u www-data "${container}" php --define apc.enable_cli=1 /var/www/html/occ "$@"

View file

@ -1,79 +0,0 @@
#!/bin/bash
# Remount a Nextcloud user bucket as a project bucket.
#
# Reads the S3 settings of an existing files_external mount (the user's
# personal bucket), re-imports it as a project mount named
# "<user>-user-bucket" applicable to the admin group, and finally deletes
# the original mount.
#
# Usage: ${0} <mountid> <user> [<container>]
#   mountid   - numeric files_external mount id to convert
#   user      - user name; only used to derive the new mount point name
#   container - Nextcloud container name, defaults to nextcloud_app_1
mountid="${1}"
user="${2}"
container="${3}"
if [[ -z ${mountid} ]] || [[ -z ${user} ]]; then
  echo "We need a valid mount id and user to proceed"
  echo "Usage: ${0} <mountid> <user> [<container>]"
  exit
fi
if [[ -z ${container} ]]; then
  container="nextcloud_app_1"
fi
# occ wrapper plus container name; relies on word splitting of ${occ} to
# pass the container as a separate argument to /usr/local/bin/occ.
occ="/usr/local/bin/occ ${container}"
# get_config <key>: print one files_external config value of ${mountid},
# stripped of newlines/tabs/CRs so it can be spliced into the JSON below.
function get_config {
  ${occ} files_external:config ${mountid} ${1} | tr -d '\n\t\r'
}
echo "Gathering information, hang tight."
echo -n "."
bucket="$(get_config bucket)"
echo -n "."
hostname="$(get_config hostname)"
echo -n "."
key="$(get_config key)"
echo -n "."
region="$(get_config region)"
echo -n "."
secret="$(get_config secret)"
jsonfile="/tmp/${user}-user-bucket.json"
# Mount point name: the user id with '@' and '.' replaced by '-'.
mount_point="${user/@/-}"
mount_point="${mount_point/./-}-user-bucket"
echo "This will remount the user bucket with mountid ${mountid} for ${user} as project bucket with mountpoint ${mount_point}."
read -r -p "Press enter to continue"
# Build the files_external:import payload. The single-quoted segments keep
# the backslash sequences (\/ and \\OCA\\...) literal; the variables are
# spliced in between them unquoted — do not reformat this block.
echo '
[
    {
        "mount_point": "\/'${mount_point}'",
        "storage": "\\OCA\\Files_External\\Lib\\Storage\\AmazonS3",
        "authentication_type": "amazons3::accesskey",
        "configuration": {
            "bucket": "'${bucket}'",
            "hostname": "'${hostname}'",
            "key": "'${key}'",
            "legacy_auth": false,
            "port": "443",
            "region": "'${region}'",
            "secret": "'${secret}'",
            "storageClass": "",
            "useMultipartCopy": false,
            "use_path_style": true,
            "use_ssl": true
        },
        "options": {
            "encrypt": true,
            "previews": true,
            "enable_sharing": true,
            "filesystem_check_changes": 0,
            "encoding_compatibility": false,
            "readonly": false
        },
        "applicable_users": [
        ],
        "applicable_groups": ["admin"]
    }
]
' > "${jsonfile}"
# Copy the JSON into the container, import it, then remove both copies.
docker cp ${jsonfile} ${container}:/${jsonfile}
${occ} files_external:import /${jsonfile}
docker exec ${container} rm /${jsonfile}
rm ${jsonfile}
# Finally drop the original user-bucket mount.
${occ} files_external:delete ${mountid}

View file

@ -1,34 +0,0 @@
#!/bin/bash
# Scan every files_external mount in every Nextcloud container on this host
# and report the mounts whose "files_external:scan" printed an error line.
# Exits 1 if any mount reported errors, 0 otherwise.
# Depends on the helpers get_containers and nocc being available in PATH.
error_ids=""
# Only run if this is the only instance of this script running
# note: since this script forks to run pgrep, we need -eq 2 here
# shellcheck disable=SC2126
if [[ $(pgrep -a -f "${0}" | grep -v scriptherder | wc -l) -eq 2 ]]; then
  # Stagger start-up: sleep a deterministic number of minutes (0-127)
  # derived from the last MAC-address byte, so the delay is always the same
  # on a given host but differs between hosts.
  sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk '{print $1}') / 2))m
  errors=''
  for container in $(get_containers); do
    error_ids="${error_ids} ${container}: "
    for id in $(nocc "${container}" files_external:list --all --output json | jq '.[].mount_id'); do
      # grep both prints the matching error lines and, via its exit status,
      # tells us whether this mount produced any. Testing the pipeline
      # directly replaces the fragile "$?" check it used before (SC2181),
      # and the redundant trailing "| jq ." stage above has been dropped.
      if nocc "${container}" files_external:scan "${id}" | grep Error; then
        errors="${errors} ${id}"
        error_ids="${error_ids} ${id}"
      fi
    done
  done
else
  echo "Another instance of this script is already running, exiting"
  pgrep -a -f "${0}" | grep -v scriptherder
  exit 0
fi
if [[ -n "${errors}" ]]; then
  echo "Errors found in the following mounts: ${error_ids}"
  exit 1
fi
echo "No errors found"
exit 0

View file

@ -0,0 +1,16 @@
<?php
// Configuration template (ERB) for the Nextcloud lookup server.
// Puppet fills in the database host, the "lookup" DB user's password and
// the JWT key shared with the Global Site Selector (same hiera value as
// gss.jwt.key in the node config).
$CONFIG = [
    'DB' => [
        'host' => "<%= @dbhost %>",
        'db' => "lookup" ,
        'user' => "lookup",
        'pass' => "<%= @mysql_user_password %>",
    ],
    // Run in Global Scale mode; AUTH_KEY authenticates requests from nodes.
    'GLOBAL_SCALE' => true,
    'AUTH_KEY' => "<%= @gss_jwt_key %>",
];

View file

@ -0,0 +1,24 @@
# docker-compose unit (ERB template) for the Nextcloud lookup server.
version: '3.2'
services:
  app:
    # Lookup-server image; the tag is supplied by Puppet/Hiera.
    image: docker.sunet.se/drive/nextcloud-lookup:<%= @lookup_version %>
    restart: always
    volumes:
      # Rendered PHP configuration mounted over the image default.
      - /opt/lookup/config.php:/var/www/html/config/config.php
    networks:
      - default
      - proxysql_proxysql   # shared network so the app can reach proxysql
    dns:
      # Explicit resolvers for the container.
      - 89.46.20.75
      - 89.46.21.29
      - 89.32.32.32
    ports:
      - 443:443
    command: apachectl -D FOREGROUND
    tty: true
networks:
  proxysql_proxysql:
    # Created by the proxysql compose project, not by this one.
    external: true
View file

@ -0,0 +1,62 @@
-- Bootstrap schema for the Nextcloud lookup-server database.
-- ERB template: <%= @mysql_user_password %> is filled in by Puppet.
SET SQL_MODE = "NO_AUTO_VALUE_ON_ZERO";
SET time_zone = "+00:00";
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
/*!40101 SET NAMES utf8mb4 */;

-- Database plus the application user the lookup-server connects as.
CREATE DATABASE IF NOT EXISTS `lookup` DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
USE `lookup`;
CREATE USER 'lookup'@'%' IDENTIFIED BY '<%= @mysql_user_password %>';
GRANT ALL PRIVILEGES ON lookup.* TO 'lookup'@'%' IDENTIFIED BY '<%= @mysql_user_password %>';

-- Outstanding e-mail validation tokens, keyed to a row in `store`.
DROP TABLE IF EXISTS `emailValidation`;
CREATE TABLE IF NOT EXISTS `emailValidation` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`storeId` int(11) NOT NULL,
`token` varchar(16) COLLATE utf8mb4_unicode_ci NOT NULL,
PRIMARY KEY (`id`),
KEY `token` (`token`),
KEY `storeId` (`storeId`)
) ENGINE=InnoDB AUTO_INCREMENT=16 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;

-- Key/value properties published per user; `valid` marks verified entries.
-- Key prefixes (191) keep the indexes within the utf8mb4 index-length limit.
DROP TABLE IF EXISTS `store`;
CREATE TABLE IF NOT EXISTS `store` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`userId` int(11) NOT NULL,
`k` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
`v` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
`valid` tinyint(1) NOT NULL DEFAULT '0',
PRIMARY KEY (`id`),
KEY `key` (`k`(191)),
KEY `value` (`v`(191)),
KEY `userId` (`userId`)
) ENGINE=InnoDB AUTO_INCREMENT=51 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;

-- Known federated users; `timestamp` auto-updates on every row change.
DROP TABLE IF EXISTS `users`;
CREATE TABLE IF NOT EXISTS `users` (
`id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT,
`federationId` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
`timestamp` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (`id`),
KEY `federationId` (`federationId`(191))
) ENGINE=InnoDB AUTO_INCREMENT=15 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;

-- Pending property verifications; `tries` counts verification attempts.
DROP TABLE IF EXISTS `toVerify`;
CREATE TABLE IF NOT EXISTS `toVerify` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`userId` int(11) NOT NULL,
`storeId` int(11) NOT NULL,
`property` varchar(512) COLLATE utf8mb4_unicode_ci NOT NULL,
`location` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
`tries` int(11) NOT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=16 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;

View file

@ -1 +0,0 @@
<%= @custdata.to_json %>

View file

@ -21,6 +21,6 @@ services:
- MYSQL_ROOT_PASSWORD=<%= @mysql_root_password %> - MYSQL_ROOT_PASSWORD=<%= @mysql_root_password %>
- BOOTSTRAP=<%= @bootstrap %> - BOOTSTRAP=<%= @bootstrap %>
- FORCE_BOOTSTRAP=0 - FORCE_BOOTSTRAP=0
command: "--wsrep_cluster_address=gcomm://<%= @db_ip.join(',') %>" command: "--wsrep_cluster_address=gcomm://<%= @db_ip[0] %>,<%= @db_ip[1] %>,<%= @db_ip[2] %>"
tty: true tty: true

View file

@ -1,32 +0,0 @@
#!/bin/bash
# Generated by Puppet from an ERB template. For every customer that has
# billing departments configured for this environment, dump the users of
# each billing-department domain to a JSON file and upload it with rclone
# to the statistics area; finally upload the customer metadata file itself.
<% basedir="statistics:drive-server-coms" -%>
<% cupath="/opt/mariadb/statistics/users/" -%>
<% custdata="/opt/mariadb/statistics/custdata.json" -%>
# Set to 1 on any validation or upload failure; used as the exit status.
status=0
<% @custdata.each do |cust,data| -%>
#Customer <%= cust %> has no billing departments.
<% if defined?(data[@environment]["billdomains"]) && data[@environment]["billdomains"] -%>
mkdir -p /opt/mariadb/statistics/users/<%= cust %>
chmod '0700' /opt/mariadb/statistics/users/<%= cust %>
<% data[@environment]["billdomains"].each do |dom| -%>
# Dump the users for this department domain, then upload only if the
# output parses as valid JSON.
/root/tasks/listusersbydep.sh <%= cust %> <%= dom %> > /opt/mariadb/statistics/users/<%= cust %>/users-<%= dom.gsub(/[.]/, '-') %>.json
if jq . <%= cupath + cust %>/users-<%= dom.gsub(/[.]/, '-') %>.json &>/dev/null
then
timeout 30s rclone copy -c --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies <%= cupath + cust %>/users-<%= dom.gsub(/[.]/, '-') %>.json <%= basedir%>/<%= cust %>-<%= @environment%>/
[[ $? -eq 0 ]] || { status=1 ; echo "Error: Upload of user data failed." ; }
else
echo "Error in json data"
status=1
fi
<% end -%>
<% end -%>
<% end -%>
# Upload the customer metadata file itself, if present on this host.
if [[ -f <%= custdata %> ]]
then
timeout 30s rclone copy -c --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies <%= custdata %> <%= basedir%>/
fi
exit ${status}

View file

@ -1,24 +0,0 @@
#!/bin/bash
# List the users of one customer that belong to a given department e-mail
# domain, printed as a pretty-printed JSON object of uid -> displayname.
# The query runs inside the mariadb_db_1 container and unions matches from
# oc_global_scale_users and oc_user_saml_users, joined on oc_accounts.uid
# and filtered on the e-mail address in the oc_accounts.data JSON column.
function usage () {
printf "Usage:\t%s <customer> <department email domain>\n" "${0##*/}"
printf "Example:\t%s uu int.uu.se\n" "${0##*/}"
}
# Both arguments are required and restricted to [a-zA-Z0-9.] (max 200 chars),
# since they are interpolated into the SQL statement below.
[[ -z "${1}" || -z "${2}" || ! "${1}" =~ ^[a-zA-Z0-9.]{1,200}$ || ! "${2}" =~ ^[a-zA-Z0-9.]{1,200}$ ]] && usage && exit 1
depdom="${2}"
customer="${1}"
# NOTE: the $'...' fragments below concatenate into ONE quoted SQL statement;
# the final sed turns literal "\n" sequences in the JSON_PRETTY output into
# real newlines. Do not insert anything between the continuation lines.
docker exec mariadb_db_1 /bin/bash -c 'mysql -p${MYSQL_ROOT_PASSWORD} -NB -e '\
$'\'select JSON_PRETTY(JSON_OBJECTAGG(uid,displayname)) from ('\
$' select nextcloud_'"${customer}"$'.oc_accounts.uid,nextcloud_'"${customer}"$'.oc_global_scale_users.displayname'\
$' from nextcloud_'"${customer}"$'.oc_accounts,nextcloud_'"${customer}"$'.oc_global_scale_users'\
$' where nextcloud_'"${customer}"$'.oc_accounts.uid = nextcloud_'"${customer}"$'.oc_global_scale_users.uid'\
$' AND JSON_EXTRACT(nextcloud_'"${customer}"$'.oc_accounts.data, "$.email.value") like "%@'"${depdom}"$'\\\""'\
$' UNION'\
$' select nextcloud_'"${customer}"$'.oc_accounts.uid,nextcloud_'"${customer}"$'.oc_user_saml_users.displayname'\
$' from nextcloud_'"${customer}"$'.oc_accounts,nextcloud_'"${customer}"$'.oc_user_saml_users'\
$' where nextcloud_'"${customer}"$'.oc_accounts.uid = nextcloud_'"${customer}"$'.oc_user_saml_users.uid'\
$' AND JSON_EXTRACT(nextcloud_'"${customer}"$'.oc_accounts.data, "$.email.value") like "%@'"${depdom}"$'\\\"") as users\'' \
| sed 's/\\n/\n/g'

View file

@ -1,4 +0,0 @@
#!/bin/bash
# Convenience wrapper: run mysql as root inside the mariadb_db_1 container,
# passing any extra arguments straight through to the mysql client.
# The root password is extracted from the first services.db.environment
# entry of the compose file (a MYSQL_ROOT_PASSWORD=<value> line).
root_password="$(yq -r '.services.db.environment[0]' /opt/mariadb/docker-compose.yml | awk -F '=' '{print $2}')"
docker exec -ti mariadb_db_1 mysql -u root -p"${root_password}" "${@}"

View file

@ -1,6 +1,6 @@
#!/bin/bash #!/bin/bash
result="$(docker exec -u root mariadb_backup_mariadb_backup_1 mysql -p<%= @mysql_root_password %> -BN -e 'show status like "slave_running"')" result="$(docker exec mariadb_backup_mariadb_backup_1 mysql -p<%= @mysql_root_password %> -BN -e 'show status like "slave_running"')"
if [[ "${result}" == "Slave_running ON" ]]; then if [[ "${result}" == "Slave_running ON" ]]; then
echo "OK: Replica running" echo "OK: Replica running"
exit 0 exit 0

View file

@ -8,12 +8,10 @@ mkdir -p "${backup_dir}"
if [[ -z ${customer} ]]; then if [[ -z ${customer} ]]; then
buopts="--slave-info --safe-slave-backup" buopts="--slave-info --safe-slave-backup"
dumpopts="--dump-slave" dumpopts="--dump-slave"
mysql -p"${MYSQL_ROOT_PASSWORD}" -u root -e "stop slave" mysql -p${MYSQL_ROOT_PASSWORD} -e "stop slave"
fi fi
# shellcheck disable=SC2086 mariadb-backup --backup ${buopts} -u root -p${MYSQL_ROOT_PASSWORD} --stream=xbstream | gzip >"${backup_dir}/${stream_name}"
mariadb-backup --backup ${buopts} -u root -p"${MYSQL_ROOT_PASSWORD}" --stream=xbstream | gzip >"${backup_dir}/${stream_name}" mysqldump --all-databases --single-transaction ${dumpopts} -u root -p${MYSQL_ROOT_PASSWORD} | gzip >"${backup_dir}/${dump_name}"
# shellcheck disable=SC2086
mysqldump --all-databases --single-transaction ${dumpopts} -u root -p"${MYSQL_ROOT_PASSWORD}" | gzip >"${backup_dir}/${dump_name}"
if [[ -z ${customer} ]]; then if [[ -z ${customer} ]]; then
mysql -p"${MYSQL_ROOT_PASSWORD}" -u root -e "start slave" mysql -p${MYSQL_ROOT_PASSWORD} -e "start slave"
fi fi

View file

@ -3,7 +3,7 @@ version: '3.2'
services: services:
mariadb_backup: mariadb_backup:
image: docker.sunet.se/drive/mariadb:<%= @mariadb_version %> image: docker.sunet.se/drive/mariadb
container_name: mariadb_backup_mariadb_backup_1 container_name: mariadb_backup_mariadb_backup_1
dns: dns:
- 89.46.20.75 - 89.46.20.75

View file

@ -13,18 +13,11 @@ dexec="docker exec ${container}"
password=$(${dexec} env | grep MYSQL_ROOT_PASSWORD | awk -F '=' '{print $2}') password=$(${dexec} env | grep MYSQL_ROOT_PASSWORD | awk -F '=' '{print $2}')
mysql="${dexec} mysql -p${password} -u root" mysql="${dexec} mysql -p${password}"
if [[ "$(${mysql} -NB -e 'select exists(select * from information_schema.TABLES where TABLE_SCHEMA = "nextcloud" and TABLE_NAME = "oc_global_scale_users")')" == "1" ]] users="$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_global_scale_users')"
then users="${users}
users="$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_global_scale_users')" $(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_users')"
fi
users="${users}"$'\n'"$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_users')"
if [[ "$(${mysql} -NB -e 'select exists(select * from information_schema.TABLES where TABLE_SCHEMA = "nextcloud" and TABLE_NAME = "oc_user_saml_users")')" == "1" ]]
then
users="${users}"$'\n'"$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_user_saml_users')"
fi
users="$(echo "${users}" | sort | uniq)"
project="statistics" project="statistics"
bucket="drive-server-coms" bucket="drive-server-coms"

View file

@ -1,6 +1,6 @@
[statistics] [statistics]
type = webdav type = webdav
url = https://89.45.236.246/remote.php/dav/files/_script/ url = https://89.46.20.42/remote.php/dav/files/_script/
vendor = nextcloud vendor = nextcloud
user = _script user = _script
pass = <%= @statistics_secret %> pass = <%= @statistics_secret %>

View file

@ -131,6 +131,32 @@ define host {
<% end -%> <% end -%>
use monitor-site use monitor-site
} }
<% if site.match('lookup') %>
define service {
notes_url https://<%= site %>
action_url /grafana/dashboard/script/histou.js?host=$HOSTNAME$&service=$SERVICEDISPLAYNAME$&theme=light&annotations=true
check_command check_https
check_interval 5
check_period 24x7
<% if @environment == 'prod' %>
contacts slack
<% else -%>
contact_groups naemon-admins
<% end -%>
host_name <%= site %>
max_check_attempts 3
notification_interval 60
notification_period 24x7
retry_interval 1
service_description HTTPS
<% if site.match('test') -%>
servicegroups test-sites
<% else -%>
servicegroups prod-sites
<% end -%>
}
<% end -%>
<% unless site.match('lookup') %>
define service { define service {
notes_url https://<%= site %>/status.php notes_url https://<%= site %>/status.php
action_url /grafana/dashboard/script/histou.js?host=$HOSTNAME$&service=$SERVICEDISPLAYNAME$&theme=light&annotations=true action_url /grafana/dashboard/script/histou.js?host=$HOSTNAME$&service=$SERVICEDISPLAYNAME$&theme=light&annotations=true
@ -138,6 +164,7 @@ define service {
check_interval 5 check_interval 5
check_period 24x7 check_period 24x7
<% cur_cust = site.gsub(/\.drive.*/,'') %> <% cur_cust = site.gsub(/\.drive.*/,'') %>
<% cur_cust = cur_cust.gsub(/drive.*/,'gss') %>
# 'check_nextcloud' command definition # 'check_nextcloud' command definition
<% if @environment == 'prod' and not site.match('test') and @fullnodes.include?(cur_cust) %> <% if @environment == 'prod' and not site.match('test') and @fullnodes.include?(cur_cust) %>
contacts slack contacts slack
@ -182,3 +209,4 @@ define service {
<% end -%> <% end -%>
} }
<% end -%> <% end -%>
<% end -%>

View file

@ -3,12 +3,10 @@
config_php='/var/www/html/config/config.php' config_php='/var/www/html/config/config.php'
dbhost="<%= @dbhost %>" dbhost="<%= @dbhost %>"
mysql_user_password="<%= @mysql_user_password %>" mysql_user_password="<%= @mysql_user_password %>"
admin_password="<%= @admin_password %>"
location="<%= @location %>" location="<%= @location %>"
bucket="<%= @s3_bucket %>" bucket="<%= @s3_bucket %>"
customer="<%= @customer %>" customer="<%= @customer %>"
echo "Setting temp admin password"
apt update && apt install -y apg
admin_password="$(apg -m 40 | head -1)"
/usr/bin/mysql -e "drop database nextcloud" -u nextcloud -p"${mysql_user_password}" -h "${dbhost}" >/dev/null 2>&1 /usr/bin/mysql -e "drop database nextcloud" -u nextcloud -p"${mysql_user_password}" -h "${dbhost}" >/dev/null 2>&1
/usr/bin/mysql -e "create database nextcloud" -u nextcloud -p"${mysql_user_password}" -h "${dbhost}" >/dev/null 2>&1 /usr/bin/mysql -e "create database nextcloud" -u nextcloud -p"${mysql_user_password}" -h "${dbhost}" >/dev/null 2>&1
@ -37,11 +35,6 @@ EOF
instanceid=$(grep -E "^ 'instanceid'" ${config_php} | awk -F "'" '{print $4}') instanceid=$(grep -E "^ 'instanceid'" ${config_php} | awk -F "'" '{print $4}')
secret=$(grep -E "^ 'secret'" ${config_php} | awk -F "'" '{print $4}') secret=$(grep -E "^ 'secret'" ${config_php} | awk -F "'" '{print $4}')
passwordsalt=$(grep -E "^ 'passwordsalt'" ${config_php} | awk -F "'" '{print $4}') passwordsalt=$(grep -E "^ 'passwordsalt'" ${config_php} | awk -F "'" '{print $4}')
echo "Now delete the admin user:"
echo " occ <container> user:delete admin"
echo "and then create a new admin user:"
echo " /usr/local/bin/add_admin_user <username> <email address> <container>"
echo ""
echo "${customer}_instanceid: DEC::PKCS7[${instanceid}]!" echo "${customer}_instanceid: DEC::PKCS7[${instanceid}]!"
echo "${customer}_secret: DEC::PKCS7[${secret}]!" echo "${customer}_secret: DEC::PKCS7[${secret}]!"
echo "${customer}_passwordsalt: DEC::PKCS7[${passwordsalt}]!" echo "${customer}_passwordsalt: DEC::PKCS7[${passwordsalt}]!"

View file

@ -5,7 +5,7 @@ no_files=30 # Keep this many files as an archive, script is run once a week
# a specific host, but will differ between hosts # a specific host, but will differ between hosts
sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk '{print $1}') / 2))m sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk '{print $1}') / 2))m
for logfile in $(ls /opt/multinode/*/{nextcloud.log,audit.log,server/server.log}); do for logfile in $(ls /opt/multinode/*/{nextcloud.log,server/server.log}); do
if [[ -f ${logfile}.gz.${no_files} ]]; then if [[ -f ${logfile}.gz.${no_files} ]]; then
rm ${logfile}.gz.${no_files} rm ${logfile}.gz.${no_files}
fi fi

View file

@ -11,10 +11,11 @@ services:
volumes: volumes:
- <%= @redis_conf_dir %>:/data - <%= @redis_conf_dir %>:/data
command: redis-server /data/redis.conf --loglevel verbose command: redis-server /data/redis.conf --loglevel verbose
restart: always
networks: networks:
- proxysql_proxysql - proxysql_proxysql
restart: always
networks: networks:
proxysql_proxysql: proxysql_proxysql:
external: true external: true

View file

@ -13,9 +13,9 @@ services:
- /opt/nextcloud/cli.php.ini:/etc/php/8.0/cli/php.ini - /opt/nextcloud/cli.php.ini:/etc/php/8.0/cli/php.ini
- <%= @config_php_path %>:/var/www/html/config/config.php - <%= @config_php_path %>:/var/www/html/config/config.php
- <%= @nextcloud_log_path %>:/var/www/html/data/nextcloud.log - <%= @nextcloud_log_path %>:/var/www/html/data/nextcloud.log
- <%= @audit_log_path %>:/var/www/html/data/audit.log
- <%= @rclone_conf_path %>:/rclone.conf - <%= @rclone_conf_path %>:/rclone.conf
environment:
- NC_PASS=<%= @admin_password%>
networks: networks:
- default - default
- proxysql_proxysql - proxysql_proxysql
@ -25,7 +25,7 @@ services:
- 89.32.32.32 - 89.32.32.32
ports: ports:
- <%= @https_port %>:443 - <%= @https_port %>:443
command: sh -c 'tail -F /var/www/html/data/nextcloud.log /var/www/html/data/audit.log| tee -a /proc/1/fd/2 & apachectl -D FOREGROUND' command: apachectl -D FOREGROUND
tty: true tty: true
networks: networks:

View file

@ -1,6 +1,6 @@
[statistics] [statistics]
type = webdav type = webdav
url = https://89.45.236.246/remote.php/dav/files/_script/ url = https://89.46.20.42/remote.php/dav/files/_script/
vendor = nextcloud vendor = nextcloud
user = _script user = _script
pass = <%= @statistics_secret %> pass = <%= @statistics_secret %>

View file

@ -1,23 +0,0 @@
# docker-compose definition for the SATOSA SAML proxy host (ERB template;
# <%= ... %> placeholders are filled in by Puppet).
services:
  satosa:
    environment:
      - "METADATA_DIR=/etc/satosa/metadata"
      - "WORKER_TIMEOUT=120"
    dns:
      - "89.32.32.32"
    # Optional :tag suffix is only emitted when the tag variable is set.
    image: "<%= @image %><% if @tag %>:<%= @tag %><% end %>"
    pull_policy: "always"
    ports:
      - "443:8000"
    volumes:
      - "/etc/satosa:/etc/satosa"
      # dehydrated-managed TLS certificates.
      - "/etc/dehydrated:/etc/dehydrated"
  # Companion container that redirects plain HTTP to HTTPS, except for
  # ACME challenges which are forwarded to the configured ACME endpoint.
  alwayshttps:
    environment:
      - "ACME_URL=http://acme-c.sunet.se"
    dns:
      - "89.32.32.32"
    image: "docker.sunet.se/always-https"
    pull_policy: "always"
    ports:
      - "80:80"

View file

@ -6,78 +6,65 @@ sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk
number_of_full_to_keep='<%= @full_backup_retention %>' number_of_full_to_keep='<%= @full_backup_retention %>'
fork_limit=30 #in GB, if bigger than this number, we fork the backup to it's own process fork_limit=30 #in GB, if bigger than this number, we fork the backup to it's own process
customer="<%= @customer %>" customer="<%= @customer %>"
environment="<%= @environment %>"
declare -A extra_backup_jobs
#<% if @extra_backup_jobs.any? %>
#<% @extra_backup_jobs.each do |client, job| %>
extra_backup_jobs["<%= client %>"]="<%= job %>"
#<% end %>
#<% end %>
#<% if @location.start_with?('common') %> #<% if @location.start_with?('common') %>
declare -a sixmonths=('mau') declare -a sixmonths=('mau')
if [[ " ${sixmonths[*]} " =~ " ${customer} " ]]; then if [[ " ${sixmonths[*]} " =~ " ${customer} " ]]; then
number_of_full_to_keep=6 number_of_full_to_keep=6
fi fi
declare -a projects declare -a projects
#<% @singlenodes.each do |singlenode| %> #<% @singlenodes.each do |singlenode| %>
projects+=("<%= @full_project_mapping[singlenode][@environment]['primary_project'] %> <%= @full_project_mapping[singlenode][@environment]['mirror_project'] %> <%= singlenode %>") projects+=("<%= @full_project_mapping[singlenode][@environment]['primary_project'] %> <%= @full_project_mapping[singlenode][@environment]['mirror_project'] %>")
#<% @full_project_mapping[singlenode][@environment]['assigned'].each do |project| %> #<% @full_project_mapping[singlenode][@environment]['assigned'].each do |project| %>
projects+=("<%= project['project'] %> <%= project['mirror_project'] %> <%= singlenode %>") projects+=("<%= project['project'] %> <%= project['mirror_project'] %>")
#<% end %> #<% end %>
#<% end %> #<% end %>
#<% else %> #<% else %>
declare -a projects=("<%= @primary_project %> <%= @mirror_project %> ${customer}") declare -a projects=("<%= @primary_project %> <%= @mirror_project %>")
#<% @assigned_projects.each do |project| %> #<% @assigned_projects.each do |project| %>
projects+=("<%= project['project'] %> <%= project['mirror_project'] %> ${customer}") projects+=("<%= project['project'] %> <%= project['mirror_project'] %>")
#<% end %> #<% end %>
#<% end %> #<% end %>
if [[ ${customer} == 'common' ]]; then if [[ ${customer} == 'common' ]]; then
projects+=("<%= @location %> <%= @location %>-mirror ${customer}") projects+=("<%= @location %> <%= @location %>-mirror")
fi fi
function do_backup { function do_backup {
local project="${1}" local project="${1}"
local mirror="${2}" local mirror="${2}"
local bucket="${3}" local bucket="${3}"
local customer="${4}" local mirrorbucket="${bucket}-mirror"
local mirrorbucket="${bucket}-mirror" local mountpoint="/opt/backupmounts/${bucket}"
local mountpoint="/opt/backupmounts/${bucket}" ps aux | grep duplicity | grep "[^a-zA-Z]${bucket}" > /dev/null
ps aux | grep duplicity | grep "[^a-zA-Z]${bucket}" > /dev/null local oktorun=$? # 1 == this bucket has no other bakup process in progress
local oktorun=$? # 1 == this bucket has no other bakup process in progress mkdir -p ${mountpoint}
if [[ ${oktorun} -ne 0 ]]; then [ ${oktorun} -ne 0 ] && rclone mount ${project}:${bucket} ${mountpoint}/ --daemon --allow-other --dir-cache-time 24h
mkdir -p ${mountpoint} rclone mkdir ${mirror}:${mirrorbucket}
rclone mount ${project}:${bucket} ${mountpoint}/ --daemon --allow-other --dir-cache-time 24h [ ${oktorun} -ne 0 ] && duplicity --full-if-older-than 1M --asynchronous-upload --tempdir /mnt --archive-dir /mnt \
rclone mkdir ${mirror}:${mirrorbucket} --no-encryption ${mountpoint} rclone://${mirror}:/${mirrorbucket}
duplicity --full-if-older-than 1M --asynchronous-upload --tempdir /mnt --archive-dir /mnt --no-encryption ${mountpoint} rclone://${mirror}:/${mirrorbucket} umount ${mountpoint}
if [[ "${extra_backup_jobs[${customer}]:+found}" == "found" ]] && [[ -f "${extra_backup_jobs[${customer}]}" ]]; then rmdir ${mountpoint}
${extra_backup_jobs[${customer}]} ${project} ${mirror} ${bucket} ${customer} ${environment} # Clean up
fi [ ${oktorun} -ne 0 ] && duplicity remove-all-but-n-full ${number_of_full_to_keep} --tempdir /mnt --archive-dir /mnt \
umount ${mountpoint} --force rclone://${mirror}:/${mirrorbucket}
rmdir ${mountpoint}
# Clean up
duplicity remove-all-but-n-full ${number_of_full_to_keep} --tempdir /mnt --archive-dir /mnt --force rclone://${mirror}:/${mirrorbucket}
fi
} }
for entry in "${projects[@]}"; do for entry in "${projects[@]}"; do
project=$(echo ${entry} | awk '{print $1}') project=$(echo ${entry} | awk '{print $1}')
mirror=$(echo ${entry} | awk '{print $2}') mirror=$(echo ${entry} | awk '{print $2}')
customer=$(echo ${entry} | awk '{print $3}') for bucket in $(rclone lsd ${project}:/ | awk '{print $5}'); do
for bucket in $(rclone lsd ${project}:/ | awk '{print $5}'); do maybesize=$(timeout 30s rclone size --json ${project}:${bucket})
maybesize=$(timeout 30s rclone size --json ${project}:${bucket})
if [[ ${?} -eq 124 ]]; then if [[ ${?} -eq 124 ]]; then
size=$((${fork_limit} * 1000000001)) size=$((${fork_limit} * 1000000001))
else else
size=$(echo ${maybesize} | jq -r '.bytes' ) size=$(echo ${maybesize} | jq -r '.bytes' )
fi fi
# If bucket is above 50 GB we fork # If bucket is above 50 GB we fork
if [[ ${size} -gt $((${fork_limit} * 1000000000)) ]]; then if [[ ${size} -gt $((${fork_limit} * 1000000000)) ]]; then
do_backup ${project} ${mirror} ${bucket} ${customer} & do_backup ${project} ${mirror} ${bucket} &
else else
do_backup ${project} ${mirror} ${bucket} ${customer} do_backup ${project} ${mirror} ${bucket}
fi fi
done done
done done

View file

@ -1,8 +0,0 @@
#!/bin/bash
# Extra backup job: mirror the locally mounted backup bucket to the
# off-site rsync target at hb.se over ssh.
#
# Calling convention (shared by all extra_backup_jobs):
#   <script> <project> <mirror> <bucket> <customer> <environment>
# project, mirror and customer are part of that convention but are not
# used by this particular job.
# shellcheck disable=SC2034
project="${1}"
# shellcheck disable=SC2034
mirror="${2}"
bucket="${3}"
# shellcheck disable=SC2034
customer="${4}"
environment="${5}"
# Refuse to run with an empty bucket or environment: an empty value would
# make rsync copy the wrong directory or address the wrong target host.
if [[ -z "${bucket}" || -z "${environment}" ]]; then
  echo "Usage: ${0##*/} <project> <mirror> <bucket> <customer> <environment>" >&2
  exit 1
fi
rsync -e "ssh -i ${HOME}/.ssh/id_script" -avz "/opt/backupmounts/${bucket}" "sd-${environment}@sd-${environment}-backup.hb.se:~/sd-${environment}/${bucket}"

View file

@ -20,7 +20,7 @@ for project in $(ls ${data_dir}); do
if [[ "${issixmonths}" == "true" ]]; then if [[ "${issixmonths}" == "true" ]]; then
number_of_full_to_keep=6 number_of_full_to_keep=6
fi fi
max_num_inc=$((50 * number_of_full_to_keep)) max_num_inc=$((32 * number_of_full_to_keep))
max_num_full=$((2 * number_of_full_to_keep)) max_num_full=$((2 * number_of_full_to_keep))
tabular_data=$(cat "${data_dir}/${project}/${bucket}.dat") tabular_data=$(cat "${data_dir}/${project}/${bucket}.dat")

View file

@ -7,16 +7,16 @@ include_userbuckets="<%= @include_userbuckets %>"
container="nextcloud_app_1" container="nextcloud_app_1"
yq="/usr/local/bin/yq" yq="/usr/local/bin/yq"
if ! [[ -x ${yq} ]]; then if ! [[ -x ${yq} ]]; then
pip install yq pip install yq
fi fi
declare -a directories declare -a directories
if [[ -n ${1} ]]; then if [[ -n ${1} ]]; then
directories=("${@}") directories=("${@}")
else else
directories+=("Arbetsmaterial") directories+=("Arbetsmaterial")
directories+=("Bevarande") directories+=("Bevarande")
directories+=("Gallringsbart") directories+=("Gallringsbart")
fi fi
olddir="${PWD}" olddir="${PWD}"
@ -25,31 +25,31 @@ dirty=0
primary='' primary=''
declare -a users=( 'admin' ) declare -a users=( 'admin' )
cd "${tempdir}" || echo "Could not cd to tempdir" cd "${tempdir}" || echo "Could not cd to tempdir"
declare -a projects=( $("${yq}" -r '.project_mapping.'"${customer}"'.'"${environment}"'.assigned | "\(.[].project)"' /etc/hiera/data/common.yaml) ) declare -a projects=( "${yq}" -r '.project_mapping.'"${customer}"'.'"${environment}"'.assigned | "\(.[].project)"' /etc/hiera/data/common.yaml )
if [[ "${include_userbuckets}" == "true" ]]; then if [[ "${include_userbuckets}" == "true" ]]; then
primary=$("${yq}" -r '.project_mapping.'"${customer}"'.'"${environment}"'.primary_project' /etc/hiera/data/common.yaml) primary=$("${yq}" -r '.project_mapping.'"${customer}"'.'"${environment}"'.primary_project' /etc/hiera/data/common.yaml)
projects+=( "${primary}" ) projects+=( "${primary}" )
fi fi
for project in "${projects[@]}"; do for project in "${projects[@]}"; do
for bucket in $(rclone lsd "${project}:" | awk '{print $NF}' | grep -E -v '^primary'); do for bucket in $(rclone lsd "${project}:" | awk '{print $NF}' | grep -E -v '^primary'); do
count=$(rclone size --json "${project}:${bucket}" | jq -r .count) count=$(rclone size --json "${project}:${bucket}" | jq -r .count)
if [[ ${count} -gt 0 ]]; then if [[ ${count} -gt 0 ]]; then
echo "Skipping ${project}:${bucket} because it has stuff in it already" echo "Skipping ${project}:${bucket} because it has stuff in it already"
continue continue
fi fi
for directory in "${directories[@]}"; do for directory in "${directories[@]}"; do
dirty=1 dirty=1
if [[ -n ${primary} ]] && [[ ${project} == "${primary}" ]] ; then if [[ -n ${primary} ]] && [[ ${project} == "${primary}" ]] ; then
user=$(echo "${bucket}" | awk -F '-' '{print $1}') user=$(echo "${bucket}" | awk -F '-' '{print $1}')
users+=( "${user}@${eppn_suffix}" ) users+=( "${user}@${eppn_suffix}" )
fi fi
echo "Creating ${project}:${bucket}/${directory} because it looks nice and empty" echo "Creating ${project}:${bucket}/${directory} because it looks nice and empty"
temp="README.md" temp="README.md"
echo "**${directory}**" >"${temp}" echo "**${directory}**" >"${temp}"
echo "Var god lämna kvar denna fil/Please leave this file" >>"${temp}" echo "Var god lämna kvar denna fil/Please leave this file" >>"${temp}"
rclone --no-traverse move "${temp}" "${project}:${bucket}/${directory}" rclone --no-traverse move "${temp}" "${project}:${bucket}/${directory}"
done done
done done
done done
cd "${olddir}" || echo "could not cd to home dir" cd "${olddir}" || echo "could not cd to home dir"
rmdir "${tempdir}" rmdir "${tempdir}"

View file

@ -5,7 +5,7 @@ shift
include_userbuckets="${1}" include_userbuckets="${1}"
shift shift
environment="<%= @environment %>" environment="<%= @environment %>"
container="nextcloud-${customer}-app-1" container="nextcloud-${customer}_app_1"
yq="/usr/local/bin/yq" yq="/usr/local/bin/yq"
if ! [[ -x ${yq} ]]; then if ! [[ -x ${yq} ]]; then

View file

@ -4,14 +4,14 @@ customer="${1}"
multinode="${2}" multinode="${2}"
environment="<%= @environment %>" environment="<%= @environment %>"
location="${customer}-${environment}" location="${customer}-${environment}"
userjson=$(ssh "script@${multinode}" "sudo /home/script/bin/list_users.sh nextcloud-${customer}-app-1") userjson=$(ssh "script@${multinode}" "sudo /home/script/bin/list_users.sh nextcloud-${customer}_app_1")
project="statistics" project="statistics"
bucket="drive-server-coms" bucket="drive-server-coms"
base_dir="${project}:${bucket}" base_dir="${project}:${bucket}"
stat_dir="/opt/statistics" stat_dir="/opt/statistics"
customer_dir="${stat_dir}/${location}" customer_dir="${stat_dir}/${location}"
mkdir -p "${customer_dir}" mkdir -p "${customer_dir}"
rclone mkdir --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies "${base_dir}/${location}" rclone mkdir "${base_dir}/${location}"
echo "${userjson}" | jq . >"${customer_dir}/users.json" echo "${userjson}" | jq . >"${customer_dir}/users.json"
status=${?} status=${?}
if [[ ${status} -eq 0 ]]; then if [[ ${status} -eq 0 ]]; then

View file

@ -17,7 +17,7 @@ if [[ "${ENVIRONMENT}" == "prod" ]]; then
fi fi
if [[ "${CUSTOMER}" == "common" ]]; then if [[ "${CUSTOMER}" == "common" ]]; then
customer="" customer=""
types="multinode" types="multinode gss"
fi fi
domain="${customer}drive.${env}sunet.se" domain="${customer}drive.${env}sunet.se"

View file

@ -14,7 +14,7 @@ endpoint = <%= @s3_host_mirror %>
acl = private acl = private
[statistics] [statistics]
type = webdav type = webdav
url = https://89.45.236.246/remote.php/dav/files/_script/ url = https://89.46.20.42/remote.php/dav/files/_script/
vendor = nextcloud vendor = nextcloud
user = _script user = _script
pass = <%= @statistics_secret %> pass = <%= @statistics_secret %>

View file

@ -30,10 +30,7 @@ def add_downtime(fqdn: str,
post_url = 'https://{}/thruk/r/hosts/{}/cmd/{}'.format( post_url = 'https://{}/thruk/r/hosts/{}/cmd/{}'.format(
monitor_host, fqdn, action) monitor_host, fqdn, action)
headers = {'X-Thruk-Auth-Key': apikey} headers = {'X-Thruk-Auth-Key': apikey}
try: requests.post(post_url, data=data, headers=headers)
requests.post(post_url, data=data, headers=headers)
except Exception:
print("Failed to add downtime for {}".format(fqdn))
def remove_downtime(fqdn: str, def remove_downtime(fqdn: str,
@ -45,15 +42,12 @@ def remove_downtime(fqdn: str,
get_url = 'https://{}/thruk/r/hosts?name={}&columns=services'.format( get_url = 'https://{}/thruk/r/hosts?name={}&columns=services'.format(
monitor_host, fqdn) monitor_host, fqdn)
headers = {'X-Thruk-Auth-Key': apikey} headers = {'X-Thruk-Auth-Key': apikey}
try: req = requests.get(get_url, headers=headers)
req = requests.get(get_url, headers=headers) action = 'del_active_service_downtimes'
action = 'del_active_service_downtimes' for service in req.json()[0]['services']:
for service in req.json()[0]['services']: post_url = 'https://{}/thruk/r/services/{}/{}/cmd/{}'.format(
post_url = 'https://{}/thruk/r/services/{}/{}/cmd/{}'.format( monitor_host, fqdn, urllib.parse.quote(service), action)
monitor_host, fqdn, urllib.parse.quote(service), action) requests.post(post_url, headers=headers)
requests.post(post_url, headers=headers)
except Exception:
print("Failed to remove downtime for {}".format(fqdn))
def main() -> int: def main() -> int:
@ -67,10 +61,14 @@ def main() -> int:
reboot_command = ['sudo /usr/local/bin/safer_reboot'] reboot_command = ['sudo /usr/local/bin/safer_reboot']
if customers[0] == "common": if customers[0] == "common":
customers = ["multinode"] customers = ["gss", "lookup", "multinode"]
for customer in customers: for customer in customers:
backup_type = "backup" backup_type = "backup"
if customer == "multinode": if customer == "gss":
backup_type = "gssbackup"
elif customer == "lookup":
backup_type = "lookupbackup"
elif customer == "multinode":
backup_command = ['sudo /home/script/bin/backup_multinode_db.sh'] backup_command = ['sudo /home/script/bin/backup_multinode_db.sh']
backup_type = "multinode-db" backup_type = "multinode-db"

View file

@ -30,10 +30,7 @@ def add_downtime(fqdn: str,
post_url = 'https://{}/thruk/r/hosts/{}/cmd/{}'.format( post_url = 'https://{}/thruk/r/hosts/{}/cmd/{}'.format(
monitor_host, fqdn, action) monitor_host, fqdn, action)
headers = {'X-Thruk-Auth-Key': apikey} headers = {'X-Thruk-Auth-Key': apikey}
try: requests.post(post_url, data=data, headers=headers)
requests.post(post_url, data=data, headers=headers)
except Exception:
print("Failed to add downtime for: {}".format(fqdn))
def remove_downtime(fqdn: str, def remove_downtime(fqdn: str,
@ -45,15 +42,12 @@ def remove_downtime(fqdn: str,
get_url = 'https://{}/thruk/r/hosts?name={}&columns=services'.format( get_url = 'https://{}/thruk/r/hosts?name={}&columns=services'.format(
monitor_host, fqdn) monitor_host, fqdn)
headers = {'X-Thruk-Auth-Key': apikey} headers = {'X-Thruk-Auth-Key': apikey}
try: req = requests.get(get_url, headers=headers)
req = requests.get(get_url, headers=headers) action = 'del_active_service_downtimes'
action = 'del_active_service_downtimes' for service in req.json()[0]['services']:
for service in req.json()[0]['services']: post_url = 'https://{}/thruk/r/services/{}/{}/cmd/{}'.format(
post_url = 'https://{}/thruk/r/services/{}/{}/cmd/{}'.format( monitor_host, fqdn, urllib.parse.quote(service), action)
monitor_host, fqdn, urllib.parse.quote(service), action) requests.post(post_url, headers=headers)
requests.post(post_url, headers=headers)
except Exception:
print("Failed to remove downtime for: {}".format(fqdn))
def run_command(command: list) -> tuple: def run_command(command: list) -> tuple:
@ -89,8 +83,9 @@ def main() -> int:
server_type = "node" server_type = "node"
backup_type = "backup" backup_type = "backup"
if customer == "common": if customer == "common":
print("GSS no longer exists, bailing out.") customer = "gss"
sys.exit(0) server_type = "gss"
backup_type = "gssbackup"
backup = build_fqdn(customer, environment, 1, backup_type) backup = build_fqdn(customer, environment, 1, backup_type)
print("\tRunning backup command at {}".format(backup)) print("\tRunning backup command at {}".format(backup))

View file

@ -83,11 +83,7 @@ alias la='ls -A'
alias l='ls -CF' alias l='ls -CF'
if [[ $(hostname) =~ redis ]]; then if [[ $(hostname) =~ redis ]]; then
if [ -f /opt/redis/node-0/server.conf ]; then alias redis-cli="redis-cli -a $(grep requirepass /opt/redis/node-0/server.conf | awk '{print $2}' | sed 's/"//g') --no-auth-warning"
alias redis-cli="redis-cli -a $(grep requirepass /opt/redis/node-0/server.conf | awk '{print $2}' | sed 's/"//g') --no-auth-warning"
else
alias redis-cli="redis-cli -a $(grep requirepass /opt/redict/node-0/server.conf | awk '{print $2}' | sed 's/"//g') --no-auth-warning"
fi
fi fi
# Alias definitions. # Alias definitions.

View file

@ -1,31 +0,0 @@
#!/usr/bin/env python3
"""Nagios-style check of system uptime measured in whole days.

Reads /proc/uptime, converts seconds-since-boot to whole days and
compares against the -w/--warning and -c/--critical thresholds.
Prints one status line with perfdata and exits with the standard
Nagios codes: 0 = OK, 1 = WARNING, 2 = CRITICAL.
"""
import argparse
import sys


def classify(days, warning, critical):
    """Return (status, exit_code) for an uptime of `days` days.

    CRITICAL wins over WARNING when both thresholds are exceeded,
    matching the original top-level if/if (not if/elif) ordering.
    """
    status = "OK"
    code = 0
    if days > warning:
        status = "WARNING"
        code = 1
    if days > critical:
        status = "CRITICAL"
        code = 2
    return status, code


def main():
    parser = argparse.ArgumentParser()
    # type=int gives a clean argparse error on non-numeric input
    # instead of a ValueError traceback from a manual int() call.
    parser.add_argument('-w', '--warning',
                        type=int,
                        help='Warning threshold in days',
                        required=True)
    parser.add_argument('-c', '--critical',
                        type=int,
                        help='Critical threshold in days',
                        required=True)
    args = parser.parse_args()

    # First field of /proc/uptime is seconds since boot (float).
    with open('/proc/uptime', 'r') as f:
        uptime_seconds = float(f.readline().split()[0])
    days = int(uptime_seconds / 86400)

    status, code = classify(days, args.warning, args.critical)
    print(f"{status}: uptime {days} days | "
          f"uptime={days};{args.warning};{args.critical};")
    return code


if __name__ == '__main__':
    sys.exit(main())

View file

@ -11,7 +11,7 @@ function usage {
exit 1 exit 1
} }
if ! [[ ${container} == 'nextcloud_app_1' ]] && ! [[ ${container} =~ ^nextcloud-[a-z]*-app-1$ ]]; then if ! [[ ${container} == 'nextcloud_app_1' ]] && ! [[ ${container} =~ ^nextcloud-[a-z]*_app_1$ ]]; then
usage usage
fi fi
if ! [[ ${bucket} =~ ^[a-zA-Z0-9]+ ]]; then if ! [[ ${bucket} =~ ^[a-zA-Z0-9]+ ]]; then
@ -26,7 +26,7 @@ echo "$(date): Start executing create_bucket.sh ${1} ${2} ${3}"
rclone_config="/opt/nextcloud/rclone.conf" rclone_config="/opt/nextcloud/rclone.conf"
if [[ "${container}" != "nextcloud_app_1" ]]; then if [[ "${container}" != "nextcloud_app_1" ]]; then
customer=$(echo "${container}" | sed -e 's/^nextcloud-//' -e 's/-app-1$//') customer=$(echo "${container}" | sed -e 's/^nextcloud-//' -e 's/_app_1$//')
rclone_config="/opt/multinode/${customer}/rclone.conf" rclone_config="/opt/multinode/${customer}/rclone.conf"
fi fi
@ -38,7 +38,7 @@ preexisting="$(docker exec -u www-data -i "${container}" php --define apc.enable
if [[ -z ${preexisting} ]]; then if [[ -z ${preexisting} ]]; then
docker exec -u www-data -i "${container}" php --define apc.enable_cli=1 /var/www/html/occ files_external:create "${user_bucket_name}" \ docker exec -u www-data -i "${container}" php --define apc.enable_cli=1 /var/www/html/occ files_external:create "${user_bucket_name}" \
amazons3 -c bucket="${bucket}" -c key="${key}" -c secret="${secret}" -c hostname="${endpoint}" -c use_ssl=true -c use_path_style=true -c region=us-east-1 \ amazons3 -c bucket="${bucket}" -c key="${key}" -c secret="${secret}" -c hostname="${endpoint}" -c use_ssl=true -c use_path_style=true -c region=us-east-1 \
-c useMultipartCopy=false amazons3::accesskey --user ${user} amazons3::accesskey --user ${user}
for shareid in $(docker exec -u www-data -i ${container} php --define apc.enable_cli=1 /var/www/html/occ files_external:export ${user} | jq -r '.[].mount_id'); do for shareid in $(docker exec -u www-data -i ${container} php --define apc.enable_cli=1 /var/www/html/occ files_external:export ${user} | jq -r '.[].mount_id'); do
docker exec -u www-data -i ${container} php --define apc.enable_cli=1 /var/www/html/occ files_external:option ${shareid} enable_sharing true docker exec -u www-data -i ${container} php --define apc.enable_cli=1 /var/www/html/occ files_external:option ${shareid} enable_sharing true
done done

View file

@ -7,7 +7,7 @@ bucket=${4}
user=${5} user=${5}
/usr/local/bin/occ files_external:create "${bucket}" \ /usr/local/bin/occ files_external:create "${bucket}" \
amazons3 -c bucket="${bucket}" -c key="${key}" -c secret="${secret}" -c hostname="${endpoint}" -c use_ssl=true -c use_path_style=true -c region=us-east-1 \ amazons3 -c bucket="${bucket}" -c key="${key}" -c secret="${secret}" -c hostname="${endpoint}" -c use_ssl=true -c use_path_style=true -c region=us-east-1 \
-c useMultipartCopy=false amazons3::accesskey --user "${user}" amazons3::accesskey --user "${user}"
for shareid in $(/usr/local/bin/occ files_external:export "${user}" | jq -r '.[].mount_id'); do for shareid in $(/usr/local/bin/occ files_external:export "${user}" | jq -r '.[].mount_id'); do
/usr/local/bin/occ files_external:option "${shareid}" enable_sharing true /usr/local/bin/occ files_external:option "${shareid}" enable_sharing true
done done

View file

@ -9,7 +9,7 @@ function usage {
exit 1 exit 1
} }
if ! [[ ${container} == 'nextcloud_app_1' ]] && ! [[ ${container} =~ ^nextcloud-[a-z]*-app-1$ ]]; then if ! [[ ${container} == 'nextcloud_app_1' ]] && ! [[ ${container} =~ ^nextcloud-[a-z]*_app_1$ ]]; then
usage usage
fi fi

View file

@ -2,7 +2,7 @@
container=${1} container=${1}
if ! [[ ${container} == 'nextcloud_app_1' ]] && ! [[ ${container} =~ ^nextcloud-[a-z]*-app-1$ ]]; then if ! [[ ${container} == 'nextcloud_app_1' ]] && ! [[ ${container} =~ ^nextcloud-[a-z]*_app_1$ ]]; then
echo "Usage: ${0} <nextcloud container name>" echo "Usage: ${0} <nextcloud container name>"
echo "Example : ${0} nextcloud_app_1" echo "Example : ${0} nextcloud_app_1"
exit 1 exit 1