Compare commits

9 commits

Author        SHA1        Message                              Date
Micke Nordin  3bd444762b  Add nordu.net                        2023-06-08 16:42:35 +02:00
Micke Nordin  4f7a727dd5  Add nordu.net                        2023-06-08 16:33:29 +02:00
Micke Nordin  b8fd3c22de  Don't expose php                     2023-06-08 16:31:26 +02:00
Micke Nordin  cbd61f1e9b  Try without verify peer              2023-06-01 13:37:45 +02:00
Micke Nordin  f7b728a823  Try to set tcp                       2023-06-01 13:25:01 +02:00
Micke Nordin  347d33d96e  Simplify config.ph with redis_seeds  2023-06-01 13:04:46 +02:00
Micke Nordin  f5cc63843e  fix name                             2023-06-01 12:53:57 +02:00
Micke Nordin  3daa399b06  Reorder config                       2023-06-01 12:37:18 +02:00
Micke Nordin  3be9fb986d  Add nce config                       2023-06-01 12:28:14 +02:00
80 changed files with 746 additions and 2268 deletions


@@ -1,6 +1,6 @@
 # Let's determine who the customer is by looking at the hostname
 function sunetdrive::get_customer() >> String {
-  $hostnameparts = split($facts['networking']['fqdn'],'\.')
+  $hostnameparts = split($facts['fqdn'],'\.')
   if $hostnameparts[1] == 'drive' {
     if $hostnameparts[0] =~ /^gss/ {
       return 'gss'


@@ -1,6 +1,6 @@
 # Let's determine where we are by looking at the hostname
 function sunetdrive::get_environment() >> String {
-  $hostname = $facts['networking']['fqdn']
+  $hostname = $facts['fqdn']
   if $hostname =~ /^.*\.drive\.sunet\.se$/ {
     if $hostname =~ /^.*\.pilot\.drive\.sunet\.se$/ {
       return 'pilot'


@@ -1,4 +1,4 @@
 # Let's determine where we are by looking at the hostname
 function sunetdrive::get_node_number() >> Integer {
-  Integer(regsubst($facts['networking']['fqdn'], /^[a-zA-Z\-]+(\d).*$/, '\\1'))
+  Integer(regsubst($::fqdn, /^[a-zA-Z\-]+(\d).*$/, '\\1'))
 }


@@ -5,16 +5,33 @@ define sunetdrive::app_type (
   $override_config = undef,
   $override_compose = undef
 ) {
+  include sunet::packages::netcat_openbsd
   # Config from group.yaml and customer specific conf
   $environment = sunetdrive::get_environment()
   $customer = sunetdrive::get_customer()
   $nodenumber = sunetdrive::get_node_number()
   $is_multinode = (($override_config != undef) and ($override_compose != undef))
   if $is_multinode {
+    # The config used
     $config = $override_config
+  } else {
+    $config = hiera_hash($environment)
+  }
+  # Common settings for multinode and full nodes
+  $nextcloud_ip = $config['app']
+  $s3_bucket = $config['s3_bucket']
+  $s3_host = $config['s3_host']
+  $site_name = $config['site_name']
+  $trusted_domains = $config['trusted_domains']
+  $trusted_proxies = $config['trusted_proxies']
+  # These are encrypted values from local.eyaml
+  $gss_jwt_key = safe_hiera('gss_jwt_key')
+  $smtppassword = safe_hiera('smtp_password')
+  if $is_multinode {
     # Other settings
+    $redis_host = $config['redis_host']
+    $admin_password = $config[ 'admin_password' ]
     $dbhost = $config[ 'dbhost' ]
     $dbname = $config[ 'dbname' ]
     $dbuser = $config[ 'dbuser' ]
@@ -25,11 +42,23 @@ define sunetdrive::app_type (
     $s3_key = $config[ 's3_key' ]
     $s3_secret = $config[ 's3_secret' ]
     $secret = $config[ 'secret' ]
+    $session_save_handler = 'redis'
+    $session_save_path = "tcp://${redis_host}:6379?auth=${redis_host_password}"
   } else {
-    # The config used
-    $config = hiera_hash($environment)
     $skeletondirectory = $config['skeletondirectory']
     # Other settings
+    $redis_seeds = [
+      {'host' => "redis1.${site_name}", 'port' => 6379},
+      {'host' => "redis2.${site_name}", 'port' => 6379},
+      {'host' => "redis3.${site_name}", 'port' => 6379},
+      {'host' => "redis1.${site_name}", 'port' => 6380},
+      {'host' => "redis2.${site_name}", 'port' => 6380},
+      {'host' => "redis3.${site_name}", 'port' => 6380},
+      {'host' => "redis1.${site_name}", 'port' => 6381},
+      {'host' => "redis2.${site_name}", 'port' => 6381},
+      {'host' => "redis3.${site_name}", 'port' => 6381},
+    ]
+    $admin_password = safe_hiera('admin_password')
     $dbhost = 'proxysql_proxysql_1'
     $dbname = 'nextcloud'
     $dbuser = 'nextcloud'
@@ -41,33 +70,13 @@ define sunetdrive::app_type (
     $s3_key = safe_hiera('s3_key')
     $s3_secret = safe_hiera('s3_secret')
     $secret = safe_hiera('secret')
+    $session_save_handler = 'rediscluster'
+    $session_save_path = "seed[]=${redis_seeds[0]['host']}:${redis_seeds[0]['port']}&seed[]=${redis_seeds[1]['host']}:${redis_seeds[1]['port']}&seed[]=${redis_seeds[2]['host']}:${redis_seeds[2]['port']}&seed[]=${redis_seeds[3]['host']}:${redis_seeds[3]['port']}&seed[]=${redis_seeds[4]['host']}:${redis_seeds[4]['port']}&seed[]=${redis_seeds[5]['host']}:${redis_seeds[5]['port']}&seed[]=${redis_seeds[6]['host']}:${redis_seeds[6]['port']}&seed[]=${redis_seeds[7]['host']}:${redis_seeds[7]['port']}&seed[]=${redis_seeds[8]['host']}:${redis_seeds[8]['port']}&timeout=2&read_timeout=2&failover=error&persistent=1&auth=${redis_cluster_password}"
   }
-  $twofactor_enforced_groups = $config['twofactor_enforced_groups']
-  $twofactor_enforced_excluded_groups = $config['twofactor_enforced_excluded_groups']
+  $twofactor_enforced_groups = hiera_array('twofactor_enforced_groups')
+  $twofactor_enforced_excluded_groups = hiera_array('twofactor_enforced_excluded_groups')
   $nextcloud_version = hiera("nextcloud_version_${environment}")
   $nextcloud_version_string = split($nextcloud_version, '[-]')[0]
-  # Common settings for multinode and full nodes
-  $nextcloud_ip = $config['app']
-  $redis_host = $config['redis_host']
-  $s3_bucket = $config['s3_bucket']
-  $s3_host = $config['s3_host']
-  $site_name = $config['site_name']
-  $trusted_domains = $config['trusted_domains']
-  $trusted_proxies = $config['trusted_proxies']
-  if $location == 'kau-prod' {
-    $php_memory_limit_mb = 2048
-  } else {
-    $php_memory_limit_mb = 512
-  }
-  if $::facts['dockerhost2'] == 'yes' {
-    $hostnet = true
-  }
-  # These are encrypted values from local.eyaml
-  $gss_jwt_key = safe_hiera('gss_jwt_key')
-  $smtppassword = safe_hiera('smtp_password')
   #These are global values from common.yaml
   $gs_enabled = hiera('gs_enabled')
   $gs_federation = hiera('gs_federation')
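
Note: the nine-entry $redis_seeds array and the session_save_path string in the hunks above spell out every host/port pair by hand. For reference, the same list can be derived with a loop, which keeps hosts and ports aligned. A minimal sketch, assuming puppetlabs-stdlib's flatten() and join() and the redis1-redis3 / 6379-6381 naming used in this changeset (the sketch itself is not part of the diff):

    # Sketch: derive the cluster seed list instead of enumerating it.
    $redis_hosts = ['redis1', 'redis2', 'redis3']
    $redis_ports = [6379, 6380, 6381]
    $redis_seeds = flatten($redis_ports.map |$port| {
      $redis_hosts.map |$host| {
        { 'host' => "${host}.${site_name}", 'port' => $port }
      }
    })
    # The seed[]=host:port&... fragment used by session_save_path.
    $seed_params = join($redis_seeds.map |$seed| {
      "seed[]=${seed['host']}:${seed['port']}"
    }, '&')
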
@@ -91,6 +100,14 @@ define sunetdrive::app_type (
   $lb_servers = hiera_hash($environment)['lb_servers']
   $document_servers = hiera_hash($environment)['document_servers']
+  file { '/opt/nextcloud/nce.ini':
+    ensure => file,
+    force => true,
+    owner => 'www-data',
+    group => 'root',
+    content => template('sunetdrive/application/nce.ini.erb'),
+    mode => '0644',
+  }
   unless $is_multinode{
     user { 'www-data': ensure => present, system => true }
@ -134,28 +151,14 @@ define sunetdrive::app_type (
group => 'root', group => 'root',
} }
file { '/usr/local/bin/upgrade23-25.sh': file { '/usr/local/bin/upgrade23-25.sh':
ensure => present, ensure => absent,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/upgrade23-25.erb.sh'),
mode => '0744',
}
file { '/usr/local/bin/remount_user_bucket_as_project.sh':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/remount_user_bucket_as_project.sh'),
mode => '0744',
} }
file { '/opt/rotate/conf.d/nextcloud.conf': file { '/opt/rotate/conf.d/nextcloud.conf':
ensure => file, ensure => file,
force => true, force => true,
owner => 'root', owner => 'root',
group => 'root', group => 'root',
content => "#This file is managed by puppet content => "#This file is managed by puppet\n#filename:retention days:maxsize mb\n/opt/nextcloud/nextcloud.log:180:256\n",
#filename:retention days:maxsize mb\n/opt/nextcloud/nextcloud.log:180:256\n/opt/nextcloud/audit.log:180:256\n",
mode => '0644', mode => '0644',
} }
file { '/opt/rotate/conf.d/redis.conf': file { '/opt/rotate/conf.d/redis.conf':
@@ -204,14 +207,7 @@ define sunetdrive::app_type (
       force => true,
       owner => 'www-data',
       group => 'root',
-      mode => '0640',
-    }
-    file { '/opt/nextcloud/audit.log':
-      ensure => file,
-      force => true,
-      owner => 'www-data',
-      group => 'root',
-      mode => '0640',
+      mode => '0644',
     }
     file { '/opt/nextcloud/rclone.conf':
       ensure => file,
@@ -220,32 +216,6 @@ define sunetdrive::app_type (
       content => template('sunetdrive/application/rclone.conf.erb'),
       mode => '0644',
     }
-    file { '/opt/nextcloud/apache.php.ini':
-      ensure => file,
-      force => true,
-      owner => 'www-data',
-      group => 'root',
-      content => template('sunetdrive/application/apache.php.ini.erb'),
-      mode => '0644',
-    }
-    file { '/opt/nextcloud/apcu.ini':
-      ensure => file,
-      force => true,
-      owner => 'www-data',
-      group => 'root',
-      content => template('sunetdrive/application/apcu.ini.erb'),
-      mode => '0644',
-    }
-    file { '/opt/nextcloud/cli.php.ini':
-      ensure => file,
-      force => true,
-      owner => 'www-data',
-      group => 'root',
-      content => template('sunetdrive/application/cli.php.ini.erb'),
-      mode => '0644',
-    }
     file { '/usr/local/bin/migrate_external_mounts':
       ensure => file,
       force => true,


@@ -38,8 +38,8 @@ define sunetdrive::cloudimage(
     resolver => ['130.242.80.14', '130.242.80.99'],
     search => $search,
     #
-    repo => $facts['cosmos_repo_origin_url'],
-    tagpattern => $facts['cosmos_tag_pattern'],
+    repo => $::cosmos_repo_origin_url,
+    tagpattern => $::cosmos_tag_pattern,
     #
     cpus => $cpus,
     memory => $memory,


@@ -1,53 +0,0 @@
# Common class
class sunetdrive::common {
include sunet::tools
include sunet::motd
include apt
include apparmor
include sunet::packages::jq
if $::facts['sunet_nftables_enabled'] != 'yes' {
warning('Enabling UFW')
include ufw
} else {
if $facts['networking']['hostname'] =~ /^kube[wc]/ {
warning('Setting nftables to installed but disabled')
ensure_resource ('class','sunet::nftables::init', { enabled => false })
} else {
warning('Enabling nftables')
ensure_resource ('class','sunet::nftables::init', { })
}
}
package {'sysstat': ensure => 'latest'}
package {'needrestart': ensure => installed}
service {'sysstat': provider => 'systemd'}
file_line { 'enable_sa':
ensure => 'present',
line => 'ENABLED="true"',
path => '/etc/default/sysstat',
match => 'ENABLED="false"',
require => Package['sysstat'],
}
file_line { 'sa_cron_comment':
ensure => 'present',
line => '# Activity reports every 2 minutes everyday',
path => '/etc/cron.d/sysstat',
match => '^#\ Activity\ reports\ every\ 10\ minutes\ everyday',
require => Package['sysstat'],
notify => Service['sysstat'],
}
file_line { 'sa_cron':
ensure => 'present',
line => '*/2 * * * * root command -v debian-sa1 > /dev/null && debian-sa1 1 1',
path => '/etc/cron.d/sysstat',
match => '^5-55/10',
require => Package['sysstat'],
notify => Service['sysstat'],
}
if $::facts['os']['distro']['id'] == 'Debian' {
exec { 'sysstat_systemd_timer':
command => 'systemctl enable --now sysstat-collect.timer',
unless => 'systemctl is-enabled --quiet sysstat-collect.timer',
}
}
}


@@ -4,12 +4,22 @@ define sunetdrive::db_type(
   $bootstrap=undef,
   $location=undef,
   $override_config = undef,
-  $override_compose = undef,
-)
+  $override_compose = undef)
 {
   # Config from group.yaml
   $environment = sunetdrive::get_environment()
   $mariadb_version = hiera("mariadb_version_${environment}")
+  $is_multinode = (($override_config != undef) and ($override_compose != undef))
+  if $is_multinode {
+    $config = $override_config
+    $mysql_root_password = $config['mysql_root_password']
+    $mysql_user_password = $config['mysql_user_password']
+    $backup_password = $config['backup_password']
+    $mariadb_dir = $config['mariadb_dir']
+    $mycnf_path = $config['mycnf_path']
+    $server_id = '1000'
+  } else {
     $config = hiera_hash($environment)
     $mysql_root_password = safe_hiera('mysql_root_password')
     $backup_password = safe_hiera('backup_password')
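
Note: when both $override_config and $override_compose are set, $is_multinode short-circuits every hiera lookup in this define. A hypothetical multinode caller would look roughly like the sketch below; the resource title, hash values and template path are illustrative placeholders, not taken from this changeset:

    # Hypothetical invocation of sunetdrive::db_type; values are placeholders.
    sunetdrive::db_type { 'nextcloud-example':
      bootstrap        => false,
      location         => 'multinode-test',
      override_config  => {
        'mysql_root_password' => safe_hiera('example_mysql_root_password'),
        'mysql_user_password' => safe_hiera('example_mysql_user_password'),
        'backup_password'     => safe_hiera('example_backup_password'),
        'mariadb_dir'         => '/opt/multinode/example/mariadb',
        'mycnf_path'          => 'sunetdrive/mariadb/my.cnf.erb',
        'app'                 => [],
      },
      override_compose => template('sunetdrive/mariadb/docker-compose_mariadb.yml.erb'),
    }
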
@@ -17,33 +27,29 @@ define sunetdrive::db_type(
     $mysql_user_password = safe_hiera('mysql_user_password')
     $mariadb_dir = '/etc/mariadb'
     $mycnf_path = 'sunetdrive/mariadb/my.cnf.erb'
-    $server_id = 1000 + Integer($facts['networking']['hostname'][-1])
-    ensure_resource('file',$mariadb_dir, { ensure => directory, recurse => true } )
+    $server_id = 1000 + Integer($facts['hostname'][-1])
+    ensure_resource('file',"${mariadb_dir}", { ensure => directory, recurse => true } )
     $dirs = ['datadir', 'init', 'conf', 'backups', 'scripts' ]
     $dirs.each |$dir| {
       ensure_resource('file',"${mariadb_dir}/${dir}", { ensure => directory, recurse => true } )
     }
+  }
   $nextcloud_ip = $config['app']
+  unless $is_multinode {
     $db_ip = $config['db']
     $db_ipv6 = $config['db_v6']
     $backup_ip = $config['backup']
     $backup_ipv6 = $config['backup_v6']
     $ports = [3306, 4444, 4567, 4568]
-    if $location =~ /^multinode/ {
-      $from = $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6 + $config['kube'] + $config['kube_v6']
-    } elsif $location == 'sunet-test' or $location == 'sunet-prod' {
-      $from = $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6 + $config['imap'] + $config['imap_v6'] + $config['smtp'] + $config['smtp_v6']
-    } else {
-      $from = $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6
-    }
     sunet::misc::ufw_allow { 'mariadb_ports':
-      from => $from,
+      from => $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6,
       port => $ports,
     }
     sunet::system_user {'mysql': username => 'mysql', group => 'mysql' }
+  }
   if $location =~ /^lookup/ {
@@ -84,17 +90,15 @@ define sunetdrive::db_type(
     mode => '0744',
   }
   sunet::scriptherder::cronjob { 'purge_binlogs':
-    cmd => '/usr/local/bin/purge-binlogs',
+    cmd => "/usr/local/bin/purge-binlogs",
     hour => '6',
     minute => '0',
     ok_criteria => ['exit_status=0','max_age=2d'],
     warn_criteria => ['exit_status=1','max_age=3d'],
   }
-  file { '/usr/local/bin/mysql':
-    ensure => present,
-    content => template('sunetdrive/mariadb/mysql.erb.sh'),
-    mode => '0744',
-  }
+  if $is_multinode {
+    $docker_compose = $override_compose
+  } else {
     file { '/usr/local/bin/size-test':
       ensure => present,
       content => template('sunetdrive/mariadb/size-test.erb'),
@@ -105,14 +109,14 @@ define sunetdrive::db_type(
       content => template('sunetdrive/mariadb/status-test.erb'),
       mode => '0744',
     }
-    file { '/etc/sudoers.d/99-size-test':
+    file { "/etc/sudoers.d/99-size-test":
       ensure => file,
       content => "script ALL=(root) NOPASSWD: /usr/local/bin/size-test\n",
       mode => '0440',
       owner => 'root',
       group => 'root',
     }
-    file { '/etc/sudoers.d/99-status-test':
+    file { "/etc/sudoers.d/99-status-test":
       ensure => file,
       content => "script ALL=(root) NOPASSWD: /usr/local/bin/status-test\n",
       mode => '0440',
@@ -126,4 +130,5 @@ define sunetdrive::db_type(
       compose_filename => 'docker-compose.yml',
       description => 'Mariadb server',
     }
+  }
 }


@@ -1,3 +1,4 @@
+include apt
 # Wrapper for sunet::dockerhost to do these specific things
 class sunetdrive::dockerhost(
   String $version = safe_hiera('docker_version'),
@@ -7,8 +8,6 @@ class sunetdrive::dockerhost(
   String $docker_args = '',
   Optional[String] $docker_dns = undef,
   String $storage_driver = 'aufs',
-  Boolean $write_daemon_config = false,
-  Boolean $enable_ipv6 = false,
 ) {
   if $version == 'NOT_SET_IN_HIERA' {
     fail('Docker version not set in Hiera')
@@ -27,8 +26,6 @@ class sunetdrive::dockerhost(
     storage_driver => $storage_driver,
     docker_network => true, # let docker choose a network for the 'docker' bridge
     compose_version => $compose_version,
-    write_daemon_config => $write_daemon_config,
-    enable_ipv6 => $enable_ipv6,
   }
   file { '/etc/apt/preferences.d/containerd.io.pref':
     ensure => absent,


@@ -4,7 +4,7 @@ class sunetdrive::infra_script (
   $location = undef
 ) {
   $environment = sunetdrive::get_environment()
-  $customer = 'common'
+  $customer = "common"
   $config = hiera_hash($environment)
   $gss_backup_server = $config['gss_backup_server']
   $lookup_backup_server = $config['lookup_backup_server']


@@ -1,38 +0,0 @@
#Class for SUNET-Drive-portal-Server
class sunetdrive::jupyter_site (
String $site_version = '0.0.1-1'
) {
$domain = 'jupyter.sunet.dev'
sunet::docker_compose { 'portal_compose':
content => template('sunetdrive/jupyter_site/docker-compose.erb.yaml'),
service_name => 'jupyter_site',
compose_dir => '/opt/',
compose_filename => 'docker-compose.yml',
description => 'Web server',
}
exec { 'workaround_docker_compose_dir':
command => 'mkdir -p /opt/jupyter_site/nginx',
unless => 'test -d /opt/jupyter_site/nginx',
}
$nginx_dirs = ['acme', 'certs', 'conf', 'dhparam', 'html', 'vhost']
$nginx_dirs.each | $dir| {
file { "/opt/jupyter_site/nginx/${dir}":
ensure => directory,
owner => 'root',
group => 'root',
mode => '0751',
}
}
sunet::misc::ufw_allow { 'https':
from => '0.0.0.0/0',
port => 443,
}
# For acme and redirect
sunet::misc::ufw_allow { 'http':
from => '0.0.0.0/0',
port => 80,
}
}


@@ -5,27 +5,22 @@ class sunetdrive::lookup (
 ) {
   $environment = sunetdrive::get_environment()
-  $config = lookup($environment, undef, undef, undef)
-  $public_url = "https://${config['site_name']}"
   # Firewall settings
   $nextcloud_ip = hiera_array("${location}_app", [])
   $tug_office = hiera_array('tug_office')
-  $dbhost = '127.0.0.1'
+  $dbhost = 'proxysql_proxysql_1'
   $gss_jwt_key = safe_hiera('gss_jwt_key')
-  $replication_auth = safe_hiera('replication_auth')
   $mysql_user_password = safe_hiera('mysql_user_password')
   $lookup_version = hiera("lookup_version_${environment}")
-  $email_sender = $config['email_sender']
   #Create users
   user { 'www-data': ensure => present, system => true }
   file { '/opt/lookup/config.php':
-    ensure => file,
+    ensure => present,
     owner => 'www-data',
     group => 'root',
     content => template('sunetdrive/lookup/config.php.erb'),


@@ -4,10 +4,10 @@ class sunetdrive::mariadb (
   $location = undef,
   $tag_mariadb = undef,
   $override_config = undef,
-  $override_compose = undef,
+  $override_compose = undef
 ) {
-  $quorum_id = $facts['networking']['fqdn']
+  $quorum_id = $::fqdn
   $quorum_password = safe_hiera('quorum_password')
   $db = sunetdrive::db_type { 'base_db':
     bootstrap => $bootstrap,


@@ -1,6 +1,5 @@
 # This is an asynchronous replica of the MariaDB cluster for SUNET Drive
 class sunetdrive::mariadb_backup($tag_mariadb=undef, $location=undef) {
-  include sunet::packages::netcat_openbsd
   $dirs = [ 'datadir', 'init', 'conf', 'backups' ]
   $dirs.each | $dir | {
     ensure_resource('file',"/opt/mariadb_backup/${dir}", { ensure => directory, recurse => true } )


@@ -5,7 +5,7 @@ class sunetdrive::multinode (
 )
 {
   include sunet::packages::yq
-  $myname = $facts['networking']['hostname']
+  $myname = $facts['hostname']
   $is_multinode = true;
   $environment = sunetdrive::get_environment()
   $lb_servers = hiera_hash($environment)['lb_servers']
@@ -14,8 +14,6 @@ class sunetdrive::multinode (
   $db_ip = hiera_hash($environment)['db']
   $admin_password = hiera('admin_password')
   $cluster_admin_password = hiera('cluster_admin_password')
-  # This is a global value from common.yaml but overridden in the gss-servers local.yaml
-  $gss_mode = hiera('gss_mode')
   $twofactor_enforced_groups = []
   $twofactor_enforced_excluded_groups = []
@@ -211,16 +209,14 @@ MACAddressPolicy=none'
     hour => '0',
     weekday => '0',
   }
-  # if $nodenumber == '2' {
-  #   cron { 'add_back_bucket_for_karin_nordgren':
-  #     command => '(/usr/local/bin/occ nextcloud-kmh_app_1 files_external:list karin_nordgren@kmh.se && /home/script/bin/create_bucket.sh nextcloud-kmh_app_1 karin_nordgren@kmh.se karin-nordgren-drive-sunet-se) || /bin/true',
-  #     user => 'root',
-  #     minute => '*/10',
-  #   }
-  # }
+  if $nodenumber == '2' {
+    cron { 'add_back_bucket_for_karin_nordgren':
+      command => '(/usr/local/bin/occ nextcloud-kmh_app_1 files_external:list karin_nordgren@kmh.se && /home/script/bin/create_bucket.sh nextcloud-kmh_app_1 karin_nordgren@kmh.se karin-nordgren-drive-sunet-se) || /bin/true',
+      user => 'root',
+      minute => '*/10',
+    }
+  }
   $customers.each | $index, $customer | {
-    $customer_config_full = hiera_hash($customer)
-    $customer_config = $customer_config_full[$environment]
     cron { "multinode_cron_${customer}":
       command => "/opt/nextcloud/cron.sh nextcloud-${customer}_app_1",
       require => File['/opt/nextcloud/cron.sh'],
@@ -228,19 +224,11 @@ MACAddressPolicy=none'
       minute => '*/10',
     }
     if $environment == 'prod' {
-      if 'primary_bucket' in $customer_config.keys() {
-        $s3_bucket = $customer_config['primary_bucket']
-      } else {
       $s3_bucket = "primary-${customer}-drive.sunet.se"
-      }
       $site_name = "${customer}.drive.sunet.se"
       $trusted_proxies = ['lb1.drive.sunet.se','lb2.drive.sunet.se', 'lb3.drive.sunet.se', 'lb4.drive.sunet.se']
-    } else {
-      if 'primary_bucket' in $customer_config.keys() {
-        $s3_bucket = $customer_config['primary_bucket']
     } else {
       $s3_bucket = "primary-${customer}-${environment}.sunet.se"
-      }
       $site_name = "${customer}.drive.${environment}.sunet.se"
       $trusted_proxies = ["lb1.drive.${environment}.sunet.se","lb2.drive.${environment}.sunet.se",
         "lb3.drive.${environment}.sunet.se","lb4.drive.${environment}.sunet.se"]
@@ -249,6 +237,8 @@ MACAddressPolicy=none'
     $apache_error_path = "/opt/multinode/${customer}/404.html"
     $config_php_path = "/opt/multinode/${customer}/config.php"
     $cron_log_path ="/opt/multinode/${customer}/cron.log"
+    $customer_config_full = hiera_hash($customer)
+    $customer_config = $customer_config_full[$environment]
     $dbhost = 'proxysql_proxysql_1'
     $dbname = "nextcloud_${customer}"
@@ -264,12 +254,7 @@ MACAddressPolicy=none'
     $mail_from_address = hiera("mail_from_address_${environment}")
     $mail_smtphost = hiera("mail_smtphost_${environment}")
     $nextcloud_log_path ="/opt/multinode/${customer}/nextcloud.log"
-    $audit_log_path ="/opt/multinode/${customer}/audit.log"
-    if $customer_config['nextcloud_version'] {
-      $nextcloud_version = $customer_config['nextcloud_version']
-    } else {
       $nextcloud_version = hiera("nextcloud_version_${environment}")
-    }
     $nextcloud_version_string = split($nextcloud_version, '[-]')[0]
     $rclone_conf_path = "/opt/multinode/${customer}/rclone.conf"
     $redis_conf_dir = "/opt/multinode/${customer}/server"
@@ -278,14 +263,8 @@ MACAddressPolicy=none'
     $s3_host = $customer_config['s3_host']
     $s3_usepath = hiera('s3_usepath')
     $smtpuser = hiera("smtp_user_${environment}")
-    $trusted_domains = [$site_name, $facts['networking']['fqdn'], 'localhost']
+    $trusted_domains = [$site_name, $facts['fqdn'], 'localhost']
     $tug_office = hiera_array('tug_office')
-    if $customer_config['twofactor_enforced_groups'] {
-      $twofactor_enforced_groups = $customer_config['twofactor_enforced_groups']
-    }
-    if $customer_config['twofactor_enforced_excluded_groups'] {
-      $twofactor_enforced_excluded_groups = $customer_config['twofactor_enforced_excluded_groups']
-    }
     # Secrets from local.eyaml
     $admin_password = safe_hiera("${customer}_admin_password")
@@ -385,13 +364,6 @@ MACAddressPolicy=none'
       group => 'root',
       mode => '0644',
     }
-    file { $audit_log_path:
-      ensure => file,
-      force => true,
-      owner => 'www-data',
-      group => 'root',
-      mode => '0644',
-    }
     file { $rclone_conf_path:
       ensure => present,
       owner => 'www-data',


@@ -14,58 +14,4 @@ class sunetdrive::multinode_db(){
       mode => '0744',
     }
   }
-  if $facts["networking"]["fqdn"] =~ /^multinode-db1\.drive\.(test\.){1}sunet\.se$/ {
-    $statistics_secret = safe_hiera('statistics_secret')
-    notify { 'hostmessage':
-      message => 'We are on multinode-db1. Set up statistics environment.',
-    }
-    $rclone_url = 'https://downloads.rclone.org/rclone-current-linux-amd64.deb'
-    $local_path = '/tmp/rclone-current-linux-amd64.deb'
-    exec { 'rclone_deb':
-      command => "/usr/bin/wget -q ${rclone_url} -O ${local_path}",
-      creates => $local_path,
-    }
-    package { 'rclone':
-      ensure => installed,
-      provider => dpkg,
-      source => $local_path,
-      require => Exec['rclone_deb'],
-    }
-    file { '/root/.rclone.conf':
-      ensure => file,
-      content => template('sunetdrive/mariadb_backup/rclone.conf.erb'),
-      owner => 'root',
-      group => 'root',
-      mode => '0600',
-    }
-    file { '/root/tasks/listusersbydep.sh':
-      ensure => file,
-      content => template('sunetdrive/mariadb/listusersdep.sh.erb'),
-      owner => 'root',
-      group => 'root',
-      mode => '0700',
-    }
-    file { '/root/tasks/genusersondepartmentlists.sh':
-      ensure => file,
-      content => template('sunetdrive/mariadb/genuserdeplists.sh.erb'),
-      owner => 'root',
-      group => 'root',
-      mode => '0700',
-    }
-    file {'/opt/mariadb/statistics/custdata.json':
-      ensure => file,
-      content => template('sunetdrive/mariadb/custconfig.json.erb'),
-      owner => 'root',
-      group => 'root',
-      mode => '0600',
-    }
-    sunet::scriptherder::cronjob { 'genuserdeplists':
-      cmd => '/root/tasks/genusersondepartmentlists.sh',
-      hour => '2',
-      minute => '5',
-      ok_criteria => ['exit_status=0','max_age=30h'],
-      warn_criteria => ['exit_status=1', 'max_age=60h'],
-    }
-  }
 }


@@ -1,79 +0,0 @@
# NRPE class
class sunetdrive::nrpe(
$loadw = '15,10,5',
$loadc = '30,25,20',
$procsw = 150,
$procsc = 200,
) {
require apt
class { 'sunet::nagios':
command_timeout => 600,
loadw => $loadw,
loadc => $loadc,
procsw => $procsw,
procsc => $procsc,
}
package {'nagios-plugins-contrib': ensure => latest}
if ($facts['os']['name'] == 'Ubuntu' and versioncmp($facts['os']['release']['full'], '22.04') >= 0 ){
$mem_w = '90'
$mem_c = '95'
} else {
$mem_w = '10'
$mem_c = '5'
}
$checks = ['nrpe_check_memory']
$checks.each |$check| {
ensure_resource("sunet::nagios::${check}", "nagios-nrpe-${check}")
}
sunet::nagios::nrpe_command {'check_entropy':
command_line => '/usr/lib/nagios/plugins/check_entropy -w 256'
}
sunet::nagios::nrpe_command {'check_ntp_time':
command_line => '/usr/lib/nagios/plugins/check_ntp_time -H ntp.se'
}
sunet::nagios::nrpe_command {'check_scriptherder':
command_line => '/usr/local/bin/scriptherder --mode check'
}
sunet::sudoer {'nagios_run_needrestart_command':
user_name => 'nagios',
collection => 'nrpe_needrestart_check',
command_line => '/usr/sbin/needrestart -p -l'
}
sunet::sudoer {'nagios_run_galera_command':
user_name => 'nagios',
collection => 'nrpe_galera_check',
command_line => '/usr/lib/nagios/plugins/check_galera_cluster -w 2 -c 1 -0'
}
sunet::nagios::nrpe_command {'check_galera_cluster':
command_line => '/usr/bin/sudo /usr/lib/nagios/plugins/check_galera_cluster -w 2 -c 1 -0'
}
sunet::sudoer {'nagios_run_proxysql_command':
user_name => 'nagios',
collection => 'nrpe_proxysql_check',
command_line => '/usr/lib/nagios/plugins/check_proxysql_server, /usr/lib/nagios/plugins/check_mysql_server_status'
}
sunet::nagios::nrpe_command {'check_proxysql_server':
command_line => '/usr/bin/sudo /usr/lib/nagios/plugins/check_proxysql_server'
}
sunet::sudoer {'nagios_run_replication_command':
user_name => 'nagios',
collection => 'nrpe_replication_check',
command_line => '/usr/local/bin/check_replication'
}
sunet::nagios::nrpe_command {'check_async_replication':
command_line => '/usr/bin/sudo /usr/local/bin/check_replication'
}
sunet::sudoer {'nagios_run_backup_command':
user_name => 'nagios',
collection => 'nrpe_backup_check',
command_line => '/usr/local/bin/check_backups'
}
sunet::nagios::nrpe_command {'check_backups':
command_line => '/usr/bin/sudo /usr/local/bin/check_backups'
}
sunet::nagios::nrpe_command {'check_mysql_server_status':
command_line => '/usr/bin/sudo /usr/lib/nagios/plugins/check_mysql_server_status'
}
}


@@ -3,8 +3,6 @@ class sunetdrive::onlyoffice () {
   $environment = sunetdrive::get_environment()
   $extra_hosts = hiera_hash($environment)['extra_hosts']
   $docker_tag = hiera_hash($environment)['collabora_tag']
-  $token_name = 'Sunet'
-  $access_token = safe_hiera('collabora_access_token')
   $customers = hiera('fullnodes')
   $multinode_customers = keys(hiera_hash('multinode_mapping'))
   if $environment == 'prod' {
@@ -12,15 +10,11 @@ class sunetdrive::onlyoffice () {
   } else {
     $domain = 'drive.test.sunet.se'
   }
-  exec { 'collabora_docker_login':
-    command => "docker login registry.gitlab.collabora.com -u ${token_name} -p ${access_token}",
-  }
   sunet::collabora::docs { 'sunet-onlyoffice':
     dns => [ '89.32.32.32' ],
     extra_hosts => $extra_hosts,
     extra_volumes => ['/opt/collabora/coolwsd.xml:/etc/coolwsd/coolwsd.xml'],
     docker_tag => $docker_tag,
-    docker_image => 'registry.gitlab.collabora.com/productivity/collabora-online-nc',
   }
   file {'/opt/collabora/coolwsd.xml':
     ensure => present,


@@ -1,48 +0,0 @@
#Class for SUNET-Drive-portal-Server
class sunetdrive::portal (
String $portal_version = '0.0.1-1'
) {
$environment = sunetdrive::get_environment()
if $environment == 'prod' {
$domain = 'drive.sunet.se'
} else {
$domain = 'drive.test.sunet.se'
}
sunet::docker_compose { 'portal_compose':
content => template('sunetdrive/portal/docker-compose.erb.yaml'),
service_name => 'portal',
compose_dir => '/opt/',
compose_filename => 'docker-compose.yml',
description => 'Portal server',
}
exec { 'workaround_docker_compose_dir':
command => 'mkdir -p /opt/portal/nginx',
unless => 'test -d /opt/portal/nginx',
}
$nginx_dirs = ['acme', 'certs', 'conf', 'dhparam', 'html', 'vhost']
$nginx_dirs.each | $dir| {
file { "/opt/portal/nginx/${dir}":
ensure => directory,
owner => 'root',
group => 'root',
mode => '0751',
}
}
file { '/opt/portal/config.yaml':
ensure => present,
content => template('sunetdrive/portal/config.erb.yaml'),
mode => '0644',
}
sunet::misc::ufw_allow { 'https':
from => '0.0.0.0/0',
port => 443,
}
# For acme and redirect
sunet::misc::ufw_allow { 'http':
from => '0.0.0.0/0',
port => 80,
}
}


@@ -26,9 +26,6 @@ class sunetdrive::proxysql (
   $mysql_user = safe_hiera('mysql_user')
   $transaction_persistent = 1
-  if $::facts['dockerhost2'] == 'yes' {
-    $hostnet = true
-  }
   file { '/usr/local/bin/proxysql':
     ensure => file,


@@ -17,10 +17,10 @@ class sunetdrive::redis_cluster (
     content => template('sunetdrive/redis_cluster/reset_cluster.erb.sh'),
     mode => '0700',
   }
-  exec { 'set_permissions_directory':
-    command => 'chown -R 999:root /opt/redis/node-*'
-  }
-  exec { 'set_permissions_files':
-    command => 'chown -R 999:999 /opt/redis/node-*/*'
+  file { '/root/.bashrc':
+    ensure => present,
+    content => template('sunetdrive/redis_cluster/bashrc.erb'),
+    mode => '0644',
   }
 }
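
Note: the chown execs removed above ran unconditionally on every agent run. If node-* ownership still needs enforcing for the container user (uid 999 in the original commands), a guarded exec only fires when ownership has drifted. A sketch under that assumption; it collapses the old 999:root directory / 999:999 file split into a single owner, so adjust if the directory group must stay root:

    # Sketch: idempotent replacement for the removed chown execs.
    exec { 'set_redis_node_permissions':
      command  => 'chown -R 999:999 /opt/redis/node-*',
      onlyif   => 'find /opt/redis/node-* ! -user 999 | grep -q .',
      path     => ['/usr/bin', '/bin', '/usr/sbin'],
      provider => 'shell',
    }
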


@@ -1,16 +1,12 @@
 #Class for SUNET-Drive-Lookup-Server
 class sunetdrive::reva (
-  String $domain = 'drive.test.sunet.se',
-  String $customer = 'sunet',
-  String $reva_domain = "${customer}-reva.${domain}",
-  String $reva_version = 'v1.26.0',
+  String $domain = '',
+  String $reva_domain = ''
 ) {
   $environment = sunetdrive::get_environment()
   $shared_secret = safe_hiera('shared_secret')
-  $statistics_secret = safe_hiera('statistics_secret')
   $iopsecret = safe_hiera('iopsecret')
-  $smtp_credentials = safe_hiera('smtp_credentials')
   # Firewall settings
   #Create users
@@ -23,17 +19,18 @@ class sunetdrive::reva (
     content => template('sunetdrive/reva/revad.toml.erb'),
     mode => '0644',
   }
-  file { '/opt/reva/rclone.conf':
-    ensure => present,
-    owner => 'www-data',
-    group => 'root',
-    content => template('sunetdrive/reva/rclone.conf.erb'),
-    mode => '0644',
-  }
   file { '/opt/reva/data':
     ensure => directory,
     owner => 'www-data',
   }
+  file { '/opt/reva/ocm-providers.json':
+    ensure => present,
+    owner => 'www-data',
+    group => 'root',
+    content => template('sunetdrive/reva/ocm-providers.json.erb'),
+    mode => '0644',
+  }
   sunet::docker_compose { 'drive_reva_docker_compose':
     content => template('sunetdrive/reva/docker-compose.yml.erb'),
     service_name => 'reva',
@@ -41,12 +38,9 @@ class sunetdrive::reva (
     compose_filename => 'docker-compose.yml',
     description => 'Sciencemesh reva server',
   }
-  $ports = [443,19000]
-  $ports.each | $port|{
-    sunet::misc::ufw_allow { "reva_${port}":
-      from => '0.0.0.0/0',
-      port => $port,
-    }
-  }
+  sunet::misc::ufw_allow { 'https_reva':
+    from => '0.0.0.0/0',
+    port => 443,
+  }
 }


@@ -18,9 +18,7 @@ class sunetdrive::script (
   $backup_server = $config['backup_server']
   $rclone_url = 'https://downloads.rclone.org/rclone-current-linux-amd64.deb'
   $local_path = '/tmp/rclone-current-linux-amd64.deb'
-  $singlenodes = lookup('singlenodes')
-  $multinodes = keys(lookup('multinode_mapping'))
-  $extra_backup_jobs = pick($config['extra_backup_jobs'], {})
+  $singlenodes = hiera('singlenodes')
   if $customer == 'mdu' {
     $eppn_suffix = 'mdh.se'
@@ -56,17 +54,6 @@ class sunetdrive::script (
   }
   $site_name = $config['site_name']
   $user_bucket_name = $config['user_bucket_name']
-  if $config['user_scans'] {
-    $config['user_scans'].each |$job| {
-      sunet::scriptherder::cronjob { $job['name']:
-        cmd => "ssh -t -l script ${job['server']} /usr/bin/sudo /usr/local/bin/occ ${job['container']} files:scan ${job['user']}",
-        hour => $job['hour'],
-        minute => $job['minute'],
-        ok_criteria => ['exit_status=0','max_age=1d'],
-        warn_criteria => ['exit_status=1','max_age=2d'],
-      }
-    }
-  }
   # It is a start that will get us user buckets and primary buckets
   $backup_projects = $location
@@ -80,11 +67,7 @@ class sunetdrive::script (
     source => $local_path,
     require => Exec['rclone_deb'],
   }
-  package { 'fuse3':
-    ensure => installed,
-    provider => apt,
-  }
-  package { 'python3':
+  package { 'python3.9':
     ensure => installed,
     provider => apt,
   }
@@ -92,29 +75,11 @@ class sunetdrive::script (
     ensure => installed,
     provider => apt,
   }
-  package { 'xmlstarlet':
-    ensure => installed,
-    provider => apt,
-  }
   $drive_version = '0.3.1'
-  if $facts['os']['distro']['id'] == 'Debian' {
-    $pip_cmd = 'pip3 install --break-system-packages'
-  } else {
-    $pip_cmd = 'python3 -m pip install'
-  }
   exec { 'drive-utils':
-    command => "${pip_cmd} https://pypi.sunet.se/packages/drive-utils-${drive_version}.tar.gz",
-    unless => "python3 -m pip list | grep drive-utils | grep ${drive_version}",
-    require => Package['python3'],
+    command => "python3.9 -m pip install https://pypi.sunet.se/packages/drive-utils-${drive_version}.tar.gz",
+    unless => "python3.9 -m pip list | grep drive-utils | grep ${drive_version}",
+    require => Package['python3.9'],
   }
-  file { '/opt/backups':
-    ensure => directory,
-    mode => '0700'
-  }
-  file { '/opt/backups/scripts':
-    ensure => directory,
-    mode => '0700'
-  }
   file { '/root/.ssh/':
     ensure => directory,
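
Note: both sides of the hunk above use the same exec-with-unless pattern to keep a pip install idempotent: the unless guard greps pip's package list, so the exec is skipped once the pinned version is present. A generalized sketch; the package name, version and URL are placeholders, and it assumes a matching package resource for the interpreter exists:

    # Sketch of the idempotent pip-install pattern used above.
    $pkg = 'drive-utils'
    $ver = '0.3.1'
    exec { "pip_install_${pkg}":
      command  => "python3 -m pip install https://pypi.sunet.se/packages/${pkg}-${ver}.tar.gz",
      unless   => "python3 -m pip list | grep ${pkg} | grep ${ver}",
      path     => ['/usr/bin', '/bin'],
      provider => 'shell',
      require  => Package['python3'],
    }
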
@@ -265,13 +230,8 @@ class sunetdrive::script (
       warn_criteria => ['exit_status=1','max_age=31d'],
     }
   }
-  # Opt out of userbuckets, also customers that ended the contract
-  if $customer in ['extern', 'gih', 'hkr', 'suni', 'common', 'su', 'lnu'] {
-    sunet::scriptherder::cronjob { 'makebuckets':
-      ensure => absent,
-      cmd => 'bin/true',
-    }
-  } else {
+  # Opt out of userbuckets
+  unless $customer in ['extern', 'gih', 'suni', 'common'] {
     sunet::scriptherder::cronjob { 'makebuckets':
       cmd => '/root/tasks/makebuckets.sh',
       minute => '*/5',
@@ -337,16 +297,6 @@ class sunetdrive::script (
     }
   }
   if $customer == 'common' {
-    $multinode_passwords = $multinodes.map | $index, $customer | {
-      safe_hiera("${customer}_admin_app_password")
-    }
-    file { '/root/tasks/announce.sh':
-      ensure => file,
-      content => template('sunetdrive/script/multinodeannounce.erb.sh'),
-      owner => 'root',
-      group => 'root',
-      mode => '0700',
-    }
     file { '/root/tasks/backupmultinodedb.sh':
       ensure => file,
       content => template('sunetdrive/script/backupmultinodedb.erb.sh'),
@@ -354,13 +304,6 @@ class sunetdrive::script (
       group => 'root',
       mode => '0700',
     }
-    file { '/opt/backups/scripts/hb.sh':
-      ensure => file,
-      content => template('sunetdrive/script/backup-hb.erb.sh'),
-      owner => 'root',
-      group => 'root',
-      mode => '0700',
-    }
     sunet::scriptherder::cronjob { 'backupmultinodedb':
       cmd => '/root/tasks/backupmultinodedb.sh',
       hour => '2',
@@ -434,21 +377,6 @@ class sunetdrive::script (
       warn_criteria => ['exit_status=1','max_age=3d'],
     }
   } else {
-    $admin_app_password = safe_hiera('admin_app_password')
-    file { '/root/tasks/announce.sh':
-      ensure => file,
-      content => template('sunetdrive/script/announce.erb.sh'),
-      owner => 'root',
-      group => 'root',
-      mode => '0700',
-    }
-    file { '/root/tasks/delete_announcement_with_subject.sh':
-      ensure => file,
-      content => template('sunetdrive/script/delete_announcement_with_subject.erb.sh'),
-      owner => 'root',
-      group => 'root',
-      mode => '0700',
-    }
     file { '/root/tasks/backupmultinodedb.sh':
       ensure => absent,
     }


@@ -23,6 +23,7 @@ class sunetdrive::scriptreceiver()
     owner => 'script',
     group => 'script',
   }
+  $kano_shell = ['89.46.21.246','2001:6b0:6c::1bc']
   sunet::misc::ufw_allow { 'script_port':
     from => $script_ipv4 + $script_ipv6 + $kano_shell,
@@ -48,28 +49,14 @@ class sunetdrive::scriptreceiver()
     owner => 'root',
     group => 'root',
   }
-  file { '/usr/local/bin/get_drive_customers':
-    ensure => file,
-    content => template('sunetdrive/scriptreceiver/get_drive_customers.erb.sh'),
-    mode => '0744',
-    owner => 'root',
-    group => 'root',
-  }
   file { '/usr/local/bin/safer_reboot':
     ensure => file,
     content => template('sunetdrive/scriptreceiver/safer_reboot.erb'),
-    mode => '0744',
+    mode => '0740',
     owner => 'root',
     group => 'root',
   }
-  file { '/root/.bashrc':
-    ensure => file,
-    content => template('sunetdrive/scriptreceiver/bashrc.erb.sh'),
-    mode => '0644',
-    owner => 'root',
-    group => 'root',
-  }
-  file { '/etc/sudoers.d/99-safer_reboot':
+  file { "/etc/sudoers.d/99-safer_reboot":
     ensure => file,
     content => "script ALL=(root) NOPASSWD: /usr/local/bin/safer_reboot\n",
     mode => '0440',


@@ -7,7 +7,7 @@ class sunetdrive::sitemonitornaemon() {
   $tls_servers_with_port = hiera_array('tls_servers_with_port')
   $nextcloud_version_prod = split(hiera('nextcloud_version_prod'),'[-]')[0]
   $nextcloud_version_test = split(hiera('nextcloud_version_test'),'[-]')[0]
-  $monitorhost = $facts['networking']['fqdn']
+  $monitorhost = $::fqdn
   $environment = sunetdrive::get_environment()
   $influx_passwd = safe_hiera('influx_passwd')
   $slack_url = safe_hiera('slack_url')
@@ -42,15 +42,11 @@ class sunetdrive::sitemonitornaemon() {
   }
   file { '/etc/naemon/conf.d/sunetdrive_thruk_templates.conf':
     ensure => present,
-    owner => 'naemon',
-    group => 'naemon',
     content => template('sunetdrive/monitor/sunetdrive_thruk_templates.conf.erb'),
     mode => '0644',
   }
-  nagioscfg::service {'check_scriptherder':
-    hostgroup_name => ['sunetdrive::nrpe'],
-    check_command => 'check_nrpe_1arg_to30!check_scriptherder',
-    description => 'Scriptherder Status',
-    contact_groups => ['naemon-admins'],
-  }
   nagioscfg::service {'check_galera_cluster':
     hostgroup_name => ['galera_monitor'],
     check_command => 'check_nrpe_1arg!check_galera_cluster',


@@ -1,10 +0,0 @@
# Class for Ubuntu 20.04
class sunetdrive::ubuntu_2004() {
if $facts['os']['name'] == 'Ubuntu' and $facts['os']['distro']['release']['full'] == '20.04' {
# Hide deprecation warnings for Ubuntu 2004
file_line {'env_rubyopt':
path => '/etc/environment',
line => 'RUBYOPT=\'-W0\'',
}
}
}


@@ -1,14 +1,13 @@
-Welcome to your personal space for research data storage!
-Researchers and PhD-students at MDU have access to a free storage space of 200 GB. It is important that research data is managed in a secure and careful manner, so that it is protected from damage and destruction.
-Your storage space is in the “Your storage space” folder that appears a few minutes after opening SUNET Drive for the first time.
-The “Your storage space” folder has an ABG structure which means that there are folders for Arbetsmaterial (work material), Bevarande (retention) and Gallringsbart (disposable).
-It is possible to create subfolders within these main folders themselves.
-All research data, except for military material and security-classified information, can be stored on SUNET Drive.
-If your data needs to undergo an export control, please refer to the check list on the internal portal.
-A user guide and a FAQ regarding SUNET Drive is available on the internal portal. If you have any questions, please contact dau@mdu.se
+Hello and welcome to your personal space for research data storage,
+Mälardalen University offers its researchers and doctoral students a secure and stable IT service that makes it possible to store research data. Furthermore, it enables research collaboration internally and externally since researchers can easily share their data within the project or with other individual researchers at MDU in a secure way. This technical infrastructure entails a good collaboration space for researchers, a secure handling of data, the possibility for the researcher to be able to store data in such a way that data management according to the FAIR principles can be complied with at MDU.
+To meet the general need for research data storage, MDU has purchased Sunet Drive's S3 research data storage service. Each researcher will be offered a space of 200GB per person. To simplify administration, all spaces will be appropriated in advance in the same way, regardless of the research domain or total amount of research data.
+200 GB will not correspond to all of MDU researchers' data storage needs; therefore, the researcher will have the possibility to purchase additional data space via a digital form. The additional purchase function is done by internal billing and the purchased space is then visible at the next login in the researcher's personal space.
+If there is a need for a specific project space for research data storage and/or sharing, this is also possible. Such a space is created by an administrator after ordering project space through the same form as when purchasing additional storage space. The project space will then be visible to project members in the personal storage space at the next login.
+Can a MDU student be included in the research project? It is possible to assign a student a time-limited access/sharing opportunity to specific folders in either the project space or the researcher's personal space. Contact the Data Access Unit (DAU) to enable such a feature.
+If you have any questions about the storage space, please contact dau@mdu.se.


@@ -1,160 +0,0 @@
[PHP]
allow_url_fopen = On
allow_url_include = Off
auto_append_file =
auto_globals_jit = On
auto_prepend_file =
default_charset = "UTF-8"
default_mimetype = "text/html"
default_socket_timeout = 60
disable_classes =
disable_functions = pcntl_alarm,pcntl_fork,pcntl_waitpid,pcntl_wait,pcntl_wifexited,pcntl_wifstopped,pcntl_wifsignaled,pcntl_wifcontinued,pcntl_wexitstatus,pcntl_wtermsig,pcntl_wstopsig,pcntl_signal,pcntl_signal_get_handler,pcntl_signal_dispatch,pcntl_get_last_error,pcntl_strerror,pcntl_sigprocmask,pcntl_sigwaitinfo,pcntl_sigtimedwait,pcntl_exec,pcntl_getpriority,pcntl_setpriority,pcntl_async_signals,pcntl_unshare,
display_errors = Off
display_startup_errors = Off
doc_root =
enable_dl = Off
engine = On
error_reporting = E_ALL & ~E_DEPRECATED & ~E_STRICT
expose_php = Off
file_uploads = On
ignore_repeated_errors = Off
ignore_repeated_source = Off
implicit_flush = Off
log_errors = On
log_errors_max_len = 1024
max_execution_time = 86400
max_file_uploads = 20
max_input_time = 86400
memory_limit = <%= @php_memory_limit_mb %>M
output_buffering = Off
post_max_size = 30G
precision = 14
register_argc_argv = Off
report_memleaks = On
request_order = "GP"
serialize_precision = -1
short_open_tag = Off
unserialize_callback_func =
upload_max_filesize = 30G
user_dir =
variables_order = "GPCS"
zend.enable_gc = On
zend.exception_ignore_args = On
zlib.output_compression = Off
<% if @customer == "kau" -%>
upload_tmp_dir = /opt/tmp/
<% end %>
[CLI Server]
cli_server.color = On
[Date]
; Nothing here
[filter]
; Nothing here
[iconv]
; Nothing here
[imap]
; Nothing here
[intl]
; Nothing here
[sqlite3]
; Nothing here
[Pcre]
; Nothing here
[Pdo]
; Nothing here
[Pdo_mysql]
pdo_mysql.default_socket=
[Phar]
; Nothing here
[mail function]
SMTP = localhost
smtp_port = 25
mail.add_x_header = Off
[ODBC]
odbc.allow_persistent = On
odbc.check_persistent = On
odbc.max_persistent = -1
odbc.max_links = -1
odbc.defaultlrl = 4096
odbc.defaultbinmode = 1
[MySQLi]
mysqli.max_persistent = -1
mysqli.allow_persistent = On
mysqli.max_links = -1
mysqli.default_port = 3306
mysqli.default_socket =
mysqli.default_host =
mysqli.default_user =
mysqli.default_pw =
mysqli.reconnect = Off
[mysqlnd]
mysqlnd.collect_statistics = On
mysqlnd.collect_memory_statistics = Off
[OCI8]
; Nothing here
[PostgreSQL]
pgsql.allow_persistent = On
pgsql.auto_reset_persistent = Off
pgsql.max_persistent = -1
pgsql.max_links = -1
pgsql.ignore_notice = 0
pgsql.log_notice = 0
[bcmath]
bcmath.scale = 0
[browscap]
; Nothing here
[Session]
session.save_handler = files
session.use_strict_mode = 0
session.use_cookies = 1
session.use_only_cookies = 1
session.name = PHPSESSID
session.auto_start = 0
session.cookie_lifetime = 0
session.cookie_path = /
session.cookie_domain =
session.cookie_httponly =
session.cookie_samesite =
session.serialize_handler = php
session.gc_probability = 0
session.gc_divisor = 1000
session.gc_maxlifetime = 1440
session.referer_check =
session.cache_limiter = nocache
session.cache_expire = 180
session.use_trans_sid = 0
session.sid_length = 26
session.trans_sid_tags = "a=href,area=href,frame=src,form="
session.sid_bits_per_character = 5
[Assertion]
zend.assertions = -1
[COM]
; Nothing here
[mbstring]
; Nothing here
[gd]
; Nothing here
[exif]
; Nothing here
[Tidy]
tidy.clean_output = Off
[soap]
soap.wsdl_cache_enabled=1
soap.wsdl_cache_dir="/tmp"
soap.wsdl_cache_ttl=86400
soap.wsdl_cache_limit = 5
[sysvshm]
; Nothing here
[ldap]
ldap.max_links = -1
[dba]
; Nothing here
[opcache]
opcache.interned_strings_buffer=32
[curl]
; Nothing here
[openssl]
; Nothing here
[ffi]
; Nothing here


@@ -1,2 +0,0 @@
extension=apcu.so
apc.enable_cli=1


@@ -1,158 +0,0 @@
[PHP]
allow_url_fopen = On
allow_url_include = Off
auto_append_file =
auto_globals_jit = On
auto_prepend_file =
default_charset = "UTF-8"
default_mimetype = "text/html"
default_socket_timeout = 60
disable_classes =
disable_functions =
display_errors = Off
display_startup_errors = Off
doc_root =
enable_dl = Off
engine = On
error_reporting = E_ALL & ~E_DEPRECATED & ~E_STRICT
expose_php = On
file_uploads = On
ignore_repeated_errors = Off
ignore_repeated_source = Off
implicit_flush = Off
log_errors = On
log_errors_max_len = 1024
max_execution_time = 86400
max_file_uploads = 20
max_input_time = 86400
memory_limit = -1
output_buffering = Off
post_max_size = 16G
precision = 14
register_argc_argv = Off
report_memleaks = On
request_order = "GP"
serialize_precision = -1
short_open_tag = Off
unserialize_callback_func =
upload_max_filesize = 16G
user_dir =
variables_order = "GPCS"
zend.enable_gc = On
zend.exception_ignore_args = On
zlib.output_compression = Off
[CLI Server]
cli_server.color = On
[Date]
; Nothing here
[filter]
; Nothing here
[iconv]
; Nothing here
[imap]
; Nothing here
[intl]
; Nothing here
[sqlite3]
; Nothing here
[Pcre]
; Nothing here
[Pdo]
; Nothing here
[Pdo_mysql]
pdo_mysql.default_socket=
[Phar]
; Nothing here
[mail function]
SMTP = localhost
smtp_port = 25
mail.add_x_header = Off
[ODBC]
odbc.allow_persistent = On
odbc.check_persistent = On
odbc.max_persistent = -1
odbc.max_links = -1
odbc.defaultlrl = 4096
odbc.defaultbinmode = 1
[MySQLi]
mysqli.max_persistent = -1
mysqli.allow_persistent = On
mysqli.max_links = -1
mysqli.default_port = 3306
mysqli.default_socket =
mysqli.default_host =
mysqli.default_user =
mysqli.default_pw =
mysqli.reconnect = Off
[mysqlnd]
mysqlnd.collect_statistics = On
mysqlnd.collect_memory_statistics = Off
[OCI8]
; Nothing here
[PostgreSQL]
pgsql.allow_persistent = On
pgsql.auto_reset_persistent = Off
pgsql.max_persistent = -1
pgsql.max_links = -1
pgsql.ignore_notice = 0
pgsql.log_notice = 0
[bcmath]
bcmath.scale = 0
[browscap]
; Nothing here
[Session]
session.save_handler = files
session.use_strict_mode = 0
session.use_cookies = 1
session.use_only_cookies = 1
session.name = PHPSESSID
session.auto_start = 0
session.cookie_lifetime = 0
session.cookie_path = /
session.cookie_domain =
session.cookie_httponly =
session.cookie_samesite =
session.serialize_handler = php
session.gc_probability = 0
session.gc_divisor = 1000
session.gc_maxlifetime = 1440
session.referer_check =
session.cache_limiter = nocache
session.cache_expire = 180
session.use_trans_sid = 0
session.sid_length = 26
session.trans_sid_tags = "a=href,area=href,frame=src,form="
session.sid_bits_per_character = 5
[Assertion]
zend.assertions = -1
[COM]
; Nothing here
[mbstring]
; Nothing here
[gd]
; Nothing here
[exif]
; Nothing here
[Tidy]
tidy.clean_output = Off
[soap]
soap.wsdl_cache_enabled=1
soap.wsdl_cache_dir="/tmp"
soap.wsdl_cache_ttl=86400
soap.wsdl_cache_limit = 5
[sysvshm]
; Nothing here
[ldap]
ldap.max_links = -1
[dba]
; Nothing here
[opcache]
opcache.interned_strings_buffer=16
opcache.validate_timestamps=0
opcache.memory_consumption=128
[curl]
; Nothing here
[openssl]
; Nothing here
[ffi]
; Nothing here
@ -3,6 +3,7 @@
config_php='/var/www/html/config/config.php' config_php='/var/www/html/config/config.php'
dbhost="<%= @dbhost %>" dbhost="<%= @dbhost %>"
mysql_user_password="<%= @mysql_user_password %>" mysql_user_password="<%= @mysql_user_password %>"
admin_password="<%= @admin_password %>"
location="<%= @location %>" location="<%= @location %>"
bucket="<%= @s3_bucket %>" bucket="<%= @s3_bucket %>"
@ -13,9 +14,6 @@ if [[ "${user_input}" == "IKnowWhatIAmDoing" ]]; then
echo "WARNING: This will delete everything in the database and reinstall Nextcloud." echo "WARNING: This will delete everything in the database and reinstall Nextcloud."
echo "You have 10 seconds to abort by hitting CTRL/C" echo "You have 10 seconds to abort by hitting CTRL/C"
sleep 10s sleep 10s
echo "Setting temp admin password"
apt update && apt install -y apg
admin_password="$(apg -m 40 | head -1)"
echo "Ok, proceeding." echo "Ok, proceeding."
echo "Dropping database in 3 seconds" echo "Dropping database in 3 seconds"
sleep 3s sleep 3s
@ -50,11 +48,6 @@ EOF
instanceid=$(grep -E "^ 'instanceid'" ${config_php} | awk -F "'" '{print $4}') instanceid=$(grep -E "^ 'instanceid'" ${config_php} | awk -F "'" '{print $4}')
secret=$(grep -E "^ 'secret'" ${config_php} | awk -F "'" '{print $4}') secret=$(grep -E "^ 'secret'" ${config_php} | awk -F "'" '{print $4}')
passwordsalt=$(grep -E "^ 'passwordsalt'" ${config_php} | awk -F "'" '{print $4}') passwordsalt=$(grep -E "^ 'passwordsalt'" ${config_php} | awk -F "'" '{print $4}')
echo "Now delete the admin user:"
echo " occ user:delete admin"
echo "and then create a new admin user:"
echo " /usr/local/bin/add_admin_user <username> <email address>"
echo ""
echo "Please use edit-secrets to add these variables to all Nextcloud servers:" echo "Please use edit-secrets to add these variables to all Nextcloud servers:"
echo "instanceid: DEC::PKCS7[${instanceid}]!" echo "instanceid: DEC::PKCS7[${instanceid}]!"
echo "secret: DEC::PKCS7[${secret}]!" echo "secret: DEC::PKCS7[${secret}]!"
@ -24,12 +24,7 @@ $CONFIG = array (
'config_is_read_only' => true, 'config_is_read_only' => true,
'csrf.disabled' => true, 'csrf.disabled' => true,
'datadirectory' => '/var/www/html/data', 'datadirectory' => '/var/www/html/data',
'davstorage.request_timeout' => 86401,
<%- if @hostnet -%>
'dbhost' => '127.0.0.1',
<%- else -%>
'dbhost' => '<%= @dbhost %>', 'dbhost' => '<%= @dbhost %>',
<%- end -%>
'dbname' => '<%= @dbname %>', 'dbname' => '<%= @dbname %>',
'dbpassword' => '<%= @mysql_user_password %>', 'dbpassword' => '<%= @mysql_user_password %>',
'dbport' => '3306', 'dbport' => '3306',
@ -43,7 +38,6 @@ $CONFIG = array (
<% if @location == 'sunet-test' -%> <% if @location == 'sunet-test' -%>
'filelocking.debug' => true, 'filelocking.debug' => true,
<% end -%> <% end -%>
'files_external_allow_create_new_local' => false,
'forcessl' => true, 'forcessl' => true,
'gs.enabled' => '<%= @gs_enabled %>', 'gs.enabled' => '<%= @gs_enabled %>',
'gs.federation' => '<%= @gs_federation %>', 'gs.federation' => '<%= @gs_federation %>',
@ -63,12 +57,11 @@ $CONFIG = array (
'gss.master.url' => '<%= @gss_master_url %>', 'gss.master.url' => '<%= @gss_master_url %>',
'gss.mode' => '<%= @gss_mode %>', 'gss.mode' => '<%= @gss_mode %>',
'gss.user.discovery.module' => '\\OCA\\GlobalSiteSelector\\UserDiscoveryModules\\ManualUserMapping', 'gss.user.discovery.module' => '\\OCA\\GlobalSiteSelector\\UserDiscoveryModules\\ManualUserMapping',
'gss.username_format' => 'sanitize',
'installed' => true, 'installed' => true,
'instanceid' => '<%= @instanceid %>', 'instanceid' => '<%= @instanceid %>',
'integrity.check.disabled' => true, 'integrity.check.disabled' => true,
'log_type' => 'file', 'log_type' => 'file',
'loglevel' => 1, 'loglevel' => 0,
'lookup_server' => '<%= @lookup_server %>', 'lookup_server' => '<%= @lookup_server %>',
'mail_domain' => '<%= @mail_domain %>', 'mail_domain' => '<%= @mail_domain %>',
'mail_from_address' => '<%= @mail_from_address %>', 'mail_from_address' => '<%= @mail_from_address %>',
@ -97,7 +90,6 @@ $CONFIG = array (
'region' => 'us-east-1', 'region' => 'us-east-1',
'hostname' => '<%= @s3_host %>', 'hostname' => '<%= @s3_host %>',
'port' => '', 'port' => '',
'useMultipartCopy' => false,
'objectPrefix' => 'urn:oid:', 'objectPrefix' => 'urn:oid:',
'autocreate' => false, 'autocreate' => false,
'use_ssl' => true, 'use_ssl' => true,
@ -109,75 +101,15 @@ $CONFIG = array (
'overwritehost' => '<%= @site_name %>', 'overwritehost' => '<%= @site_name %>',
'overwriteprotocol' => 'https', 'overwriteprotocol' => 'https',
'passwordsalt' => '<%= @passwordsalt %>', 'passwordsalt' => '<%= @passwordsalt %>',
<% if @location == 'gss-test' -%> <% if ! @is_multinode -%>
'redis.cluster' => [ 'redis.cluster' => [
'failover_mode' => \RedisCluster::FAILOVER_ERROR, 'failover_mode' => \RedisCluster::FAILOVER_ERROR,
'password' => '<%= @redis_cluster_password %>', 'password' => '<%= @redis_cluster_password %>',
'read_timeout' => 0.0, 'read_timeout' => 0.0,
'seeds' => [ 'seeds' => [
'redis1.drive.test.sunet.se:6379', <% @redis_seeds.each do |seed| -%>
'redis2.drive.test.sunet.se:6379', '<%= seed['host'] %>:<%= seed['port'] %>',
'redis3.drive.test.sunet.se:6379', <% end -%>
'redis1.drive.test.sunet.se:6380',
'redis2.drive.test.sunet.se:6380',
'redis3.drive.test.sunet.se:6380',
'redis1.drive.test.sunet.se:6381',
'redis2.drive.test.sunet.se:6381',
'redis3.drive.test.sunet.se:6381'
],
'timeout' => 1.1
],
<% elsif @location == 'gss-prod' -%>
'redis.cluster' => [
'failover_mode' => \RedisCluster::FAILOVER_ERROR,
'password' => '<%= @redis_cluster_password %>',
'read_timeout' => 0.0,
'seeds' => [
'redis1.drive.sunet.se:6379',
'redis2.drive.sunet.se:6379',
'redis3.drive.sunet.se:6379',
'redis1.drive.sunet.se:6380',
'redis2.drive.sunet.se:6380',
'redis3.drive.sunet.se:6380',
'redis1.drive.sunet.se:6381',
'redis2.drive.sunet.se:6381',
'redis3.drive.sunet.se:6381'
],
'timeout' => 1.1
],
<% elsif @environment == 'test' && ! @is_multinode -%>
'redis.cluster' => [
'failover_mode' => \RedisCluster::FAILOVER_ERROR,
'password' => '<%= @redis_cluster_password %>',
'read_timeout' => 0.0,
'seeds' => [
'redis1.<%= @customer %>.drive.test.sunet.se:6379',
'redis2.<%= @customer %>.drive.test.sunet.se:6379',
'redis3.<%= @customer %>.drive.test.sunet.se:6379',
'redis1.<%= @customer %>.drive.test.sunet.se:6380',
'redis2.<%= @customer %>.drive.test.sunet.se:6380',
'redis3.<%= @customer %>.drive.test.sunet.se:6380',
'redis1.<%= @customer %>.drive.test.sunet.se:6381',
'redis2.<%= @customer %>.drive.test.sunet.se:6381',
'redis3.<%= @customer %>.drive.test.sunet.se:6381'
],
'timeout' => 1.1
],
<% elsif @environment == 'prod' && ! @is_multinode && @location != 'gss-prod' -%>
'redis.cluster' => [
'failover_mode' => \RedisCluster::FAILOVER_ERROR,
'password' => '<%= @redis_cluster_password %>',
'read_timeout' => 0.0,
'seeds' => [
'redis1.<%= @customer %>.drive.sunet.se:6379',
'redis2.<%= @customer %>.drive.sunet.se:6379',
'redis3.<%= @customer %>.drive.sunet.se:6379',
'redis1.<%= @customer %>.drive.sunet.se:6380',
'redis2.<%= @customer %>.drive.sunet.se:6380',
'redis3.<%= @customer %>.drive.sunet.se:6380',
'redis1.<%= @customer %>.drive.sunet.se:6381',
'redis2.<%= @customer %>.drive.sunet.se:6381',
'redis3.<%= @customer %>.drive.sunet.se:6381'
], ],
'timeout' => 1.1 'timeout' => 1.1
], ],
@ -213,24 +145,19 @@ $CONFIG = array (
'twofactor_enforced_groups' => 'twofactor_enforced_groups' =>
array ( array (
0 => 'admin', 0 => 'admin',
1 => 'forcemfa', <%- index = 1 -%>
<%- if @twofactor_enforced_groups -%>
<%- index = 2 -%>
<%- @twofactor_enforced_groups.each do |item| -%> <%- @twofactor_enforced_groups.each do |item| -%>
<%= index %> => '<%= item %>', <%= index %> => '<%= item %>',
<%- index += 1 -%> <%- index += 1 -%>
<%- end -%> <%- end -%>
<%- end -%>
), ),
'twofactor_enforced_excluded_groups' => 'twofactor_enforced_excluded_groups' =>
array ( array (
<%- if @twofactor_enforced_excluded_groups -%>
<%- index = 0 -%> <%- index = 0 -%>
<%- @twofactor_enforced_excluded_groups.each do |item| -%> <%- @twofactor_enforced_excluded_groups.each do |item| -%>
<%= index %> => '<%= item %>', <%= index %> => '<%= item %>',
<%- index += 1 -%> <%- index += 1 -%>
<%- end -%> <%- end -%>
<%- end -%>
), ),
'updatechecker' => false, 'updatechecker' => false,
'version' => '<%= @nextcloud_version_string %>', 'version' => '<%= @nextcloud_version_string %>',
@ -5,21 +5,14 @@ services:
app: app:
image: docker.sunet.se/drive/nextcloud-custom:<%= @nextcloud_version %> image: docker.sunet.se/drive/nextcloud-custom:<%= @nextcloud_version %>
restart: always restart: always
container_name: nextcloud_app_1
<%- if @hostnet -%>
network_mode: host
<%- end -%>
volumes: volumes:
- /opt/nextcloud/000-default.conf:/etc/apache2/sites-enabled/000-default.conf - /opt/nextcloud/000-default.conf:/etc/apache2/sites-enabled/000-default.conf
- /opt/nextcloud/mpm_prefork.conf:/etc/apache2/mods-available/mpm_prefork.conf - /opt/nextcloud/mpm_prefork.conf:/etc/apache2/mods-available/mpm_prefork.conf
- /opt/nextcloud/404.html:/var/www/html/404.html - /opt/nextcloud/404.html:/var/www/html/404.html
- /opt/nextcloud/apache.php.ini:/etc/php/8.0/apache2/php.ini - /opt/nextcloud/nce.ini:/usr/local/etc/php/conf.d/nce.ini
- /opt/nextcloud/apcu.ini:/etc/php/8.0/mods-available/apcu.ini
- /opt/nextcloud/cli.php.ini:/etc/php/8.0/cli/php.ini
- /opt/nextcloud/complete_reinstall.sh:/complete_reinstall.sh - /opt/nextcloud/complete_reinstall.sh:/complete_reinstall.sh
- /opt/nextcloud/config.php:/var/www/html/config/config.php - /opt/nextcloud/config.php:/var/www/html/config/config.php
- /opt/nextcloud/nextcloud.log:/var/www/html/data/nextcloud.log - /opt/nextcloud/nextcloud.log:/var/www/html/data/nextcloud.log
- /opt/nextcloud/audit.log:/var/www/html/data/audit.log
- /opt/nextcloud/rclone.conf:/rclone.conf - /opt/nextcloud/rclone.conf:/rclone.conf
<%- if @skeletondirectory -%> <%- if @skeletondirectory -%>
- /opt/nextcloud/skeleton:<%= @skeletondirectory %> - /opt/nextcloud/skeleton:<%= @skeletondirectory %>
@ -30,22 +23,18 @@ services:
<% if @location =~ /^kau/ -%> <% if @location =~ /^kau/ -%>
- /mnt:/opt/tmp/ - /mnt:/opt/tmp/
<%- end -%> <%- end -%>
<%- if ! @hostnet -%>
networks: networks:
- default - default
- proxysql_proxysql - proxysql_proxysql
<%- end -%>
dns: dns:
- 89.46.20.75
- 89.46.21.29
- 89.32.32.32 - 89.32.32.32
<%- if !@hostnet -%>
ports: ports:
- 443:443 - 443:443
<%- end -%> command: apachectl -D FOREGROUND
command: sh -c 'tail -F /var/www/html/data/nextcloud.log /var/www/html/data/audit.log| tee -a /proc/1/fd/2 & apachectl -D FOREGROUND'
tty: true tty: true
<%- if !@hostnet -%>
networks: networks:
proxysql_proxysql: proxysql_proxysql:
external: true external: true
<%- end -%>
@ -1,58 +1,58 @@
{ {
"/(\\w+\\.)*antagning.se$/": "antagning.drive.sunet.se", "/antagning.se$/": "antagning.drive.sunet.se",
"/(\\w+\\.)*bth.se$/": "bth.drive.sunet.se", "/bth.se$/": "bth.drive.sunet.se",
"/(\\w+\\.)*chalmers.se$/": "chalmers.drive.sunet.se", "/chalmers.se$/": "chalmers.drive.sunet.se",
"/(\\w+\\.)*du.se$/": "du.drive.sunet.se", "/du.se$/": "du.drive.sunet.se",
"/(\\w+\\.)*eduid.se$/": "extern.drive.sunet.se", "/eduid.se$/": "extern.drive.sunet.se",
"/(\\w+\\.)*esh.se$/": "esh.drive.sunet.se", "/esh.se$/": "esh.drive.sunet.se",
"/(\\w+\\.)*fhs.se$/": "fhs.drive.sunet.se", "/fhs.se$/": "fhs.drive.sunet.se",
"/(\\w+\\.)*gih.se$/": "gih.drive.sunet.se", "/gih.se$/": "gih.drive.sunet.se",
"/(\\w+\\.)*gu.se$/": "gu.drive.sunet.se", "/gu.se$/": "gu.drive.sunet.se",
"/(\\w+\\.)*hb.se$/": "hb.drive.sunet.se", "/hb.se$/": "hb.drive.sunet.se",
"/(\\w+\\.)*shh.se$/": "shh.drive.sunet.se", "/@shh.se$/": "shh.drive.sunet.se",
"/(\\w+\\.)*hh.se$/": "hh.drive.sunet.se", "/@hh.se$/": "hh.drive.sunet.se",
"/(\\w+\\.)*hhs.se$/": "hhs.drive.sunet.se", "/hhs.se$/": "hhs.drive.sunet.se",
"/(\\w+\\.)*hig.se$/": "hig.drive.sunet.se", "/hig.se$/": "hig.drive.sunet.se",
"/(\\w+\\.)*his.se$/": "his.drive.sunet.se", "/his.se$/": "his.drive.sunet.se",
"/(\\w+\\.)*hj.se$/": "hj.drive.sunet.se", "/hj.se$/": "hj.drive.sunet.se",
"/(\\w+\\.)*hkr.se$/": "hkr.drive.sunet.se", "/hkr.se$/": "hkr.drive.sunet.se",
"/(\\w+\\.)*hv.se$/": "hv.drive.sunet.se", "/hv.se$/": "hv.drive.sunet.se",
"/(\\w+\\.)*irf.se$/": "irf.drive.sunet.se", "/irf.se$/": "irf.drive.sunet.se",
"/(\\w+\\.)*kb.se$/": "kb.drive.sunet.se", "/kb.se$/": "kb.drive.sunet.se",
"/(\\w+\\.)*ki.se$/": "ki.drive.sunet.se", "/ki.se$/": "ki.drive.sunet.se",
"/(\\w+\\.)*kkh.se$/": "kkh.drive.sunet.se", "/kkh.se$/": "kkh.drive.sunet.se",
"/(\\w+\\.)*kmh.se$/": "kmh.drive.sunet.se", "/kmh.se$/": "kmh.drive.sunet.se",
"/(\\w+\\.)*konstfack.se$/": "konstfack.drive.sunet.se", "/konstfack.se$/": "konstfack.drive.sunet.se",
"/(\\w+\\.)*kth.se$/": "kth.drive.sunet.se", "/kth.se$/": "kth.drive.sunet.se",
"/(\\w+\\.)*kva.se$/": "kva.drive.sunet.se", "/kva.se$/": "kva.drive.sunet.se",
"/(\\w+\\.)*liu.se$/": "liu.drive.sunet.se", "/liu.se$/": "liu.drive.sunet.se",
"/(\\w+\\.)*lnu.se$/": "lnu.drive.sunet.se", "/lnu.se$/": "lnu.drive.sunet.se",
"/(\\w+\\.)*ltu.se$/": "ltu.drive.sunet.se", "/ltu.se$/": "ltu.drive.sunet.se",
"/(\\w+\\.)*lu.se$/": "lu.drive.sunet.se", "/lu.se$/": "lu.drive.sunet.se",
"/(\\w+\\.)*mah.se$/": "mau.drive.sunet.se", "/mah.se$/": "mau.drive.sunet.se",
"/(\\w+\\.)*mau.se$/": "mau.drive.sunet.se", "/mau.se$/": "mau.drive.sunet.se",
"/(\\w+\\.)*mdh.se$/": "mdu.drive.sunet.se", "/mdh.se$/": "mdu.drive.sunet.se",
"/(\\w+\\.)*mdu.se$/": "mdu.drive.sunet.se", "/mdu.se$/": "mdu.drive.sunet.se",
"/(\\w+\\.)*miun.se$/": "miun.drive.sunet.se", "/miun.se$/": "miun.drive.sunet.se",
"/(\\w+\\.)*nordunet.se$/": "nordunet.drive.sunet.se", "/nordunet.se$/": "nordunet.drive.sunet.se",
"/(\\w+\\.)*nordu.net$/": "nordunet.drive.sunet.se", "/nordu.net$/": "nordunet.drive.sunet.se",
"/(\\w+\\.)*nrm.se$/": "nrm.drive.sunet.se", "/nrm.se$/": "nrm.drive.sunet.se",
"/(\\w+\\.)*oru.se$/": "oru.drive.sunet.se", "/oru.se$/": "oru.drive.sunet.se",
"/(\\w+\\.)*rkh.se$/": "rkh.drive.sunet.se", "/rkh.se$/": "rkh.drive.sunet.se",
"/(\\w+\\.)*sics.se$/": "sics.drive.sunet.se", "/sics.se$/": "sics.drive.sunet.se",
"/(\\w+\\.)*slu.se$/": "slu.drive.sunet.se", "/slu.se$/": "slu.drive.sunet.se",
"/(\\w+\\.)*smhi.se$/": "smhi.drive.sunet.se", "/smhi.se$/": "smhi.drive.sunet.se",
"/(\\w+\\.)*sp.se$/": "sp.drive.sunet.se", "/sp.se$/": "sp.drive.sunet.se",
"/(\\w+\\.)*su.se$/": "su.drive.sunet.se", "/su.se$/": "su.drive.sunet.se",
"/(\\w+\\.)*sunet.se$/": "sunet.drive.sunet.se", "/sunet.se$/": "sunet.drive.sunet.se",
"/(\\w+\\.)*suni.se$/": "suni.drive.sunet.se", "/suni.se$/": "suni.drive.sunet.se",
"/(\\w+\\.)*swamid.se$/": "swamid.drive.sunet.se", "/swamid.se$/": "swamid.drive.sunet.se",
"/(\\w+\\.)*ths.se$/": "ths.drive.sunet.se", "/ths.se$/": "ths.drive.sunet.se",
"/(\\w+\\.)*uhr.se$/": "uhr.drive.sunet.se", "/uhr.se$/": "uhr.drive.sunet.se",
"/(\\w+\\.)*umu.se$/": "umu.drive.sunet.se", "/umu.se$/": "umu.drive.sunet.se",
"/(\\w+\\.)*uniarts.se$/": "uniarts.drive.sunet.se", "/uniarts.se$/": "uniarts.drive.sunet.se",
"/(\\w+\\.)*uu.se$/": "uu.drive.sunet.se", "/uu.se$/": "uu.drive.sunet.se",
"/(\\w+\\.)*vinnova.se$/": "vinnova.drive.sunet.se", "/vinnova.se$/": "vinnova.drive.sunet.se",
"/(\\w+\\.)*vr.se$/": "vr.drive.sunet.se", "/vr.se$/": "vr.drive.sunet.se",
"/$/": "extern.drive.sunet.se" "/$/": "extern.drive.sunet.se"
} }
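Both columns of the mapping above pair a mail-domain regex with a customer instance, with "/$/" as the catch-all; the left-hand patterns spell out an optional (\w+\.)* subdomain prefix while the right-hand ones rely on the trailing anchor alone. A quick way to see where a given domain would land is to replay the lookup locally (a sketch: the php CLI is assumed, and mappings.json is a hypothetical file name, since file names are not shown in this view):

domain='int.uu.se'   # example value
php -r '
    // Sketch: replay the first-match lookup against a local copy of the
    // mapping file (hypothetical name mappings.json).
    $map = json_decode(file_get_contents("mappings.json"), true);
    foreach ($map as $pattern => $target) {
        if (preg_match($pattern, $argv[1])) { echo $target, "\n"; exit; }
    }
' -- "${domain}"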
@ -1,58 +1,58 @@
{ {
"/(\\w+\\.)*antagning.se$/": "antagning.drive.test.sunet.se", "/antagning.se$/": "antagning.drive.test.sunet.se",
"/(\\w+\\.)*bth.se$/": "bth.drive.test.sunet.se", "/bth.se$/": "bth.drive.test.sunet.se",
"/(\\w+\\.)*chalmers.se$/": "chalmers.drive.test.sunet.se", "/chalmers.se$/": "chalmers.drive.test.sunet.se",
"/(\\w+\\.)*du.se$/": "du.drive.test.sunet.se", "/du.se$/": "du.drive.test.sunet.se",
"/(\\w+\\.)*eduid.se$/": "extern.drive.test.sunet.se", "/eduid.se$/": "extern.drive.test.sunet.se",
"/(\\w+\\.)*esh.se$/": "esh.drive.test.sunet.se", "/esh.se$/": "esh.drive.test.sunet.se",
"/(\\w+\\.)*fhs.se$/": "fhs.drive.test.sunet.se", "/fhs.se$/": "fhs.drive.test.sunet.se",
"/(\\w+\\.)*gih.se$/": "gih.drive.test.sunet.se", "/gih.se$/": "gih.drive.test.sunet.se",
"/(\\w+\\.)*gu.se$/": "gu.drive.test.sunet.se", "/gu.se$/": "gu.drive.test.sunet.se",
"/(\\w+\\.)*hb.se$/": "hb.drive.test.sunet.se", "/hb.se$/": "hb.drive.test.sunet.se",
"/(\\w+\\.)*shh.se$/": "shh.drive.test.sunet.se", "/@shh.se$/": "shh.drive.test.sunet.se",
"/(\\w+\\.)*hh.se$/": "hh.drive.test.sunet.se", "/@hh.se$/": "hh.drive.test.sunet.se",
"/(\\w+\\.)*hhs.se$/": "hhs.drive.test.sunet.se", "/hhs.se$/": "hhs.drive.test.sunet.se",
"/(\\w+\\.)*hig.se$/": "hig.drive.test.sunet.se", "/hig.se$/": "hig.drive.test.sunet.se",
"/(\\w+\\.)*his.se$/": "his.drive.test.sunet.se", "/his.se$/": "his.drive.test.sunet.se",
"/(\\w+\\.)*hj.se$/": "hj.drive.test.sunet.se", "/hj.se$/": "hj.drive.test.sunet.se",
"/(\\w+\\.)*hkr.se$/": "hkr.drive.test.sunet.se", "/hkr.se$/": "hkr.drive.test.sunet.se",
"/(\\w+\\.)*hv.se$/": "hv.drive.test.sunet.se", "/hv.se$/": "hv.drive.test.sunet.se",
"/(\\w+\\.)*irf.se$/": "irf.drive.test.sunet.se", "/irf.se$/": "irf.drive.test.sunet.se",
"/(\\w+\\.)*kb.se$/": "kb.drive.test.sunet.se", "/kb.se$/": "kb.drive.test.sunet.se",
"/(\\w+\\.)*ki.se$/": "ki.drive.test.sunet.se", "/ki.se$/": "ki.drive.test.sunet.se",
"/(\\w+\\.)*kkh.se$/": "kkh.drive.test.sunet.se", "/kkh.se$/": "kkh.drive.test.sunet.se",
"/(\\w+\\.)*kmh.se$/": "kmh.drive.test.sunet.se", "/kmh.se$/": "kmh.drive.test.sunet.se",
"/(\\w+\\.)*konstfack.se$/": "konstfack.drive.test.sunet.se", "/konstfack.se$/": "konstfack.drive.test.sunet.se",
"/(\\w+\\.)*kth.se$/": "kth.drive.test.sunet.se", "/kth.se$/": "kth.drive.test.sunet.se",
"/(\\w+\\.)*kva.se$/": "kva.drive.test.sunet.se", "/kva.se$/": "kva.drive.test.sunet.se",
"/(\\w+\\.)*liu.se$/": "liu.drive.test.sunet.se", "/liu.se$/": "liu.drive.test.sunet.se",
"/(\\w+\\.)*lnu.se$/": "lnu.drive.test.sunet.se", "/lnu.se$/": "lnu.drive.test.sunet.se",
"/(\\w+\\.)*ltu.se$/": "ltu.drive.test.sunet.se", "/ltu.se$/": "ltu.drive.test.sunet.se",
"/(\\w+\\.)*lu.se$/": "lu.drive.test.sunet.se", "/lu.se$/": "lu.drive.test.sunet.se",
"/(\\w+\\.)*mah.se$/": "mau.drive.test.sunet.se", "/mah.se$/": "mau.drive.test.sunet.se",
"/(\\w+\\.)*mau.se$/": "mau.drive.test.sunet.se", "/mau.se$/": "mau.drive.test.sunet.se",
"/(\\w+\\.)*mdh.se$/": "mdu.drive.test.sunet.se", "/mdh.se$/": "mdu.drive.test.sunet.se",
"/(\\w+\\.)*mdu.se$/": "mdu.drive.test.sunet.se", "/mdu.se$/": "mdu.drive.test.sunet.se",
"/(\\w+\\.)*miun.se$/": "miun.drive.test.sunet.se", "/miun.se$/": "miun.drive.test.sunet.se",
"/(\\w+\\.)*nordunet.se$/": "nordunet.drive.test.sunet.se", "/nordunet.se$/": "nordunet.drive.test.sunet.se",
"/(\\w+\\.)*nordu.net$/": "nordunet.drive.test.sunet.se", "/nordu.net$/": "nordunet.drive.test.sunet.se",
"/(\\w+\\.)*nrm.se$/": "nrm.drive.test.sunet.se", "/nrm.se$/": "nrm.drive.test.sunet.se",
"/(\\w+\\.)*oru.se$/": "oru.drive.test.sunet.se", "/oru.se$/": "oru.drive.test.sunet.se",
"/(\\w+\\.)*rkh.se$/": "rkh.drive.test.sunet.se", "/rkh.se$/": "rkh.drive.test.sunet.se",
"/(\\w+\\.)*sics.se$/": "sics.drive.test.sunet.se", "/sics.se$/": "sics.drive.test.sunet.se",
"/(\\w+\\.)*slu.se$/": "slu.drive.test.sunet.se", "/slu.se$/": "slu.drive.test.sunet.se",
"/(\\w+\\.)*smhi.se$/": "smhi.drive.test.sunet.se", "/smhi.se$/": "smhi.drive.test.sunet.se",
"/(\\w+\\.)*sp.se$/": "sp.drive.test.sunet.se", "/sp.se$/": "sp.drive.test.sunet.se",
"/(\\w+\\.)*su.se$/": "su.drive.test.sunet.se", "/su.se$/": "su.drive.test.sunet.se",
"/(\\w+\\.)*sunet.se$/": "sunet.drive.test.sunet.se", "/sunet.se$/": "sunet.drive.test.sunet.se",
"/(\\w+\\.)*suni.se$/": "suni.drive.test.sunet.se", "/suni.se$/": "suni.drive.test.sunet.se",
"/(\\w+\\.)*swamid.se$/": "swamid.drive.test.sunet.se", "/swamid.se$/": "swamid.drive.test.sunet.se",
"/(\\w+\\.)*ths.se$/": "ths.drive.test.sunet.se", "/ths.se$/": "ths.drive.test.sunet.se",
"/(\\w+\\.)*uhr.se$/": "uhr.drive.test.sunet.se", "/uhr.se$/": "uhr.drive.test.sunet.se",
"/(\\w+\\.)*umu.se$/": "umu.drive.test.sunet.se", "/umu.se$/": "umu.drive.test.sunet.se",
"/(\\w+\\.)*uniarts.se$/": "uniarts.drive.test.sunet.se", "/uniarts.se$/": "uniarts.drive.test.sunet.se",
"/(\\w+\\.)*uu.se$/": "uu.drive.test.sunet.se", "/uu.se$/": "uu.drive.test.sunet.se",
"/(\\w+\\.)*vinnova.se$/": "vinnova.drive.test.sunet.se", "/vinnova.se$/": "vinnova.drive.test.sunet.se",
"/(\\w+\\.)*vr.se$/": "vr.drive.test.sunet.se", "/vr.se$/": "vr.drive.test.sunet.se",
"/$/": "extern.drive.test.sunet.se" "/$/": "extern.drive.test.sunet.se"
} }
@ -0,0 +1,9 @@
memory_limit=2048M
max_file_uploads=20
upload_max_filesize=30G
post_max_size=30G
max_execution_time=86400
max_input_time=86400
expose_php = off
session.save_handler = <%= @session_save_handler %>
session.save_path = "<%= @session_save_path %>"
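The new nce.ini above replaces the full apache/cli php.ini pair and moves the session handler and save path into template variables. A one-liner to confirm the container actually loads the rendered values (a sketch: the container name nextcloud_app_1 is taken from elsewhere in this changeset and may differ per deployment):

docker exec nextcloud_app_1 php -i \
    | grep -E 'memory_limit|upload_max_filesize|post_max_size|session\.save_handler'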
@ -15,19 +15,18 @@ if [[ "x${oc_list}" != "x" ]]; then
done done
fi fi
if [[ ${1} == 'config:editable' ]]; then if [[ ${1} == 'config:editable' ]] && [[ ${2} == '--on' ]]; then
echo "config:editable is deprecated" docker exec -ti ${MY_VARS} -u www-data ${container} sh -c 'sed "s/config_is_read_only\(.\) => true,/config_is_read_only\1 => false,/" /var/www/html/config/config.php > /var/www/html/config/config.php.tmp'
docker exec -ti ${MY_VARS} -u www-data ${container} cp /var/www/html/config/config.php.tmp /var/www/html/config/config.php
docker exec -ti ${MY_VARS} -u www-data ${container} rm /var/www/html/config/config.php.tmp
exit 0 exit 0
fi fi
docker exec ${container} chmod u+w /var/www/html/config/ if [[ ${1} == 'config:editable' ]] && [[ ${2} == '--off' ]]; then
docker exec -ti ${MY_VARS} -u www-data ${container} sh -c 'sed "s/config_is_read_only\(.\) => true,/config_is_read_only\1 => false,/" /var/www/html/config/config.php > /var/www/html/config/config.php.tmp' docker exec -ti ${MY_VARS} -u www-data ${container} sh -c 'sed "s/config_is_read_only\(.\) => false,/config_is_read_only\1 => true,/" /var/www/html/config/config.php > /var/www/html/config/config.php.tmp'
docker exec -ti ${MY_VARS} -u www-data ${container} cp /var/www/html/config/config.php.tmp /var/www/html/config/config.php docker exec -ti ${MY_VARS} -u www-data ${container} cp /var/www/html/config/config.php.tmp /var/www/html/config/config.php
docker exec -ti ${MY_VARS} -u www-data ${container} rm /var/www/html/config/config.php.tmp docker exec -ti ${MY_VARS} -u www-data ${container} rm /var/www/html/config/config.php.tmp
exit 0
fi
docker exec -ti ${MY_VARS} -u www-data ${container} php --define apc.enable_cli=1 /var/www/html/occ "$@" docker exec -ti ${MY_VARS} -u www-data ${container} php --define apc.enable_cli=1 /var/www/html/occ "$@"
docker exec -ti ${MY_VARS} -u www-data ${container} sh -c 'sed "s/config_is_read_only\(.\) => false,/config_is_read_only\1 => true,/" /var/www/html/config/config.php > /var/www/html/config/config.php.tmp'
docker exec -ti ${MY_VARS} -u www-data ${container} cp /var/www/html/config/config.php.tmp /var/www/html/config/config.php
docker exec -ti ${MY_VARS} -u www-data ${container} rm /var/www/html/config/config.php.tmp
docker exec ${container} chmod u-w /var/www/html/config/
exit 0
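The two variants of the occ wrapper differ in who flips config_is_read_only: one exposes explicit config:editable --on/--off subcommands, the other toggles the flag (and the config directory's write bit) around every occ invocation. With the explicit variant, a config change looks roughly like this (a sketch: the wrapper is assumed to be installed as occ on PATH, and the key/value are placeholders):

occ config:editable --on
occ config:system:set example_key --value='example'   # placeholder key
occ config:editable --off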
@ -1,79 +0,0 @@
#!/bin/bash
mountid="${1}"
user="${2}"
container="${3}"
if [[ -z ${mountid} ]] || [[ -z ${user} ]]; then
echo "We need a valid mount id and user to proceed"
echo "Usage: ${0} <mountid> <user> [<container>]"
exit
fi
if [[ -z ${container} ]]; then
container="nextcloud_app_1"
fi
occ="/usr/local/bin/occ ${container}"
function get_config {
${occ} files_external:config ${mountid} ${1} | tr -d '\n\t\r'
}
echo "Gathering information, hang tight."
echo -n "."
bucket="$(get_config bucket)"
echo -n "."
hostname="$(get_config hostname)"
echo -n "."
key="$(get_config key)"
echo -n "."
region="$(get_config region)"
echo -n "."
secret="$(get_config secret)"
jsonfile="/tmp/${user}-user-bucket.json"
mount_point="${user/@/-}"
mount_point="${mount_point/./-}-user-bucket"
echo "This will remount the user bucket with mountid ${mountid} for ${user} as project bucket with mountpoint ${mount_point}."
read -r -p "Press enter to continue"
echo '
[
{
"mount_point": "\/'${mount_point}'",
"storage": "\\OCA\\Files_External\\Lib\\Storage\\AmazonS3",
"authentication_type": "amazons3::accesskey",
"configuration": {
"bucket": "'${bucket}'",
"hostname": "'${hostname}'",
"key": "'${key}'",
"legacy_auth": false,
"port": "443",
"region": "'${region}'",
"secret": "'${secret}'",
"storageClass": "",
"useMultipartCopy": false,
"use_path_style": true,
"use_ssl": true
},
"options": {
"encrypt": true,
"previews": true,
"enable_sharing": true,
"filesystem_check_changes": 0,
"encoding_compatibility": false,
"readonly": false
},
"applicable_users": [
],
"applicable_groups": ["admin"]
}
]
' > "${jsonfile}"
docker cp ${jsonfile} ${container}:/${jsonfile}
${occ} files_external:import /${jsonfile}
docker exec ${container} rm /${jsonfile}
rm ${jsonfile}
${occ} files_external:delete ${mountid}
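Removed here is a helper that re-imports a user bucket as an admin-managed project bucket via files_external:import and then deletes the old mount. Invocation was of this shape (a sketch: the script name is hypothetical, since file names are not shown in this view, and mount id 42 is an example):

./remount_user_bucket.sh 42 user@example.se   # container defaults to nextcloud_app_1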
@ -1,69 +0,0 @@
version: "3.7"
services:
nginx:
image: docker.io/nginxproxy/nginx-proxy:latest
container_name: nginx
networks:
- internal_network
- external_network
dns:
- 89.32.32.32
ports:
- "80:80"
- "443:443"
labels:
com.github.jrcs.letsencrypt_nginx_proxy_companion.nginx_proxy: "true"
volumes:
- /opt/jupyter_site/nginx/certs:/etc/nginx/certs:ro
- /opt/jupyter_site/nginx/conf:/etc/nginx/conf.d
- /opt/jupyter_site/nginx/dhparam:/etc/nginx/dhparam
- /opt/jupyter_site/nginx/html:/usr/share/nginx/html
- /opt/jupyter_site/nginx/vhost:/etc/nginx/vhost.d
- /var/run/docker.sock:/tmp/docker.sock:ro
environment:
- ENABLE_IPV6=true
restart: unless-stopped
acme:
image: docker.io/nginxproxy/acme-companion:latest
container_name: acme
networks:
- external_network
dns:
- 89.32.32.32
volumes:
- /opt/jupyter_site/nginx/acme:/etc/acme.sh
- /opt/jupyter_site/nginx/certs:/etc/nginx/certs:rw
- /opt/jupyter_site/nginx/conf:/etc/nginx/conf.d
- /opt/jupyter_site/nginx/dhparam:/etc/nginx/dhparam
- /opt/jupyter_site/nginx/html:/usr/share/nginx/html
- /opt/jupyter_site/nginx/vhost:/etc/nginx/vhost.d:rw
- /var/run/docker.sock:/var/run/docker.sock:ro
environment:
- NGINX_PROXY_CONTAINER=nginx
- DEFAULT_EMAIL=drive@sunet.se
depends_on:
- nginx
restart: unless-stopped
web:
image: docker.sunet.se/drive/jupyter-site:<%= @site_version %>
container_name: web
restart: always
networks:
- internal_network
ports:
- "127.0.0.1:3000:3000"
dns:
- 89.32.32.32
environment:
- VIRTUAL_HOST=<%= @domain %>
- VIRTUAL_PATH=/
- VIRTUAL_PORT=3000
- LETSENCRYPT_HOST=<%= @domain %>
networks:
external_network:
internal_network:
internal: true
@ -1,29 +1,16 @@
<?php <?php
$CONFIG = [ $CONFIG = [
'AUTH_KEY' => "<%= @gss_jwt_key %>",
'DB' => [ 'DB' => [
'host' => "<%= @dbhost %>", 'host' => "<%= @dbhost %>",
'db' => "lookup" , 'db' => "lookup" ,
'user' => "lookup", 'user' => "lookup",
'pass' => "<%= @mysql_user_password %>", 'pass' => "<%= @mysql_user_password %>",
], ],
'EMAIL_SENDER' => '<%= @email_sender %>',
'ERROR_VERBOSE' => false,
'GLOBAL_SCALE' => true, 'GLOBAL_SCALE' => true,
'IP_BLACKLIST' => [
], 'AUTH_KEY' => "<%= @gss_jwt_key %>",
'MAX_REQUESTS' => 10000,
'MAX_SEARCH_PAGE' => 10,
'PUBLIC_URL' => '<%= @public_url %>',
'REPLICATION_AUTH' => '<%= @replication_auth %>',
'REPLICATION_HOSTS' => [
],
'SPAM_BLACKLIST' => [
],
'TWITTER' => [
'CONSUMER_KEY' => '',
'CONSUMER_SECRET' => '',
'ACCESS_TOKEN' => '',
'ACCESS_TOKEN_SECRET' => '',
],
]; ];
@ -3,15 +3,22 @@ version: '3.2'
services: services:
app: app:
container_name: lookup_app_1
image: docker.sunet.se/drive/nextcloud-lookup:<%= @lookup_version %> image: docker.sunet.se/drive/nextcloud-lookup:<%= @lookup_version %>
restart: always restart: always
volumes: volumes:
- /opt/lookup/config.php:/var/www/html/config/config.php - /opt/lookup/config.php:/var/www/html/config/config.php
network_mode: host networks:
- default
- proxysql_proxysql
dns: dns:
- 89.46.20.75 - 89.46.20.75
- 89.46.21.29 - 89.46.21.29
- 89.32.32.32 - 89.32.32.32
ports:
- 443:443
command: apachectl -D FOREGROUND command: apachectl -D FOREGROUND
tty: true tty: true
networks:
proxysql_proxysql:
external: true
@ -1 +0,0 @@
<%= @custdata.to_json %>
@ -4,7 +4,6 @@ services:
db: db:
image: docker.sunet.se/drive/mariadb:<%= @mariadb_version %> image: docker.sunet.se/drive/mariadb:<%= @mariadb_version %>
container_name: mariadb_db_1
restart: always restart: always
volumes: volumes:
- /etc/mariadb/backups:/backups - /etc/mariadb/backups:/backups
@ -21,6 +20,6 @@ services:
- MYSQL_ROOT_PASSWORD=<%= @mysql_root_password %> - MYSQL_ROOT_PASSWORD=<%= @mysql_root_password %>
- BOOTSTRAP=<%= @bootstrap %> - BOOTSTRAP=<%= @bootstrap %>
- FORCE_BOOTSTRAP=0 - FORCE_BOOTSTRAP=0
command: "--wsrep_cluster_address=gcomm://<%= @db_ip.join(',') %>" command: "--wsrep_cluster_address=gcomm://<%= @db_ip[0] %>,<%= @db_ip[1] %>,<%= @db_ip[2] %>"
tty: true tty: true
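Both forms of the wsrep line render to the same Galera cluster address when there are exactly three database nodes; the join(',') form simply stops hard-coding the node count. Rendered, the compose line comes out as (placeholder addresses):

command: "--wsrep_cluster_address=gcomm://10.0.0.1,10.0.0.2,10.0.0.3"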
@ -1,32 +0,0 @@
#!/bin/bash
<% basedir="statistics:drive-server-coms" -%>
<% cupath="/opt/mariadb/statistics/users/" -%>
<% custdata="/opt/mariadb/statistics/custdata.json" -%>
status=0
<% @custdata.each do |cust,data| -%>
#Customer <%= cust %> has no billing departments.
<% if defined?(data[@environment]["billdomains"]) && data[@environment]["billdomains"] -%>
mkdir -p /opt/mariadb/statistics/users/<%= cust %>
chmod '0700' /opt/mariadb/statistics/users/<%= cust %>
<% data[@environment]["billdomains"].each do |dom| -%>
/root/tasks/listusersbydep.sh <%= cust %> <%= dom %> > /opt/mariadb/statistics/users/<%= cust %>/users-<%= dom.gsub(/[.]/, '-') %>.json
if jq . <%= cupath + cust %>/users-<%= dom.gsub(/[.]/, '-') %>.json &>/dev/null
then
timeout 30s rclone copy -c --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies <%= cupath + cust %>/users-<%= dom.gsub(/[.]/, '-') %>.json <%= basedir%>/<%= cust %>-<%= @environment%>/
[[ $? -eq 0 ]] || { status=1 ; echo "Error: Upload of user data failed." ; }
else
echo "Error in json data"
status=1
fi
<% end -%>
<% end -%>
<% end -%>
if [[ -f <%= custdata %> ]]
then
timeout 30s rclone copy -c --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies <%= custdata %> <%= basedir%>/
fi
exit ${status}
@ -1,24 +0,0 @@
#!/bin/bash
function usage () {
printf "Usage:\t%s <customer> <department email domain>\n" "${0##*/}"
printf "Example:\t%s uu int.uu.se\n" "${0##*/}"
}
[[ -z "${1}" || -z "${2}" || ! "${1}" =~ ^[a-zA-Z0-9.]{1,200}$ || ! "${2}" =~ ^[a-zA-Z0-9.]{1,200}$ ]] && usage && exit 1
depdom="${2}"
customer="${1}"
docker exec mariadb_db_1 /bin/bash -c 'mysql -p${MYSQL_ROOT_PASSWORD} -NB -e '\
$'\'select JSON_PRETTY(JSON_OBJECTAGG(uid,displayname)) from ('\
$' select nextcloud_'"${customer}"$'.oc_accounts.uid,nextcloud_'"${customer}"$'.oc_global_scale_users.displayname'\
$' from nextcloud_'"${customer}"$'.oc_accounts,nextcloud_'"${customer}"$'.oc_global_scale_users'\
$' where nextcloud_'"${customer}"$'.oc_accounts.uid = nextcloud_'"${customer}"$'.oc_global_scale_users.uid'\
$' AND JSON_EXTRACT(nextcloud_'"${customer}"$'.oc_accounts.data, "$.email.value") like "%@'"${depdom}"$'\\\""'\
$' UNION'\
$' select nextcloud_'"${customer}"$'.oc_accounts.uid,nextcloud_'"${customer}"$'.oc_user_saml_users.displayname'\
$' from nextcloud_'"${customer}"$'.oc_accounts,nextcloud_'"${customer}"$'.oc_user_saml_users'\
$' where nextcloud_'"${customer}"$'.oc_accounts.uid = nextcloud_'"${customer}"$'.oc_user_saml_users.uid'\
$' AND JSON_EXTRACT(nextcloud_'"${customer}"$'.oc_accounts.data, "$.email.value") like "%@'"${depdom}"$'\\\"") as users\'' \
| sed 's/\\n/\n/g'
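The quoting in the script above is dense ($'...' fragments assembled into one docker exec argument); stripped of the escaping, the query it builds is the following (a sketch with customer "uu" and department domain "int.uu.se" as example values; the trailing \" in the LIKE pattern matches the closing quote of the JSON-encoded email value):

docker exec -i mariadb_db_1 bash -c 'mysql -p"${MYSQL_ROOT_PASSWORD}" -NB' <<'EOF'
select JSON_PRETTY(JSON_OBJECTAGG(uid, displayname)) from (
    select a.uid, g.displayname
      from nextcloud_uu.oc_accounts a, nextcloud_uu.oc_global_scale_users g
     where a.uid = g.uid
       and JSON_EXTRACT(a.data, '$.email.value') like '%@int.uu.se"'
    union
    select a.uid, s.displayname
      from nextcloud_uu.oc_accounts a, nextcloud_uu.oc_user_saml_users s
     where a.uid = s.uid
       and JSON_EXTRACT(a.data, '$.email.value') like '%@int.uu.se"'
) as users;
EOF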
@ -1,4 +0,0 @@
#!/bin/bash
pw=$(yq -r '.services.db.environment[0]' /opt/mariadb/docker-compose.yml | awk -F '=' '{print $2}')
docker exec -ti mariadb_db_1 mysql -u root -p"${pw}" "${@}"
@ -1,6 +1,6 @@
#!/bin/bash #!/bin/bash
result="$(docker exec -u root mariadb_backup_mariadb_backup_1 mysql -p<%= @mysql_root_password %> -BN -e 'show status like "slave_running"')" result="$(docker exec mariadb_backup_mariadb_backup_1 mysql -p<%= @mysql_root_password %> -BN -e 'show status like "slave_running"')"
if [[ "${result}" == "Slave_running ON" ]]; then if [[ "${result}" == "Slave_running ON" ]]; then
echo "OK: Replica running" echo "OK: Replica running"
exit 0 exit 0
@ -4,7 +4,6 @@ services:
mariadb_backup: mariadb_backup:
image: docker.sunet.se/drive/mariadb image: docker.sunet.se/drive/mariadb
container_name: mariadb_backup_mariadb_backup_1
dns: dns:
- 89.46.20.75 - 89.46.20.75
- 89.46.21.29 - 89.46.21.29
@ -15,16 +15,9 @@ password=$(${dexec} env | grep MYSQL_ROOT_PASSWORD | awk -F '=' '{print $2}')
mysql="${dexec} mysql -p${password}" mysql="${dexec} mysql -p${password}"
if [[ "$(${mysql} -NB -e 'select exists(select * from information_schema.TABLES where TABLE_SCHEMA = "nextcloud" and TABLE_NAME = "oc_global_scale_users")')" == "1" ]] users="$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_global_scale_users')"
then users="${users}
users="$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_global_scale_users')" $(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_users')"
fi
users="${users}"$'\n'"$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_users')"
if [[ "$(${mysql} -NB -e 'select exists(select * from information_schema.TABLES where TABLE_SCHEMA = "nextcloud" and TABLE_NAME = "oc_user_saml_users")')" == "1" ]]
then
users="${users}"$'\n'"$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_user_saml_users')"
fi
users="$(echo "${users}" | sort | uniq)"
project="statistics" project="statistics"
bucket="drive-server-coms" bucket="drive-server-coms"
@ -32,7 +25,7 @@ base_dir="${project}:${bucket}"
mountpoint="/opt/statistics" mountpoint="/opt/statistics"
customer_dir="${mountpoint}/${location}" customer_dir="${mountpoint}/${location}"
mkdir -p "${customer_dir}" mkdir -p "${customer_dir}"
rclone mkdir --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies "${base_dir}/${location}" rclone mkdir "${base_dir}/${location}"
echo "${users}" | awk 'BEGIN{print "{"} {print t "\""$1"\": \""$2"\""} {t=","} END{print "}"}' | jq . >"${customer_dir}/users.json" echo "${users}" | awk 'BEGIN{print "{"} {print t "\""$1"\": \""$2"\""} {t=","} END{print "}"}' | jq . >"${customer_dir}/users.json"
status=0 status=0
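The awk stage above wraps the uid/displayname rows into a single JSON object keyed by uid, which jq then validates and pretty-prints. For two example rows (single-word display names, matching awk's whitespace splitting):

printf 'alice\tAlice\nbob\tBob\n' \
    | awk 'BEGIN{print "{"} {print t "\""$1"\": \""$2"\""} {t=","} END{print "}"}' \
    | jq .
# => { "alice": "Alice", "bob": "Bob" }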
@ -1,6 +1,6 @@
[statistics] [statistics]
type = webdav type = webdav
url = https://89.45.236.246/remote.php/dav/files/_script/ url = https://89.46.20.42/remote.php/dav/files/_script/
vendor = nextcloud vendor = nextcloud
user = _script user = _script
pass = <%= @statistics_secret %> pass = <%= @statistics_secret %>
@ -3,12 +3,10 @@
config_php='/var/www/html/config/config.php' config_php='/var/www/html/config/config.php'
dbhost="<%= @dbhost %>" dbhost="<%= @dbhost %>"
mysql_user_password="<%= @mysql_user_password %>" mysql_user_password="<%= @mysql_user_password %>"
admin_password="<%= @admin_password %>"
location="<%= @location %>" location="<%= @location %>"
bucket="<%= @s3_bucket %>" bucket="<%= @s3_bucket %>"
customer="<%= @customer %>" customer="<%= @customer %>"
echo "Setting temp admin password"
apt update && apt install -y apg
admin_password="$(apg -m 40 | head -1)"
/usr/bin/mysql -e "drop database nextcloud" -u nextcloud -p"${mysql_user_password}" -h "${dbhost}" >/dev/null 2>&1 /usr/bin/mysql -e "drop database nextcloud" -u nextcloud -p"${mysql_user_password}" -h "${dbhost}" >/dev/null 2>&1
/usr/bin/mysql -e "create database nextcloud" -u nextcloud -p"${mysql_user_password}" -h "${dbhost}" >/dev/null 2>&1 /usr/bin/mysql -e "create database nextcloud" -u nextcloud -p"${mysql_user_password}" -h "${dbhost}" >/dev/null 2>&1
@ -37,11 +35,6 @@ EOF
instanceid=$(grep -E "^ 'instanceid'" ${config_php} | awk -F "'" '{print $4}') instanceid=$(grep -E "^ 'instanceid'" ${config_php} | awk -F "'" '{print $4}')
secret=$(grep -E "^ 'secret'" ${config_php} | awk -F "'" '{print $4}') secret=$(grep -E "^ 'secret'" ${config_php} | awk -F "'" '{print $4}')
passwordsalt=$(grep -E "^ 'passwordsalt'" ${config_php} | awk -F "'" '{print $4}') passwordsalt=$(grep -E "^ 'passwordsalt'" ${config_php} | awk -F "'" '{print $4}')
echo "Now delete the admin user:"
echo " occ <container> user:delete admin"
echo "and then create a new admin user:"
echo " /usr/local/bin/add_admin_user <username> <email address> <container>"
echo ""
echo "${customer}_instanceid: DEC::PKCS7[${instanceid}]!" echo "${customer}_instanceid: DEC::PKCS7[${instanceid}]!"
echo "${customer}_secret: DEC::PKCS7[${secret}]!" echo "${customer}_secret: DEC::PKCS7[${secret}]!"
echo "${customer}_passwordsalt: DEC::PKCS7[${passwordsalt}]!" echo "${customer}_passwordsalt: DEC::PKCS7[${passwordsalt}]!"
@ -5,7 +5,7 @@ no_files=30 # Keep this many files as an archive, script is run once a week
# a specific host, but will differ between hosts # a specific host, but will differ between hosts
sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk '{print $1}') / 2))m sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk '{print $1}') / 2))m
for logfile in $(ls /opt/multinode/*/{nextcloud.log,audit.log,server/server.log}); do for logfile in $(ls /opt/multinode/*/{nextcloud.log,server/server.log}); do
if [[ -f ${logfile}.gz.${no_files} ]]; then if [[ -f ${logfile}.gz.${no_files} ]]; then
rm ${logfile}.gz.${no_files} rm ${logfile}.gz.${no_files}
fi fi
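The sleep line above staggers the weekly runs: it takes the last octet of the host's first MAC address, interprets it as hex, and sleeps half that many minutes, giving each host a stable offset between 0 and 127 minutes. Worked through for a MAC ending in :ef:

printf '%d minutes\n' "$((16#ef / 2))"   # 0xef = 239, so this host waits 119 minutes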
@ -13,9 +13,7 @@ services:
- /opt/nextcloud/cli.php.ini:/etc/php/8.0/cli/php.ini - /opt/nextcloud/cli.php.ini:/etc/php/8.0/cli/php.ini
- <%= @config_php_path %>:/var/www/html/config/config.php - <%= @config_php_path %>:/var/www/html/config/config.php
- <%= @nextcloud_log_path %>:/var/www/html/data/nextcloud.log - <%= @nextcloud_log_path %>:/var/www/html/data/nextcloud.log
- <%= @audit_log_path %>:/var/www/html/data/audit.log
- <%= @rclone_conf_path %>:/rclone.conf - <%= @rclone_conf_path %>:/rclone.conf
networks: networks:
- default - default
- proxysql_proxysql - proxysql_proxysql
@ -25,7 +23,7 @@ services:
- 89.32.32.32 - 89.32.32.32
ports: ports:
- <%= @https_port %>:443 - <%= @https_port %>:443
command: sh -c 'tail -F /var/www/html/data/nextcloud.log /var/www/html/data/audit.log| tee -a /proc/1/fd/2 & apachectl -D FOREGROUND' command: apachectl -D FOREGROUND
tty: true tty: true
networks: networks:
@ -1,2 +0,0 @@
---
domain: "<%= @domain %>"
@ -1,71 +0,0 @@
version: "3.7"
services:
nginx:
image: docker.io/nginxproxy/nginx-proxy:latest
container_name: nginx
networks:
- internal_network
- external_network
dns:
- 89.32.32.32
ports:
- "80:80"
- "443:443"
labels:
com.github.jrcs.letsencrypt_nginx_proxy_companion.nginx_proxy: "true"
volumes:
- /opt/portal/nginx/certs:/etc/nginx/certs:ro
- /opt/portal/nginx/conf:/etc/nginx/conf.d
- /opt/portal/nginx/dhparam:/etc/nginx/dhparam
- /opt/portal/nginx/html:/usr/share/nginx/html
- /opt/portal/nginx/vhost:/etc/nginx/vhost.d
- /var/run/docker.sock:/tmp/docker.sock:ro
environment:
- ENABLE_IPV6=true
restart: unless-stopped
acme:
image: docker.io/nginxproxy/acme-companion:latest
container_name: acme
networks:
- external_network
dns:
- 89.32.32.32
volumes:
- /opt/portal/nginx/acme:/etc/acme.sh
- /opt/portal/nginx/certs:/etc/nginx/certs:rw
- /opt/portal/nginx/conf:/etc/nginx/conf.d
- /opt/portal/nginx/dhparam:/etc/nginx/dhparam
- /opt/portal/nginx/html:/usr/share/nginx/html
- /opt/portal/nginx/vhost:/etc/nginx/vhost.d:rw
- /var/run/docker.sock:/var/run/docker.sock:ro
environment:
- NGINX_PROXY_CONTAINER=nginx
- DEFAULT_EMAIL=noc@sunet.se
depends_on:
- nginx
restart: unless-stopped
portal:
image: docker.sunet.se/drive/portal:<%= @portal_version %>
container_name: portal
restart: always
networks:
- internal_network
ports:
- "127.0.0.1:8080:8080"
dns:
- 89.32.32.32
volumes:
- /opt/portal/config.yaml:/app/config.yaml
environment:
- VIRTUAL_HOST=portal.<%= @domain %>
- VIRTUAL_PATH=/
- VIRTUAL_PORT=8080
- LETSENCRYPT_HOST=portal.<%= @domain %>
networks:
external_network:
internal_network:
internal: true
@ -4,25 +4,18 @@ services:
proxysql: proxysql:
image: docker.sunet.se/drive/proxysql:<%= @proxysql_version %> image: docker.sunet.se/drive/proxysql:<%= @proxysql_version %>
container_name: proxysql_proxysql_1
<%- if @hostnet -%>
network_mode: host
<%- else -%>
ports: ports:
- 3306:3306 - 3306:3306
- 6032:6032 - 6032:6032
- 6080:6080 - 6080:6080
<%- end -%>
environment: environment:
INITIALIZE: 1 INITIALIZE: 1
dns: dns:
- 89.46.20.75 - 89.46.20.75
- 89.46.21.29 - 89.46.21.29
- 89.32.32.32 - 89.32.32.32
<%- if !@hostnet -%>
networks: networks:
- proxysql - proxysql
<%- end -%>
restart: unless-stopped restart: unless-stopped
volumes: volumes:
- proxysql:/var/lib/proxysql - proxysql:/var/lib/proxysql
@ -32,8 +25,6 @@ services:
volumes: volumes:
proxysql: proxysql:
<%- if !@hostnet -%>
networks: networks:
proxysql: proxysql:
driver: bridge driver: bridge
<%- end -%>
@ -0,0 +1,58 @@
# ~/.bashrc: executed by bash(1) for non-login shells.
# see /usr/share/doc/bash/examples/startup-files (in the package bash-doc)
# for examples
# If not running interactively, don't do anything
[ -z "$PS1" ] && return
# don't put duplicate lines in the history. See bash(1) for more options
# ... or force ignoredups and ignorespace
HISTCONTROL=ignoredups:ignorespace
# append to the history file, don't overwrite it
shopt -s histappend
# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
HISTSIZE=1000
HISTFILESIZE=2000
# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
shopt -s checkwinsize
# make less more friendly for non-text input files, see lesspipe(1)
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# set a fancy prompt (non-color, unless we know we "want" color)
case "$TERM" in
xterm-color) color_prompt=yes;;
esac
# uncomment for a colored prompt, if the terminal has the capability; turned
# off by default to not distract the user: the focus in a terminal window
# should be on the output of commands, not on the prompt
#force_color_prompt=yes
if [ -n "$force_color_prompt" ]; then
if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then
# We have color support; assume it's compliant with Ecma-48
# (ISO/IEC-6429). (Lack of such support is extremely rare, and such
# a case would tend to support setf rather than setaf.)
color_prompt=yes
else
color_prompt=
fi
fi
if [ "$color_prompt" = yes ]; then
PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
else
PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '
fi
unset color_prompt force_color_prompt
alias redis-cli="redis-cli -a $(grep requirepass /opt/redis/node-0/server.conf | awk '{print $2}' | sed 's/"//g') --no-auth-warning"
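The alias above reads requirepass out of the node-0 server config so interactive redis-cli sessions come pre-authenticated. With it loaded, a cluster health check is just (a sketch, using standard redis-cli subcommands):

redis-cli cluster info | grep cluster_state   # expect cluster_state:ok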
@ -3,20 +3,17 @@ version: '3.2'
services: services:
reva-server: reva-server:
image: docker.sunet.se/drive/sciencemesh-reva:<%= @reva_version %> # image: docker.sunet.se/drive/sciencemesh-reva:v2.12.0
image: michielbdejong/reva:mentix-fixes
network_mode: host network_mode: host
security_opt:
- seccomp:unconfined
dns: dns:
- 89.46.20.75 - 89.46.20.75
- 89.46.21.29 - 89.46.21.29
- 89.32.32.32 - 89.32.32.32
environment:
- CGO_ENABLED=1
volumes: volumes:
- /opt/reva/revad.toml:/etc/revad/revad.toml - /opt/reva/revad.toml:/etc/revad/revad.toml
- /opt/reva/data:/var/tmp/reva - /opt/reva/data:/var/tmp/reva
- /opt/reva/rclone.conf:/root/.rclone.conf - /opt/reva/ocm-providers.json:/etc/revad/ocm-providers.json
- /opt/reva/<%= @environment %>/<%= @domain %>.crt:/etc/revad/tls/<%= @domain %>.crt - /opt/reva/test/drive.test.sunet.se.crt:/etc/revad/tls/drive.test.sunet.se.crt
- /opt/reva/<%= @environment %>/<%= @domain %>.key:/etc/revad/tls/<%= @domain %>.key - /opt/reva/test/drive.test.sunet.se.key:/etc/revad/tls/drive.test.sunet.se.key
restart: always restart: always

[
{ "domain": "mesh.pondersource.org", "services": [
{ "endpoint": { "type": { "name": "OCM" }, "path": "https://mesh.pondersource.org/ocm/" }, "host": "https://mesh.pondersource.org" },
{ "endpoint": { "type": { "name": "Webdav" }, "path": "https://cloud.pondersource.org/remote.php/webdav/" }, "host": "https://cloud.pondersource.org" }
] },
{ "domain": "cs3mesh-iop.apps.dcw1.paas.psnc.pl", "services": [
{ "endpoint": { "type": { "name": "OCM" }, "path": "https://cs3mesh-iop.apps.dcw1.paas.psnc.pl/ocm/" }, "host": "https://cs3mesh-iop.apps.dcw1.paas.psnc.pl" },
{ "endpoint": { "type": { "name": "Webdav" }, "path": "https://cs3mesh-drive.apps.dcw1.paas.psnc.pl/remote.php/webdav/" }, "host": "https://cs3mesh-drive.apps.dcw1.paas.psnc.pl" }
] },
{ "domain": "<%= @reva_domain %>", "services": [
{ "endpoint": { "type": { "name": "OCM" }, "path": "https://<%= @reva_domain%>/ocm/" }, "host": "https://<%= @reva_domain %>" },
{ "endpoint": { "type": { "name": "Webdav" }, "path": "https://<%= @domain %>/remote.php/webdav/" }, "host": "https://<%= @domain %>" }
] }
]
@ -1,6 +0,0 @@
[statistics]
type = webdav
url = https://89.45.236.246/remote.php/dav/files/_script/
vendor = nextcloud
user = _script
pass = <%= @statistics_secret %>
@ -1,83 +1,48 @@
[vars]
internal_gateway = "<%= @reva_domain %>"
provider_domain = "<%= @reva_domain %>"
external_reva_endpoint = "https://<%= @reva_domain %>" # append here any route if applicable
efss_sciencemesh_endpoint = "https://<%= @customer %>.<%= @domain %>/index.php/apps/sciencemesh/"
machine_api_key = "<%= @iopsecret %>"
efss_shared_secret = "<%= @shared_secret %>"
[http]
certfile = "/etc/revad/tls/<%= @domain %>.crt"
keyfile = "/etc/revad/tls/<%= @domain %>.key"
[log] [log]
level = "debug" level = "debug"
[shared] [shared]
gatewaysvc = "{{ vars.internal_gateway }}:19000" gatewaysvc = "<%= @reva_domain %>:19000"
# gatewaycertfile = "/etc/revad/tls/revanc1.crt"
# [registry]
# driver = "static"
#
# [registry.static]
# services = ["authprovider","userprovider"]
#
# [registry.static.authprovider]
# bearer = ["localhost:0123"]
# basic = ["localhost:1234"]
# publiclink = ["localhost:9876"]
[grpc]
address = "0.0.0.0:19000"
# certfile = "/etc/revad/tls/revanc1.crt"
# keyfile = "/etc/revad/tls/revanc1.key"
[grpc.services.gateway] [grpc.services.gateway]
address = ":19000" authregistrysvc = "<%= @reva_domain %>:19000"
authregistrysvc = "{{ grpc.services.authregistry.address }}" appprovidersvc = "<%= @reva_domain %>:19000"
appregistrysvc = "{{ grpc.services.appregistry.address }}" appregistry = "<%= @reva_domain %>:19000"
storageregistrysvc = "{{ grpc.services.storageregistry.address }}" storageregistrysvc = "<%= @reva_domain %>:19000"
preferencessvc = "{{ grpc.services.userprovider.address }}" preferencessvc = "<%= @reva_domain %>:19000"
userprovidersvc = "{{ grpc.services.userprovider.address }}" userprovidersvc = "<%= @reva_domain %>:19000"
usershareprovidersvc = "{{ grpc.services.usershareprovider.address }}" usershareprovidersvc = "<%= @reva_domain %>:19000"
ocmcoresvc = "{{ grpc.services.ocmcore.address }}" publicshareprovidersvc = "<%= @reva_domain %>:19000"
ocmshareprovidersvc = "{{ grpc.services.ocmshareprovider.address }}" ocmcoresvc = "<%= @reva_domain %>:19000"
ocminvitemanagersvc = "{{ grpc.services.ocminvitemanager.address }}" ocmshareprovidersvc = "<%= @reva_domain %>:19000"
ocmproviderauthorizersvc = "{{ grpc.services.ocmproviderauthorizer.address }}" ocminvitemanagersvc = "<%= @reva_domain %>:19000"
datagateway = "https://{{ http.services.datagateway.address }}/data" ocmproviderauthorizersvc = "<%= @reva_domain %>:19000"
commit_share_to_storage_grant = false
datagateway = "https://<%= @reva_domain %>/data"
transfer_expires = 6 # give it a moment transfer_expires = 6 # give it a moment
commit_share_to_storage_grant = true
commit_share_to_storage_ref = true
[grpc.services.appregistry]
driver = "static"
[grpc.services.appregistry.drivers.static]
mime_types = [
{"mime_type" = "text/plain", "extension" = "txt", "name" = "Text file", "description" = "Text file", "allow_creation" = true},
{"mime_type" = "text/markdown", "extension" = "md", "name" = "Markdown file", "description" = "Markdown file", "allow_creation" = true},
{"mime_type" = "application/vnd.oasis.opendocument.text", "extension" = "odt", "name" = "OpenDocument", "description" = "OpenDocument text document", "default_app" = "Collabora", "allow_creation" = true},
{"mime_type" = "application/vnd.oasis.opendocument.spreadsheet", "extension" = "ods", "name" = "OpenSpreadsheet", "description" = "OpenDocument spreadsheet document", "default_app" = "Collabora", "allow_creation" = true},
{"mime_type" = "application/vnd.oasis.opendocument.presentation", "extension" = "odp", "name" = "OpenPresentation", "description" = "OpenDocument presentation document", "default_app" = "Collabora", "allow_creation" = true},
{"mime_type" = "application/vnd.jupyter", "extension" = "ipynb", "name" = "Jupyter Notebook", "description" = "Jupyter Notebook"}
]
### AUTH PROVIDERS ###
[grpc.services.authregistry] [grpc.services.authregistry]
driver = "static" driver = "static"
[grpc.services.authregistry.drivers.static.rules] [grpc.services.authregistry.drivers.static.rules]
basic = "{{ grpc.services.authprovider[0].address }}" basic = "<%= @reva_domain %>:19000"
machine = "{{ grpc.services.authprovider[1].address }}"
ocmshares = "{{ grpc.services.authprovider[2].address }}"
[[grpc.services.authprovider]]
auth_manager = "nextcloud"
[grpc.services.authprovider.auth_managers.nextcloud]
endpoint = "{{ vars.efss_sciencemesh_endpoint }}"
shared_secret = "{{ vars.efss_shared_secret }}"
mock_http = false
[[grpc.services.authprovider]]
auth_manager = "machine"
[grpc.services.authprovider.auth_managers.machine]
api_key = "{{ vars.machine_api_key }}"
gateway_addr = "{{ vars.internal_gateway }}:19000"
[[grpc.services.authprovider]]
auth_manager = "ocmshares"
### STORAGE PROVIDERS ###
[grpc.services.storageregistry] [grpc.services.storageregistry]
driver = "static" driver = "static"
@ -86,36 +51,8 @@ driver = "static"
home_provider = "/home" home_provider = "/home"
[grpc.services.storageregistry.drivers.static.rules] [grpc.services.storageregistry.drivers.static.rules]
"/home" = {"address" = "{{ grpc.services.storageprovider[0].address }}"} "/home" = {"address" = "<%= @reva_domain %>:19000"}
"nextcloud" = {"address" = "{{ grpc.services.storageprovider[0].address }}"} "123e4567-e89b-12d3-a456-426655440000" = {"address" = "<%= @reva_domain %>:19000"}
"/ocm" = {"address" = "{{ grpc.services.storageprovider[1].address }}"}
"ocm" = {"address" = "{{ grpc.services.storageprovider[1].address }}"}
[[grpc.services.storageprovider]]
driver = "nextcloud"
mount_id = "nextcloud"
expose_data_server = true
enable_home_creation = false
data_server_url = "https://localhost:{{ http.services.dataprovider[0].address.port }}/data"
[grpc.services.storageprovider.drivers.nextcloud]
endpoint = "{{ vars.efss_sciencemesh_endpoint }}"
shared_secret = "{{ vars.efss_shared_secret }}"
mock_http = false
[[grpc.services.storageprovider]]
driver = "ocmoutcoming"
mount_id = "ocm"
mount_path = "/ocm"
expose_data_server = true
enable_home_creation = false
data_server_url = "{{ vars.external_reva_endpoint }}/data"
[grpc.services.storageprovider.drivers.ocmoutcoming]
machine_secret = "{{ vars.machine_api_key }}"
### OTHER PROVIDERS ###
[grpc.services.usershareprovider] [grpc.services.usershareprovider]
driver = "memory" driver = "memory"
@ -124,148 +61,121 @@ driver = "memory"
driver = "nextcloud" driver = "nextcloud"
[grpc.services.ocmcore.drivers.nextcloud] [grpc.services.ocmcore.drivers.nextcloud]
host = "{{ vars.external_reva_endpoint }}" webdav_host = "https://<%= @domain %>/"
endpoint = "{{ vars.efss_sciencemesh_endpoint }}" endpoint = "https://<%= @domain %>/index.php/apps/sciencemesh/"
shared_secret = "{{ vars.efss_shared_secret }}" shared_secret = "<%= @shared_secret %>"
mock_http = false mock_http = false
[grpc.services.ocminvitemanager] [grpc.services.ocminvitemanager]
# TODO the driver should be "nextcloud" once it is implemented
driver = "json" driver = "json"
provider_domain = "{{ vars.provider_domain }}"
[grpc.services.ocmshareprovider] [grpc.services.ocmshareprovider]
driver = "nextcloud" driver = "nextcloud"
provider_domain = "{{ vars.provider_domain }}"
webdav_endpoint = "{{ vars.external_reva_endpoint }}"
webdav_prefix = "{{ vars.external_reva_endpoint }}/remote.php/dav/files"
# TODO the following should become {{ vars.external_reva_endpoint }}/external/{{.Token}}/...
webapp_template = "https://your.revad.org/external/sciencemesh/{{.Token}}/{relative-path-to-shared-resource}"
[grpc.services.ocmshareprovider.drivers.nextcloud] [grpc.services.ocmshareprovider.drivers.nextcloud]
webdav_host = "{{ vars.external_reva_endpoint }}" webdav_host = "https://<%= @domain %>/"
endpoint = "{{ vars.efss_sciencemesh_endpoint }}" endpoint = "https://<%= @domain %>/index.php/apps/sciencemesh/"
shared_secret = "{{ vars.efss_shared_secret }}" shared_secret = "<%= @shared_secret %>"
mock_http = false mock_http = false
mount_id = "nextcloud"
[grpc.services.ocmproviderauthorizer] [grpc.services.ocmproviderauthorizer]
driver = "mentix" #driver = "mentix"
driver = "open"
[grpc.services.ocmproviderauthorizer.drivers.mentix] [grpc.services.ocmproviderauthorizer.drivers.mentix]
url = "https://iop.sciencemesh.uni-muenster.de/iop/mentix/cs3" url = "https://iop.sciencemesh.uni-muenster.de/iop/mentix/cs3"
verify_request_hostname = true verify_request_hostname = false
insecure = false insecure = false
timeout = 10 timeout = 10
refresh = 900 refresh = 900
[grpc.services.ocmproviderauthorizer.drivers.json] [grpc.services.publicshareprovider]
# this is used by the docker-based test deployment, not in production driver = "memory"
providers = "providers.testnet.json"
verify_request_hostname = true [grpc.services.appprovider]
driver = "demo"
iopsecret = "<%= @iopsecret %>"
wopiurl = "http://0.0.0.0:8880/"
wopibridgeurl = "http://localhost:8000/wopib"
[grpc.services.appregistry]
driver = "static"
[grpc.services.appregistry.static.rules]
"text/plain" = "<%= @reva_domain %>:19000"
"text/markdown" = "<%= @reva_domain %>:19000"
"application/compressed-markdown" = "<%= @reva_domain %>:19000"
"application/vnd.oasis.opendocument.text" = "<%= @reva_domain %>:19000"
"application/vnd.oasis.opendocument.spreadsheet" = "<%= @reva_domain %>:19000"
"application/vnd.oasis.opendocument.presentation" = "<%= @reva_domain %>:19000"
[grpc.services.storageprovider]
driver = "nextcloud"
expose_data_server = true
data_server_url = "https://<%= @reva_domain %>/data"
enable_home_creation = true
[grpc.services.storageprovider.drivers.nextcloud]
endpoint = "https://<%= @domain %>/index.php/apps/sciencemesh/"
shared_secret = "<%= @shared_secret %>"
mock_http = false
[grpc.services.authprovider]
auth_manager = "nextcloud"
[grpc.services.authprovider.auth_managers.nextcloud]
endpoint = "https://<%= @domain %>/index.php/apps/sciencemesh/"
shared_secret = "<%= @shared_secret %>"
mock_http = false
[grpc.services.userprovider] [grpc.services.userprovider]
driver = "nextcloud" driver = "nextcloud"
[grpc.services.userprovider.drivers.nextcloud] [grpc.services.userprovider.drivers.nextcloud]
endpoint = "{{ vars.efss_sciencemesh_endpoint }}" endpoint = "https://<%= @domain %>/index.php/apps/sciencemesh/"
shared_secret = "{{ vars.efss_shared_secret }}" shared_secret = "<%= @shared_secret %>"
mock_http = false mock_http = false
[grpc.services.datatx] [http]
txdriver = "rclone" enabled_services = ["ocmd"]
storagedriver = "json" enabled_middlewares = ["providerauthorizer", "cors"]
remove_transfer_on_cancel = true address = "0.0.0.0:443"
certfile = "/etc/revad/tls/drive.test.sunet.se.crt"
keyfile = "/etc/revad/tls/drive.test.sunet.se.key"
[grpc.services.datatx.txdrivers.rclone] [http.services.dataprovider]
# rclone endpoint
endpoint = "http://rclone.docker"
# basic auth is used
auth_user = "rcloneuser"
auth_pass = "eilohtho9oTahsuongeeTh7reedahPo1Ohwi3aek"
auth_header = "x-access-token"
job_status_check_interval = 2000
job_timeout = 120000
storagedriver = "json"
remove_transfer_job_on_cancel = true
[grpc.services.datatx.storagedrivers.json]
file = ""
[grpc.services.datatx.txdrivers.rclone.storagedrivers.json]
file = ""
### HTTP ENDPOINTS ###
[http.services.appprovider]
address = ":443"
insecure = true
[http.services.datagateway]
address = ":443"
[[http.services.dataprovider]]
driver = "nextcloud" driver = "nextcloud"
[http.services.prometheus]
[http.services.sysinfo]
[http.services.dataprovider.drivers.nextcloud] [http.services.dataprovider.drivers.nextcloud]
endpoint = "{{ vars.efss_sciencemesh_endpoint }}" endpoint = "https://<%= @domain %>/index.php/apps/sciencemesh/"
shared_secret = "{{ vars.efss_shared_secret }}" shared_secret = "<%= @shared_secret %>"
mock_http = false mock_http = false
[[http.services.dataprovider]]
address = ":443"
driver = "ocmoutcoming"
[http.services.dataprovider.drivers.ocmoutcoming]
machine_secret = "{{ vars.machine_api_key }}"
[http.services.sciencemesh]
address = ":443"
provider_domain = "{{ vars.provider_domain }}"
mesh_directory_url = "https://sciencemesh.cesnet.cz/iop/meshdir"
ocm_mount_point = "/sciencemesh"
[http.services.sciencemesh.smtp_credentials]
disable_auth = false
sender_mail = "noreply@<%= @domain %>"
sender_login = "noreply@<%= @domain %>"
sender_password = "<%= @smtp_credentials %>"
smtp_server = "smtp.sunet.se"
smtp_port = 587
[http.services.ocmprovider]
address = ":443"
ocm_prefix = "ocm"
provider = "Reva for ownCloud/Nextcloud"
endpoint = "{{ vars.external_reva_endpoint }}"
enable_webapp = true
enable_datatx = true
[http.services.ocmd] [http.services.ocmd]
address = ":443"
prefix = "ocm" prefix = "ocm"
[http.services.ocmd.config] [http.services.ocmd.config]
host = "{{ vars.provider_domain }}" host = "<%= @reva_domain %>"
provider = "test-revanc1"
[http.middlewares.providerauthorizer]
#driver = "mentix"
driver = "open"
[http.middlewares.providerauthorizer.drivers.mentix]
url = "https://iop.sciencemesh.uni-muenster.de/iop/mentix/cs3"
verify_request_hostname = false
insecure = false
timeout = 10
refresh = 900
[http.services.ocs] [http.services.ocs]
address = ":443"
prefix = "ocs" prefix = "ocs"
[http.services.ocdav] [http.services.ocdav]
address = ":443" prefix = "ocdav"
[http.services.prometheus]
address = ":443"
[http.services.metrics]
address = ":443"
metrics_data_driver_type = "json"
metrics_data_location = "/etc/revad/metrics.json"
metrics_record_interval = 5000
[http.services.sysinfo]
[http.middlewares.cors] [http.middlewares.cors]
[http.middlewares.log]

View file

@ -1,75 +0,0 @@
#!/bin/bash
VALID_ARGS=$(getopt -o cdghi:m:s: --long create,delete,get,help,id:,message:,subject: -- "$@")
# shellcheck disable=SC2181
if [[ ${?} -ne 0 ]]; then
exit 1;
fi
usage () {
echo "${0}: -c|--create -m|--message <'Your announcement goes here'> -s|--subject <Your subject goes here>"
echo "${0}: -d|--delete -i|--id <announcement_id>"
echo "${0}: -g|--get"
exit 1
}
eval set -- "${VALID_ARGS}"
# shellcheck disable=SC2078
while [ : ]; do
case "$1" in
-c | --create)
method='POST'
shift
;;
-d | --delete)
method='DELETE'
shift
;;
-g | --get)
method='GET'
shift
;;
-h | --help)
usage
;;
-i | --id)
argument="${2}"
shift 2
;;
-m | --message)
message="${2}"
shift 2
;;
-s | --subject)
subject="${2}"
shift 2
;;
*)
break
;;
esac
done
if [[ ${method} == 'DELETE' ]] && [[ -z ${argument} ]]; then
usage
fi
if [[ ${method} == 'POST' ]]; then
if [[ -z ${message} ]] || [[ -z ${subject} ]]; then
usage
fi
argument='{"subject":"'${subject}'","message":"'${message}'", "plainMessage":"'${message}'", "groups": [], "userId": "admin", "activities": false, "notifications": true, "emails": false, "comments": false }'
fi
curl_cmd(){
local method="${1}"
if [[ ${method} == 'POST' ]] && [[ -n ${2} ]]; then
local payload=(-d "${2}" -H "Content-Type: application/json")
elif [[ ${method} == 'DELETE' ]] && [[ -n ${2} ]]; then
local id="/${2}"
fi
local admin_app_password="<%= @admin_app_password %>"
domain="$(hostname -d)"
curl -s -X "${method}" -u "admin:${admin_app_password}" "${payload[@]}" -H 'OCS-APIRequest: true' "https://${domain}/ocs/v2.php/apps/announcementcenter/api/v1/announcements${id}"
}
curl_cmd "${method}" "${argument}"

View file

@ -6,14 +6,6 @@ sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk
number_of_full_to_keep='<%= @full_backup_retention %>' number_of_full_to_keep='<%= @full_backup_retention %>'
fork_limit=30 #in GB, if bigger than this number, we fork the backup to its own process fork_limit=30 #in GB, if bigger than this number, we fork the backup to its own process
customer="<%= @customer %>" customer="<%= @customer %>"
environment="<%= @environment %>"
declare -A extra_backup_jobs
#<% if @extra_backup_jobs.any? %>
#<% @extra_backup_jobs.each do |client, job| %>
extra_backup_jobs["<%= client %>"]="<%= job %>"
#<% end %>
#<% end %>
#<% if @location.start_with?('common') %> #<% if @location.start_with?('common') %>
declare -a sixmonths=('mau') declare -a sixmonths=('mau')
if [[ " ${sixmonths[*]} " =~ " ${customer} " ]]; then if [[ " ${sixmonths[*]} " =~ " ${customer} " ]]; then
@ -21,20 +13,20 @@ if [[ " ${sixmonths[*]} " =~ " ${customer} " ]]; then
fi fi
declare -a projects declare -a projects
#<% @singlenodes.each do |singlenode| %> #<% @singlenodes.each do |singlenode| %>
projects+=("<%= @full_project_mapping[singlenode][@environment]['primary_project'] %> <%= @full_project_mapping[singlenode][@environment]['mirror_project'] %> <%= singlenode %>") projects+=("<%= @full_project_mapping[singlenode][@environment]['primary_project'] %> <%= @full_project_mapping[singlenode][@environment]['mirror_project'] %>")
#<% @full_project_mapping[singlenode][@environment]['assigned'].each do |project| %> #<% @full_project_mapping[singlenode][@environment]['assigned'].each do |project| %>
projects+=("<%= project['project'] %> <%= project['mirror_project'] %> <%= singlenode %>") projects+=("<%= project['project'] %> <%= project['mirror_project'] %>")
#<% end %> #<% end %>
#<% end %> #<% end %>
#<% else %> #<% else %>
declare -a projects=("<%= @primary_project %> <%= @mirror_project %> ${customer}") declare -a projects=("<%= @primary_project %> <%= @mirror_project %>")
#<% @assigned_projects.each do |project| %> #<% @assigned_projects.each do |project| %>
projects+=("<%= project['project'] %> <%= project['mirror_project'] %> ${customer}") projects+=("<%= project['project'] %> <%= project['mirror_project'] %>")
#<% end %> #<% end %>
#<% end %> #<% end %>
if [[ ${customer} == 'common' ]]; then if [[ ${customer} == 'common' ]]; then
projects+=("<%= @location %> <%= @location %>-mirror ${customer}") projects+=("<%= @location %> <%= @location %>-mirror")
fi fi
@ -42,30 +34,23 @@ function do_backup {
local project="${1}" local project="${1}"
local mirror="${2}" local mirror="${2}"
local bucket="${3}" local bucket="${3}"
local customer="${4}"
local mirrorbucket="${bucket}-mirror" local mirrorbucket="${bucket}-mirror"
local mountpoint="/opt/backupmounts/${bucket}" local mountpoint="/opt/backupmounts/${bucket}"
ps aux | grep duplicity | grep "[^a-zA-Z]${bucket}" > /dev/null
local oktorun=$? # 1 == this bucket has no other backup process in progress
if [[ ${oktorun} -ne 0 ]]; then
mkdir -p ${mountpoint} mkdir -p ${mountpoint}
rclone mount ${project}:${bucket} ${mountpoint}/ --daemon --allow-other --dir-cache-time 24h rclone mount ${project}:${bucket} ${mountpoint}/ --daemon --allow-other --dir-cache-time 24h
rclone mkdir ${mirror}:${mirrorbucket} rclone mkdir ${mirror}:${mirrorbucket}
duplicity --full-if-older-than 1M --asynchronous-upload --tempdir /mnt --archive-dir /mnt --no-encryption ${mountpoint} rclone://${mirror}:/${mirrorbucket} duplicity --full-if-older-than 1M --asynchronous-upload --tempdir /mnt --archive-dir /mnt \
if [[ "${extra_backup_jobs[${customer}]:+found}" == "found" ]] && [[ -f "${extra_backup_jobs[${customer}]}" ]]; then --no-encryption ${mountpoint} rclone://${mirror}:/${mirrorbucket}
${extra_backup_jobs[${customer}]} ${project} ${mirror} ${bucket} ${customer} ${environment}
fi
umount ${mountpoint} umount ${mountpoint}
rmdir ${mountpoint} rmdir ${mountpoint}
# Clean up # Clean up
duplicity remove-all-but-n-full ${number_of_full_to_keep} --tempdir /mnt --archive-dir /mnt --force rclone://${mirror}:/${mirrorbucket} duplicity remove-all-but-n-full ${number_of_full_to_keep} --tempdir /mnt --archive-dir /mnt \
fi --force rclone://${mirror}:/${mirrorbucket}
} }
for entry in "${projects[@]}"; do for entry in "${projects[@]}"; do
project=$(echo ${entry} | awk '{print $1}') project=$(echo ${entry} | awk '{print $1}')
mirror=$(echo ${entry} | awk '{print $2}') mirror=$(echo ${entry} | awk '{print $2}')
customer=$(echo ${entry} | awk '{print $3}')
for bucket in $(rclone lsd ${project}:/ | awk '{print $5}'); do for bucket in $(rclone lsd ${project}:/ | awk '{print $5}'); do
maybesize=$(timeout 30s rclone size --json ${project}:${bucket}) maybesize=$(timeout 30s rclone size --json ${project}:${bucket})
if [[ ${?} -eq 124 ]]; then if [[ ${?} -eq 124 ]]; then
@ -75,9 +60,9 @@ for entry in "${projects[@]}"; do
fi fi
# If bucket is above 50 GB we fork # If bucket is above 50 GB we fork
if [[ ${size} -gt $((${fork_limit} * 1000000000)) ]]; then if [[ ${size} -gt $((${fork_limit} * 1000000000)) ]]; then
do_backup ${project} ${mirror} ${bucket} ${customer} & do_backup ${project} ${mirror} ${bucket} &
else else
do_backup ${project} ${mirror} ${bucket} ${customer} do_backup ${project} ${mirror} ${bucket}
fi fi
done done
done done
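
The per-bucket guard in do_backup deserves a note: the two-stage pipeline keeps the greps from matching their own command lines (the first grep's line lacks the bucket name, the second's lacks the string "duplicity"), so oktorun is non-zero exactly when no other duplicity job is touching the bucket. A minimal standalone sketch, with a hypothetical bucket name:

bucket="examplebucket"   # hypothetical
if ! ps aux | grep duplicity | grep -q "[^a-zA-Z]${bucket}"; then
    echo "no duplicity job running for ${bucket}, safe to start"
fi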

View file

@ -0,0 +1,86 @@
#!/bin/bash
# Backup all buckets
# We sleep a deterministic amount of time, which will be between 0 and 128 m and always the same within
# a specific host, but will differ between hosts
sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk '{print $1}') / 2))m
number_of_full_to_keep="<%= @full_backup_retention %>"
fork_limit=30 #in GB, if bigger than this number, we fork the backup to its own process
split_limit=1000 #in GB, if bigger than this number, we fork backup of each directory to its own process
declare -a projects=("<%= @primary_project %> <%= @mirror_project %>")
#<% @assigned_projects.each do |project| %>
projects+=("<%= project['project'] %> <%= project['mirror_project'] %>")
#<% end %>
function do_huge_backup {
local project="${1}"
local mirror="${2}"
local bucket="${3}"
declare -a directories
declare -a empty
for dir in $(rclone lsd ${project}:${bucket} | awk '{print $NF}'); do
directories+=("${dir}")
mountpoint="/opt/backupmounts/${bucket}-${dir}"
do_backup ${project} ${mirror} ${bucket} ${mountpoint} ${dir} ${empty} &
done
mountpoint="/opt/backupmounts/${bucket}"
do_backup ${project} ${mirror} ${bucket} ${mountpoint} none ${directories[@]} &
}
function do_backup {
local project="${1}"
shift
local mirror="${1}"
shift
local bucket="${1}"
shift
local mountpoint="${1}"
shift
local dire="${1}"
shift
declare -a exclude
exclude=( "${@}" )
suffix=""
opts=""
if [[ "${dire}" != "none" ]]; then
suffix="/${dire}"
fi
if ((${#exclude[@]})); then
for dir in "${exclude[@]}"; do
opts="${opts} --exclude /${dir}"
done
fi
local mirrorbucket="${bucket}-mirror"
mkdir -p ${mountpoint}
rclone mount ${project}:${bucket}${suffix} ${mountpoint}/ --daemon --allow-other
rclone mkdir ${mirror}:${mirrorbucket}${suffix}
duplicity --full-if-older-than 1M --asynchronous-upload --tempdir /mnt --archive-dir /mnt ${opts} \
--no-encryption ${mountpoint} rclone://${mirror}:/${mirrorbucket}${suffix}
umount ${mountpoint}
rmdir ${mountpoint}
# Clean up
duplicity remove-all-but-n-full ${number_of_full_to_keep} --tempdir /mnt --archive-dir /mnt \
--force rclone://${mirror}:/${mirrorbucket}${suffix}
}
for entry in "${projects[@]}"; do
project=$(echo ${entry} | awk '{print $1}')
mirror=$(echo ${entry} | awk '{print $2}')
declare -a empty
for bucket in $(rclone lsd ${project}:/ | awk '{print $5}'); do
size=$(rclone size --json ${project}:${bucket} | jq -r '.bytes')
mirrorbucket="${bucket}-mirror"
mountpoint="/opt/backupmounts/${bucket}"
# If bucket is above ${split_limit} we fork and do backup per directory
if [[ ${size} -gt $((${split_limit} * 1000000000)) ]]; then
do_huge_backup ${project} ${mirror} ${bucket} &
# If bucket is above ${fork_limit} we fork and do backup for bucket
elif [[ ${size} -gt $((${fork_limit} * 1000000000)) ]]; then
do_backup ${project} ${mirror} ${bucket} ${mountpoint} none ${empty} &
else
# If bucket is below ${fork_limit} we do not fork and do backup for bucket
do_backup ${project} ${mirror} ${bucket} ${mountpoint} none ${empty}
fi
done
done
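
The two thresholds are plain decimal byte arithmetic (30 GB -> 30000000000 bytes). A condensed sketch of the per-bucket decision, reusing the same rclone size call as above:

size=$(rclone size --json "${project}:${bucket}" | jq -r '.bytes')
if [[ ${size} -gt $((split_limit * 1000000000)) ]]; then
    : # huge: fork one backup per top-level directory, plus the root with --exclude for each
elif [[ ${size} -gt $((fork_limit * 1000000000)) ]]; then
    : # large: fork a single backup for the whole bucket
else
    : # small: back up inline
fi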

View file

@ -1,8 +0,0 @@
#!/bin/bash
project="${1}"
mirror="${2}"
bucket="${3}"
customer="${4}"
environment="${5}"
rsync -e "ssh -i ${HOME}/.ssh/id_script" -avz "/opt/backupmounts/${bucket}" "sd-${environment}@sd-${environment}-backup.hb.se:~/sd-${environment}/${bucket}"

View file

@ -6,14 +6,11 @@ sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk
number_of_full_to_keep="<%= @full_backup_retention %>" number_of_full_to_keep="<%= @full_backup_retention %>"
backup="${1}" backup="${1}"
if [[ -z ${backup} ]]; then
backup="backup1.$(hostname -d)"
fi
if ! [[ ${backup} =~ backup1.*sunet.se$ ]]; then if ! [[ ${backup} =~ backup1.*sunet.se$ ]]; then
echo "Usage: ${0} <fqdn of backup server>" echo "Usage: ${0} <fqdn of backup server>"
echo "Example: ${0} backup1.sunet.drive.sunet.se" echo "Example: ${0} backup1.sunet.drive.sunet.se"
fi fi
backup_dir="/opt/backups/backup-files" backup_dir="/opt/backups"
bucket="db-backups" bucket="db-backups"
mirror="<%= @customer %>-<%= @environment %>-mirror" mirror="<%= @customer %>-<%= @environment %>-mirror"
if [[ ${mirror} =~ common-(test|prod)-mirror ]]; then if [[ ${mirror} =~ common-(test|prod)-mirror ]]; then
@ -21,15 +18,16 @@ if [[ ${mirror} =~ common-(test|prod)-mirror ]]; then
bucket="${bucket}-${suffix}" bucket="${bucket}-${suffix}"
backup_dir="${backup_dir}-${suffix}" backup_dir="${backup_dir}-${suffix}"
fi fi
mkdir -p ${backup_dir}
echo "Backing up database for ${backup}" echo "Backing up database for ${backup}"
ssh ${backup} "sudo /home/script/bin/backup_db.sh" ssh ${backup} "sudo /home/script/bin/backup_db.sh"
echo "Cleaning up old backups for ${backup}" echo "Cleaning up old backups for ${backup}"
ssh ${backup} "sudo /home/script/bin/purge_backups.sh /opt/mariadb_backup/backups/" ssh ${backup} "sudo /home/script/bin/purge_backups.sh /opt/mariadb_backup/backups/"
echo "Copying backups here" echo "Copying backups here"
mkdir -p ${backup_dir}
scp script@${backup}:/opt/mariadb_backup/backups/$(date +%Y/%m/%d)/*.gz ${backup_dir} scp script@${backup}:/opt/mariadb_backup/backups/$(date +%Y/%m/%d)/*.gz ${backup_dir}
echo "Copying backups to remote bucket" echo "Copying backups to remote bucket"
rclone mkdir ${mirror}:${bucket} rclone mkdir ${mirror}:${bucket}
duplicity --allow-source-mismatch --full-if-older-than 1M --tempdir /mnt --archive-dir /mnt --no-encryption ${backup_dir} rclone://${mirror}:/${bucket} duplicity --full-if-older-than 1M --tempdir /mnt --archive-dir /mnt --no-encryption ${backup_dir} rclone://${mirror}:/${bucket}
duplicity remove-all-but-n-full ${number_of_full_to_keep} --tempdir /mnt --archive-dir /mnt --force rclone://${mirror}:/${bucket} duplicity remove-all-but-n-full ${number_of_full_to_keep} --tempdir /mnt --archive-dir /mnt --force rclone://${mirror}:/${bucket}
rm -rf "${backup_dir}" echo "cleaning up"
rm -r ${backup_dir}
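
For reference, restoring one of these mirrors is the symmetric duplicity invocation. A minimal sketch, assuming the same rclone remote layout; the mirror, bucket and target directory below are placeholders:

duplicity restore --no-encryption --tempdir /mnt --archive-dir /mnt \
    rclone://common-test-mirror:/db-backups /opt/restore/db-backups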

View file

@ -6,7 +6,7 @@ sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk
number_of_full_to_keep=7 number_of_full_to_keep=7
backup="multinode-db1.$(hostname -d)" backup="multinode-db1.$(hostname -d)"
remote_backup_dir="/etc/mariadb/backups" remote_backup_dir="/etc/mariadb/backups"
backup_dir="/opt/backups/multinode" backup_dir="/opt/backups"
bucket="db-backups-multinode" bucket="db-backups-multinode"
mirror="common-<%= @environment %>-mirror" mirror="common-<%= @environment %>-mirror"
echo "Backing up all databases for for multinode customer" echo "Backing up all databases for for multinode customer"
@ -16,7 +16,7 @@ mkdir -p ${backup_dir}
scp "script@${backup}:${remote_backup_dir}/mariadb-dump*.sql.gz" "${backup_dir}" scp "script@${backup}:${remote_backup_dir}/mariadb-dump*.sql.gz" "${backup_dir}"
echo "Copying backups to remote bucket" echo "Copying backups to remote bucket"
rclone mkdir "${mirror}:${bucket}" rclone mkdir "${mirror}:${bucket}"
duplicity --allow-source-mismatch --full-if-older-than 1M --tempdir /mnt --archive-dir /mnt --no-encryption "${backup_dir}" "rclone://${mirror}:/${bucket}" duplicity --full-if-older-than 1M --tempdir /mnt --archive-dir /mnt --no-encryption "${backup_dir}" "rclone://${mirror}:/${bucket}"
duplicity remove-all-but-n-full "${number_of_full_to_keep}" --tempdir /mnt --archive-dir /mnt --force "rclone://${mirror}:/${bucket}" duplicity remove-all-but-n-full "${number_of_full_to_keep}" --tempdir /mnt --archive-dir /mnt --force "rclone://${mirror}:/${bucket}"
echo "cleaning up" echo "cleaning up"
rm -r "${backup_dir}" rm -r "${backup_dir}"

View file

@ -2,7 +2,7 @@
project="${1}" project="${1}"
bucket="${2}" bucket="${2}"
declare -a sixmonths=('multinode') declare -a sixmonths=('mau')
output_status="OK" output_status="OK"
exit_status=0 exit_status=0
problems="" problems=""

View file

@ -25,7 +25,7 @@ dirty=0
primary='' primary=''
declare -a users=( 'admin' ) declare -a users=( 'admin' )
cd "${tempdir}" || echo "Could not cd to tempdir" cd "${tempdir}" || echo "Could not cd to tempdir"
declare -a projects=( $("${yq}" -r '.project_mapping.'"${customer}"'.'"${environment}"'.assigned | "\(.[].project)"' /etc/hiera/data/common.yaml) ) declare -a projects=( "${yq}" -r '.project_mapping.'"${customer}"'.'"${environment}"'.assigned | "\(.[].project)"' /etc/hiera/data/common.yaml )
if [[ "${include_userbuckets}" == "true" ]]; then if [[ "${include_userbuckets}" == "true" ]]; then
primary=$("${yq}" -r '.project_mapping.'"${customer}"'.'"${environment}"'.primary_project' /etc/hiera/data/common.yaml) primary=$("${yq}" -r '.project_mapping.'"${customer}"'.'"${environment}"'.primary_project' /etc/hiera/data/common.yaml)
projects+=( "${primary}" ) projects+=( "${primary}" )

View file

@ -1,35 +0,0 @@
#!/bin/bash
VALID_ARGS=$(getopt -o s: --long subject: -- "$@")
# shellcheck disable=SC2181
if [[ ${?} -ne 0 ]]; then
exit 1;
fi
usage () {
echo "${0}: -s|--subject <subject>"
exit 1
}
eval set -- "${VALID_ARGS}"
# shellcheck disable=SC2078
while [ : ]; do
case "$1" in
-s | --subject)
subject="${2}"
shift 2
;;
*)
break
;;
esac
done
if [[ -z ${subject} ]]; then
usage
fi
id=$(/root/tasks/announce.sh --get | xmlstarlet sel -t -i '//subject="'"${subject}"'"' -m "/ocs/data/element/id" -v .)
if [[ -n ${id} ]]; then
/root/tasks/announce.sh --delete --id "${id}"
fi

View file

@ -11,7 +11,7 @@ base_dir="${project}:${bucket}"
stat_dir="/opt/statistics" stat_dir="/opt/statistics"
customer_dir="${stat_dir}/${location}" customer_dir="${stat_dir}/${location}"
mkdir -p "${customer_dir}" mkdir -p "${customer_dir}"
rclone mkdir --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies "${base_dir}/${location}" rclone mkdir "${base_dir}/${location}"
echo "${userjson}" | jq . >"${customer_dir}/users.json" echo "${userjson}" | jq . >"${customer_dir}/users.json"
status=${?} status=${?}
if [[ ${status} -eq 0 ]]; then if [[ ${status} -eq 0 ]]; then

View file

@ -1,81 +0,0 @@
#!/bin/bash
VALID_ARGS=$(getopt -o cdghi:m:s: --long create,delete,get,help,id:,message:,subject: -- "$@")
# shellcheck disable=SC2181
if [[ ${?} -ne 0 ]]; then
exit 1;
fi
usage () {
echo "${0}: -c|--create -m|--message <'Your announcement goes here'> -s|--subject <Your subject goes here>"
echo "${0}: -d|--delete -i|--id <announcement_id>"
echo "${0}: -g|--get"
exit 1
}
eval set -- "${VALID_ARGS}"
# shellcheck disable=SC2078
while [ : ]; do
case "$1" in
-c | --create)
method='POST'
shift
;;
-d | --delete)
method='DELETE'
shift
;;
-g | --get)
method='GET'
shift
;;
-h | --help)
usage
;;
-i | --id)
argument="${2}"
shift 2
;;
-m | --message)
message="${2}"
shift 2
;;
-s | --subject)
subject="${2}"
shift 2
;;
*)
break
;;
esac
done
if [[ ${method} == 'DELETE' ]] && [[ -z ${argument} ]]; then
usage
fi
if [[ ${method} == 'POST' ]]; then
if [[ -z ${message} ]] || [[ -z ${subject} ]]; then
usage
fi
argument='{"subject":"'${subject}'","message":"'${message}'", "plainMessage":"'${message}'", "groups": [], "userId": "admin", "activities": false, "notifications": true, "emails": false, "comments": false }'
fi
curl_cmd(){
local admin_app_password="${1}"
local customer="${2}"
local method="${3}"
if [[ ${method} == 'POST' ]] && [[ -n ${4} ]]; then
local payload=(-d "${4}" -H "Content-Type: application/json")
elif [[ ${method} == 'DELETE' ]] && [[ -n ${4} ]]; then
local id="/${4}"
fi
domain="$(hostname -d)"
curl -X "${method}" -u "admin:${admin_app_password}" "${payload[@]}" -H 'OCS-APIRequest: true' "https://${customer}.${domain}/ocs/v2.php/apps/announcementcenter/api/v1/announcements${id}"
}
#<%- index = 0 %>
#<%- @multinodes.each do |customer| %>
curl_cmd "<%= @multinode_passwords[index] %>" "<%= customer %>" "${method}" "${argument}"
#<%- index += 1 %>
#<%- end %>

View file

@ -14,7 +14,7 @@ endpoint = <%= @s3_host_mirror %>
acl = private acl = private
[statistics] [statistics]
type = webdav type = webdav
url = https://89.45.236.246/remote.php/dav/files/_script/ url = https://89.46.20.42/remote.php/dav/files/_script/
vendor = nextcloud vendor = nextcloud
user = _script user = _script
pass = <%= @statistics_secret %> pass = <%= @statistics_secret %>
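
With the [statistics] remote defined above, the WebDAV share can be exercised directly. A hypothetical invocation; the extra flags mirror the rclone call changed earlier in this compare and are needed because the remote URL is an IP address rather than the certificate's hostname:

rclone lsd --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" \
    --use-cookies statistics:/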

View file

@ -1,12 +1,12 @@
#!/usr/bin/env python3 #!/usr/bin/env python3.9
# vim: set filetype=python: # vim: set filetype=python:
import subprocess
import sys import sys
import time import time
import urllib.parse import urllib.parse
import requests import requests
from drive_utils import (build_fqdn, get_ips_for_hostname, run_remote_command, from drive_utils import (build_fqdn, get_ips_for_hostname, run_remote_command,
smoketest_db_node) smoketest_db_node)
@ -30,10 +30,7 @@ def add_downtime(fqdn: str,
post_url = 'https://{}/thruk/r/hosts/{}/cmd/{}'.format( post_url = 'https://{}/thruk/r/hosts/{}/cmd/{}'.format(
monitor_host, fqdn, action) monitor_host, fqdn, action)
headers = {'X-Thruk-Auth-Key': apikey} headers = {'X-Thruk-Auth-Key': apikey}
try:
requests.post(post_url, data=data, headers=headers) requests.post(post_url, data=data, headers=headers)
except Exception:
print("Failed to add downtime for {}".format(fqdn))
def remove_downtime(fqdn: str, def remove_downtime(fqdn: str,
@ -45,78 +42,51 @@ def remove_downtime(fqdn: str,
get_url = 'https://{}/thruk/r/hosts?name={}&columns=services'.format( get_url = 'https://{}/thruk/r/hosts?name={}&columns=services'.format(
monitor_host, fqdn) monitor_host, fqdn)
headers = {'X-Thruk-Auth-Key': apikey} headers = {'X-Thruk-Auth-Key': apikey}
try:
req = requests.get(get_url, headers=headers) req = requests.get(get_url, headers=headers)
action = 'del_active_service_downtimes' action = 'del_active_service_downtimes'
for service in req.json()[0]['services']: for service in req.json()[0]['services']:
post_url = 'https://{}/thruk/r/services/{}/{}/cmd/{}'.format( post_url = 'https://{}/thruk/r/services/{}/{}/cmd/{}'.format(
monitor_host, fqdn, urllib.parse.quote(service), action) monitor_host, fqdn, urllib.parse.quote(service), action)
requests.post(post_url, headers=headers) requests.post(post_url, headers=headers)
except Exception:
print("Failed to remove downtime for {}".format(fqdn))
def main() -> int: def main() -> int:
customers = ["<%= @customer %>"] customer = "<%= @customer %>"
environment = "<%= @environment %>" environment = "<%= @environment %>"
apikey_test = "<%= @apikey_test %>" apikey_test = "<%= @apikey_test %>"
apikey_prod = "<%= @apikey_prod %>" apikey_prod = "<%= @apikey_prod %>"
user = "script" user = "script"
backup_command = ['sudo /home/script/bin/backup_db.sh']
reboot_command = ['sudo /usr/local/bin/safer_reboot'] reboot_command = ['sudo /usr/local/bin/safer_reboot']
if customers[0] == "common":
customers = ["gss", "lookup", "multinode"]
for customer in customers:
backup_type = "backup"
if customer == "gss":
backup_type = "gssbackup"
elif customer == "lookup":
backup_type = "lookupbackup"
elif customer == "multinode":
backup_command = ['sudo /home/script/bin/backup_multinode_db.sh']
backup_type = "multinode-db"
backup = build_fqdn(customer, environment, 1, backup_type)
print("\tRunning backup command at {}".format(backup))
run_remote_command(backup,
backup_command,
user="script",
output=subprocess.DEVNULL)
for number in reversed(range(1, 4)): for number in reversed(range(1, 4)):
fqdn = build_fqdn(customer, environment, number) fqdn = build_fqdn(customer, environment, number)
ipv4, _ = get_ips_for_hostname(fqdn) ipv4, _ = get_ips_for_hostname(fqdn)
ip = ipv4[0] ip = ipv4[0]
print("Upgrading: {} with ip: {}".format(fqdn, ip)) print("Upgrading: {} with ip: {}".format(fqdn, ip))
add_downtime(fqdn, apikey_test) add_downtime(fqdn, apikey_test)
add_downtime(fqdn, add_downtime(fqdn, apikey_prod, monitor_host="monitor.drive.sunet.se")
apikey_prod,
monitor_host="monitor.drive.sunet.se")
run_remote_command(fqdn, reboot_command, user=user) run_remote_command(fqdn, reboot_command, user = user)
success = False success = False
for testnumber in reversed(range(1, 32, 2)): for testnumber in reversed(range(1, 32, 2)):
print( print("\tSleeping for {} seconds before smoketest on {}".format(
"\tSleeping for {} seconds before smoketest on {}".format(
testnumber, fqdn)) testnumber, fqdn))
time.sleep(testnumber) time.sleep(testnumber)
if smoketest_db_node(fqdn, user=user): if smoketest_db_node(fqdn, user = user):
success = True success = True
break break
remove_downtime(fqdn, apikey_test) remove_downtime(fqdn, apikey_test)
remove_downtime(fqdn, remove_downtime(fqdn, apikey_prod, monitor_host="monitor.drive.sunet.se")
apikey_prod,
monitor_host="monitor.drive.sunet.se")
if success: if success:
print("Upgrade cycle succeeded on {} ".format(fqdn)) print("Upgrade cycle succeeded on {} ".format(fqdn))
else: else:
print("Smoketest failed on {} after server reboot command". print("Smoketest failed on {} after server reboot command".format(
format(fqdn)) fqdn))
return 5 return 5
print("All {}-servers successfully upgraded for {}".format( print("All {}-servers successfully upgraded for {}".format(

View file

@ -1,4 +1,4 @@
#!/usr/bin/env python3 #!/usr/bin/env python3.9
# vim: set filetype=python: # vim: set filetype=python:
import subprocess import subprocess
@ -30,10 +30,7 @@ def add_downtime(fqdn: str,
post_url = 'https://{}/thruk/r/hosts/{}/cmd/{}'.format( post_url = 'https://{}/thruk/r/hosts/{}/cmd/{}'.format(
monitor_host, fqdn, action) monitor_host, fqdn, action)
headers = {'X-Thruk-Auth-Key': apikey} headers = {'X-Thruk-Auth-Key': apikey}
try:
requests.post(post_url, data=data, headers=headers) requests.post(post_url, data=data, headers=headers)
except Exception:
print("Failed to add downtime for: {}".format(fqdn))
def remove_downtime(fqdn: str, def remove_downtime(fqdn: str,
@ -45,18 +42,15 @@ def remove_downtime(fqdn: str,
get_url = 'https://{}/thruk/r/hosts?name={}&columns=services'.format( get_url = 'https://{}/thruk/r/hosts?name={}&columns=services'.format(
monitor_host, fqdn) monitor_host, fqdn)
headers = {'X-Thruk-Auth-Key': apikey} headers = {'X-Thruk-Auth-Key': apikey}
try:
req = requests.get(get_url, headers=headers) req = requests.get(get_url, headers=headers)
action = 'del_active_service_downtimes' action = 'del_active_service_downtimes'
for service in req.json()[0]['services']: for service in req.json()[0]['services']:
post_url = 'https://{}/thruk/r/services/{}/{}/cmd/{}'.format( post_url = 'https://{}/thruk/r/services/{}/{}/cmd/{}'.format(
monitor_host, fqdn, urllib.parse.quote(service), action) monitor_host, fqdn, urllib.parse.quote(service), action)
requests.post(post_url, headers=headers) requests.post(post_url, headers=headers)
except Exception:
print("Failed to remove downtime for: {}".format(fqdn))
def run_command(command: list) -> tuple: def run_command(command: list[str]) -> tuple:
with subprocess.Popen(command, with subprocess.Popen(command,
stdout=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc: stderr=subprocess.PIPE) as proc:
@ -74,7 +68,6 @@ def main() -> int:
apikey_test = "<%= @apikey_test %>" apikey_test = "<%= @apikey_test %>"
apikey_prod = "<%= @apikey_prod %>" apikey_prod = "<%= @apikey_prod %>"
backup_command = ['sudo /home/script/bin/backup_db.sh']
cosmos_command = ['sudo run-cosmos'] cosmos_command = ['sudo run-cosmos']
nc_upgrade_command = 'sudo /usr/local/bin/occ config:editable --on ' nc_upgrade_command = 'sudo /usr/local/bin/occ config:editable --on '
nc_upgrade_command += '&& sudo /usr/local/bin/occ upgrade ' nc_upgrade_command += '&& sudo /usr/local/bin/occ upgrade '
@ -87,18 +80,9 @@ def main() -> int:
repair_command += '&& sudo /usr/local/bin/occ db:add-missing-primary-keys' repair_command += '&& sudo /usr/local/bin/occ db:add-missing-primary-keys'
reboot_command = ['sudo /usr/local/bin/safer_reboot'] reboot_command = ['sudo /usr/local/bin/safer_reboot']
server_type = "node" server_type = "node"
backup_type = "backup"
if customer == "common": if customer == "common":
customer = "gss" customer = "gss"
server_type = "gss" server_type = "gss"
backup_type = "gssbackup"
backup = build_fqdn(customer, environment, 1, backup_type)
print("\tRunning backup command at {}".format(backup))
run_remote_command(backup,
backup_command,
user="script",
output=subprocess.DEVNULL)
for number in reversed(range(1, 4)): for number in reversed(range(1, 4)):
fqdn = build_fqdn(customer, environment, number, server_type) fqdn = build_fqdn(customer, environment, number, server_type)

View file

@ -11,7 +11,5 @@ else
fi fi
docker exec ${container} /do_backup.sh ${customer} docker exec ${container} /do_backup.sh ${customer}
chown root:script /opt/mariadb_backup/
chmod 750 /opt/mariadb_backup/
chmod 755 ${backupdir} chmod 755 ${backupdir}
chown -R script:root ${backupdir} chown -R script:root ${backupdir}

View file

@ -1,107 +0,0 @@
# THIS FILE IS MANAGED BY PUPPET
# ~/.bashrc: executed by bash(1) for non-login shells.
# see /usr/share/doc/bash/examples/startup-files (in the package bash-doc)
# for examples
# If not running interactively, don't do anything
[ -z "$PS1" ] && return
# don't put duplicate lines in the history. See bash(1) for more options
# ... or force ignoredups and ignorespace
HISTCONTROL=ignoredups:ignorespace
# append to the history file, don't overwrite it
shopt -s histappend
# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
HISTSIZE=1000
HISTFILESIZE=2000
# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
shopt -s checkwinsize
# make less more friendly for non-text input files, see lesspipe(1)
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# set a fancy prompt (non-color, unless we know we "want" color)
case "$TERM" in
xterm-color) color_prompt=yes ;;
esac
# uncomment for a colored prompt, if the terminal has the capability; turned
# off by default to not distract the user: the focus in a terminal window
# should be on the output of commands, not on the prompt
#force_color_prompt=yes
if [ -n "$force_color_prompt" ]; then
if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then
# We have color support; assume it's compliant with Ecma-48
# (ISO/IEC-6429). (Lack of such support is extremely rare, and such
# a case would tend to support setf rather than setaf.)
color_prompt=yes
else
color_prompt=
fi
fi
if [ "$color_prompt" = yes ]; then
PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@$(hostname -f)\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
else
PS1='${debian_chroot:+($debian_chroot)}\u@$(hostname -f):\w\$ '
fi
unset color_prompt force_color_prompt
# If this is an xterm set the title to user@host:dir
case "$TERM" in
xterm* | rxvt*)
PS1="\[\e]0;${debian_chroot:+($debian_chroot)}\u@$(hostname -f): \w\a\]$PS1"
;;
*) ;;
esac
# enable color support of ls and also add handy aliases
if [ -x /usr/bin/dircolors ]; then
test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
alias ls='ls --color=auto'
#alias dir='dir --color=auto'
#alias vdir='vdir --color=auto'
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias egrep='egrep --color=auto'
fi
# some more ls aliases
alias ll='ls -alF'
alias la='ls -A'
alias l='ls -CF'
if [[ $(hostname) =~ redis ]]; then
if [ -f /opt/redis/node-0/server.conf ]; then
alias redis-cli="redis-cli -a $(grep requirepass /opt/redis/node-0/server.conf | awk '{print $2}' | sed 's/"//g') --no-auth-warning"
else
alias redis-cli="redis-cli -a $(grep requirepass /opt/redict/node-0/server.conf | awk '{print $2}' | sed 's/"//g') --no-auth-warning"
fi
fi
# Alias definitions.
# You may want to put all your additions into a separate file like
# ~/.bash_aliases, instead of adding them here directly.
# See /usr/share/doc/bash-doc/examples in the bash-doc package.
if [ -f ~/.bash_aliases ]; then
. ~/.bash_aliases
fi
# enable programmable completion features (you don't need to enable
# this, if it's already enabled in /etc/bash.bashrc and /etc/profile
# sources /etc/bash.bashrc).
#if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
# . /etc/bash_completion
#fi

View file

@ -1,31 +0,0 @@
#!/usr/bin/env python3
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument('-w', '--warning',
help='Warning threshold',
required=True)
parser.add_argument('-c', '--critical',
help='Critical threshold',
required=True)
args = parser.parse_args()
warning = int(args.warning)
critical = int(args.critical)
with open('/proc/uptime', 'r') as f:
uptime_seconds = float(f.readline().split()[0])
days = int(uptime_seconds / 86400)
status = "OK"
exit = 0
if days > warning:
status = "WARNING"
exit = 1
if days > critical:
status = "CRITICAL"
exit = 2
print(f"{status}: uptime {days} days | uptime={days};{warning};{critical};")
sys.exit(exit)
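
For reference, the removed check took its thresholds in days and printed Nagios-style output with perfdata. A typical invocation (the filename is a placeholder, since the compare view does not show paths):

./check_uptime.py --warning 30 --critical 90
# OK: uptime 12 days | uptime=12;30;90;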

View file

@ -38,7 +38,7 @@ preexisting="$(docker exec -u www-data -i "${container}" php --define apc.enable
if [[ -z ${preexisting} ]]; then if [[ -z ${preexisting} ]]; then
docker exec -u www-data -i "${container}" php --define apc.enable_cli=1 /var/www/html/occ files_external:create "${user_bucket_name}" \ docker exec -u www-data -i "${container}" php --define apc.enable_cli=1 /var/www/html/occ files_external:create "${user_bucket_name}" \
amazons3 -c bucket="${bucket}" -c key="${key}" -c secret="${secret}" -c hostname="${endpoint}" -c use_ssl=true -c use_path_style=true -c region=us-east-1 \ amazons3 -c bucket="${bucket}" -c key="${key}" -c secret="${secret}" -c hostname="${endpoint}" -c use_ssl=true -c use_path_style=true -c region=us-east-1 \
-c useMultipartCopy=false amazons3::accesskey --user ${user} amazons3::accesskey --user ${user}
for shareid in $(docker exec -u www-data -i ${container} php --define apc.enable_cli=1 /var/www/html/occ files_external:export ${user} | jq -r '.[].mount_id'); do for shareid in $(docker exec -u www-data -i ${container} php --define apc.enable_cli=1 /var/www/html/occ files_external:export ${user} | jq -r '.[].mount_id'); do
docker exec -u www-data -i ${container} php --define apc.enable_cli=1 /var/www/html/occ files_external:option ${shareid} enable_sharing true docker exec -u www-data -i ${container} php --define apc.enable_cli=1 /var/www/html/occ files_external:option ${shareid} enable_sharing true
done done

View file

@ -7,7 +7,7 @@ bucket=${4}
user=${5} user=${5}
/usr/local/bin/occ files_external:create "${bucket}" \ /usr/local/bin/occ files_external:create "${bucket}" \
amazons3 -c bucket="${bucket}" -c key="${key}" -c secret="${secret}" -c hostname="${endpoint}" -c use_ssl=true -c use_path_style=true -c region=us-east-1 \ amazons3 -c bucket="${bucket}" -c key="${key}" -c secret="${secret}" -c hostname="${endpoint}" -c use_ssl=true -c use_path_style=true -c region=us-east-1 \
-c useMultipartCopy=false amazons3::accesskey --user "${user}" amazons3::accesskey --user "${user}"
for shareid in $(/usr/local/bin/occ files_external:export "${user}" | jq -r '.[].mount_id'); do for shareid in $(/usr/local/bin/occ files_external:export "${user}" | jq -r '.[].mount_id'); do
/usr/local/bin/occ files_external:option "${shareid}" enable_sharing true /usr/local/bin/occ files_external:option "${shareid}" enable_sharing true
done done
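
Both helpers wrap the same occ call. Spelled out with placeholder credentials, useMultipartCopy=false being exactly the flag this change toggles:

/usr/local/bin/occ files_external:create mybucket \
    amazons3 -c bucket=mybucket -c key=AKIAEXAMPLE -c secret=EXAMPLESECRET \
    -c hostname=s3.example.com -c use_ssl=true -c use_path_style=true \
    -c region=us-east-1 -c useMultipartCopy=false \
    amazons3::accesskey --user myuser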

View file

@ -1,12 +0,0 @@
#!/bin/bash
mode=${1}
if [[ "${mode}" == "multi" ]]; then
filter='.multinode_mapping| keys | join("\n")'
elif [[ "${mode}" == "single" ]]; then
filter='.singlenodes| join("\n")'
else
filter='.fullnodes | join("\n")'
fi
yq -r "${filter}" < /etc/hiera/data/common.yaml