Compare commits

No commits in common. "main" and "stable-2023-02-15-v01" have entirely different histories.

118 changed files with 1307 additions and 2883 deletions

View file

@@ -1,33 +0,0 @@
#!/bin/bash
if ! [[ $(hostname) =~ monitor ]]; then
exit 0
fi
repo="/var/cache/cosmos/repo"
common="${repo}/global/overlay/etc/hiera/data/common.yaml"
function print_fact {
customer=${1}
environment=${2}
version=${3}
if [[ ${version} != 'null' ]]; then
echo "nextcloud_version_${environment}_${customer}=${version}"
else
echo "nextcloud_version_${environment}_${customer}=$(yq -r ".${key}" "${common}")"
fi
}
for environment in test prod; do
key="nextcloud_version_${environment}"
for customer in $(yq -r '.multinode_mapping | keys | .[]' "${common}"); do
group="${repo}/multinode-common/overlay/etc/hiera/data/group.yaml"
version=$(yq -r ".${key}" "${group}")
print_fact "${customer}" "${environment}" "${version}"
done
for customer in $(yq -r '.fullnodes[]' "${common}"); do
group="${repo}/${customer}-common/overlay/etc/hiera/data/group.yaml"
version=$(yq -r ".${key}" "${group}")
print_fact "${customer}" "${environment}" "${version}"
done
done
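
For reference, a sketch of what this removed script printed on a monitor host, one external fact per customer and environment (customer names and versions hypothetical):

nextcloud_version_test_examplecustomer=25.0.3-4
nextcloud_version_prod_examplecustomer=24.0.9-1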

View file

@@ -1,6 +0,0 @@
net.core.rmem_max=67108864
net.core.wmem_max=67108864
net.ipv4.tcp_rmem=4096 87380 33554432
net.ipv4.tcp_wmem=4096 87380 33554432
net.core.default_qdisc=fq
net.ipv4.tcp_congestion_control=bbr
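
A minimal sanity check for these removed settings, assuming the file lived under /etc/sysctl.d/ and was loaded with sysctl --system:

sysctl net.ipv4.tcp_congestion_control net.core.default_qdisc
# expected output:
# net.ipv4.tcp_congestion_control = bbr
# net.core.default_qdisc = fq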

View file

@@ -1,8 +1,14 @@
# Let's determine who the customer is by looking at the hostname
function sunetdrive::get_customer() >> String {
$hostnameparts = split($facts['networking']['fqdn'],'\.')
$hostnameparts = split($facts['fqdn'],'\.')
if $hostnameparts[1] == 'drive' {
return 'common'
if $hostnameparts[0] =~ /^gss/ {
return 'gss'
} elsif $hostnameparts[0] =~ /^lookup/ {
return 'lookup'
} else {
return 'common'
}
} elsif $hostnameparts[0] =~ /idp-proxy/ {
return 'common'
}
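
A sketch of how the two sides classify some hypothetical FQDNs, assuming the gss/lookup branches are the stable-2023-02-15-v01 side of this hunk:

# gss.drive.test.sunet.se -> main: 'common', stable-2023-02-15-v01: 'gss'
# lookup.drive.sunet.se   -> main: 'common', stable-2023-02-15-v01: 'lookup'
# sunet.drive.sunet.se    -> 'common' on both sides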

View file

@@ -1,6 +1,6 @@
# Let's determine where we are by looking at the hostname
function sunetdrive::get_environment() >> String {
$hostname = $facts['networking']['fqdn']
$hostname = $facts['fqdn']
if $hostname =~ /^.*\.drive\.sunet\.se$/ {
if $hostname =~ /^.*\.pilot\.drive\.sunet\.se$/ {
return 'pilot'

View file

@@ -1,4 +1,4 @@
# Let's determine the node number by looking at the hostname
function sunetdrive::get_node_number() >> Integer {
Integer(regsubst($facts['networking']['fqdn'], /^[a-zA-Z\-]+(\d).*$/, '\\1'))
Integer(regsubst($::fqdn, /^[a-zA-Z\-]+(\d).*$/, '\\1'))
}
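
A worked example of the regsubst call with a hypothetical FQDN:

# regsubst('node3.drive.sunet.se', /^[a-zA-Z\-]+(\d).*$/, '\1') yields '3'
# Integer('3') == 3, so the function returns 3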

View file

@@ -5,7 +5,6 @@ define sunetdrive::app_type (
$override_config = undef,
$override_compose = undef
) {
include sunet::packages::netcat_openbsd
# Config from group.yaml and customer specific conf
$environment = sunetdrive::get_environment()
$customer = sunetdrive::get_customer()
@@ -15,9 +14,8 @@ define sunetdrive::app_type (
# The config used
$config = $override_config
# Other settings
$admin_password = $config[ 'admin_password' ]
$dbhost = $config[ 'dbhost' ]
$dbname = $config[ 'dbname' ]
$dbuser = $config[ 'dbuser' ]
$instanceid = $config[ 'instanceid' ]
$mysql_user_password = $config[ 'mysql_user_password' ]
$passwordsalt = $config[ 'passwordsalt' ]
@@ -30,10 +28,8 @@ define sunetdrive::app_type (
$config = hiera_hash($environment)
$skeletondirectory = $config['skeletondirectory']
# Other settings
$admin_password = safe_hiera('admin_password')
$dbhost = 'proxysql_proxysql_1'
$dbname = 'nextcloud'
$dbuser = 'nextcloud'
$full_backup_retention = hiera('full_backup_retention')
$instanceid = safe_hiera('instanceid')
$mysql_user_password = safe_hiera('mysql_user_password')
$passwordsalt = safe_hiera('passwordsalt')
@@ -43,8 +39,8 @@ define sunetdrive::app_type (
$s3_secret = safe_hiera('s3_secret')
$secret = safe_hiera('secret')
}
$twofactor_enforced_groups = $config['twofactor_enforced_groups']
$twofactor_enforced_excluded_groups = $config['twofactor_enforced_excluded_groups']
$twofactor_enforced_groups = hiera_array('twofactor_enforced_groups')
$twofactor_enforced_excluded_groups = hiera_array('twofactor_enforced_excluded_groups')
$nextcloud_version = hiera("nextcloud_version_${environment}")
$nextcloud_version_string = split($nextcloud_version, '[-]')[0]
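# Example with a hypothetical version string: '25.0.3-4' splits on '-',
# so $nextcloud_version_string becomes '25.0.3'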
# Common settings for multinode and full nodes
@@ -55,22 +51,18 @@ define sunetdrive::app_type (
$site_name = $config['site_name']
$trusted_domains = $config['trusted_domains']
$trusted_proxies = $config['trusted_proxies']
if $location == 'kau-prod' {
$php_memory_limit_mb = 2048
} else {
$php_memory_limit_mb = 512
}
if $::facts['dockerhost2'] == 'yes' {
$hostnet = true
}
# These are encrypted values from local.eyaml
$gss_jwt_key = safe_hiera('gss_jwt_key')
$smtppassword = safe_hiera('smtp_password')
#These are global values from common.yaml
$gs_enabled = hiera('gs_enabled')
$gs_federation = hiera('gs_federation')
$gss_master_admin = hiera_array('gss_master_admin')
$gss_master_url = hiera("gss_master_url_${environment}")
$lookup_server = hiera("lookup_server_${environment}")
$mail_domain = hiera("mail_domain_${environment}")
$mail_smtphost = hiera("mail_smtphost_${environment}")
$mail_from_address = hiera("mail_from_address_${environment}")
@@ -78,63 +70,17 @@ define sunetdrive::app_type (
$smtpuser = hiera("smtp_user_${environment}")
$tug_office = hiera_array('tug_office')
# This is a global value from common.yaml but overridden in the gss-servers local.yaml
$gss_mode = hiera('gss_mode')
# These are global values from common.yaml but can be overridden in group.yaml
$drive_email_template_text_left = $config['drive_email_template_text_left']
$drive_email_template_plain_text_left = $config['drive_email_template_plain_text_left']
$drive_email_template_url_left = $config['drive_email_template_url_left']
$lb_servers = hiera_hash($environment)['lb_servers']
$document_servers = hiera_hash($environment)['document_servers']
# Calculate some values
$expiration_months = max(12, $full_backup_retention)
$expiration_days_min = $expiration_months * 31
$expiration_days_max = $expiration_months * 31 + 93
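# Worked example, assuming a full_backup_retention of 12 from hiera:
# $expiration_months   = max(12, 12)  = 12
# $expiration_days_min = 12 * 31      = 372
# $expiration_days_max = 12 * 31 + 93 = 465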
unless $is_multinode{
user { 'www-data': ensure => present, system => true }
file { '/usr/local/bin/get_containers':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/get_containers'),
mode => '0744',
}
if ($nodenumber == 3) {
file { '/usr/lib/nagios/plugins/check_nextcloud_mounts.py':
ensure => present,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/check_nextcloud_mounts.py'),
mode => '0744',
}
sunet::sudoer {'nagios_run_nextcloud_mounts_command':
user_name => 'nagios',
collection => 'nrpe_nextcloud_mounts_check',
command_line => '/usr/lib/nagios/plugins/check_nextcloud_mounts.py'
}
sunet::nagios::nrpe_command {'check_nextcloud_mounts':
command_line => '/usr/bin/sudo /usr/lib/nagios/plugins/check_nextcloud_mounts.py'
}
}
if ($nodenumber == 3) {
file { '/usr/local/bin/scan_external_mounts':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/scan_external_mounts.sh'),
mode => '0744',
}
sunet::scriptherder::cronjob { 'scriptherder_scan_external_mounts':
cmd => '/usr/local/bin/scan_external_mounts',
hour => '1',
minute => '20',
ok_criteria => ['exit_status=0','max_age=2d'],
warn_criteria => ['exit_status=1','max_age=3d'],
}
}
# set up cronjob on node3
if $::fqdn[0,5] == 'node3' {
file { '/opt/nextcloud/cron.sh':
ensure => file,
owner => 'root',
@@ -147,24 +93,26 @@ define sunetdrive::app_type (
user => 'root',
minute => '*/5',
}
file { '/opt/nextcloud/user-sync.sh':
ensure => absent,
}
file { '/usr/local/bin/nocc':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/nocc.erb'),
mode => '0740',
}
file { '/etc/sudoers.d/99-nocc':
ensure => file,
content => "script ALL=(root) NOPASSWD: /usr/local/bin/nocc\n",
mode => '0440',
owner => 'root',
group => 'root',
if $location =~ /^extern/ {
file { '/opt/nextcloud/user-sync.sh':
ensure => file,
owner => 'root',
group => 'root',
mode => '0700',
content => template('sunetdrive/application/user-sync.erb.sh'),
}
-> cron { 'gss_user_sync':
command => '/opt/nextcloud/user-sync.sh',
user => 'root',
minute => '*/5',
}
}
}
#Create users
unless $is_multinode{
user { 'www-data': ensure => present, system => true }
package { 'aufs-tools': ensure => latest, provider => 'apt' }
file { '/usr/local/bin/occ':
ensure => present,
force => true,
@@ -188,21 +136,12 @@ define sunetdrive::app_type (
content => template('sunetdrive/application/upgrade23-25.erb.sh'),
mode => '0744',
}
file { '/usr/local/bin/remount_user_bucket_as_project.sh':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/remount_user_bucket_as_project.sh'),
mode => '0744',
}
file { '/opt/rotate/conf.d/nextcloud.conf':
ensure => file,
force => true,
owner => 'root',
group => 'root',
content => "#This file is managed by puppet
#filename:retention days:maxsize mb\n/opt/nextcloud/nextcloud.log:180:256\n/opt/nextcloud/audit.log:180:256\n",
content => "#This file is managed by puppet\n#filename:retention days:maxsize mb\n/opt/nextcloud/nextcloud.log:180:256\n",
mode => '0644',
}
file { '/opt/rotate/conf.d/redis.conf':
@@ -251,14 +190,7 @@ define sunetdrive::app_type (
force => true,
owner => 'www-data',
group => 'root',
mode => '0640',
}
file { '/opt/nextcloud/audit.log':
ensure => file,
force => true,
owner => 'www-data',
group => 'root',
mode => '0640',
mode => '0644',
}
file { '/opt/nextcloud/rclone.conf':
ensure => file,
@@ -355,7 +287,23 @@ define sunetdrive::app_type (
mode => '0744',
}
}
if $location =~ /^kau/ {
if $location =~ /^gss-test/ {
file { '/opt/nextcloud/mappingfile.json':
ensure => present,
owner => 'www-data',
group => 'root',
content => template('sunetdrive/application/mappingfile-test.json.erb'),
mode => '0644',
}
} elsif $location =~ /^gss/ {
file { '/opt/nextcloud/mappingfile.json':
ensure => present,
owner => 'www-data',
group => 'root',
content => template('sunetdrive/application/mappingfile-prod.json.erb'),
mode => '0644',
}
} elsif $location =~ /^kau/ {
file { '/mnt':
ensure => directory,
owner => 'www-data',
@@ -392,17 +340,10 @@ define sunetdrive::app_type (
compose_filename => 'docker-compose.yml',
description => 'Nextcloud application',
}
if $::facts['sunet_nftables_enabled'] == 'yes' {
sunet::nftables::docker_expose { 'https':
allow_clients => ['any'],
port => 443,
iif => 'ens3',
}
} else {
sunet::misc::ufw_allow { 'https':
from => '0.0.0.0/0',
port => 443,
}
sunet::misc::ufw_allow { 'https':
from => '0.0.0.0/0',
port => 443,
}
}
}

View file

@@ -38,8 +38,8 @@ define sunetdrive::cloudimage(
resolver => ['130.242.80.14', '130.242.80.99'],
search => $search,
#
repo => $facts['cosmos_repo_origin_url'],
tagpattern => $facts['cosmos_tag_pattern'],
repo => $::cosmos_repo_origin_url,
tagpattern => $::cosmos_tag_pattern,
#
cpus => $cpus,
memory => $memory,

View file

@@ -1,53 +0,0 @@
# Common class
class sunetdrive::common {
include sunet::tools
include sunet::motd
include apt
include apparmor
include sunet::packages::jq
if $::facts['sunet_nftables_enabled'] != 'yes' {
warning('Enabling UFW')
include ufw
} else {
if $facts['networking']['hostname'] =~ /^kube[wc]/ {
warning('Setting nftables to installed but disabled')
ensure_resource ('class','sunet::nftables::init', { enabled => false })
} else {
warning('Enabling nftables')
ensure_resource ('class','sunet::nftables::init', { })
}
}
package {'sysstat': ensure => 'latest'}
package {'needrestart': ensure => installed}
service {'sysstat': provider => 'systemd'}
file_line { 'enable_sa':
ensure => 'present',
line => 'ENABLED="true"',
path => '/etc/default/sysstat',
match => 'ENABLED="false"',
require => Package['sysstat'],
}
file_line { 'sa_cron_comment':
ensure => 'present',
line => '# Activity reports every 2 minutes everyday',
path => '/etc/cron.d/sysstat',
match => '^#\ Activity\ reports\ every\ 10\ minutes\ everyday',
require => Package['sysstat'],
notify => Service['sysstat'],
}
file_line { 'sa_cron':
ensure => 'present',
line => '*/2 * * * * root command -v debian-sa1 > /dev/null && debian-sa1 1 1',
path => '/etc/cron.d/sysstat',
match => '^5-55/10',
require => Package['sysstat'],
notify => Service['sysstat'],
}
if $::facts['os']['distro']['id'] == 'Debian' {
exec { 'sysstat_systemd_timer':
command => 'systemctl enable --now sysstat-collect.timer',
unless => 'systemctl is-enabled --quiet sysstat-collect.timer',
}
}
}

View file

@@ -4,52 +4,59 @@ define sunetdrive::db_type(
$bootstrap=undef,
$location=undef,
$override_config = undef,
$override_compose = undef,
)
$override_compose = undef)
{
# Config from group.yaml
$environment = sunetdrive::get_environment()
$mariadb_version = hiera("mariadb_version_${environment}")
$config = hiera_hash($environment)
$mysql_root_password = safe_hiera('mysql_root_password')
$backup_password = safe_hiera('backup_password')
$proxysql_password = safe_hiera('proxysql_password')
$mysql_user_password = safe_hiera('mysql_user_password')
$roundcube_password = safe_hiera('roundcube_password')
$mariadb_dir = '/etc/mariadb'
$mycnf_path = 'sunetdrive/mariadb/my.cnf.erb'
$server_id = 1000 + Integer($facts['networking']['hostname'][-1])
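# e.g. a hypothetical hostname 'db-sunet3' ends in '3', so $server_id = 1003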
ensure_resource('file',$mariadb_dir, { ensure => directory } )
$dirs = ['datadir', 'init', 'conf', 'backups', 'scripts' ]
$dirs.each |$dir| {
ensure_resource('file',"${mariadb_dir}/${dir}", { ensure => directory, recurse => true } )
$is_multinode = (($override_config != undef) and ($override_compose != undef))
if $is_multinode {
$config = $override_config
$mysql_root_password = $config['mysql_root_password']
$mysql_user_password = $config['mysql_user_password']
$backup_password = $config['backup_password']
$mariadb_dir = $config['mariadb_dir']
$mycnf_path = $config['mycnf_path']
$server_id = '1000'
} else {
$config = hiera_hash($environment)
$mysql_root_password = safe_hiera('mysql_root_password')
$backup_password = safe_hiera('backup_password')
$proxysql_password = safe_hiera('proxysql_password')
$mysql_user_password = safe_hiera('mysql_user_password')
$mariadb_dir = '/etc/mariadb'
$mycnf_path = 'sunetdrive/mariadb/my.cnf.erb'
$server_id = 1000 + Integer($facts['hostname'][-1])
ensure_resource('file',"${mariadb_dir}", { ensure => directory, recurse => true } )
$dirs = ['datadir', 'init', 'conf', 'backups', 'scripts' ]
$dirs.each |$dir| {
ensure_resource('file',"${mariadb_dir}/${dir}", { ensure => directory, recurse => true } )
}
}
$nextcloud_ip = $config['app']
$db_ip = $config['db']
$db_ipv6 = $config['db_v6']
$backup_ip = $config['backup']
$backup_ipv6 = $config['backup_v6']
$ports = [3306, 4444, 4567, 4568]
if $location =~ /^multinode/ {
$from = $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6 + $config['kube'] + $config['kube_v6']
} elsif $location == 'sunet-prod' {
$from = $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6 + $config['imap'] + $config['imap_v6'] + $config['smtp'] + $config['smtp_v6'] + $config['webmail'] + $config['webmail_v6']
} elsif $location == 'sunet-test' {
$from = $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6 + $config['imap'] + $config['imap_v6'] + $config['smtp'] + $config['smtp_v6'] + $config['webmail'] + $config['webmail_v6'] + $config['calendar'] + $config['calendar_v6']
unless $is_multinode {
$db_ip = $config['db']
$db_ipv6 = $config['db_v6']
$backup_ip = $config['backup']
$backup_ipv6 = $config['backup_v6']
$ports = [3306, 4444, 4567, 4568]
sunet::misc::ufw_allow { 'mariadb_ports':
from => $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6,
port => $ports,
}
sunet::system_user {'mysql': username => 'mysql', group => 'mysql' }
}
if $location =~ /^lookup/ {
$sql_files = ['02-backup_user.sql', '03-proxysql.sql', '05-lookup.sql']
} else {
$from = $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6
$sql_files = ['02-backup_user.sql', '03-proxysql.sql', '04-nextcloud.sql']
}
sunet::misc::ufw_allow { 'mariadb_ports':
from => $from,
port => $ports,
}
sunet::system_user {'mysql': username => 'mysql', group => 'mysql' }
$sql_files = ['02-backup_user.sql', '03-proxysql.sql', '04-nextcloud.sql', '05-roundcube.sql']
$sql_files.each |$sql_file|{
file { "${mariadb_dir}/init/${sql_file}":
ensure => present,
@@ -67,11 +74,6 @@ define sunetdrive::db_type(
content => template($mycnf_path),
mode => '0744',
}
file { '/usr/local/bin/purge-binlogs':
ensure => present,
content => template('sunetdrive/mariadb/purge-binlogs.erb.sh'),
mode => '0744',
}
file { "${mariadb_dir}/scripts/run_manual_backup_dump.sh":
ensure => present,
content => template('sunetdrive/mariadb/run_manual_backup_dump.erb.sh'),
@@ -82,47 +84,39 @@ define sunetdrive::db_type(
content => template('sunetdrive/mariadb/rename-docker.sh'),
mode => '0744',
}
sunet::scriptherder::cronjob { 'purge_binlogs':
cmd => '/usr/local/bin/purge-binlogs',
hour => '6',
minute => '0',
ok_criteria => ['exit_status=0','max_age=2d'],
warn_criteria => ['exit_status=1','max_age=3d'],
}
file { '/usr/local/bin/mysql':
ensure => present,
content => template('sunetdrive/mariadb/mysql.erb.sh'),
mode => '0744',
}
file { '/usr/local/bin/size-test':
ensure => present,
content => template('sunetdrive/mariadb/size-test.erb'),
mode => '0744',
}
file { '/usr/local/bin/status-test':
ensure => present,
content => template('sunetdrive/mariadb/status-test.erb'),
mode => '0744',
}
file { '/etc/sudoers.d/99-size-test':
ensure => file,
content => "script ALL=(root) NOPASSWD: /usr/local/bin/size-test\n",
mode => '0440',
owner => 'root',
group => 'root',
}
file { '/etc/sudoers.d/99-status-test':
ensure => file,
content => "script ALL=(root) NOPASSWD: /usr/local/bin/status-test\n",
mode => '0440',
owner => 'root',
group => 'root',
}
$docker_compose = sunet::docker_compose { 'drive_mariadb_docker_compose':
content => template('sunetdrive/mariadb/docker-compose_mariadb.yml.erb'),
service_name => 'mariadb',
compose_dir => '/opt/',
compose_filename => 'docker-compose.yml',
description => 'Mariadb server',
if $is_multinode {
$docker_compose = $override_compose
} else {
file { '/usr/local/bin/size-test':
ensure => present,
content => template('sunetdrive/mariadb/size-test.erb'),
mode => '0744',
}
file { '/usr/local/bin/status-test':
ensure => present,
content => template('sunetdrive/mariadb/status-test.erb'),
mode => '0744',
}
file { "/etc/sudoers.d/99-size-test":
ensure => file,
content => "script ALL=(root) NOPASSWD: /usr/local/bin/size-test\n",
mode => '0440',
owner => 'root',
group => 'root',
}
file { "/etc/sudoers.d/99-status-test":
ensure => file,
content => "script ALL=(root) NOPASSWD: /usr/local/bin/status-test\n",
mode => '0440',
owner => 'root',
group => 'root',
}
$docker_compose = sunet::docker_compose { 'drive_mariadb_docker_compose':
content => template('sunetdrive/mariadb/docker-compose_mariadb.yml.erb'),
service_name => 'mariadb',
compose_dir => '/opt/',
compose_filename => 'docker-compose.yml',
description => 'Mariadb server',
}
}
}

View file

@@ -1,3 +1,4 @@
include apt
# Wrapper for sunet::dockerhost to do drive-specific things
class sunetdrive::dockerhost(
String $version = safe_hiera('docker_version'),
@@ -7,8 +8,6 @@ class sunetdrive::dockerhost(
String $docker_args = '',
Optional[String] $docker_dns = undef,
String $storage_driver = 'aufs',
Boolean $write_daemon_config = false,
Boolean $enable_ipv6 = false,
) {
if $version == 'NOT_SET_IN_HIERA' {
fail('Docker version not set in Hiera')
@@ -27,8 +26,6 @@ class sunetdrive::dockerhost(
storage_driver => $storage_driver,
docker_network => true, # let docker choose a network for the 'docker' bridge
compose_version => $compose_version,
write_daemon_config => $write_daemon_config,
enable_ipv6 => $enable_ipv6,
}
file { '/etc/apt/preferences.d/containerd.io.pref':
ensure => absent,

View file

@@ -4,8 +4,10 @@ class sunetdrive::infra_script (
$location = undef
) {
$environment = sunetdrive::get_environment()
$customer = 'common'
$customer = "common"
$config = hiera_hash($environment)
$gss_backup_server = $config['gss_backup_server']
$lookup_backup_server = $config['lookup_backup_server']
$ssh_config = "Host *.sunet.se
User script
IdentityFile /root/.ssh/id_script"
@@ -57,16 +59,14 @@ class sunetdrive::infra_script (
mode => '0700',
}
sunet::scriptherder::cronjob { 'backupgssdb':
ensure => 'absent',
cmd => 'true',
cmd => "/root/tasks/backupdb.sh ${gss_backup_server}",
hour => '2',
minute => '0',
ok_criteria => ['exit_status=0','max_age=2d'],
warn_criteria => ['exit_status=1','max_age=3d'],
}
sunet::scriptherder::cronjob { 'backuplookupdb':
ensure => 'absent',
cmd => 'true',
cmd => "/root/tasks/backupdb.sh ${lookup_backup_server}",
hour => '2',
minute => '0',
ok_criteria => ['exit_status=0','max_age=2d'],

View file

@@ -1,38 +0,0 @@
#Class for SUNET-Drive-Jupyter-site-Server
class sunetdrive::jupyter_site (
String $site_version = '0.0.1-1'
) {
$domain = 'jupyter.sunet.dev'
sunet::docker_compose { 'portal_compose':
content => template('sunetdrive/jupyter_site/docker-compose.erb.yaml'),
service_name => 'jupyter_site',
compose_dir => '/opt/',
compose_filename => 'docker-compose.yml',
description => 'Web server',
}
exec { 'workaround_docker_compose_dir':
command => 'mkdir -p /opt/jupyter_site/nginx',
unless => 'test -d /opt/jupyter_site/nginx',
}
$nginx_dirs = ['acme', 'certs', 'conf', 'dhparam', 'html', 'vhost']
$nginx_dirs.each | $dir| {
file { "/opt/jupyter_site/nginx/${dir}":
ensure => directory,
owner => 'root',
group => 'root',
mode => '0751',
}
}
sunet::misc::ufw_allow { 'https':
from => '0.0.0.0/0',
port => 443,
}
# For acme and redirect
sunet::misc::ufw_allow { 'http':
from => '0.0.0.0/0',
port => 80,
}
}

manifests/lookup.pp (new file, 42 additions)
View file

@@ -0,0 +1,42 @@
#Class for SUNET-Drive-Lookup-Server
class sunetdrive::lookup (
$bootstrap = undef,
$location = undef
) {
$environment = sunetdrive::get_environment()
# Firewall settings
$nextcloud_ip = hiera_array("${location}_app", [])
$tug_office = hiera_array('tug_office')
$dbhost = 'proxysql_proxysql_1'
$gss_jwt_key = safe_hiera('gss_jwt_key')
$mysql_user_password = safe_hiera('mysql_user_password')
$lookup_version = hiera("lookup_version_${environment}")
#Create users
user { 'www-data': ensure => present, system => true }
file { '/opt/lookup/config.php':
ensure => present,
owner => 'www-data',
group => 'root',
content => template('sunetdrive/lookup/config.php.erb'),
mode => '0644',
}
sunet::docker_compose { 'drive_lookup_docker_compose':
content => template('sunetdrive/lookup/docker-compose_lookup.yml.erb'),
service_name => 'lookup',
compose_dir => '/opt/',
compose_filename => 'docker-compose.yml',
description => 'Lookup server',
}
sunet::misc::ufw_allow { 'https':
from => '0.0.0.0/0',
port => 443,
}
}

View file

@@ -4,10 +4,10 @@ class sunetdrive::mariadb (
$location = undef,
$tag_mariadb = undef,
$override_config = undef,
$override_compose = undef,
$override_compose = undef
) {
$quorum_id = $facts['networking']['fqdn']
$quorum_id = $::fqdn
$quorum_password = safe_hiera('quorum_password')
$db = sunetdrive::db_type { 'base_db':
bootstrap => $bootstrap,

View file

@@ -1,13 +1,11 @@
# This is an asynchronous replica of the MariaDB cluster for SUNET Drive
class sunetdrive::mariadb_backup($tag_mariadb=undef, $location=undef) {
include sunet::packages::netcat_openbsd
$dirs = [ 'datadir', 'init', 'conf', 'backups' ]
$dirs.each | $dir | {
ensure_resource('file',"/opt/mariadb_backup/${dir}", { ensure => directory, recurse => true } )
}
# Config from group.yaml
$environment = sunetdrive::get_environment()
$mariadb_version = hiera("mariadb_version_${environment}")
$config = hiera_hash($environment)
$first_db = $config['first_db']

View file

@@ -4,16 +4,11 @@ class sunetdrive::multinode (
$location = undef
)
{
include sunet::packages::yq
$myname = $facts['networking']['hostname']
$myname = $facts['hostname']
$is_multinode = true;
$environment = sunetdrive::get_environment()
$lb_servers = hiera_hash($environment)['lb_servers']
$document_servers = hiera_hash($environment)['document_servers']
$nextcloud_ip = hiera_hash($environment)['app']
$db_ip = hiera_hash($environment)['db']
$admin_password = hiera('admin_password')
$cluster_admin_password = hiera('cluster_admin_password')
$twofactor_enforced_groups = []
$twofactor_enforced_excluded_groups = []
@@ -27,70 +22,11 @@ class sunetdrive::multinode (
nil
}
}
$php_memory_limit_mb = 512
$nodenumber = $::fqdn[9,1]
$customers = $tempcustomers - nil
$passwords = $allnames.map | $index, $customer | {
hiera("${customer}_mysql_user_password")
}
$transaction_persistent = 1
$monitor_password = hiera('proxysql_password')
user { 'www-data': ensure => present, system => true }
sunet::system_user {'mysql': username => 'mysql', group => 'mysql' }
ensure_resource('file', '/opt/nextcloud' , { ensure => directory, recurse => true } )
file { '/usr/local/bin/get_containers':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/get_containers'),
mode => '0744',
}
file { '/usr/lib/nagios/plugins/check_nextcloud_mounts.py':
ensure => present,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/check_nextcloud_mounts.py'),
mode => '0744',
}
sunet::sudoer {'nagios_run_nextcloud_mounts_command':
user_name => 'nagios',
collection => 'nrpe_nextcloud_mounts_check',
command_line => '/usr/lib/nagios/plugins/check_nextcloud_mounts.py'
}
sunet::nagios::nrpe_command {'check_nextcloud_mounts':
command_line => '/usr/bin/sudo /usr/lib/nagios/plugins/check_nextcloud_mounts.py'
}
file { '/usr/local/bin/scan_external_mounts':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/scan_external_mounts.sh'),
mode => '0744',
}
sunet::scriptherder::cronjob { 'scriptherder_scan_external_mounts':
cmd => '/usr/local/bin/scan_external_mounts',
hour => '1',
minute => '20',
ok_criteria => ['exit_status=0','max_age=2d'],
warn_criteria => ['exit_status=1','max_age=3d'],
}
file { '/usr/local/bin/nocc':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/nocc.erb'),
mode => '0740',
}
file { '/etc/sudoers.d/99-nocc':
ensure => file,
content => "script ALL=(root) NOPASSWD: /usr/local/bin/nocc\n",
mode => '0440',
owner => 'root',
group => 'root',
}
file { '/usr/local/bin/occ':
ensure => present,
force => true,
@@ -107,30 +43,19 @@ class sunetdrive::multinode (
group => 'root',
}
file { '/usr/local/bin/upgrade23-25.sh':
ensure => absent,
}
file { '/usr/local/bin/get_paying_customers':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/multinode/get_paying_customers.erb.sh'),
content => template('sunetdrive/multinode/upgrade23-25.erb.sh'),
mode => '0744',
}
file { '/usr/local/bin/get_non_paying_customers':
file { '/usr/local/bin/get_containers':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/multinode/get_non_paying_customers.erb.sh'),
mode => '0744',
}
file { '/usr/local/bin/restart_and_prune':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/multinode/restart_and_prune.erb.sh'),
content => template('sunetdrive/multinode/get_containers'),
mode => '0744',
}
file { '/usr/local/bin/add_admin_user':
@@ -149,29 +74,12 @@ class sunetdrive::multinode (
content => template('sunetdrive/multinode/prune.erb.sh'),
mode => '0744',
}
file { '/opt/proxysql/proxysql.cnf':
ensure => file,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/multinode/proxysql.cnf.erb'),
mode => '0644',
}
sunet::scriptherder::cronjob { 'prune_non_paying':
cmd => '/usr/local/bin/restart_and_prune',
weekday => '1-6',
hour => '2',
minute => '45',
ok_criteria => ['exit_status=0','max_age=3d'],
warn_criteria => ['exit_status=1','max_age=5d'],
}
sunet::scriptherder::cronjob { 'prune_all_paying':
cmd => '/usr/local/bin/restart_and_prune include_paying',
weekday => '0',
hour => '2',
minute => '45',
ok_criteria => ['exit_status=0','max_age=7d'],
warn_criteria => ['exit_status=1','max_age=9d'],
cron { 'multinode_prune':
command => '/opt/nextcloud/prune.sh',
require => File['/opt/nextcloud/prune.sh'],
user => 'root',
minute => '25',
hour => '3',
}
file { '/opt/nextcloud/apache.php.ini':
ensure => file,
@@ -254,36 +162,19 @@ MACAddressPolicy=none'
hour => '0',
weekday => '0',
}
# if $nodenumber == '2' {
# cron { 'add_back_bucket_for_karin_nordgren':
# command => '(/usr/local/bin/occ nextcloud-kmh-app-1 files_external:list karin_nordgren@kmh.se && /home/script/bin/create_bucket.sh nextcloud-kmh-app-1 karin_nordgren@kmh.se karin-nordgren-drive-sunet-se) || /bin/true',
# user => 'root',
# minute => '*/10',
# }
# }
cron { 'multinode_cron':
command => '/opt/nextcloud/cron.sh',
require => File['/opt/nextcloud/cron.sh'],
user => 'root',
minute => '*/10',
}
$customers.each | $index, $customer | {
$customer_config_full = hiera_hash($customer)
$customer_config = $customer_config_full[$environment]
cron { "multinode_cron_${customer}":
command => "/opt/nextcloud/cron.sh nextcloud-${customer}-app-1",
require => File['/opt/nextcloud/cron.sh'],
user => 'root',
minute => '*/10',
}
if $environment == 'prod' {
if 'primary_bucket' in $customer_config.keys() {
$s3_bucket = $customer_config['primary_bucket']
} else {
$s3_bucket = "primary-${customer}-drive.sunet.se"
}
$s3_bucket = "primary-${customer}-drive.sunet.se"
$site_name = "${customer}.drive.sunet.se"
$trusted_proxies = ['lb1.drive.sunet.se','lb2.drive.sunet.se', 'lb3.drive.sunet.se', 'lb4.drive.sunet.se']
} else {
if 'primary_bucket' in $customer_config.keys() {
$s3_bucket = $customer_config['primary_bucket']
} else {
$s3_bucket = "primary-${customer}-${environment}.sunet.se"
}
$s3_bucket = "primary-${customer}-${environment}.sunet.se"
$site_name = "${customer}.drive.${environment}.sunet.se"
$trusted_proxies = ["lb1.drive.${environment}.sunet.se","lb2.drive.${environment}.sunet.se",
"lb3.drive.${environment}.sunet.se","lb4.drive.${environment}.sunet.se"]
@@ -292,54 +183,33 @@ MACAddressPolicy=none'
$apache_error_path = "/opt/multinode/${customer}/404.html"
$config_php_path = "/opt/multinode/${customer}/config.php"
$cron_log_path ="/opt/multinode/${customer}/cron.log"
$customer_config_full = hiera_hash($customer)
$customer_config = $customer_config_full[$environment]
$dbhost = "mariadb${customer}_db_1"
$dbhost = 'proxysql_proxysql_1'
$dbname = "nextcloud_${customer}"
$dbuser = "nextcloud_${customer}"
$gs_enabled = hiera('gs_enabled')
$gs_federation = hiera('gs_federation')
$gss_master_admin = hiera_array('gss_master_admin')
$gss_master_url = hiera("gss_master_url_${environment}")
$https_port = hiera_hash('multinode_mapping')[$customer]['port']
$lookup_server = hiera("lookup_server_${environment}")
$mail_domain = hiera("mail_domain_${environment}")
$mail_from_address = hiera("mail_from_address_${environment}")
$mail_smtphost = hiera("mail_smtphost_${environment}")
$nextcloud_log_path ="/opt/multinode/${customer}/nextcloud.log"
$audit_log_path ="/opt/multinode/${customer}/audit.log"
if $customer_config['nextcloud_version'] {
$nextcloud_version = $customer_config['nextcloud_version']
} else {
$nextcloud_version = hiera("nextcloud_version_${environment}")
}
$nextcloud_version = hiera("nextcloud_version_${environment}")
$nextcloud_version_string = split($nextcloud_version, '[-]')[0]
$rclone_conf_path = "/opt/multinode/${customer}/rclone.conf"
$redis_conf_dir = "/opt/multinode/${customer}/server"
$redis_conf_path = "${redis_conf_dir}/redis.conf"
if $::facts['sunet_nftables_enabled'] == 'yes' {
$redis_host= "redis-${customer}-redis-server-1"
} else {
$redis_host= "redis-${customer}_redis-server_1"
}
$redis_host= "redis${customer}_redis-server_1"
$s3_host = $customer_config['s3_host']
$s3_usepath = hiera('s3_usepath')
$smtpuser = hiera("smtp_user_${environment}")
$trusted_domains = [$site_name, $facts['networking']['fqdn'], 'localhost']
$trusted_domains = [$site_name, $facts['fqdn'], 'localhost']
$tug_office = hiera_array('tug_office')
if $customer_config['twofactor_enforced_groups'] {
$twofactor_enforced_groups = $customer_config['twofactor_enforced_groups']
}
if $customer_config['twofactor_enforced_excluded_groups'] {
$twofactor_enforced_excluded_groups = $customer_config['twofactor_enforced_excluded_groups']
}
if $customer_config['full_backup_retention'] {
$full_backup_retention = $customer_config['full_backup_retention']
} else {
$full_backup_retention = hiera('full_backup_retention')
}
# Calculate some values
$expiration_months = max(12, $full_backup_retention)
$expiration_days_min = $expiration_months * 31
$expiration_days_max = $expiration_months * 31 + 93
# Secrets from local.eyaml
$admin_password = safe_hiera("${customer}_admin_password")
@@ -352,14 +222,13 @@ MACAddressPolicy=none'
$secret = safe_hiera("${customer}_secret")
$passwordsalt= safe_hiera("${customer}_passwordsalt")
$redis_host_password = safe_hiera("${customer}_redis_host_password")
$gss_jwt_key = safe_hiera('gss_jwt_key')
$smtppassword = safe_hiera('smtp_password')
$extra_config = {
admin_password => $admin_password,
backup_password => $backup_password,
dbhost => $dbhost,
dbname => $dbname,
dbuser => $dbuser,
drive_email_template_plain_text_left => hiera($environment)['drive_email_template_plain_text_left'],
drive_email_template_text_left => hiera($environment)['drive_email_template_text_left'],
drive_email_template_url_left => hiera($environment)['drive_email_template_url_left'],
@@ -372,6 +241,18 @@ MACAddressPolicy=none'
}
$config = deep_merge($customer_config, $extra_config)
ensure_resource('file', "/opt/multinode/${customer}" , { ensure => directory, recurse => true } )
$dirs = ['datadir', 'init', 'conf', 'scripts' ]
$dirs.each |$dir| {
ensure_resource('file',"${config['mariadb_dir']}/${dir}", { ensure => directory, recurse => true } )
}
ensure_resource('file',"${config['mariadb_dir']}/backups", {
ensure => directory,
owner => 'root',
group => 'script',
mode => '0750',
recurse => true
} )
# Use the other sunetdrive classes with overridden config
$db_ip = ['127.0.0.1']
$app_compose = sunet::docker_compose { "drive_${customer}_app_docker_compose":
@@ -394,12 +275,33 @@ MACAddressPolicy=none'
description => "Redis cache server for ${customer}",
require => File[$redis_conf_path],
}
$mariadb_compose = sunet::docker_compose { "drive_mariadb_${customer}_compose":
content => template('sunetdrive/multinode/docker-compose_mariadb.yml.erb'),
service_name => "mariadb-${customer}",
compose_dir => "/opt/multinode/${customer}",
compose_filename => 'docker-compose.yml',
description => "Mariadb server for ${customer}",
owner => 'root',
group => 'script',
mode => '0750',
}
file { "/opt/multinode/${customer}/mariadb-${customer}/do_backup.sh":
ensure => present,
content => template('sunetdrive/mariadb_backup/do_backup.erb.sh'),
mode => '0744',
}
sunetdrive::app_type { "app_${customer}":
location => $location,
override_config => $config,
override_compose => $app_compose,
}
sunetdrive::db_type { "db_${customer}":
location => $location,
override_config => $config,
override_compose => $mariadb_compose,
}
file { $redis_conf_dir:
ensure => directory,
recurse => true,
@@ -438,13 +340,6 @@ MACAddressPolicy=none'
group => 'root',
mode => '0644',
}
file { $audit_log_path:
ensure => file,
force => true,
owner => 'www-data',
group => 'root',
mode => '0644',
}
file { $rclone_conf_path:
ensure => present,
owner => 'www-data',
@@ -460,18 +355,10 @@ MACAddressPolicy=none'
content => template('sunetdrive/multinode/complete_reinstall.erb.sh'),
mode => '0744',
}
if $::facts['sunet_nftables_enabled'] == 'yes' {
$name = "https_port_${customer}"
ensure_resource('sunet::nftables::ufw_allow_compat', $name, {
from => ['0.0.0.0/0', '::/0'],
port => $https_port,
})
} else {
# Open ports
sunet::misc::ufw_allow { "https_port_${customer}":
from => '0.0.0.0',
port => $https_port,
}
# Open ports
sunet::misc::ufw_allow { "https_port_${customer}":
from => '0.0.0.0',
port => $https_port,
}
}
}

View file

@@ -1,80 +0,0 @@
class sunetdrive::multinode_db(){
$is_multinode = true;
$environment = sunetdrive::get_environment()
$allcustomers = hiera_hash('multinode_mapping')
$customers = $allcustomers.keys
$customers.each |$customer| {
file { "/etc/mariadb/backups/${customer}":
ensure => directory,
}
file { "/etc/mariadb/init/04-nextcloud.${customer}.sql":
ensure => present,
content => "CREATE SCHEMA nextcloud_${customer};\nCREATE USER 'nextcloud_${customer}'@'%' IDENTIFIED BY '${hiera("${customer}_mysql_user_password")}';\nGRANT ALL PRIVILEGES ON nextcloud_${customer}.* TO 'nextcloud_${customer}'@'%' IDENTIFIED BY '${hiera("${customer}_mysql_user_password")}';\n",
mode => '0744',
}
}
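# For a hypothetical customer 'foo', the 04-nextcloud init file above rendered roughly:
# CREATE SCHEMA nextcloud_foo;
# CREATE USER 'nextcloud_foo'@'%' IDENTIFIED BY '<foo_mysql_user_password from hiera>';
# GRANT ALL PRIVILEGES ON nextcloud_foo.* TO 'nextcloud_foo'@'%' IDENTIFIED BY '<same password>';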
if $facts["networking"]["fqdn"] =~ /^multinode-db1\.drive\.(test\.){1}sunet\.se$/ {
$statistics_secret = safe_hiera('statistics_secret')
notify { 'hostmessage':
message => 'We are on multinode-db1. Set up statistics environment.',
}
$custdata=$customers.reduce({}) |$memo, $value| {
$memo + {$value => lookup($value)}
}
$rclone_url = 'https://downloads.rclone.org/rclone-current-linux-amd64.deb'
$local_path = '/tmp/rclone-current-linux-amd64.deb'
exec { 'rclone_deb':
command => "/usr/bin/wget -q ${rclone_url} -O ${local_path}",
creates => $local_path,
}
package { 'rclone':
ensure => installed,
provider => dpkg,
source => $local_path,
require => Exec['rclone_deb'],
}
file { '/root/.rclone.conf':
ensure => file,
content => template('sunetdrive/mariadb_backup/rclone.conf.erb'),
owner => 'root',
group => 'root',
mode => '0600',
}
file { '/root/tasks/':
ensure => directory,
}
-> file { '/root/tasks/listusersbydep.sh':
ensure => file,
content => template('sunetdrive/mariadb/listusersdep.sh.erb'),
owner => 'root',
group => 'root',
mode => '0700',
}
file { '/root/tasks/genusersondepartmentlists.sh':
ensure => file,
content => template('sunetdrive/mariadb/genuserdeplists.sh.erb'),
owner => 'root',
group => 'root',
mode => '0700',
}
file {'/opt/mariadb/statistics/':
ensure => directory,
}
-> file {'/opt/mariadb/statistics/custdata.json':
ensure => file,
content => template('sunetdrive/mariadb/custconfig.json.erb'),
owner => 'root',
group => 'root',
mode => '0600',
}
sunet::scriptherder::cronjob { 'genuserdeplists':
cmd => '/root/tasks/genusersondepartmentlists.sh',
hour => '2',
minute => '5',
ok_criteria => ['exit_status=0','max_age=30h'],
warn_criteria => ['exit_status=1', 'max_age=60h'],
}
}
}

View file

@@ -1,79 +0,0 @@
# NRPE class
class sunetdrive::nrpe(
$loadw = '15,10,5',
$loadc = '30,25,20',
$procsw = 150,
$procsc = 200,
) {
require apt
class { 'sunet::nagios':
command_timeout => 600,
loadw => $loadw,
loadc => $loadc,
procsw => $procsw,
procsc => $procsc,
}
package {'nagios-plugins-contrib': ensure => latest}
if ($facts['os']['name'] == 'Ubuntu' and versioncmp($facts['os']['release']['full'], '22.04') >= 0 ){
$mem_w = '90'
$mem_c = '95'
} else {
$mem_w = '10'
$mem_c = '5'
}
$checks = ['nrpe_check_memory']
$checks.each |$check| {
ensure_resource("sunet::nagios::${check}", "nagios-nrpe-${check}")
}
sunet::nagios::nrpe_command {'check_entropy':
command_line => '/usr/lib/nagios/plugins/check_entropy -w 256'
}
sunet::nagios::nrpe_command {'check_ntp_time':
command_line => '/usr/lib/nagios/plugins/check_ntp_time -H ntp.se'
}
sunet::nagios::nrpe_command {'check_scriptherder':
command_line => '/usr/local/bin/scriptherder --mode check'
}
sunet::sudoer {'nagios_run_needrestart_command':
user_name => 'nagios',
collection => 'nrpe_needrestart_check',
command_line => '/usr/sbin/needrestart -p -l'
}
sunet::sudoer {'nagios_run_galera_command':
user_name => 'nagios',
collection => 'nrpe_galera_check',
command_line => '/usr/lib/nagios/plugins/check_galera_cluster -w 2 -c 1 -0'
}
sunet::nagios::nrpe_command {'check_galera_cluster':
command_line => '/usr/bin/sudo /usr/lib/nagios/plugins/check_galera_cluster -w 2 -c 1 -0'
}
sunet::sudoer {'nagios_run_proxysql_command':
user_name => 'nagios',
collection => 'nrpe_proxysql_check',
command_line => '/usr/lib/nagios/plugins/check_proxysql_server, /usr/lib/nagios/plugins/check_mysql_server_status'
}
sunet::nagios::nrpe_command {'check_proxysql_server':
command_line => '/usr/bin/sudo /usr/lib/nagios/plugins/check_proxysql_server'
}
sunet::sudoer {'nagios_run_replication_command':
user_name => 'nagios',
collection => 'nrpe_replication_check',
command_line => '/usr/local/bin/check_replication'
}
sunet::nagios::nrpe_command {'check_async_replication':
command_line => '/usr/bin/sudo /usr/local/bin/check_replication'
}
sunet::sudoer {'nagios_run_backup_command':
user_name => 'nagios',
collection => 'nrpe_backup_check',
command_line => '/usr/local/bin/check_backups'
}
sunet::nagios::nrpe_command {'check_backups':
command_line => '/usr/bin/sudo /usr/local/bin/check_backups'
}
sunet::nagios::nrpe_command {'check_mysql_server_status':
command_line => '/usr/bin/sudo /usr/lib/nagios/plugins/check_mysql_server_status'
}
}

View file

@@ -3,8 +3,6 @@ class sunetdrive::onlyoffice () {
$environment = sunetdrive::get_environment()
$extra_hosts = hiera_hash($environment)['extra_hosts']
$docker_tag = hiera_hash($environment)['collabora_tag']
$token_name = 'Sunet'
$access_token = safe_hiera('collabora_access_token')
$customers = hiera('fullnodes')
$multinode_customers = keys(hiera_hash('multinode_mapping'))
if $environment == 'prod' {
@@ -12,18 +10,14 @@ class sunetdrive::onlyoffice () {
} else {
$domain = 'drive.test.sunet.se'
}
exec { 'collabora_docker_login':
command => "docker login registry.gitlab.collabora.com -u ${token_name} -p ${access_token}",
}
sunet::collabora::docs { 'sunet-onlyoffice':
dns => [ '89.32.32.32' ],
extra_hosts => $extra_hosts,
dns => [ '89.32.32.32' ],
extra_hosts => $extra_hosts,
extra_volumes => ['/opt/collabora/coolwsd.xml:/etc/coolwsd/coolwsd.xml'],
docker_tag => $docker_tag,
docker_image => 'registry.gitlab.collabora.com/productivity/collabora-online-nc',
docker_tag => $docker_tag,
}
file {'/opt/collabora/coolwsd.xml':
ensure => present,
ensure => present,
content => template('sunetdrive/document/coolwsd.xml.erb'),
}
}

View file

@@ -1,48 +0,0 @@
#Class for SUNET-Drive-portal-Server
class sunetdrive::portal (
String $portal_version = '0.0.1-1'
) {
$environment = sunetdrive::get_environment()
if $environment == 'prod' {
$domain = 'drive.sunet.se'
} else {
$domain = 'drive.test.sunet.se'
}
sunet::docker_compose { 'portal_compose':
content => template('sunetdrive/portal/docker-compose.erb.yaml'),
service_name => 'portal',
compose_dir => '/opt/',
compose_filename => 'docker-compose.yml',
description => 'Portal server',
}
exec { 'workaround_docker_compose_dir':
command => 'mkdir -p /opt/portal/nginx',
unless => 'test -d /opt/portal/nginx',
}
$nginx_dirs = ['acme', 'certs', 'conf', 'dhparam', 'html', 'vhost']
$nginx_dirs.each | $dir| {
file { "/opt/portal/nginx/${dir}":
ensure => directory,
owner => 'root',
group => 'root',
mode => '0751',
}
}
file { '/opt/portal/config.yaml':
ensure => present,
content => template('sunetdrive/portal/config.erb.yaml'),
mode => '0644',
}
sunet::misc::ufw_allow { 'https':
from => '0.0.0.0/0',
port => 443,
}
# For acme and redirect
sunet::misc::ufw_allow { 'http':
from => '0.0.0.0/0',
port => 80,
}
}

View file

@@ -3,8 +3,6 @@ class sunetdrive::proxysql (
$bootstrap = undef,
$location = undef,
$proxysql_container_name = 'proxysql_proxysql_1',
$manage_config = true,
$manage_network = true,
) {
# Config from group.yaml
@@ -12,8 +10,6 @@ class sunetdrive::proxysql (
$config = hiera_hash($environment)
$db_ip = $config['db']
$nextcloud_ip = $config['app']
$nextcloud_ipv6 = $config['app_v6']
$nextcloud_ip_all = $nextcloud_ip + $nextcloud_ipv6
$proxysql_ok_num = length($nextcloud_ip)
$proxysql_warn_num = $proxysql_ok_num - 1
@@ -29,9 +25,6 @@ class sunetdrive::proxysql (
$mysql_user = safe_hiera('mysql_user')
$transaction_persistent = 1
if $::facts['dockerhost2'] == 'yes' and $manage_network {
$hostnet = true
}
file { '/usr/local/bin/proxysql':
ensure => file,
@@ -61,12 +54,10 @@ class sunetdrive::proxysql (
require => Package['nagios-nrpe-server'],
content => template('sunetdrive/proxysql/check_mysql_server_status.erb'),
}
if $manage_config {
file { '/opt/proxysql/proxysql.cnf':
ensure => present,
content => template('sunetdrive/proxysql/proxysql.cnf.erb'),
mode => '0644',
}
file { '/opt/proxysql/proxysql.cnf':
ensure => present,
content => template('sunetdrive/proxysql/proxysql.cnf.erb'),
mode => '0644',
}
file { '/opt/proxysql/my.cnf':
@@ -74,22 +65,9 @@ class sunetdrive::proxysql (
content => template('sunetdrive/proxysql/my.cnf.erb'),
mode => '0644',
}
if $::facts['sunet_nftables_enabled'] == 'yes' {
sunet::nftables::docker_expose { 'stats_ports':
allow_clients => $tug_office,
port => 6080,
iif => 'ens3',
}
sunet::nftables::docker_expose { 'proxysql':
allow_clients => $nextcloud_ip_all,
port => 6032,
iif => 'ens3',
}
} else {
sunet::misc::ufw_allow { 'stats_ports':
from => $tug_office,
port => 6080,
}
sunet::misc::ufw_allow { 'stats_ports':
from => $tug_office,
port => 6080,
}
sunet::docker_compose { 'drive_proxysql_docker_compose':

View file

@@ -17,10 +17,10 @@ class sunetdrive::redis_cluster (
content => template('sunetdrive/redis_cluster/reset_cluster.erb.sh'),
mode => '0700',
}
exec { 'set_permissions_directory':
command => 'chown -R 999:root /opt/redis/node-*'
}
exec { 'set_permissions_files':
command => 'chown -R 999:999 /opt/redis/node-*/*'
file { '/root/.bashrc':
ensure => present,
content => template('sunetdrive/redis_cluster/bashrc.erb'),
mode => '0644',
}
}

View file

@@ -1,16 +1,12 @@
#Class for SUNET-Drive-Reva-Server
class sunetdrive::reva (
String $domain = 'drive.test.sunet.se',
String $customer = 'sunet',
String $reva_domain = "${customer}-reva.${domain}",
String $reva_version = 'v1.26.0',
String $domain = '',
String $reva_domain = ''
) {
$environment = sunetdrive::get_environment()
$shared_secret = safe_hiera('shared_secret')
$statistics_secret = safe_hiera('statistics_secret')
$iopsecret = safe_hiera('iopsecret')
$smtp_credentials = safe_hiera('smtp_credentials')
# Firewall settings
#Create users
@@ -23,17 +19,18 @@ class sunetdrive::reva (
content => template('sunetdrive/reva/revad.toml.erb'),
mode => '0644',
}
file { '/opt/reva/rclone.conf':
ensure => present,
owner => 'www-data',
group => 'root',
content => template('sunetdrive/reva/rclone.conf.erb'),
mode => '0644',
}
file { '/opt/reva/data':
ensure => directory,
owner => 'www-data',
}
file { '/opt/reva/ocm-providers.json':
ensure => present,
owner => 'www-data',
group => 'root',
content => template('sunetdrive/reva/ocm-providers.json.erb'),
mode => '0644',
}
sunet::docker_compose { 'drive_reva_docker_compose':
content => template('sunetdrive/reva/docker-compose.yml.erb'),
service_name => 'reva',
@@ -41,12 +38,9 @@ class sunetdrive::reva (
compose_filename => 'docker-compose.yml',
description => 'Sciencemesh reva server',
}
$ports = [443,19000]
$ports.each | $port|{
sunet::misc::ufw_allow { "reva_${port}":
from => '0.0.0.0/0',
port => $port,
}
}
sunet::misc::ufw_allow { 'https_reva':
from => '0.0.0.0/0',
port => 443,
}
}

View file

@@ -29,23 +29,16 @@ class sunetdrive::satosa($dehydrated_name=undef,$image='docker.sunet.se/satosa',
}
}
}
$dehydrated_status = $dehydrated_name ? {
undef => 'absent',
default => 'present'
}
file {'/opt/docker_run':
ensure => 'absent',
}
sunet::docker_compose { 'satosa':
content => template('sunetdrive/satosa/docker-compose.yml.erb'),
service_name => 'satosa',
compose_dir => '/opt/',
compose_filename => 'docker-compose.yml',
description => 'Satosa',
sunet::docker_run {'satosa':
image => $image,
imagetag => $tag,
volumes => ['/etc/satosa:/etc/satosa','/etc/dehydrated:/etc/dehydrated'],
ports => ['443:8000'],
env => ['METADATA_DIR=/etc/satosa/metadata', 'WORKER_TIMEOUT=120']
}
file {'/etc/satosa/proxy_conf.yaml':
content => inline_template("<%= @merged_conf.to_yaml %>\n"),
notify => Sunet::Docker_compose['satosa']
notify => Sunet::Docker_run['satosa']
}
$plugins = hiera('satosa_config')
sort(keys($plugins)).each |$n| {
@@ -53,16 +46,26 @@ class sunetdrive::satosa($dehydrated_name=undef,$image='docker.sunet.se/satosa',
$fn = $plugins[$n]
file { $fn:
content => inline_template("<%= @conf.to_yaml %>\n"),
notify => Sunet::Docker_compose['satosa']
notify => Sunet::Docker_run['satosa']
}
}
sunet::misc::ufw_allow { 'satosa-allow-https':
from => 'any',
ufw::allow { 'satosa-allow-https':
ip => 'any',
port => '443'
}
sunet::misc::ufw_allow { 'satosa-allow-http':
$dehydrated_status = $dehydrated_name ? {
undef => 'absent',
default => 'present'
}
sunet::docker_run {'alwayshttps':
ensure => $dehydrated_status,
from => 'any',
image => 'docker.sunet.se/always-https',
ports => ['80:80'],
env => ['ACME_URL=http://acme-c.sunet.se']
}
ufw::allow { 'satosa-allow-http':
ensure => $dehydrated_status,
ip => 'any',
port => '80'
}
if ($dehydrated_name) {
@@ -74,6 +77,12 @@ class sunetdrive::satosa($dehydrated_name=undef,$image='docker.sunet.se/satosa',
cert_file => '/etc/satosa/https.crt'
}
}
file { '/opt/satosa':
ensure => directory,
owner => 'root',
group => 'root',
mode => '0755',
}
-> file { '/opt/satosa/restart.sh':
ensure => file,
owner => 'root',

View file

@@ -3,8 +3,6 @@ class sunetdrive::script (
$bootstrap = undef,
$location = undef
) {
include sunet::packages::python3_pip
include sunet::packages::kopia
$environment = sunetdrive::get_environment()
$customer = sunetdrive::get_customer()
$apikey_test = safe_hiera('monitor_apikey_test')
@@ -19,21 +17,7 @@ class sunetdrive::script (
$backup_server = $config['backup_server']
$rclone_url = 'https://downloads.rclone.org/rclone-current-linux-amd64.deb'
$local_path = '/tmp/rclone-current-linux-amd64.deb'
$singlenodes = lookup('singlenodes')
$multinodes = keys(lookup('multinode_mapping'))
$extra_backup_jobs = pick($config['extra_backup_jobs'], {})
if $customer == 'mdu' {
$eppn_suffix = 'mdh.se'
$include_userbuckets = 'true'
} elsif $customer == 'uu' {
$eppn_suffix = 'users.uu.se'
$include_userbuckets = 'false'
}
else {
$eppn_suffix = "${customer}.se"
$include_userbuckets = 'false'
}
$singlenodes = hiera('singlenodes')
$ssh_config = "Host *.sunet.se
User script
@@ -57,17 +41,6 @@ class sunetdrive::script (
}
$site_name = $config['site_name']
$user_bucket_name = $config['user_bucket_name']
if $config['user_scans'] {
$config['user_scans'].each |$job| {
sunet::scriptherder::cronjob { $job['name']:
cmd => "ssh -t -l script ${job['server']} /usr/bin/sudo /usr/local/bin/occ ${job['container']} files:scan ${job['user']}",
hour => $job['hour'],
minute => $job['minute'],
ok_criteria => ['exit_status=0','max_age=1d'],
warn_criteria => ['exit_status=1','max_age=2d'],
}
}
}
# It is a start that will get us user buckets and primary buckets
$backup_projects = $location
@@ -81,11 +54,11 @@ class sunetdrive::script (
source => $local_path,
require => Exec['rclone_deb'],
}
package { 'fuse3':
package { 'python3.9':
ensure => installed,
provider => apt,
}
package { 'python3':
-> package { 'python3-pip':
ensure => installed,
provider => apt,
}
@@ -93,29 +66,11 @@ class sunetdrive::script (
ensure => installed,
provider => apt,
}
package { 'xmlstarlet':
ensure => installed,
provider => apt,
}
$drive_version = '0.3.1'
if $facts['os']['distro']['id'] == 'Debian' {
$pip_cmd = 'pip3 install --break-system-packages'
} else {
$pip_cmd = 'python3 -m pip install'
}
exec { 'drive-utils':
command => "${pip_cmd} https://pypi.sunet.se/packages/drive-utils-${drive_version}.tar.gz",
unless => "python3 -m pip list | grep drive-utils | grep ${drive_version}",
require => Package['python3'],
}
file { '/opt/backups':
ensure => directory,
mode => '0700'
}
file { '/opt/backups/scripts':
ensure => directory,
mode => '0700'
command => "python3.9 -m pip install https://pypi.sunet.se/packages/drive-utils-${drive_version}.tar.gz",
unless => "python3.9 -m pip list | grep drive-utils | grep ${drive_version}",
require => Package['python3.9'],
}
file { '/root/.ssh/':
ensure => directory,
@@ -184,6 +139,13 @@ class sunetdrive::script (
group => 'root',
mode => '0700',
}
file { '/root/tasks/switch_redis_master_to.sh':
ensure => file,
content => template('sunetdrive/script/switch_redis_master_to.erb.sh'),
owner => 'root',
group => 'root',
mode => '0700',
}
file { '/root/tasks/restart-nextcloud-farm':
ensure => file,
content => template('sunetdrive/script/restart-nextcloud-farm.erb'),
@@ -254,12 +216,6 @@ class sunetdrive::script (
group => 'root',
mode => '0700',
}
file_line { 'FIXME_remove_when_s3_migration_done_in_sto3':
ensure => 'present',
line => '37.156.195.53 s3.sto3.safedc.net',
path => '/etc/hosts',
match => '^37.156.195.53',
}
if $environment == 'test' {
sunet::scriptherder::cronjob { 'reboot-customer':
cmd => '/root/tasks/reboot-customer.sh',
@@ -269,13 +225,8 @@ class sunetdrive::script (
warn_criteria => ['exit_status=1','max_age=31d'],
}
}
# Opt out of userbuckets, also customers that ended the contract
if $customer in ['extern', 'gih', 'hkr', 'suni', 'common', 'su', 'lnu'] {
sunet::scriptherder::cronjob { 'makebuckets':
ensure => absent,
cmd => 'bin/true',
}
} else {
# Opt out of userbuckets
unless $customer in ['extern', 'gih', 'suni', 'common'] {
sunet::scriptherder::cronjob { 'makebuckets':
cmd => '/root/tasks/makebuckets.sh',
minute => '*/5',
@@ -309,69 +260,31 @@ class sunetdrive::script (
}
# Opt in to folder structure in projectbuckets
if $customer in ['gih', 'mdu'] {
sunet::scriptherder::cronjob { 'create_folders_in_project_buckets':
ensure => absent,
cmd => 'true',
}
file { '/root/tasks/create_folders_in_project_buckets.sh':
ensure => absent,
}
file { '/root/tasks/create_folders_in_fullnode_buckets.sh':
ensure => file,
content => template('sunetdrive/script/create_folders_in_fullnode_buckets.erb.sh'),
content => template('sunetdrive/script/create_folders_in_project_buckets.erb.sh'),
owner => 'root',
group => 'root',
mode => '0700',
}
}
if $customer in ['gih'] {
sunet::scriptherder::cronjob { 'create_folders_in_fullnode_buckets':
cmd => '/root/tasks/create_folders_in_fullnode_buckets.sh',
sunet::scriptherder::cronjob { 'create_folders_in_project_buckets':
cmd => '/root/tasks/create_folders_in_project_buckets.sh',
minute => '*/30',
ok_criteria => ['exit_status=0','max_age=1h'],
warn_criteria => ['exit_status=1','max_age=2h'],
}
}
if $customer in ['mdu'] {
sunet::scriptherder::cronjob { 'create_folders_in_fullnode_buckets':
cmd => '/root/tasks/create_folders_in_fullnode_buckets.sh "Arbetsmaterial (work material)" "Bevarande (retention)" "Gallringsbart (disposal)"',
sunet::scriptherder::cronjob { 'create_folders_in_project_buckets':
cmd => '/root/tasks/create_folders_in_project_buckets.sh "Arbetsmaterial (work material)" "Bevarande (retention)" "Gallringsbart (disposal)"',
minute => '*/30',
ok_criteria => ['exit_status=0','max_age=1h'],
warn_criteria => ['exit_status=1','max_age=2h'],
}
}
if $customer == 'common' {
$multinode_passwords = $multinodes.map | $index, $customer | {
safe_hiera("${customer}_admin_app_password")
}
file { '/root/tasks/announce.sh':
ensure => file,
content => template('sunetdrive/script/multinodeannounce.erb.sh'),
owner => 'root',
group => 'root',
mode => '0700',
}
file { '/root/tasks/backupmultinodedb.sh':
ensure => file,
content => template('sunetdrive/script/backupmultinodedb.erb.sh'),
owner => 'root',
group => 'root',
mode => '0700',
}
file { '/opt/backups/scripts/hb.sh':
ensure => file,
content => template('sunetdrive/script/backup-hb.erb.sh'),
owner => 'root',
group => 'root',
mode => '0700',
}
sunet::scriptherder::cronjob { 'backupmultinodedb':
cmd => '/root/tasks/backupmultinodedb.sh',
hour => '2',
minute => '0',
ok_criteria => ['exit_status=0','max_age=2d'],
warn_criteria => ['exit_status=1','max_age=3d'],
}
if $environment == 'prod' {
file { '/root/tasks/aggregate.sh':
ensure => file,
@@ -387,22 +300,33 @@ class sunetdrive::script (
ok_criteria => ['exit_status=0','max_age=2d'],
warn_criteria => ['exit_status=1','max_age=3d'],
}
}
file { '/root/tasks/backupsinglenodedb.sh':
ensure => file,
content => template('sunetdrive/script/backupsinglenodedb.erb.sh'),
owner => 'root',
group => 'root',
mode => '0700',
}
$singlenodes.each | $singlenode| {
$multinode = hiera_hash('multinode_mapping')[$singlenode]['server']
$multinodeserver = "${multinode}.${site_name}"
$nccontainer = "nextcloud-${singlenode}-app-1"
$nccontainer = "nextcloud${singlenode}_app_1"
sunet::scriptherder::cronjob { "backup${singlenode}db":
ensure => absent,
cmd => 'true',
}
sunet::scriptherder::cronjob { "listusers_${singlenode}":
cmd => "/root/tasks/listusers.sh ${singlenode} ${multinodeserver}",
minute => '*/5',
ok_criteria => ['exit_status=0','max_age=30m'],
warn_criteria => ['exit_status=1', 'max_age=60m'],
}
sunet::scriptherder::cronjob { "backup${singlenode}db":
cmd => "/root/tasks/backupsinglenodedb.sh ${multinodeserver} ${singlenode}",
hour => '2',
minute => '0',
ok_criteria => ['exit_status=0','max_age=2d'],
warn_criteria => ['exit_status=1','max_age=3d'],
}
if $environment == 'prod' {
sunet::scriptherder::cronjob { "statistics${singlenode}":
cmd => "/root/tasks/usage.sh ${singlenode} ${multinodeserver}",
@ -412,7 +336,7 @@ class sunetdrive::script (
warn_criteria => ['exit_status=1','max_age=3d'],
}
}
unless $singlenode in ['mau', 'uu'] {
unless $singlenode in ['mau'] {
sunet::scriptherder::cronjob { "make${singlenode}buckets":
cmd => "/root/tasks/makebuckets.sh ${multinodeserver} ${nccontainer} ${singlenode}-${environment}",
minute => '*',
@ -421,45 +345,23 @@ class sunetdrive::script (
}
}
}
$gss_backup_server = $config['gss_backup_server']
$lookup_backup_server = $config['lookup_backup_server']
sunet::scriptherder::cronjob { 'backupgssdb':
ensure => 'absent',
cmd => 'true',
cmd => "/root/tasks/backupdb.sh ${gss_backup_server}",
hour => '2',
minute => '0',
ok_criteria => ['exit_status=0','max_age=2d'],
warn_criteria => ['exit_status=1','max_age=3d'],
}
sunet::scriptherder::cronjob { 'backuplookupdb':
ensure => 'absent',
cmd => 'true',
cmd => "/root/tasks/backupdb.sh ${lookup_backup_server}",
hour => '2',
minute => '0',
ok_criteria => ['exit_status=0','max_age=2d'],
warn_criteria => ['exit_status=1','max_age=3d'],
}
} else {
$admin_app_password = safe_hiera('admin_app_password')
file { '/root/tasks/announce.sh':
ensure => file,
content => template('sunetdrive/script/announce.erb.sh'),
owner => 'root',
group => 'root',
mode => '0700',
}
file { '/root/tasks/delete_announcement_with_subject.sh':
ensure => file,
content => template('sunetdrive/script/delete_announcement_with_subject.erb.sh'),
owner => 'root',
group => 'root',
mode => '0700',
}
file { '/root/tasks/backupmultinodedb.sh':
ensure => absent,
}
sunet::scriptherder::cronjob { 'backupmultinodedb':
ensure => absent,
cmd => 'true',
}
sunet::scriptherder::cronjob { 'backupdb':
cmd => "/root/tasks/backupdb.sh ${backup_server}",
hour => '2',

View file

@ -1,23 +1,10 @@
# Class for SUNET-Drive-Script-receiver
class sunetdrive::scriptreceiver()
{
include sunet::packages::yq
sunet::system_user {'script': username => 'script', group => 'script', managehome => true, shell => '/bin/bash' }
# These tasks correspond to a ${task}.erb.sh template
$tasks = [
'list_users',
'list_files_for_user',
'create_bucket',
'backup_db',
'purge_backups',
'maintenancemode',
'restart_sunet_service',
'start_sentinel',
'stop_sentinel',
'removeswap',
'backup_multinode_db'
]
$tasks = ['list_users', 'list_files_for_user', 'create_bucket', 'backup_db', 'purge_backups', 'maintenancemode', 'restart_sunet_service', 'start_sentinel', 'stop_sentinel', 'makeswap']
$environment = sunetdrive::get_environment()
$config = hiera_hash($environment)
@ -35,6 +22,7 @@ class sunetdrive::scriptreceiver()
owner => 'script',
group => 'script',
}
$kano_shell = ['89.46.21.246','2001:6b0:6c::1bc']
sunet::misc::ufw_allow { 'script_port':
from => $script_ipv4 + $script_ipv6 + $kano_shell,
@ -47,9 +35,7 @@ class sunetdrive::scriptreceiver()
type => 'ssh-ed25519',
key => $script_pub_key,
}
file { '/etc/sysctl.d/gofasta.conf':
ensure => 'absent',
}
file { '/opt/rotate':
ensure => directory,
mode => '0750',
@ -62,28 +48,14 @@ class sunetdrive::scriptreceiver()
owner => 'root',
group => 'root',
}
file { '/usr/local/bin/get_drive_customers':
ensure => file,
content => template('sunetdrive/scriptreceiver/get_drive_customers.erb.sh'),
mode => '0744',
owner => 'root',
group => 'root',
}
file { '/usr/local/bin/safer_reboot':
ensure => file,
content => template('sunetdrive/scriptreceiver/safer_reboot.erb'),
mode => '0744',
mode => '0740',
owner => 'root',
group => 'root',
}
file { '/root/.bashrc':
ensure => file,
content => template('sunetdrive/scriptreceiver/bashrc.erb.sh'),
mode => '0644',
owner => 'root',
group => 'root',
}
file { '/etc/sudoers.d/99-safer_reboot':
file { "/etc/sudoers.d/99-safer_reboot":
ensure => file,
content => "script ALL=(root) NOPASSWD: /usr/local/bin/safer_reboot\n",
mode => '0440',
@ -118,19 +90,6 @@ class sunetdrive::scriptreceiver()
minute => '*',
hour => '*',
}
file { '/usr/local/bin/clear_scriptherder':
ensure => file,
content => template('sunetdrive/scriptreceiver/clear_scriptherder.erb.sh'),
mode => '0740',
owner => 'root',
group => 'root',
}
file { '/home/script/bin/makeswap.sh':
ensure => absent,
}
file { '/etc/sudoers.d/99-makeswap':
ensure => absent,
}
$tasks.each |String $task| {
file { "/home/script/bin/${task}.sh":
ensure => file,

View file

@ -7,13 +7,10 @@ class sunetdrive::sitemonitornaemon() {
$tls_servers_with_port = hiera_array('tls_servers_with_port')
$nextcloud_version_prod = split(hiera('nextcloud_version_prod'),'[-]')[0]
$nextcloud_version_test = split(hiera('nextcloud_version_test'),'[-]')[0]
$monitorhost = $facts['networking']['fqdn']
$monitorhost = $::fqdn
$environment = sunetdrive::get_environment()
$influx_passwd = safe_hiera('influx_passwd')
$slack_url = safe_hiera('slack_url')
$extra_host_groups = {
node3_hosts => join($facts['configured_hosts_in_cosmos']['all'].filter |$host| { $host =~ /^node3\./ }, ',')
}
file { '/usr/local/bin/slack_nagios.sh':
ensure => present,
@ -45,20 +42,11 @@ class sunetdrive::sitemonitornaemon() {
}
file { '/etc/naemon/conf.d/sunetdrive_thruk_templates.conf':
ensure => present,
owner => 'naemon',
group => 'naemon',
content => template('sunetdrive/monitor/sunetdrive_thruk_templates.conf.erb'),
mode => '0644',
}
file { '/etc/naemon/conf.d/sunetdrive_extra_hostgroups.cfg':
ensure => present,
content => template('sunetdrive/monitor/sunetdrive_extra_hostgroups.cfg.erb'),
mode => '0644',
}
nagioscfg::service {'check_scriptherder':
hostgroup_name => ['sunetdrive::nrpe'],
check_command => 'check_nrpe_1arg_to300!check_scriptherder',
description => 'Scriptherder Status',
contact_groups => ['naemon-admins'],
}
nagioscfg::service {'check_galera_cluster':
hostgroup_name => ['galera_monitor'],
check_command => 'check_nrpe_1arg!check_galera_cluster',
@ -107,12 +95,6 @@ class sunetdrive::sitemonitornaemon() {
description => 'Status of sarimner interface',
contact_groups => ['alerts']
}
nagioscfg::service {'check_nextcloud_mounts':
hostgroup_name => ['node3_hosts','sunetdrive::multinode'],
check_command => 'check_nrpe_1arg!check_nextcloud_mounts',
description => 'S3 buckets with multiple Nextcloud mounts',
contact_groups => ['alerts']
}
}

View file

@ -1,10 +0,0 @@
# Class for Ubuntu 20.04
class sunetdrive::ubuntu_2004() {
if $facts['os']['name'] == 'Ubuntu' and $facts['os']['distro']['release']['full'] == '20.04' {
# Hide deprecation warnings for Ubuntu 2004
file_line {'env_rubyopt':
path => '/etc/environment',
line => 'RUBYOPT=\'-W0\'',
}
}
}

View file

@ -7,7 +7,7 @@ LoadModule headers_module /usr/lib/apache2/modules/mod_headers.so
ErrorLog ${APACHE_LOG_DIR}/error.log
CustomLog ${APACHE_LOG_DIR}/access.log combined
<Directory /var/www/html/>
<Directory /var/www/nextcloud/>
Require all granted
AllowOverride All
Options FollowSymLinks MultiViews
@ -16,10 +16,6 @@ LoadModule headers_module /usr/lib/apache2/modules/mod_headers.so
Dav off
</IfModule>
</Directory>
<Directory /var/www/html/data>
Order allow,deny
deny from all
</Directory>
</VirtualHost>
@ -51,9 +47,5 @@ LoadModule headers_module /usr/lib/apache2/modules/mod_headers.so
Dav off
</IfModule>
</Directory>
<Directory /var/www/html/data>
Order allow,deny
deny from all
</Directory>
</VirtualHost>
# vim: syntax=apache ts=4 sw=4 sts=4 sr noet

View file

@ -1,14 +1,13 @@
Welcome to your personal space for research data storage!
Hello and welcome to your personal space for research data storage,
Researchers and PhD students at MDU have access to a free storage space of 200 GB.
It is important that research data is managed in a secure and careful manner, so that it is protected from damage and destruction. Mälardalen University offers its researchers and doctoral students a secure and stable IT service for storing research data. It also enables research collaboration internally and externally, since researchers can easily and securely share their data within a project or with other individual researchers at MDU. This technical infrastructure provides a good collaboration space for researchers, secure handling of data, and the possibility to store data in a way that allows data management at MDU to comply with the FAIR principles.
Your storage space is in the “Your storage space” folder that appears a few minutes after opening SUNET Drive for the first time.
To meet the general need for research data storage, MDU has purchased Sunet Drive's S3 research data storage service. Each researcher is offered a space of 200 GB. To simplify administration, all spaces are allocated in advance in the same way, regardless of research domain or total amount of research data.
The “Your storage space” folder has an ABG structure, which means that there are folders for Arbetsmaterial (work material), Bevarande (retention) and Gallringsbart (disposal).
200 GB will not cover all of MDU researchers' data storage needs; therefore, researchers can purchase additional storage space via a digital form. Additional purchases are handled through internal billing, and the purchased space becomes visible in the researcher's personal space at the next login.
Users can themselves create subfolders within these main folders.
If there is a need for a specific project space for research data storage and/or sharing, this is also possible. Such a space is created by an administrator after project space has been ordered through the same form as for purchasing additional storage space. The project space will then be visible to project members in their personal storage space at the next login.
All research data, except for military material and security-classified information, can be stored on SUNET Drive.
If your data needs to undergo an export control, please refer to the check list on the internal portal.
Can an MDU student be included in the research project? It is possible to give a student time-limited access to, and sharing of, specific folders in either the project space or the researcher's personal space. Contact the Data Access Unit (DAU) to enable such a feature.
A user guide and a FAQ regarding SUNET Drive are available on the internal portal. If you have any questions, please contact dau@mdu.se.
If you have any questions about the storage space, please contact dau@mdu.se.

View file

@ -25,7 +25,7 @@ log_errors_max_len = 1024
max_execution_time = 86400
max_file_uploads = 20
max_input_time = 86400
memory_limit = <%= @php_memory_limit_mb %>M
memory_limit = 512M
output_buffering = Off
post_max_size = 30G
precision = 14
@ -151,7 +151,7 @@ ldap.max_links = -1
[dba]
; Nothing here
[opcache]
opcache.interned_strings_buffer=32
; Nothing here
[curl]
; Nothing here
[openssl]

View file

@ -1,42 +0,0 @@
#!/usr/bin/env python3
from collections import Counter
import json
import shlex
import subprocess
import sys
exit = 0
base_message = "OK: no duplicate mounts"
long_message = ""
get_containers = subprocess.Popen('/usr/local/bin/get_containers', stdout=subprocess.PIPE).stdout.read()
containers = get_containers.decode().splitlines()
for i, container in enumerate(containers, start=1):
buckets = []
list_command = f"/usr/local/bin/nocc {container} files_external:list --all --show-password --output json"
command = shlex.split(list_command)
mount_data_byte = subprocess.Popen(command, stdout=subprocess.PIPE).stdout.read()
try:
mount_data = json.loads(mount_data_byte.decode())
except json.decoder.JSONDecodeError as err:
if i == 1 or i != len(containers):
base_message = "WARNING: invalid json"
long_message += f"\ncontainer: {container} - json decode error: {err}"
# lets do exit 0 for now
# exit = 1
continue
for items in mount_data:
buckets.append(items["configuration"]["bucket"])
bucket_count = dict(Counter(buckets))
for k, v in bucket_count.items():
if v > 1:
base_message = "WARNING: buckets with multiple mounts"
long_message += f"\ncontainer: {container} - bucket: {k} - {v}"
# lets do exit 0 for now
# exit = 1
print(base_message)
if long_message != "":
print(long_message.lstrip())
sys.exit(exit)

View file

@ -147,9 +147,7 @@ ldap.max_links = -1
[dba]
; Nothing here
[opcache]
opcache.interned_strings_buffer=16
opcache.validate_timestamps=0
opcache.memory_consumption=128
; Nothing here
[curl]
; Nothing here
[openssl]

View file

@ -3,6 +3,7 @@
config_php='/var/www/html/config/config.php'
dbhost="<%= @dbhost %>"
mysql_user_password="<%= @mysql_user_password %>"
admin_password="<%= @admin_password %>"
location="<%= @location %>"
bucket="<%= @s3_bucket %>"
@ -13,9 +14,6 @@ if [[ "${user_input}" == "IKnowWhatIAmDoing" ]]; then
echo "WARNING: This will delete everything in the database and reinstall Nextcloud."
echo "You have 10 seconds to abort by hitting CTRL/C"
sleep 10s
echo "Setting temp admin password"
apt update && apt install -y apg
admin_password="$(apg -m 40 | head -1)"
echo "Ok, proceeding."
echo "Dropping database in 3 seconds"
sleep 3s
@ -50,11 +48,6 @@ EOF
instanceid=$(grep -E "^ 'instanceid'" ${config_php} | awk -F "'" '{print $4}')
secret=$(grep -E "^ 'secret'" ${config_php} | awk -F "'" '{print $4}')
passwordsalt=$(grep -E "^ 'passwordsalt'" ${config_php} | awk -F "'" '{print $4}')
echo "Now delete the admin user:"
echo " occ user:delete admin"
echo "and then create a new admin user:"
echo " /usr/local/bin/add_admin_user <username> <email address>"
echo ""
echo "Please use edit-secrets to add these variables to all Nextcloud servers:"
echo "instanceid: DEC::PKCS7[${instanceid}]!"
echo "secret: DEC::PKCS7[${secret}]!"

View file

@ -1,5 +1,7 @@
<?php
$CONFIG = array (
'memcache.local' => '\\OC\\Memcache\\APCu',
'appstoreenabled' => false,
'apps_paths' =>
array (
0 =>
@ -15,41 +17,145 @@ $CONFIG = array (
'writable' => true,
),
),
'appstoreenabled' => false,
'auth.bruteforce.protection.enabled' => false,
'config_is_read_only' => true,
'memcache.distributed' => '\\OC\\Memcache\\Redis',
'memcache.locking' => '\\OC\\Memcache\\Redis',
<% if @location == 'gss-test' -%>
'redis.cluster' => [
'seeds' => [
'redis1.drive.test.sunet.se:6379',
'redis2.drive.test.sunet.se:6379',
'redis3.drive.test.sunet.se:6379',
'redis1.drive.test.sunet.se:6380',
'redis2.drive.test.sunet.se:6380',
'redis3.drive.test.sunet.se:6380',
'redis1.drive.test.sunet.se:6381',
'redis2.drive.test.sunet.se:6381',
'redis3.drive.test.sunet.se:6381'
],
'timeout' => 1.1,
'read_timeout' => 0.0,
'password' => '<%= @redis_cluster_password %>',
'failover_mode' => \RedisCluster::FAILOVER_ERROR
],
<% elsif @environment == 'test' && ! @is_multinode -%>
'redis.cluster' => [
'seeds' => [
'redis1.<%= @customer %>.drive.test.sunet.se:6379',
'redis2.<%= @customer %>.drive.test.sunet.se:6379',
'redis3.<%= @customer %>.drive.test.sunet.se:6379',
'redis1.<%= @customer %>.drive.test.sunet.se:6380',
'redis2.<%= @customer %>.drive.test.sunet.se:6380',
'redis3.<%= @customer %>.drive.test.sunet.se:6380',
'redis1.<%= @customer %>.drive.test.sunet.se:6381',
'redis2.<%= @customer %>.drive.test.sunet.se:6381',
'redis3.<%= @customer %>.drive.test.sunet.se:6381'
],
'timeout' => 1.1,
'read_timeout' => 0.0,
'password' => '<%= @redis_cluster_password %>',
'failover_mode' => \RedisCluster::FAILOVER_ERROR
],
<% elsif @environment == 'prod' && ! @is_multinode && @location != 'gss-prod' -%>
'redis.cluster' => [
'seeds' => [
'redis1.<%= @customer %>.drive.sunet.se:6379',
'redis2.<%= @customer %>.drive.sunet.se:6379',
'redis3.<%= @customer %>.drive.sunet.se:6379',
'redis1.<%= @customer %>.drive.sunet.se:6380',
'redis2.<%= @customer %>.drive.sunet.se:6380',
'redis3.<%= @customer %>.drive.sunet.se:6380',
'redis1.<%= @customer %>.drive.sunet.se:6381',
'redis2.<%= @customer %>.drive.sunet.se:6381',
'redis3.<%= @customer %>.drive.sunet.se:6381'
],
'timeout' => 1.1,
'read_timeout' => 0.0,
'password' => '<%= @redis_cluster_password %>',
'failover_mode' => \RedisCluster::FAILOVER_ERROR
],
<% else -%>
'redis' =>
array (
'host' => '<%= @redis_host %>',
'password' => '<%= @redis_host_password %>',
'port' => 6379,
),
<% end -%>
'forcessl' => true,
'overwriteprotocol' => 'https',
'objectstore' =>
array (
'class' => '\\OC\\Files\\ObjectStore\\S3',
'arguments' =>
array (
'bucket' => '<%= @s3_bucket %>',
'key' => '<%= @s3_key %>',
'secret' => '<%= @s3_secret %>',
'region' => 'us-east-1',
'hostname' => '<%= @s3_host %>',
'port' => '',
'objectPrefix' => 'urn:oid:',
'autocreate' => false,
'use_ssl' => true,
'use_path_style' => true,
'legacy_auth' => false,
),
),
'csrf.disabled' => true,
'passwordsalt' => '<%= @passwordsalt %>',
'secret' => '<%= @secret %>',
'trusted_domains' =>
array (
<%- index = 0 -%>
<%- @trusted_domains.each do |item| -%>
<%= index %> => '<%= item %>',<% index += 1 %>
<%- end -%>
),
<%- unless @trusted_proxies.empty? -%>
'trusted_proxies' =>
array (
<%- index = 0 -%>
<%- @trusted_proxies.each do |item| -%>
<%= index %> => '<%= item %>',
<%- index += 1 -%>
<%- end -%>
),
<%- end -%>
'config_is_read_only' => true,
'datadirectory' => '/var/www/html/data',
'davstorage.request_timeout' => 86401,
<%- if @hostnet -%>
'dbhost' => '127.0.0.1',
<%- else -%>
'dbhost' => '<%= @dbhost %>',
<%- end -%>
'dbname' => '<%= @dbname %>',
'dbname' => 'nextcloud',
'dbpassword' => '<%= @mysql_user_password %>',
'dbport' => '3306',
'dbtableprefix' => 'oc_',
'dbtype' => 'mysql',
'dbuser' => '<%= @dbuser %>',
'default_phone_region' => 'SE',
'dbuser' => 'nextcloud',
'drive_email_template_text_left' => '<%= @drive_email_template_text_left %>',
'drive_email_template_plain_text_left' => '<%= @drive_email_template_plain_text_left %>',
'drive_email_template_url_left' => '<%= @drive_email_template_url_left %>',
<% if @location == 'sunet-test' -%>
'filelocking.debug' => true,
<% end -%>
'files_external_allow_create_new_local' => false,
'forcessl' => true,
'gs.enabled' => '<%= @gs_enabled %>',
'gs.federation' => '<%= @gs_federation %>',
'gs.trustedHosts' => ['*.sunet.se'],
'htaccess.RewriteBase' => '/',
'gss.discovery.manual.mapping.file' => '/var/www/html/mappingfile.json',
'gss.discovery.manual.mapping.parameter' => 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6',
'gss.discovery.manual.mapping.regex' => true,
'gss.jwt.key' => '<%= @gss_jwt_key %>',
'gss.master.admin' =>
array (
<%- index = 0 -%>
<%- @gss_master_admin.each do |item| -%>
<%= index %> => '<%= item %>',
<%- index += 1 -%>
<%- end -%>
),
'gss.master.url' => '<%= @gss_master_url %>',
'gss.mode' => '<%= @gss_mode %>',
'gss.user.discovery.module' => '\\OCA\\GlobalSiteSelector\\UserDiscoveryModules\\ManualUserMapping',
'installed' => true,
'instanceid' => '<%= @instanceid %>',
'integrity.check.disabled' => true,
'log_type' => 'file',
'loglevel' => 1,
'loglevel' => 0,
'lookup_server' => '<%= @lookup_server %>',
'mail_domain' => '<%= @mail_domain %>',
'mail_from_address' => '<%= @mail_from_address %>',
'mail_sendmailmode' => 'smtp',
@ -62,126 +168,33 @@ $CONFIG = array (
'mail_smtpport' => '587',
'mail_smtpsecure' => 'tls',
'mail_template_class' => 'OCA\DriveEmailTemplate\EMailTemplate',
'memcache.distributed' => '\\OC\\Memcache\\Redis',
'memcache.local' => '\\OC\\Memcache\\APCu',
'memcache.locking' => '\\OC\\Memcache\\Redis',
'mysql.utf8mb4' => true,
'objectstore' =>
array (
'class' => '\\OC\\Files\\ObjectStore\\S3',
'arguments' =>
array (
'bucket' => '<%= @s3_bucket %>',
'key' => '<%= @s3_key %>',
'secret' => '<%= @s3_secret %>',
'region' => 'us-east-1',
'hostname' => '<%= @s3_host %>',
'port' => '',
'useMultipartCopy' => true,
'objectPrefix' => 'urn:oid:',
'autocreate' => false,
'use_ssl' => true,
'use_path_style' => true,
'legacy_auth' => false,
),
),
'overwrite.cli.url' => 'https://<%= @site_name %>/',
'overwritehost' => '<%= @site_name %>',
'overwriteprotocol' => 'https',
'passwordsalt' => '<%= @passwordsalt %>',
<% if @environment == 'test' && ! @is_multinode -%>
'redis.cluster' => [
'failover_mode' => \RedisCluster::FAILOVER_ERROR,
'password' => '<%= @redis_cluster_password %>',
'read_timeout' => 0.0,
'seeds' => [
'redis1.<%= @customer %>.drive.test.sunet.se:6379',
'redis2.<%= @customer %>.drive.test.sunet.se:6379',
'redis3.<%= @customer %>.drive.test.sunet.se:6379',
'redis1.<%= @customer %>.drive.test.sunet.se:6380',
'redis2.<%= @customer %>.drive.test.sunet.se:6380',
'redis3.<%= @customer %>.drive.test.sunet.se:6380',
'redis1.<%= @customer %>.drive.test.sunet.se:6381',
'redis2.<%= @customer %>.drive.test.sunet.se:6381',
'redis3.<%= @customer %>.drive.test.sunet.se:6381'
],
'timeout' => 1.1
],
<% elsif @environment == 'prod' && ! @is_multinode -%>
'redis.cluster' => [
'failover_mode' => \RedisCluster::FAILOVER_ERROR,
'password' => '<%= @redis_cluster_password %>',
'read_timeout' => 0.0,
'seeds' => [
'redis1.<%= @customer %>.drive.sunet.se:6379',
'redis2.<%= @customer %>.drive.sunet.se:6379',
'redis3.<%= @customer %>.drive.sunet.se:6379',
'redis1.<%= @customer %>.drive.sunet.se:6380',
'redis2.<%= @customer %>.drive.sunet.se:6380',
'redis3.<%= @customer %>.drive.sunet.se:6380',
'redis1.<%= @customer %>.drive.sunet.se:6381',
'redis2.<%= @customer %>.drive.sunet.se:6381',
'redis3.<%= @customer %>.drive.sunet.se:6381'
],
'timeout' => 1.1
],
<% else -%>
'redis' =>
array (
'host' => '<%= @redis_host %>',
'password' => '<%= @redis_host_password %>',
'port' => 6379,
),
<% end -%>
'secret' => '<%= @secret %>',
'skeletondirectory' => '<%= @skeletondirectory %>',
'overwrite.cli.url' => 'https://<%= @site_name %>',
'templatedirectory' => '',
<% if @environment == 'test' -%>
'trashbin_retention_obligation' => 'auto, 30',
<% end -%>
'trusted_domains' =>
array (
<%- index = 0 -%>
<%- @trusted_domains.each do |item| -%>
<%= index %> => '<%= item %>',<% index += 1 %>
<%- end -%>
),
<%- unless @trusted_proxies.empty? -%>
'trusted_proxies' =>
array (
<%- index = 0 -%>
<%- @trusted_proxies.each do |item| -%>
<%= index %> => '<%= item %>',
<%- index += 1 -%>
<%- end -%>
),
<%- end -%>
'skeletondirectory' => '<%= @skeletondirectory %>',
'twofactor_enforced' => 'true',
'twofactor_enforced_groups' =>
array (
0 => 'admin',
1 => 'forcemfa',
<%- if @twofactor_enforced_groups -%>
<%- index = 2 -%>
<%- @twofactor_enforced_groups.each do |item| -%>
<%= index %> => '<%= item %>',
<%- index += 1 -%>
<%- end -%>
<%- end -%>
),
array (
0 => 'admin',
<%- index = 1 -%>
<%- @twofactor_enforced_groups.each do |item| -%>
<%= index %> => '<%= item %>',
<%- index += 1 -%>
<%- end -%>
),
'twofactor_enforced_excluded_groups' =>
array (
<%- if @twofactor_enforced_excluded_groups -%>
<%- index = 0 -%>
<%- @twofactor_enforced_excluded_groups.each do |item| -%>
<%= index %> => '<%= item %>',
<%- index += 1 -%>
<%- end -%>
<%- end -%>
),
'updatechecker' => false,
array (
<%- index = 0 -%>
<%- @twofactor_enforced_excluded_groups.each do |item| -%>
<%= index %> => '<%= item %>',
<%- index += 1 -%>
<%- end -%>
),
'version' => '<%= @nextcloud_version_string %>',
<% if @environment == 'test' -%>
'versions_retention_obligation' => '<%= @expiration_days_min %>, <%= @expiration_days_max %>',
<% end -%>
'app_install_overwrite' =>
array (
0 => 'globalsiteselector',
),
);
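As a quick, hedged sanity check of the Redis cluster seeds configured above, something like the following can be run from an app node (hostname and password are illustrative, not taken from this repo):

#!/bin/bash
# Hypothetical smoke test against one of the cluster seeds above; prints
# cluster_state:ok when the cluster is healthy.
redis-cli -c -h redis1.sunet.drive.test.sunet.se -p 6379 -a 'redis-cluster-password' cluster info | grep cluster_state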

View file

@ -5,10 +5,6 @@ services:
app:
image: docker.sunet.se/drive/nextcloud-custom:<%= @nextcloud_version %>
restart: always
container_name: nextcloud_app_1
<%- if @hostnet -%>
network_mode: host
<%- end -%>
volumes:
- /opt/nextcloud/000-default.conf:/etc/apache2/sites-enabled/000-default.conf
- /opt/nextcloud/mpm_prefork.conf:/etc/apache2/mods-available/mpm_prefork.conf
@ -19,30 +15,28 @@ services:
- /opt/nextcloud/complete_reinstall.sh:/complete_reinstall.sh
- /opt/nextcloud/config.php:/var/www/html/config/config.php
- /opt/nextcloud/nextcloud.log:/var/www/html/data/nextcloud.log
- /opt/nextcloud/audit.log:/var/www/html/data/audit.log
- /opt/nextcloud/rclone.conf:/rclone.conf
<%- if @skeletondirectory -%>
- /opt/nextcloud/skeleton:<%= @skeletondirectory %>
<%- end -%>
<%- if @location =~ /^gss/ -%>
- /opt/nextcloud/mappingfile.json:/var/www/html/mappingfile.json
<%- end -%>
<% if @location =~ /^kau/ -%>
- /mnt:/opt/tmp/
<%- end -%>
<%- if ! @hostnet -%>
networks:
- default
- proxysql_proxysql
<%- end -%>
dns:
- 89.46.20.75
- 89.46.21.29
- 89.32.32.32
<%- if !@hostnet -%>
ports:
- 443:443
<%- end -%>
command: sh -c 'tail -F /var/www/html/data/nextcloud.log /var/www/html/data/audit.log| tee -a /proc/1/fd/2 & apachectl -D FOREGROUND'
command: apachectl -D FOREGROUND
tty: true
<%- if !@hostnet -%>
networks:
proxysql_proxysql:
external: true
<%- end -%>

View file

@ -1,58 +1,57 @@
{
"/(\\w+\\.)*antagning.se$/": "antagning.drive.sunet.se",
"/(\\w+\\.)*bth.se$/": "bth.drive.sunet.se",
"/(\\w+\\.)*chalmers.se$/": "chalmers.drive.sunet.se",
"/(\\w+\\.)*du.se$/": "du.drive.sunet.se",
"/(\\w+\\.)*eduid.se$/": "extern.drive.sunet.se",
"/(\\w+\\.)*esh.se$/": "esh.drive.sunet.se",
"/(\\w+\\.)*fhs.se$/": "fhs.drive.sunet.se",
"/(\\w+\\.)*gih.se$/": "gih.drive.sunet.se",
"/(\\w+\\.)*gu.se$/": "gu.drive.sunet.se",
"/(\\w+\\.)*hb.se$/": "hb.drive.sunet.se",
"/(\\w+\\.)*shh.se$/": "shh.drive.sunet.se",
"/(\\w+\\.)*hh.se$/": "hh.drive.sunet.se",
"/(\\w+\\.)*hhs.se$/": "hhs.drive.sunet.se",
"/(\\w+\\.)*hig.se$/": "hig.drive.sunet.se",
"/(\\w+\\.)*his.se$/": "his.drive.sunet.se",
"/(\\w+\\.)*hj.se$/": "hj.drive.sunet.se",
"/(\\w+\\.)*hkr.se$/": "hkr.drive.sunet.se",
"/(\\w+\\.)*hv.se$/": "hv.drive.sunet.se",
"/(\\w+\\.)*irf.se$/": "irf.drive.sunet.se",
"/(\\w+\\.)*kb.se$/": "kb.drive.sunet.se",
"/(\\w+\\.)*ki.se$/": "ki.drive.sunet.se",
"/(\\w+\\.)*kkh.se$/": "kkh.drive.sunet.se",
"/(\\w+\\.)*kmh.se$/": "kmh.drive.sunet.se",
"/(\\w+\\.)*konstfack.se$/": "konstfack.drive.sunet.se",
"/(\\w+\\.)*kth.se$/": "kth.drive.sunet.se",
"/(\\w+\\.)*kva.se$/": "kva.drive.sunet.se",
"/(\\w+\\.)*liu.se$/": "liu.drive.sunet.se",
"/(\\w+\\.)*lnu.se$/": "lnu.drive.sunet.se",
"/(\\w+\\.)*ltu.se$/": "ltu.drive.sunet.se",
"/(\\w+\\.)*lu.se$/": "lu.drive.sunet.se",
"/(\\w+\\.)*mah.se$/": "mau.drive.sunet.se",
"/(\\w+\\.)*mau.se$/": "mau.drive.sunet.se",
"/(\\w+\\.)*mdh.se$/": "mdu.drive.sunet.se",
"/(\\w+\\.)*mdu.se$/": "mdu.drive.sunet.se",
"/(\\w+\\.)*miun.se$/": "miun.drive.sunet.se",
"/(\\w+\\.)*nordunet.se$/": "nordunet.drive.sunet.se",
"/(\\w+\\.)*nordu.net$/": "nordunet.drive.sunet.se",
"/(\\w+\\.)*nrm.se$/": "nrm.drive.sunet.se",
"/(\\w+\\.)*oru.se$/": "oru.drive.sunet.se",
"/(\\w+\\.)*rkh.se$/": "rkh.drive.sunet.se",
"/(\\w+\\.)*sics.se$/": "sics.drive.sunet.se",
"/(\\w+\\.)*slu.se$/": "slu.drive.sunet.se",
"/(\\w+\\.)*smhi.se$/": "smhi.drive.sunet.se",
"/(\\w+\\.)*sp.se$/": "sp.drive.sunet.se",
"/(\\w+\\.)*su.se$/": "su.drive.sunet.se",
"/(\\w+\\.)*sunet.se$/": "sunet.drive.sunet.se",
"/(\\w+\\.)*suni.se$/": "suni.drive.sunet.se",
"/(\\w+\\.)*swamid.se$/": "swamid.drive.sunet.se",
"/(\\w+\\.)*ths.se$/": "ths.drive.sunet.se",
"/(\\w+\\.)*uhr.se$/": "uhr.drive.sunet.se",
"/(\\w+\\.)*umu.se$/": "umu.drive.sunet.se",
"/(\\w+\\.)*uniarts.se$/": "uniarts.drive.sunet.se",
"/(\\w+\\.)*uu.se$/": "uu.drive.sunet.se",
"/(\\w+\\.)*vinnova.se$/": "vinnova.drive.sunet.se",
"/(\\w+\\.)*vr.se$/": "vr.drive.sunet.se",
"/antagning.se$/": "antagning.drive.sunet.se",
"/bth.se$/": "bth.drive.sunet.se",
"/chalmers.se$/": "chalmers.drive.sunet.se",
"/du.se$/": "du.drive.sunet.se",
"/eduid.se$/": "extern.drive.sunet.se",
"/esh.se$/": "esh.drive.sunet.se",
"/fhs.se$/": "fhs.drive.sunet.se",
"/gih.se$/": "gih.drive.sunet.se",
"/gu.se$/": "gu.drive.sunet.se",
"/hb.se$/": "hb.drive.sunet.se",
"/shh.se$/": "shh.drive.sunet.se",
"/hh.se$/": "hh.drive.sunet.se",
"/hhs.se$/": "hhs.drive.sunet.se",
"/hig.se$/": "hig.drive.sunet.se",
"/his.se$/": "his.drive.sunet.se",
"/hj.se$/": "hj.drive.sunet.se",
"/hkr.se$/": "hkr.drive.sunet.se",
"/hv.se$/": "hv.drive.sunet.se",
"/irf.se$/": "irf.drive.sunet.se",
"/kb.se$/": "kb.drive.sunet.se",
"/ki.se$/": "ki.drive.sunet.se",
"/kkh.se$/": "kkh.drive.sunet.se",
"/kmh.se$/": "kmh.drive.sunet.se",
"/konstfack.se$/": "konstfack.drive.sunet.se",
"/kth.se$/": "kth.drive.sunet.se",
"/kva.se$/": "kva.drive.sunet.se",
"/liu.se$/": "liu.drive.sunet.se",
"/lnu.se$/": "lnu.drive.sunet.se",
"/ltu.se$/": "ltu.drive.sunet.se",
"/lu.se$/": "lu.drive.sunet.se",
"/mah.se$/": "mau.drive.sunet.se",
"/mau.se$/": "mau.drive.sunet.se",
"/mdh.se$/": "mdu.drive.sunet.se",
"/mdu.se$/": "mdu.drive.sunet.se",
"/miun.se$/": "miun.drive.sunet.se",
"/nordunet.se$/": "nordunet.drive.sunet.se",
"/nrm.se$/": "nrm.drive.sunet.se",
"/oru.se$/": "oru.drive.sunet.se",
"/rkh.se$/": "rkh.drive.sunet.se",
"/sics.se$/": "sics.drive.sunet.se",
"/slu.se$/": "slu.drive.sunet.se",
"/smhi.se$/": "smhi.drive.sunet.se",
"/sp.se$/": "sp.drive.sunet.se",
"/su.se$/": "su.drive.sunet.se",
"/sunet.se$/": "sunet.drive.sunet.se",
"/suni.se$/": "suni.drive.sunet.se",
"/swamid.se$/": "swamid.drive.sunet.se",
"/ths.se$/": "ths.drive.sunet.se",
"/uhr.se$/": "uhr.drive.sunet.se",
"/umu.se$/": "umu.drive.sunet.se",
"/uniarts.se$/": "uniarts.drive.sunet.se",
"/uu.se$/": "uu.drive.sunet.se",
"/vinnova.se$/": "vinnova.drive.sunet.se",
"/vr.se$/": "vr.drive.sunet.se",
"/$/": "extern.drive.sunet.se"
}

View file

@ -1,58 +1,57 @@
{
"/(\\w+\\.)*antagning.se$/": "antagning.drive.test.sunet.se",
"/(\\w+\\.)*bth.se$/": "bth.drive.test.sunet.se",
"/(\\w+\\.)*chalmers.se$/": "chalmers.drive.test.sunet.se",
"/(\\w+\\.)*du.se$/": "du.drive.test.sunet.se",
"/(\\w+\\.)*eduid.se$/": "extern.drive.test.sunet.se",
"/(\\w+\\.)*esh.se$/": "esh.drive.test.sunet.se",
"/(\\w+\\.)*fhs.se$/": "fhs.drive.test.sunet.se",
"/(\\w+\\.)*gih.se$/": "gih.drive.test.sunet.se",
"/(\\w+\\.)*gu.se$/": "gu.drive.test.sunet.se",
"/(\\w+\\.)*hb.se$/": "hb.drive.test.sunet.se",
"/(\\w+\\.)*shh.se$/": "shh.drive.test.sunet.se",
"/(\\w+\\.)*hh.se$/": "hh.drive.test.sunet.se",
"/(\\w+\\.)*hhs.se$/": "hhs.drive.test.sunet.se",
"/(\\w+\\.)*hig.se$/": "hig.drive.test.sunet.se",
"/(\\w+\\.)*his.se$/": "his.drive.test.sunet.se",
"/(\\w+\\.)*hj.se$/": "hj.drive.test.sunet.se",
"/(\\w+\\.)*hkr.se$/": "hkr.drive.test.sunet.se",
"/(\\w+\\.)*hv.se$/": "hv.drive.test.sunet.se",
"/(\\w+\\.)*irf.se$/": "irf.drive.test.sunet.se",
"/(\\w+\\.)*kb.se$/": "kb.drive.test.sunet.se",
"/(\\w+\\.)*ki.se$/": "ki.drive.test.sunet.se",
"/(\\w+\\.)*kkh.se$/": "kkh.drive.test.sunet.se",
"/(\\w+\\.)*kmh.se$/": "kmh.drive.test.sunet.se",
"/(\\w+\\.)*konstfack.se$/": "konstfack.drive.test.sunet.se",
"/(\\w+\\.)*kth.se$/": "kth.drive.test.sunet.se",
"/(\\w+\\.)*kva.se$/": "kva.drive.test.sunet.se",
"/(\\w+\\.)*liu.se$/": "liu.drive.test.sunet.se",
"/(\\w+\\.)*lnu.se$/": "lnu.drive.test.sunet.se",
"/(\\w+\\.)*ltu.se$/": "ltu.drive.test.sunet.se",
"/(\\w+\\.)*lu.se$/": "lu.drive.test.sunet.se",
"/(\\w+\\.)*mah.se$/": "mau.drive.test.sunet.se",
"/(\\w+\\.)*mau.se$/": "mau.drive.test.sunet.se",
"/(\\w+\\.)*mdh.se$/": "mdu.drive.test.sunet.se",
"/(\\w+\\.)*mdu.se$/": "mdu.drive.test.sunet.se",
"/(\\w+\\.)*miun.se$/": "miun.drive.test.sunet.se",
"/(\\w+\\.)*nordunet.se$/": "nordunet.drive.test.sunet.se",
"/(\\w+\\.)*nordu.net$/": "nordunet.drive.test.sunet.se",
"/(\\w+\\.)*nrm.se$/": "nrm.drive.test.sunet.se",
"/(\\w+\\.)*oru.se$/": "oru.drive.test.sunet.se",
"/(\\w+\\.)*rkh.se$/": "rkh.drive.test.sunet.se",
"/(\\w+\\.)*sics.se$/": "sics.drive.test.sunet.se",
"/(\\w+\\.)*slu.se$/": "slu.drive.test.sunet.se",
"/(\\w+\\.)*smhi.se$/": "smhi.drive.test.sunet.se",
"/(\\w+\\.)*sp.se$/": "sp.drive.test.sunet.se",
"/(\\w+\\.)*su.se$/": "su.drive.test.sunet.se",
"/(\\w+\\.)*sunet.se$/": "sunet.drive.test.sunet.se",
"/(\\w+\\.)*suni.se$/": "suni.drive.test.sunet.se",
"/(\\w+\\.)*swamid.se$/": "swamid.drive.test.sunet.se",
"/(\\w+\\.)*ths.se$/": "ths.drive.test.sunet.se",
"/(\\w+\\.)*uhr.se$/": "uhr.drive.test.sunet.se",
"/(\\w+\\.)*umu.se$/": "umu.drive.test.sunet.se",
"/(\\w+\\.)*uniarts.se$/": "uniarts.drive.test.sunet.se",
"/(\\w+\\.)*uu.se$/": "uu.drive.test.sunet.se",
"/(\\w+\\.)*vinnova.se$/": "vinnova.drive.test.sunet.se",
"/(\\w+\\.)*vr.se$/": "vr.drive.test.sunet.se",
"/antagning.se$/": "antagning.drive.test.sunet.se",
"/bth.se$/": "bth.drive.test.sunet.se",
"/chalmers.se$/": "chalmers.drive.test.sunet.se",
"/du.se$/": "du.drive.test.sunet.se",
"/eduid.se$/": "extern.drive.test.sunet.se",
"/esh.se$/": "esh.drive.test.sunet.se",
"/fhs.se$/": "fhs.drive.test.sunet.se",
"/gih.se$/": "gih.drive.test.sunet.se",
"/gu.se$/": "gu.drive.test.sunet.se",
"/hb.se$/": "hb.drive.test.sunet.se",
"/shh.se$/": "shh.drive.test.sunet.se",
"/hh.se$/": "hh.drive.test.sunet.se",
"/hhs.se$/": "hhs.drive.test.sunet.se",
"/hig.se$/": "hig.drive.test.sunet.se",
"/his.se$/": "his.drive.test.sunet.se",
"/hj.se$/": "hj.drive.test.sunet.se",
"/hkr.se$/": "hkr.drive.test.sunet.se",
"/hv.se$/": "hv.drive.test.sunet.se",
"/irf.se$/": "irf.drive.test.sunet.se",
"/kb.se$/": "kb.drive.test.sunet.se",
"/ki.se$/": "ki.drive.test.sunet.se",
"/kkh.se$/": "kkh.drive.test.sunet.se",
"/kmh.se$/": "kmh.drive.test.sunet.se",
"/konstfack.se$/": "konstfack.drive.test.sunet.se",
"/kth.se$/": "kth.drive.test.sunet.se",
"/kva.se$/": "kva.drive.test.sunet.se",
"/liu.se$/": "liu.drive.test.sunet.se",
"/lnu.se$/": "lnu.drive.test.sunet.se",
"/ltu.se$/": "ltu.drive.test.sunet.se",
"/lu.se$/": "lu.drive.test.sunet.se",
"/mah.se$/": "mau.drive.test.sunet.se",
"/mau.se$/": "mau.drive.test.sunet.se",
"/mdh.se$/": "mdu.drive.test.sunet.se",
"/mdu.se$/": "mdu.drive.test.sunet.se",
"/miun.se$/": "miun.drive.test.sunet.se",
"/nordunet.se$/": "nordunet.drive.test.sunet.se",
"/nrm.se$/": "nrm.drive.test.sunet.se",
"/oru.se$/": "oru.drive.test.sunet.se",
"/rkh.se$/": "rkh.drive.test.sunet.se",
"/sics.se$/": "sics.drive.test.sunet.se",
"/slu.se$/": "slu.drive.test.sunet.se",
"/smhi.se$/": "smhi.drive.test.sunet.se",
"/sp.se$/": "sp.drive.test.sunet.se",
"/su.se$/": "su.drive.test.sunet.se",
"/sunet.se$/": "sunet.drive.test.sunet.se",
"/suni.se$/": "suni.drive.test.sunet.se",
"/swamid.se$/": "swamid.drive.test.sunet.se",
"/ths.se$/": "ths.drive.test.sunet.se",
"/uhr.se$/": "uhr.drive.test.sunet.se",
"/umu.se$/": "umu.drive.test.sunet.se",
"/uniarts.se$/": "uniarts.drive.test.sunet.se",
"/uu.se$/": "uu.drive.test.sunet.se",
"/vinnova.se$/": "vinnova.drive.test.sunet.se",
"/vr.se$/": "vr.drive.test.sunet.se",
"/$/": "extern.drive.test.sunet.se"
}

View file

@ -1,21 +0,0 @@
#!/bin/bash
if [[ "${1}" =~ ^nextcloud ]]; then
container=${1}
shift
else
container="nextcloud_app_1"
fi
oc_list=$(env| grep 'OC_')
if [[ "x${oc_list}" != "x" ]]; then
for row in $(echo "${oc_list}"); do
MY_VARS="${MY_VARS} -e ${row}"
done
fi
docker exec -i ${MY_VARS} -u www-data ${container} php --define apc.enable_cli=1 /var/www/html/occ "$@"
exit 0
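For reference, a hedged usage sketch of the wrapper above. The container name, OC_ variable and occ subcommand are illustrative; user:resetpassword --password-from-env reads OC_PASS inside the container, which is why the wrapper forwards OC_* variables:

#!/bin/bash
# Hypothetical invocation of the occ wrapper: any OC_* variable in the
# environment is passed into the container by the wrapper's env filter.
export OC_PASS='temporary-password'
/usr/local/bin/occ nextcloud_app_1 user:resetpassword --password-from-env admin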

View file

@ -7,7 +7,6 @@ else
container="nextcloud_app_1"
fi
oc_list=$(env| grep 'OC_')
if [[ "x${oc_list}" != "x" ]]; then
for row in $(echo "${oc_list}"); do
@ -15,19 +14,4 @@ if [[ "x${oc_list}" != "x" ]]; then
done
fi
if [[ ${1} == 'config:editable' ]]; then
echo "config:editable is deprecated"
exit 0
fi
docker exec ${container} chmod u+w /var/www/html/config/
docker exec -ti ${MY_VARS} -u www-data ${container} sh -c 'sed "s/config_is_read_only\(.\) => true,/config_is_read_only\1 => false,/" /var/www/html/config/config.php > /var/www/html/config/config.php.tmp'
docker exec -ti ${MY_VARS} -u www-data ${container} cp /var/www/html/config/config.php.tmp /var/www/html/config/config.php
docker exec -ti ${MY_VARS} -u www-data ${container} rm /var/www/html/config/config.php.tmp
docker exec -ti ${MY_VARS} -u www-data ${container} php --define apc.enable_cli=1 /var/www/html/occ "$@"
docker exec -ti ${MY_VARS} -u www-data ${container} sh -c 'sed "s/config_is_read_only\(.\) => false,/config_is_read_only\1 => true,/" /var/www/html/config/config.php > /var/www/html/config/config.php.tmp'
docker exec -ti ${MY_VARS} -u www-data ${container} cp /var/www/html/config/config.php.tmp /var/www/html/config/config.php
docker exec -ti ${MY_VARS} -u www-data ${container} rm /var/www/html/config/config.php.tmp
docker exec ${container} chmod u-w /var/www/html/config/
exit 0

View file

@ -1,79 +0,0 @@
#!/bin/bash
mountid="${1}"
user="${2}"
container="${3}"
if [[ -z ${mountid} ]] || [[ -z ${user} ]]; then
echo "We need a valid mount id and user to proceed"
echo "Usage: ${0} <mountid> <user> [<container>]"
exit
fi
if [[ -z ${container} ]]; then
container="nextcloud_app_1"
fi
occ="/usr/local/bin/occ ${container}"
function get_config {
${occ} files_external:config ${mountid} ${1} | tr -d '\n\t\r'
}
echo "Gathering information, hang tight."
echo -n "."
bucket="$(get_config bucket)"
echo -n "."
hostname="$(get_config hostname)"
echo -n "."
key="$(get_config key)"
echo -n "."
region="$(get_config region)"
echo -n "."
secret="$(get_config secret)"
jsonfile="/tmp/${user}-user-bucket.json"
mount_point="${user/@/-}"
mount_point="${mount_point/./-}-user-bucket"
echo "This will remount the user bucket with mountid ${mountid} for ${user} as project bucket with mountpoint ${mount_point}."
read -r -p "Press enter to continue"
echo '
[
{
"mount_point": "\/'${mount_point}'",
"storage": "\\OCA\\Files_External\\Lib\\Storage\\AmazonS3",
"authentication_type": "amazons3::accesskey",
"configuration": {
"bucket": "'${bucket}'",
"hostname": "'${hostname}'",
"key": "'${key}'",
"legacy_auth": false,
"port": "443",
"region": "'${region}'",
"secret": "'${secret}'",
"storageClass": "",
"useMultipartCopy": true,
"use_path_style": true,
"use_ssl": true
},
"options": {
"encrypt": true,
"previews": true,
"enable_sharing": true,
"filesystem_check_changes": 0,
"encoding_compatibility": false,
"readonly": false
},
"applicable_users": [
],
"applicable_groups": ["admin"]
}
]
' > "${jsonfile}"
docker cp ${jsonfile} ${container}:/${jsonfile}
${occ} files_external:import /${jsonfile}
docker exec ${container} rm /${jsonfile}
rm ${jsonfile}
${occ} files_external:delete ${mountid}

View file

@ -1,34 +0,0 @@
#!/bin/bash
error_ids=""
# Only run if this is the only instance of this script running
# note: since this script forks to run pgrep, we need -eq 2 here
# shellcheck disable=SC2126
if [[ $(pgrep -a -f "${0}" | grep -v scriptherder | wc -l) -eq 2 ]]; then
# We sleep a deterministic amount of time, which will be between 0 and 128 minutes and always the same within
# a specific host, but will differ between hosts
sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk '{print $1}') / 2))m
errors=''
for container in $(/usr/local/bin/get_containers); do
error_ids="${error_ids} ${container}: "
for id in $(/usr/local/bin/nocc "${container}" files_external:list --all --output json | jq '.[].mount_id' | jq .); do
/usr/local/bin/nocc "${container}" files_external:scan "${id}" | grep Error
# shellcheck disable=SC2181
if [[ ${?} -eq 0 ]]; then
errors="${errors} ${id}"
error_ids="${error_ids} ${id}"
fi
done
done
else
echo "Another instance of this script is already running, exiting"
pgrep -a -f "${0}" | grep -v scriptherder
exit 0
fi
if [[ -n "${errors}" ]]; then
echo "Errors found in the following mounts: ${error_ids}"
exit 1
fi
echo "No errors found"
exit 0
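A minimal sketch of the per-host jitter used at the top of the script: the last octet of the first MAC address is parsed as hex (0-255) and halved, giving a fixed delay of 0-127 minutes per host:

#!/bin/bash
# Hypothetical illustration of the deterministic sleep above: parse the last
# MAC octet as hex and halve it to get this host's constant delay in minutes.
octet=$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk '{print $1}')
echo "this host always sleeps $((16#${octet} / 2)) minutes"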

View file

@ -1,69 +0,0 @@
version: "3.7"
services:
nginx:
image: docker.io/nginxproxy/nginx-proxy:latest
container_name: nginx
networks:
- internal_network
- external_network
dns:
- 89.32.32.32
ports:
- "80:80"
- "443:443"
labels:
com.github.jrcs.letsencrypt_nginx_proxy_companion.nginx_proxy: "true"
volumes:
- /opt/jupyter_site/nginx/certs:/etc/nginx/certs:ro
- /opt/jupyter_site/nginx/conf:/etc/nginx/conf.d
- /opt/jupyter_site/nginx/dhparam:/etc/nginx/dhparam
- /opt/jupyter_site/nginx/html:/usr/share/nginx/html
- /opt/jupyter_site/nginx/vhost:/etc/nginx/vhost.d
- /var/run/docker.sock:/tmp/docker.sock:ro
environment:
- ENABLE_IPV6=true
restart: unless-stopped
acme:
image: docker.io/nginxproxy/acme-companion:latest
container_name: acme
networks:
- external_network
dns:
- 89.32.32.32
volumes:
- /opt/jupyter_site/nginx/acme:/etc/acme.sh
- /opt/jupyter_site/nginx/certs:/etc/nginx/certs:rw
- /opt/jupyter_site/nginx/conf:/etc/nginx/conf.d
- /opt/jupyter_site/nginx/dhparam:/etc/nginx/dhparam
- /opt/jupyter_site/nginx/html:/usr/share/nginx/html
- /opt/jupyter_site/nginx/vhost:/etc/nginx/vhost.d:rw
- /var/run/docker.sock:/var/run/docker.sock:ro
environment:
- NGINX_PROXY_CONTAINER=nginx
- DEFAULT_EMAIL=drive@sunet.se
depends_on:
- nginx
restart: unless-stopped
web:
image: docker.sunet.se/drive/jupyter-site:<%= @site_version %>
container_name: web
restart: always
networks:
- internal_network
ports:
- "127.0.0.1:3000:3000"
dns:
- 89.32.32.32
environment:
- VIRTUAL_HOST=<%= @domain %>
- VIRTUAL_PATH=/
- VIRTUAL_PORT=3000
- LETSENCRYPT_HOST=<%= @domain %>
networks:
external_network:
internal_network:
internal: true

View file

@ -0,0 +1,16 @@
<?php
$CONFIG = [
'DB' => [
'host' => "<%= @dbhost %>",
'db' => "lookup" ,
'user' => "lookup",
'pass' => "<%= @mysql_user_password %>",
],
'GLOBAL_SCALE' => true,
'AUTH_KEY' => "<%= @gss_jwt_key %>",
];

View file

@ -0,0 +1,24 @@
version: '3.2'
services:
app:
image: docker.sunet.se/drive/nextcloud-lookup:<%= @lookup_version %>
restart: always
volumes:
- /opt/lookup/config.php:/var/www/html/config/config.php
networks:
- default
- proxysql_proxysql
dns:
- 89.46.20.75
- 89.46.21.29
- 89.32.32.32
ports:
- 443:443
command: apache2-foreground
tty: true
networks:
proxysql_proxysql:
external: true

View file

@ -0,0 +1,62 @@
SET SQL_MODE = "NO_AUTO_VALUE_ON_ZERO";
SET time_zone = "+00:00";
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
/*!40101 SET NAMES utf8mb4 */;
CREATE DATABASE IF NOT EXISTS `lookup` DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
USE `lookup`;
CREATE USER 'lookup'@'%' IDENTIFIED BY '<%= @mysql_user_password %>';
GRANT ALL PRIVILEGES ON lookup.* TO 'lookup'@'%' IDENTIFIED BY '<%= @mysql_user_password %>';
DROP TABLE IF EXISTS `emailValidation`;
CREATE TABLE IF NOT EXISTS `emailValidation` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`storeId` int(11) NOT NULL,
`token` varchar(16) COLLATE utf8mb4_unicode_ci NOT NULL,
PRIMARY KEY (`id`),
KEY `token` (`token`),
KEY `storeId` (`storeId`)
) ENGINE=InnoDB AUTO_INCREMENT=16 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
DROP TABLE IF EXISTS `store`;
CREATE TABLE IF NOT EXISTS `store` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`userId` int(11) NOT NULL,
`k` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
`v` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
`valid` tinyint(1) NOT NULL DEFAULT '0',
PRIMARY KEY (`id`),
KEY `key` (`k`(191)),
KEY `value` (`v`(191)),
KEY `userId` (`userId`)
) ENGINE=InnoDB AUTO_INCREMENT=51 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
DROP TABLE IF EXISTS `users`;
CREATE TABLE IF NOT EXISTS `users` (
`id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT,
`federationId` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
`timestamp` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (`id`),
KEY `federationId` (`federationId`(191))
) ENGINE=InnoDB AUTO_INCREMENT=15 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
DROP TABLE IF EXISTS `toVerify`;
CREATE TABLE IF NOT EXISTS `toVerify` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`userId` int(11) NOT NULL,
`storeId` int(11) NOT NULL,
`property` varchar(512) COLLATE utf8mb4_unicode_ci NOT NULL,
`location` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
`tries` int(11) NOT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=16 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;

View file

@ -1,3 +0,0 @@
CREATE SCHEMA roundcubemail;
CREATE USER 'roundcube'@'%' IDENTIFIED BY '<%= @roundcube_password %>';
GRANT ALL PRIVILEGES ON roundcubemail.* TO 'roundcube'@'%' IDENTIFIED BY '<%= @roundcube_password %>';

View file

@ -1 +0,0 @@
<%= @custdata.to_json %>

View file

@ -4,7 +4,6 @@ services:
db:
image: docker.sunet.se/drive/mariadb:<%= @mariadb_version %>
container_name: mariadb_db_1
restart: always
volumes:
- /etc/mariadb/backups:/backups
@ -21,6 +20,11 @@ services:
- MYSQL_ROOT_PASSWORD=<%= @mysql_root_password %>
- BOOTSTRAP=<%= @bootstrap %>
- FORCE_BOOTSTRAP=0
command: "--wsrep_cluster_address=gcomm://<%= @db_ip.join(',') %>"
ports:
- 3306:3306
- 4444:4444
- 4567:4567
- 4568:4568
command: "--wsrep_cluster_address=gcomm://<%= @db_ip[0] %>,<%= @db_ip[1] %>,<%= @db_ip[2] %>"
tty: true

View file

@ -1,32 +0,0 @@
#!/bin/bash
<% basedir="statistics:drive-server-coms" -%>
<% cupath="/opt/mariadb/statistics/users/" -%>
<% custdata="/opt/mariadb/statistics/custdata.json" -%>
status=0
<% @custdata.each do |cust,data| -%>
#Customer <%= cust %> has no billing departments.
<% if defined?(data[@environment]["billdomains"]) && data[@environment]["billdomains"] -%>
mkdir -p /opt/mariadb/statistics/users/<%= cust %>
chmod '0700' /opt/mariadb/statistics/users/<%= cust %>
<% data[@environment]["billdomains"].each do |dom| -%>
/root/tasks/listusersbydep.sh <%= cust %> <%= dom %> > /opt/mariadb/statistics/users/<%= cust %>/users-<%= dom.gsub(/[.]/, '-') %>.json
if jq . <%= cupath + cust %>/users-<%= dom.gsub(/[.]/, '-') %>.json &>/dev/null
then
timeout 30s rclone copy -c --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies <%= cupath + cust %>/users-<%= dom.gsub(/[.]/, '-') %>.json <%= basedir%>/<%= cust %>-<%= @environment%>/
[[ $? -eq 0 ]] || { status=1 ; echo "Error: Upload of user data failed." ; }
else
echo "Error in json data"
status=1
fi
<% end -%>
<% end -%>
<% end -%>
if [[ -f <%= custdata %> ]]
then
timeout 30s rclone copy -c --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies <%= custdata %> <%= basedir%>/
fi
exit ${status}

View file

@ -1,24 +0,0 @@
#!/bin/bash
function usage () {
printf "Usage:\t%s <customer> <department email domain>\n" "${0##*/}"
printf "Example:\t%s uu int.uu.se\n" "${0##*/}"
}
[[ -z "${1}" || -z "${2}" || ! "${1}" =~ ^[a-zA-Z0-9.]{1,200}$ || ! "${2}" =~ ^[a-zA-Z0-9.]{1,200}$ ]] && usage && exit 1
depdom="${2}"
customer="${1}"
docker exec mariadb_db_1 /bin/bash -c 'mysql -p${MYSQL_ROOT_PASSWORD} -NB -e '\
$'\'select JSON_PRETTY(JSON_OBJECTAGG(uid,displayname)) from ('\
$' select nextcloud_'"${customer}"$'.oc_accounts.uid,nextcloud_'"${customer}"$'.oc_global_scale_users.displayname'\
$' from nextcloud_'"${customer}"$'.oc_accounts,nextcloud_'"${customer}"$'.oc_global_scale_users'\
$' where nextcloud_'"${customer}"$'.oc_accounts.uid = nextcloud_'"${customer}"$'.oc_global_scale_users.uid'\
$' AND JSON_EXTRACT(nextcloud_'"${customer}"$'.oc_accounts.data, "$.email.value") like "%@'"${depdom}"$'\\\""'\
$' UNION'\
$' select nextcloud_'"${customer}"$'.oc_accounts.uid,nextcloud_'"${customer}"$'.oc_user_saml_users.displayname'\
$' from nextcloud_'"${customer}"$'.oc_accounts,nextcloud_'"${customer}"$'.oc_user_saml_users'\
$' where nextcloud_'"${customer}"$'.oc_accounts.uid = nextcloud_'"${customer}"$'.oc_user_saml_users.uid'\
$' AND JSON_EXTRACT(nextcloud_'"${customer}"$'.oc_accounts.data, "$.email.value") like "%@'"${depdom}"$'\\\"") as users\'' \
| sed 's/\\n/\n/g'

View file

@ -1,4 +0,0 @@
#!/bin/bash
pw=$(yq -r '.services.db.environment[0]' /opt/mariadb/docker-compose.yml | awk -F '=' '{print $2}')
docker exec -ti mariadb_db_1 mysql -u root -p"${pw}" "${@}"

View file

@ -1,3 +0,0 @@
#!/bin/bash
six_hours_ago=$(date -d "6 hours ago" "+%Y-%m-%d %H:%M:%S")
docker exec mariadb_db_1 mysql -u root -p'<%= @mysql_root_password %>' -N -B -e "PURGE BINARY LOGS BEFORE '${six_hours_ago}'"
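A hedged follow-up check that the purge above took effect, mirroring the script's own invocation style:

#!/bin/bash
# Hypothetical verification: list the binary logs remaining after the purge.
docker exec mariadb_db_1 mysql -u root -p'<%= @mysql_root_password %>' -N -B -e "SHOW BINARY LOGS"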

View file

@ -1,6 +1,6 @@
#!/bin/bash
result="$(docker exec -u root mariadb_backup_mariadb_backup_1 mysql -p<%= @mysql_root_password %> -BN -e 'show status like "slave_running"')"
result="$(docker exec mariadbbackup_mariadb_backup_1 mysql -p<%= @mysql_root_password %> -BN -e 'show status like "slave_running"')"
if [[ "${result}" == "Slave_running ON" ]]; then
echo "OK: Replica running"
exit 0

View file

@ -8,12 +8,10 @@ mkdir -p "${backup_dir}"
if [[ -z ${customer} ]]; then
buopts="--slave-info --safe-slave-backup"
dumpopts="--dump-slave"
mysql -p"${MYSQL_ROOT_PASSWORD}" -u root -e "stop slave"
mysql -p${MYSQL_ROOT_PASSWORD} -e "stop slave"
fi
# shellcheck disable=SC2086
mariadb-backup --backup ${buopts} -u root -p"${MYSQL_ROOT_PASSWORD}" --stream=xbstream | gzip >"${backup_dir}/${stream_name}"
# shellcheck disable=SC2086
mysqldump --all-databases --single-transaction ${dumpopts} -u root -p"${MYSQL_ROOT_PASSWORD}" | gzip >"${backup_dir}/${dump_name}"
mariadb-backup --backup ${buopts} -u root -p${MYSQL_ROOT_PASSWORD} --stream=xbstream | gzip >"${backup_dir}/${stream_name}"
mysqldump --all-databases --single-transaction ${dumpopts} -u root -p${MYSQL_ROOT_PASSWORD} | gzip >"${backup_dir}/${dump_name}"
if [[ -z ${customer} ]]; then
mysql -p"${MYSQL_ROOT_PASSWORD}" -u root -e "start slave"
mysql -p${MYSQL_ROOT_PASSWORD} -e "start slave"
fi
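For completeness, a hedged restore sketch for the streamed backup produced above; the path is illustrative and mbstream/mariadb-backup are assumed to be installed:

#!/bin/bash
# Hypothetical restore of a gzipped xbstream backup like the one created above.
backup="/backups/backup.xbstream.gz" # illustrative path
mkdir -p /tmp/restore
gunzip -c "${backup}" | mbstream -x -C /tmp/restore
# Make the extracted datadir consistent before moving it into place.
mariadb-backup --prepare --target-dir=/tmp/restore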

View file

@ -3,8 +3,7 @@ version: '3.2'
services:
mariadb_backup:
image: docker.sunet.se/drive/mariadb:<%= @mariadb_version %>
container_name: mariadb_backup_mariadb_backup_1
image: docker.sunet.se/drive/mariadb
dns:
- 89.46.20.75
- 89.46.21.29

View file

@ -3,7 +3,7 @@
container=${1}
customer=${2}
if [[ -z ${container} ]]; then
container='mariadb_backup_mariadb_backup_1'
container='mariadbbackup_mariadb_backup_1'
fi
if [[ -z ${customer} ]]; then
location='<%= @location %>'

View file

@ -3,7 +3,7 @@
container=${1}
customer=${2}
if [[ -z ${container} ]]; then
container='mariadb_backup_mariadb_backup_1'
container='mariadbbackup_mariadb_backup_1'
fi
if [[ -z ${customer} ]]; then
location='<%= @location %>'
@ -13,18 +13,11 @@ dexec="docker exec ${container}"
password=$(${dexec} env | grep MYSQL_ROOT_PASSWORD | awk -F '=' '{print $2}')
mysql="${dexec} mysql -p${password} -u root"
mysql="${dexec} mysql -p${password}"
if [[ "$(${mysql} -NB -e 'select exists(select * from information_schema.TABLES where TABLE_SCHEMA = "nextcloud" and TABLE_NAME = "oc_global_scale_users")')" == "1" ]]
then
users="$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_global_scale_users')"
fi
users="${users}"$'\n'"$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_users')"
if [[ "$(${mysql} -NB -e 'select exists(select * from information_schema.TABLES where TABLE_SCHEMA = "nextcloud" and TABLE_NAME = "oc_user_saml_users")')" == "1" ]]
then
users="${users}"$'\n'"$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_user_saml_users')"
fi
users="$(echo "${users}" | sort | uniq)"
users="$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_global_scale_users')"
users="${users}
$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_users')"
project="statistics"
bucket="drive-server-coms"
@ -32,7 +25,7 @@ base_dir="${project}:${bucket}"
mountpoint="/opt/statistics"
customer_dir="${mountpoint}/${location}"
mkdir -p "${customer_dir}"
rclone mkdir --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies "${base_dir}/${location}"
rclone mkdir "${base_dir}/${location}"
echo "${users}" | awk 'BEGIN{print "{"} {print t "\""$1"\": \""$2"\""} {t=","} END{print "}"}' | jq . >"${customer_dir}/users.json"
status=0
@ -41,7 +34,7 @@ if ! jq . "${customer_dir}/users.json" &>/dev/null; then
fi
if [[ ${status} -eq 0 ]]; then
# something is wrong if we cant copy the file in 30 seconds, so we should note that
if ! timeout 30s rclone copy -c --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies "${customer_dir}/users.json" "${base_dir}/${location}/"; then
if ! timeout 30s rclone copy --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies "${customer_dir}/users.json" "${base_dir}/${location}/"; then
status=1
fi
fi

View file

@ -1,6 +1,6 @@
[statistics]
type = webdav
url = https://89.45.236.246/remote.php/dav/files/_script/
url = https://89.46.20.42/remote.php/dav/files/_script/
vendor = nextcloud
user = _script
pass = <%= @statistics_secret %>

View file

@ -1,3 +1,3 @@
#!/bin/bash
docker exec mariadb_backup_mariadb_backup_1 mysql -u root -p'<%= @mysql_root_password %>' -N -B -e "show status like 'Slave_running'"
docker exec mariadbbackup_mariadb_backup_1 mysql -u root -p'<%= @mysql_root_password %>' -N -B -e "show status like 'Slave_running'"

View file

@ -1,8 +0,0 @@
<% @extra_host_groups.each do |group, members| -%>
# <%= group %>
define hostgroup {
hostgroup_name <%= group %>
alias <%= group %>
members <%= members %>
}
<% end -%>

View file

@ -131,13 +131,39 @@ define host {
<% end -%>
use monitor-site
}
<% if site.match('lookup') %>
define service {
notes_url https://<%= site %>
action_url /grafana/dashboard/script/histou.js?host=$HOSTNAME$&service=$SERVICEDISPLAYNAME$&theme=light&annotations=true
check_command check_https
check_interval 5
check_period 24x7
<% if @environment == 'prod' %>
contacts slack
<% else -%>
contact_groups naemon-admins
<% end -%>
host_name <%= site %>
max_check_attempts 3
notification_interval 60
notification_period 24x7
retry_interval 1
service_description HTTPS
<% if site.match('test') -%>
servicegroups test-sites
<% else -%>
servicegroups prod-sites
<% end -%>
}
<% end -%>
<% unless site.match('lookup') %>
define service {
notes_url https://<%= site %>/status.php
action_url /grafana/dashboard/script/histou.js?host=$HOSTNAME$&service=$SERVICEDISPLAYNAME$&theme=light&annotations=true
check_command check_nextcloud
check_interval 5
check_period 24x7
<% cur_cust = site.gsub(/\.drive.*/,'') %>
<% cur_cust = site.sub('/\.drive.*/','') %>
# 'check_nextcloud' command definition
<% if @environment == 'prod' and not site.match('test') and @fullnodes.include?(cur_cust) %>
contacts slack
@ -160,12 +186,10 @@ define service {
notes_url https://<%= site %>/status.php
action_url /grafana/dashboard/script/histou.js?host=$HOSTNAME$&service=$SERVICEDISPLAYNAME$&theme=light&annotations=true
<% if site.match('test') -%>
<% version_variable = 'nextcloud_version_test_' << cur_cust -%>
check_command check_nextcloud_version!'<%= @nextcloud_version_test %>'
<% else -%>
<% version_variable = 'nextcloud_version_prod_' << cur_cust -%>
check_command check_nextcloud_version!'<%= @nextcloud_version_prod %>'
<% end -%>
<% version = String(scope.lookupvar(version_variable)).gsub(/-[0-9]+/,'') -%>
check_command check_nextcloud_version!'<%= version %>'
check_interval 5
check_period 24x7
contact_groups naemon-admins
@ -182,3 +206,4 @@ define service {
<% end -%>
}
<% end -%>
<% end -%>

View file

@ -3,12 +3,10 @@
config_php='/var/www/html/config/config.php'
dbhost="<%= @dbhost %>"
mysql_user_password="<%= @mysql_user_password %>"
admin_password="<%= @admin_password %>"
location="<%= @location %>"
bucket="<%= @s3_bucket %>"
customer="<%= @customer %>"
echo "Setting temp admin password"
apt update && apt install -y apg
admin_password="$(apg -m 40 | head -1)"
/usr/bin/mysql -e "drop database nextcloud" -u nextcloud -p"${mysql_user_password}" -h "${dbhost}" >/dev/null 2>&1
/usr/bin/mysql -e "create database nextcloud" -u nextcloud -p"${mysql_user_password}" -h "${dbhost}" >/dev/null 2>&1
@ -37,11 +35,6 @@ EOF
instanceid=$(grep -E "^ 'instanceid'" ${config_php} | awk -F "'" '{print $4}')
secret=$(grep -E "^ 'secret'" ${config_php} | awk -F "'" '{print $4}')
passwordsalt=$(grep -E "^ 'passwordsalt'" ${config_php} | awk -F "'" '{print $4}')
echo "Now delete the admin user:"
echo " occ <container> user:delete admin"
echo "and then create a new admin user:"
echo " /usr/local/bin/add_admin_user <username> <email address> <container>"
echo ""
echo "${customer}_instanceid: DEC::PKCS7[${instanceid}]!"
echo "${customer}_secret: DEC::PKCS7[${secret}]!"
echo "${customer}_passwordsalt: DEC::PKCS7[${passwordsalt}]!"

View file

@ -5,7 +5,7 @@ no_files=30 # Keep this many files as an archive, script is run once a week
# a specific host, but will differ between hosts
sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk '{print $1}') / 2))m
for logfile in $(ls /opt/multinode/*/{nextcloud.log,audit.log,server/server.log}); do
for logfile in $(ls /opt/multinode/*/{nextcloud.log,server/server.log}); do
if [[ -f ${logfile}.gz.${no_files} ]]; then
rm ${logfile}.gz.${no_files}
fi

View file

@ -11,10 +11,11 @@ services:
volumes:
- <%= @redis_conf_dir %>:/data
command: redis-server /data/redis.conf --loglevel verbose
networks:
- proxysql_proxysql
restart: always
networks:
- mariadb<%= @customer %>_<%= @customer %>
networks:
proxysql_proxysql:
mariadb<%= @customer %>_<%= @customer %>:
external: true

View file

@ -13,21 +13,19 @@ services:
- /opt/nextcloud/cli.php.ini:/etc/php/8.0/cli/php.ini
- <%= @config_php_path %>:/var/www/html/config/config.php
- <%= @nextcloud_log_path %>:/var/www/html/data/nextcloud.log
- <%= @audit_log_path %>:/var/www/html/data/audit.log
- <%= @rclone_conf_path %>:/rclone.conf
networks:
- default
- proxysql_proxysql
- mariadb<%= @customer %>_<%= @customer %>
dns:
- 89.46.20.75
- 89.46.21.29
- 89.32.32.32
ports:
- <%= @https_port %>:443
command: sh -c 'tail -F /var/www/html/data/nextcloud.log /var/www/html/data/audit.log| tee -a /proc/1/fd/2 & apachectl -D FOREGROUND'
command: apachectl -D FOREGROUND
tty: true
networks:
proxysql_proxysql:
mariadb<%= @customer %>_<%= @customer %>:
external: true

View file

@ -1,4 +0,0 @@
#!/bin/bash
me=$(hostname -s)
cat /etc/hiera/data/common.yaml | yq -r '.multinode_mapping| to_entries |map({name: .key} + .value)| map(select(.server == "'"${me}"'")) |.[] |.name' | \
grep -Ev "$(cat /etc/hiera/data/common.yaml | yq -r '.singlenodes[]' | sed -e 's/^- //' -e 's/$/|/' | tr -d '\n' | sed 's/|$//')"

View file

@ -1,5 +0,0 @@
#!/bin/bash
me="$(hostname -s)"
cat /etc/hiera/data/common.yaml | yq -r '.multinode_mapping| to_entries |map({name: .key} + .value)| map(select(.server == "'"${me}"'")) |.[] |.name' |
grep -E "$(cat /etc/hiera/data/common.yaml | yq -r '.singlenodes[]' | sed -e 's/^- //' -e 's/$/|/' | tr -d '\n' | sed 's/|$//')"

View file

@ -1,142 +0,0 @@
datadir="/var/lib/proxysql"
# ProxySQL admin configuration section
admin_variables=
{
admin_credentials="admin:<%= @admin_password%>;cluster_admin:<%= @cluster_admin_password %>"
mysql_ifaces="0.0.0.0:6032"
refresh_interval=2000
web_enabled=true
web_port=6080
stats_credentials="stats:<%= @admin_password %>"
cluster_username="cluster_admin"
cluster_password="<%= @cluster_admin_password %>"
cluster_check_interval_ms=200
cluster_check_status_frequency=100
cluster_mysql_query_rules_save_to_disk=true
cluster_mysql_servers_save_to_disk=true
cluster_mysql_users_save_to_disk=true
cluster_proxysql_servers_save_to_disk=true
cluster_mysql_query_rules_diffs_before_sync=3
cluster_mysql_servers_diffs_before_sync=3
cluster_mysql_users_diffs_before_sync=3
cluster_proxysql_servers_diffs_before_sync=3
}
# MySQL/MariaDB related section
mysql_variables=
{
threads=4
max_connections=2048
default_query_delay=0
default_query_timeout=36000000
have_compress=true
poll_timeout=2000
interfaces="0.0.0.0:3306;/tmp/proxysql.sock"
default_schema="information_schema"
stacksize=1048576
server_version="10.5.5"
connect_timeout_server=10000
monitor_history=60000
monitor_connect_interval=2000
monitor_ping_interval=2000
ping_interval_server_msec=10000
ping_timeout_server=200
commands_stats=true
sessions_sort=true
monitor_username="proxysql"
monitor_password="<%= @monitor_password %>"
monitor_galera_healthcheck_interval=2000
monitor_galera_healthcheck_timeout=800
}
# Specify all ProxySQL hosts here
proxysql_servers =
(
<%- index = 0 -%>
<%- @nextcloud_ip.each do |appserver| -%>
<%- index += 1 -%>
{
hostname="<%= appserver %>"
port=6032
comment="proxysql<%= index %>"
},
<%- end -%>
)
# HG10 - single-writer hostgroup
# HG30 - reader hostgroup (writer is also a reader)
mysql_galera_hostgroups =
(
{
writer_hostgroup=10
backup_writer_hostgroup=20
reader_hostgroup=30
offline_hostgroup=9999
max_writers=1
writer_is_also_reader=1
max_transactions_behind=0
active=1
}
)
# List all MariaDB Galera nodes here
mysql_servers =
(
<%- @db_ip.each do |db| -%>
{
address="<%= db %>"
port=3306
hostgroup=10
max_connections=100
},
<%- end -%>
)
# Default query rules:
# - All writes -> HG10 (single-writer)
# - All reads -> HG30 (readers)
mysql_query_rules =
(
{
rule_id=100
active=1
match_pattern="^SELECT .* FOR UPDATE"
destination_hostgroup=10
apply=1
},
{
rule_id=200
active=1
match_pattern="^SELECT .*"
destination_hostgroup=30
apply=1
},
{
rule_id=300
active=1
match_pattern=".*"
destination_hostgroup=10
apply=1
}
)
# All MySQL users that you want to pass through this instance
# - Each MySQL user must be created in the DB server first and granted access from this ProxySQL host
mysql_users =
(
<%- index = 0 -%>
<%- @allnames.each do |customer| -%>
{
username="nextcloud_<%= customer %>"
password="<%= @passwords[index] %>"
default_hostgroup=10
transaction_persistent=<%= @transaction_persistent %>
active=1
},
<%- index += 1 -%>
<%- end -%>
)
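Once this file is loaded, the routing can be verified through the ProxySQL admin interface declared in admin_variables above (password placeholder assumed):

mysql -u admin -p'<admin_password>' -h 127.0.0.1 -P 6032 \
  -e 'SELECT rule_id, active, match_pattern, destination_hostgroup FROM mysql_query_rules;'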

View file

@ -1,21 +0,0 @@
#!/bin/bash
include_paying="${1}"
customers="$(/usr/local/bin/get_non_paying_customers)"
echo "Starting cleanup: $(date)"
if [[ -n ${include_paying} ]]; then
echo "Including paying customers: $(date)"
customers="${customers}
$(/usr/local/bin/get_paying_customers)"
fi
touch /etc/no-automatic-cosmos
for customer in ${customers}; do
echo "Stopping ${customer}: $(date)"
systemctl stop sunet-{redis,nextcloud}-"${customer}"
echo "Pruning docker: $(date)"
docker system prune -af --volumes
echo "Starting ${customer}: $(date)"
systemctl start sunet-{redis,nextcloud}-"${customer}"
done
rm /etc/no-automatic-cosmos
echo "Cleanup done: $(date)"

View file

@ -1,2 +0,0 @@
---
domain: "<%= @domain %>"

View file

@ -1,71 +0,0 @@
version: "3.7"
services:
nginx:
image: docker.io/nginxproxy/nginx-proxy:latest
container_name: nginx
networks:
- internal_network
- external_network
dns:
- 89.32.32.32
ports:
- "80:80"
- "443:443"
labels:
com.github.jrcs.letsencrypt_nginx_proxy_companion.nginx_proxy: "true"
volumes:
- /opt/portal/nginx/certs:/etc/nginx/certs:ro
- /opt/portal/nginx/conf:/etc/nginx/conf.d
- /opt/portal/nginx/dhparam:/etc/nginx/dhparam
- /opt/portal/nginx/html:/usr/share/nginx/html
- /opt/portal/nginx/vhost:/etc/nginx/vhost.d
- /var/run/docker.sock:/tmp/docker.sock:ro
environment:
- ENABLE_IPV6=true
restart: unless-stopped
acme:
image: docker.io/nginxproxy/acme-companion:latest
container_name: acme
networks:
- external_network
dns:
- 89.32.32.32
volumes:
- /opt/portal/nginx/acme:/etc/acme.sh
- /opt/portal/nginx/certs:/etc/nginx/certs:rw
- /opt/portal/nginx/conf:/etc/nginx/conf.d
- /opt/portal/nginx/dhparam:/etc/nginx/dhparam
- /opt/portal/nginx/html:/usr/share/nginx/html
- /opt/portal/nginx/vhost:/etc/nginx/vhost.d:rw
- /var/run/docker.sock:/var/run/docker.sock:ro
environment:
- NGINX_PROXY_CONTAINER=nginx
- DEFAULT_EMAIL=noc@sunet.se
depends_on:
- nginx
restart: unless-stopped
portal:
image: docker.sunet.se/drive/portal:<%= @portal_version %>
container_name: portal
restart: always
networks:
- internal_network
ports:
- "127.0.0.1:8080:8080"
dns:
- 89.32.32.32
volumes:
- /opt/portal/config.yaml:/app/config.yaml
environment:
- VIRTUAL_HOST=portal.<%= @domain %>
- VIRTUAL_PATH=/
- VIRTUAL_PORT=8080
- LETSENCRYPT_HOST=portal.<%= @domain %>
networks:
external_network:
internal_network:
internal: true

View file

@ -4,25 +4,18 @@ services:
proxysql:
image: docker.sunet.se/drive/proxysql:<%= @proxysql_version %>
container_name: proxysql_proxysql_1
<%- if @hostnet -%>
network_mode: host
<%- else -%>
ports:
- 3306:3306
- 6032:6032
- 6080:6080
<%- end -%>
environment:
INITIALIZE: 1
dns:
- 89.46.20.75
- 89.46.21.29
- 89.32.32.32
<%- if !@hostnet -%>
networks:
- proxysql
<%- end -%>
restart: unless-stopped
volumes:
- proxysql:/var/lib/proxysql
@ -32,8 +25,6 @@ services:
volumes:
proxysql:
<%- if !@hostnet -%>
networks:
proxysql:
driver: bridge
<%- end -%>

View file

@ -1,14 +1,8 @@
#!/bin/bash
PATH="${PATH}:/usr/local/bin"
restarted="false"
domain=$(hostname -d)
prefix="intern-db"
if [[ ${domain} =~ ^drive ]]; then
prefix="multinode-db"
fi
for index in 1 2 3; do
db_ip=$(host "${prefix}${index}.${domain}" | awk '/has address/ {print $NF}')
db_ip=$(host "intern-db${index}.$(hostname -d)" | awk '/has address/ {print $NF}')
result=$(proxysql "select * from main.mysql_servers where hostname = '${db_ip}' and hostgroup_id = 10")
if [[ -z ${result} ]]; then
query="INSERT INTO main.mysql_servers (hostgroup_id, hostname, max_connections, comment) VALUES( 10, '${db_ip}', 100, 'Inserted by script at $(date)')"

View file

@ -0,0 +1,58 @@
# ~/.bashrc: executed by bash(1) for non-login shells.
# see /usr/share/doc/bash/examples/startup-files (in the package bash-doc)
# for examples
# If not running interactively, don't do anything
[ -z "$PS1" ] && return
# don't put duplicate lines in the history. See bash(1) for more options
# ... or force ignoredups and ignorespace
HISTCONTROL=ignoredups:ignorespace
# append to the history file, don't overwrite it
shopt -s histappend
# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
HISTSIZE=1000
HISTFILESIZE=2000
# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
shopt -s checkwinsize
# make less more friendly for non-text input files, see lesspipe(1)
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# set a fancy prompt (non-color, unless we know we "want" color)
case "$TERM" in
xterm-color) color_prompt=yes;;
esac
# uncomment for a colored prompt, if the terminal has the capability; turned
# off by default to not distract the user: the focus in a terminal window
# should be on the output of commands, not on the prompt
#force_color_prompt=yes
if [ -n "$force_color_prompt" ]; then
if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then
# We have color support; assume it's compliant with Ecma-48
# (ISO/IEC-6429). (Lack of such support is extremely rare, and such
# a case would tend to support setf rather than setaf.)
color_prompt=yes
else
color_prompt=
fi
fi
if [ "$color_prompt" = yes ]; then
PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
else
PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '
fi
unset color_prompt force_color_prompt
alias redis-cli="redis-cli -a $(grep requirepass /opt/redis/node-0/server.conf | awk '{print $2}' | sed 's/"//g') --no-auth-warning"
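A sketch of what the alias extracts (the config line is assumed):

grep requirepass /opt/redis/node-0/server.conf
# e.g.: requirepass "s3cret"
# awk '{print $2}' takes the second field, sed strips the quotes,
# and redis-cli then authenticates with: s3cret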

View file

@ -5,7 +5,7 @@ redis_password="<%= @redis_password %>"
for index in 1 2 3; do
cur_host="redis${index}.$(hostname -d)"
if [[ "${my_host}" == "${cur_host}" ]]; then
ip="$(facter networking.ip)"
ip="$(hostname -I | awk '{print $1}')"
else
ip="$(host "${cur_host}" | grep "has address" | awk '{print $NF}')"
fi

View file

@ -3,20 +3,17 @@ version: '3.2'
services:
reva-server:
image: docker.sunet.se/drive/sciencemesh-reva:<%= @reva_version %>
# image: docker.sunet.se/drive/sciencemesh-reva:v2.12.0
image: michielbdejong/reva:mentix-fixes
network_mode: host
security_opt:
- seccomp:unconfined
dns:
- 89.46.20.75
- 89.46.21.29
- 89.32.32.32
environment:
- CGO_ENABLED=1
volumes:
- /opt/reva/revad.toml:/etc/revad/revad.toml
- /opt/reva/data:/var/tmp/reva
- /opt/reva/rclone.conf:/root/.rclone.conf
- /opt/reva/<%= @environment %>/<%= @domain %>.crt:/etc/revad/tls/<%= @domain %>.crt
- /opt/reva/<%= @environment %>/<%= @domain %>.key:/etc/revad/tls/<%= @domain %>.key
- /opt/reva/ocm-providers.json:/etc/revad/ocm-providers.json
- /opt/reva/test/drive.test.sunet.se.crt:/etc/revad/tls/drive.test.sunet.se.crt
- /opt/reva/test/drive.test.sunet.se.key:/etc/revad/tls/drive.test.sunet.se.key
restart: always

View file

@ -0,0 +1,14 @@
[
{ "domain": "mesh.pondersource.org", "services": [
{ "endpoint": { "type": { "name": "OCM" }, "path": "https://mesh.pondersource.org/ocm/" }, "host": "https://mesh.pondersource.org" },
{ "endpoint": { "type": { "name": "Webdav" }, "path": "https://cloud.pondersource.org/remote.php/webdav/" }, "host": "https://cloud.pondersource.org" }
] },
{ "domain": "cs3mesh-iop.apps.dcw1.paas.psnc.pl", "services": [
{ "endpoint": { "type": { "name": "OCM" }, "path": "https://cs3mesh-iop.apps.dcw1.paas.psnc.pl/ocm/" }, "host": "https://cs3mesh-iop.apps.dcw1.paas.psnc.pl" },
{ "endpoint": { "type": { "name": "Webdav" }, "path": "https://cs3mesh-drive.apps.dcw1.paas.psnc.pl/remote.php/webdav/" }, "host": "https://cs3mesh-drive.apps.dcw1.paas.psnc.pl" }
] },
{ "domain": "<%= @reva_domain %>", "services": [
{ "endpoint": { "type": { "name": "OCM" }, "path": "https://<%= @reva_domain%>/ocm/" }, "host": "https://<%= @reva_domain %>" },
{ "endpoint": { "type": { "name": "Webdav" }, "path": "https://<%= @domain %>/remote.php/webdav/" }, "host": "https://<%= @domain %>" }
] }
]

View file

@ -1,6 +0,0 @@
[statistics]
type = webdav
url = https://89.45.236.246/remote.php/dav/files/_script/
vendor = nextcloud
user = _script
pass = <%= @statistics_secret %>

View file

@ -1,83 +1,48 @@
[vars]
internal_gateway = "<%= @reva_domain %>"
provider_domain = "<%= @reva_domain %>"
external_reva_endpoint = "https://<%= @reva_domain %>" # append here any route if applicable
efss_sciencemesh_endpoint = "https://<%= @customer %>.<%= @domain %>/index.php/apps/sciencemesh/"
machine_api_key = "<%= @iopsecret %>"
efss_shared_secret = "<%= @shared_secret %>"
[http]
certfile = "/etc/revad/tls/<%= @domain %>.crt"
keyfile = "/etc/revad/tls/<%= @domain %>.key"
[log]
level = "debug"
[shared]
gatewaysvc = "{{ vars.internal_gateway }}:19000"
gatewaysvc = "<%= @reva_domain %>:19000"
# gatewaycertfile = "/etc/revad/tls/revanc1.crt"
# [registry]
# driver = "static"
#
# [registry.static]
# services = ["authprovider","userprovider"]
#
# [registry.static.authprovider]
# bearer = ["localhost:0123"]
# basic = ["localhost:1234"]
# publiclink = ["localhost:9876"]
[grpc]
address = "0.0.0.0:19000"
# certfile = "/etc/revad/tls/revanc1.crt"
# keyfile = "/etc/revad/tls/revanc1.key"
[grpc.services.gateway]
address = ":19000"
authregistrysvc = "{{ grpc.services.authregistry.address }}"
appregistrysvc = "{{ grpc.services.appregistry.address }}"
storageregistrysvc = "{{ grpc.services.storageregistry.address }}"
preferencessvc = "{{ grpc.services.userprovider.address }}"
userprovidersvc = "{{ grpc.services.userprovider.address }}"
usershareprovidersvc = "{{ grpc.services.usershareprovider.address }}"
ocmcoresvc = "{{ grpc.services.ocmcore.address }}"
ocmshareprovidersvc = "{{ grpc.services.ocmshareprovider.address }}"
ocminvitemanagersvc = "{{ grpc.services.ocminvitemanager.address }}"
ocmproviderauthorizersvc = "{{ grpc.services.ocmproviderauthorizer.address }}"
datagateway = "https://{{ http.services.datagateway.address }}/data"
transfer_expires = 6 # give it a moment
commit_share_to_storage_grant = true
commit_share_to_storage_ref = true
[grpc.services.appregistry]
driver = "static"
[grpc.services.appregistry.drivers.static]
mime_types = [
{"mime_type" = "text/plain", "extension" = "txt", "name" = "Text file", "description" = "Text file", "allow_creation" = true},
{"mime_type" = "text/markdown", "extension" = "md", "name" = "Markdown file", "description" = "Markdown file", "allow_creation" = true},
{"mime_type" = "application/vnd.oasis.opendocument.text", "extension" = "odt", "name" = "OpenDocument", "description" = "OpenDocument text document", "default_app" = "Collabora", "allow_creation" = true},
{"mime_type" = "application/vnd.oasis.opendocument.spreadsheet", "extension" = "ods", "name" = "OpenSpreadsheet", "description" = "OpenDocument spreadsheet document", "default_app" = "Collabora", "allow_creation" = true},
{"mime_type" = "application/vnd.oasis.opendocument.presentation", "extension" = "odp", "name" = "OpenPresentation", "description" = "OpenDocument presentation document", "default_app" = "Collabora", "allow_creation" = true},
{"mime_type" = "application/vnd.jupyter", "extension" = "ipynb", "name" = "Jupyter Notebook", "description" = "Jupyter Notebook"}
]
### AUTH PROVIDERS ###
authregistrysvc = "<%= @reva_domain %>:19000"
appprovidersvc = "<%= @reva_domain %>:19000"
appregistry = "<%= @reva_domain %>:19000"
storageregistrysvc = "<%= @reva_domain %>:19000"
preferencessvc = "<%= @reva_domain %>:19000"
userprovidersvc = "<%= @reva_domain %>:19000"
usershareprovidersvc = "<%= @reva_domain %>:19000"
publicshareprovidersvc = "<%= @reva_domain %>:19000"
ocmcoresvc = "<%= @reva_domain %>:19000"
ocmshareprovidersvc = "<%= @reva_domain %>:19000"
ocminvitemanagersvc = "<%= @reva_domain %>:19000"
ocmproviderauthorizersvc = "<%= @reva_domain %>:19000"
commit_share_to_storage_grant = false
datagateway = "https://<%= @reva_domain %>/data"
transfer_expires = 6 # give it a moment
[grpc.services.authregistry]
driver = "static"
[grpc.services.authregistry.drivers.static.rules]
basic = "{{ grpc.services.authprovider[0].address }}"
machine = "{{ grpc.services.authprovider[1].address }}"
ocmshares = "{{ grpc.services.authprovider[2].address }}"
[[grpc.services.authprovider]]
auth_manager = "nextcloud"
[grpc.services.authprovider.auth_managers.nextcloud]
endpoint = "{{ vars.efss_sciencemesh_endpoint }}"
shared_secret = "{{ vars.efss_shared_secret }}"
mock_http = false
[[grpc.services.authprovider]]
auth_manager = "machine"
[grpc.services.authprovider.auth_managers.machine]
api_key = "{{ vars.machine_api_key }}"
gateway_addr = "{{ vars.internal_gateway }}:19000"
[[grpc.services.authprovider]]
auth_manager = "ocmshares"
### STORAGE PROVIDERS ###
basic = "<%= @reva_domain %>:19000"
[grpc.services.storageregistry]
driver = "static"
@ -86,36 +51,8 @@ driver = "static"
home_provider = "/home"
[grpc.services.storageregistry.drivers.static.rules]
"/home" = {"address" = "{{ grpc.services.storageprovider[0].address }}"}
"nextcloud" = {"address" = "{{ grpc.services.storageprovider[0].address }}"}
"/ocm" = {"address" = "{{ grpc.services.storageprovider[1].address }}"}
"ocm" = {"address" = "{{ grpc.services.storageprovider[1].address }}"}
[[grpc.services.storageprovider]]
driver = "nextcloud"
mount_id = "nextcloud"
expose_data_server = true
enable_home_creation = false
data_server_url = "https://localhost:{{ http.services.dataprovider[0].address.port }}/data"
[grpc.services.storageprovider.drivers.nextcloud]
endpoint = "{{ vars.efss_sciencemesh_endpoint }}"
shared_secret = "{{ vars.efss_shared_secret }}"
mock_http = false
[[grpc.services.storageprovider]]
driver = "ocmoutcoming"
mount_id = "ocm"
mount_path = "/ocm"
expose_data_server = true
enable_home_creation = false
data_server_url = "{{ vars.external_reva_endpoint }}/data"
[grpc.services.storageprovider.drivers.ocmoutcoming]
machine_secret = "{{ vars.machine_api_key }}"
### OTHER PROVIDERS ###
"/home" = {"address" = "<%= @reva_domain %>:19000"}
"123e4567-e89b-12d3-a456-426655440000" = {"address" = "<%= @reva_domain %>:19000"}
[grpc.services.usershareprovider]
driver = "memory"
@ -124,148 +61,121 @@ driver = "memory"
driver = "nextcloud"
[grpc.services.ocmcore.drivers.nextcloud]
host = "{{ vars.external_reva_endpoint }}"
endpoint = "{{ vars.efss_sciencemesh_endpoint }}"
shared_secret = "{{ vars.efss_shared_secret }}"
webdav_host = "https://<%= @domain %>/"
endpoint = "https://<%= @domain %>/index.php/apps/sciencemesh/"
shared_secret = "<%= @shared_secret %>"
mock_http = false
[grpc.services.ocminvitemanager]
# TODO the driver should be "nextcloud" once it is implemented
driver = "json"
provider_domain = "{{ vars.provider_domain }}"
[grpc.services.ocmshareprovider]
driver = "nextcloud"
provider_domain = "{{ vars.provider_domain }}"
webdav_endpoint = "{{ vars.external_reva_endpoint }}"
webdav_prefix = "{{ vars.external_reva_endpoint }}/remote.php/dav/files"
# TODO the following should become {{ vars.external_reva_endpoint }}/external/{{.Token}}/...
webapp_template = "https://your.revad.org/external/sciencemesh/{{.Token}}/{relative-path-to-shared-resource}"
[grpc.services.ocmshareprovider.drivers.nextcloud]
webdav_host = "{{ vars.external_reva_endpoint }}"
endpoint = "{{ vars.efss_sciencemesh_endpoint }}"
shared_secret = "{{ vars.efss_shared_secret }}"
webdav_host = "https://<%= @domain %>/"
endpoint = "https://<%= @domain %>/index.php/apps/sciencemesh/"
shared_secret = "<%= @shared_secret %>"
mock_http = false
mount_id = "nextcloud"
[grpc.services.ocmproviderauthorizer]
driver = "mentix"
#driver = "mentix"
driver = "open"
[grpc.services.ocmproviderauthorizer.drivers.mentix]
url = "https://iop.sciencemesh.uni-muenster.de/iop/mentix/cs3"
verify_request_hostname = true
verify_request_hostname = false
insecure = false
timeout = 10
refresh = 900
[grpc.services.ocmproviderauthorizer.drivers.json]
# this is used by the docker-based test deployment, not in production
providers = "providers.testnet.json"
verify_request_hostname = true
[grpc.services.publicshareprovider]
driver = "memory"
[grpc.services.appprovider]
driver = "demo"
iopsecret = "<%= @iopsecret %>"
wopiurl = "http://0.0.0.0:8880/"
wopibridgeurl = "http://localhost:8000/wopib"
[grpc.services.appregistry]
driver = "static"
[grpc.services.appregistry.static.rules]
"text/plain" = "<%= @reva_domain %>:19000"
"text/markdown" = "<%= @reva_domain %>:19000"
"application/compressed-markdown" = "<%= @reva_domain %>:19000"
"application/vnd.oasis.opendocument.text" = "<%= @reva_domain %>:19000"
"application/vnd.oasis.opendocument.spreadsheet" = "<%= @reva_domain %>:19000"
"application/vnd.oasis.opendocument.presentation" = "<%= @reva_domain %>:19000"
[grpc.services.storageprovider]
driver = "nextcloud"
expose_data_server = true
data_server_url = "https://<%= @reva_domain %>/data"
enable_home_creation = true
[grpc.services.storageprovider.drivers.nextcloud]
endpoint = "https://<%= @domain %>/index.php/apps/sciencemesh/"
shared_secret = "<%= @shared_secret %>"
mock_http = false
[grpc.services.authprovider]
auth_manager = "nextcloud"
[grpc.services.authprovider.auth_managers.nextcloud]
endpoint = "https://<%= @domain %>/index.php/apps/sciencemesh/"
shared_secret = "<%= @shared_secret %>"
mock_http = false
[grpc.services.userprovider]
driver = "nextcloud"
[grpc.services.userprovider.drivers.nextcloud]
endpoint = "{{ vars.efss_sciencemesh_endpoint }}"
shared_secret = "{{ vars.efss_shared_secret }}"
endpoint = "https://<%= @domain %>/index.php/apps/sciencemesh/"
shared_secret = "<%= @shared_secret %>"
mock_http = false
[grpc.services.datatx]
txdriver = "rclone"
storagedriver = "json"
remove_transfer_on_cancel = true
[http]
enabled_services = ["ocmd"]
enabled_middlewares = ["providerauthorizer", "cors"]
address = "0.0.0.0:443"
certfile = "/etc/revad/tls/drive.test.sunet.se.crt"
keyfile = "/etc/revad/tls/drive.test.sunet.se.key"
[grpc.services.datatx.txdrivers.rclone]
# rclone endpoint
endpoint = "http://rclone.docker"
# basic auth is used
auth_user = "rcloneuser"
auth_pass = "eilohtho9oTahsuongeeTh7reedahPo1Ohwi3aek"
auth_header = "x-access-token"
job_status_check_interval = 2000
job_timeout = 120000
storagedriver = "json"
remove_transfer_job_on_cancel = true
[grpc.services.datatx.storagedrivers.json]
file = ""
[grpc.services.datatx.txdrivers.rclone.storagedrivers.json]
file = ""
### HTTP ENDPOINTS ###
[http.services.appprovider]
address = ":443"
insecure = true
[http.services.datagateway]
address = ":443"
[[http.services.dataprovider]]
[http.services.dataprovider]
driver = "nextcloud"
[http.services.prometheus]
[http.services.sysinfo]
[http.services.dataprovider.drivers.nextcloud]
endpoint = "{{ vars.efss_sciencemesh_endpoint }}"
shared_secret = "{{ vars.efss_shared_secret }}"
endpoint = "https://<%= @domain %>/index.php/apps/sciencemesh/"
shared_secret = "<%= @shared_secret %>"
mock_http = false
[[http.services.dataprovider]]
address = ":443"
driver = "ocmoutcoming"
[http.services.dataprovider.drivers.ocmoutcoming]
machine_secret = "{{ vars.machine_api_key }}"
[http.services.sciencemesh]
address = ":443"
provider_domain = "{{ vars.provider_domain }}"
mesh_directory_url = "https://sciencemesh.cesnet.cz/iop/meshdir"
ocm_mount_point = "/sciencemesh"
[http.services.sciencemesh.smtp_credentials]
disable_auth = false
sender_mail = "noreply@<%= @domain %>"
sender_login = "noreply@<%= @domain %>"
sender_password = "<%= @smtp_credentials %>"
smtp_server = "smtp.sunet.se"
smtp_port = 587
[http.services.ocmprovider]
address = ":443"
ocm_prefix = "ocm"
provider = "Reva for ownCloud/Nextcloud"
endpoint = "{{ vars.external_reva_endpoint }}"
enable_webapp = true
enable_datatx = true
[http.services.ocmd]
address = ":443"
prefix = "ocm"
[http.services.ocmd.config]
host = "{{ vars.provider_domain }}"
host = "<%= @reva_domain %>"
provider = "test-revanc1"
[http.middlewares.providerauthorizer]
#driver = "mentix"
driver = "open"
[http.middlewares.providerauthorizer.drivers.mentix]
url = "https://iop.sciencemesh.uni-muenster.de/iop/mentix/cs3"
verify_request_hostname = false
insecure = false
timeout = 10
refresh = 900
[http.services.ocs]
address = ":443"
prefix = "ocs"
[http.services.ocdav]
address = ":443"
[http.services.prometheus]
address = ":443"
[http.services.metrics]
address = ":443"
metrics_data_driver_type = "json"
metrics_data_location = "/etc/revad/metrics.json"
metrics_record_interval = 5000
[http.services.sysinfo]
prefix = "ocdav"
[http.middlewares.cors]
[http.middlewares.log]

View file

@ -1,23 +0,0 @@
services:
satosa:
environment:
- "METADATA_DIR=/etc/satosa/metadata"
- "WORKER_TIMEOUT=120"
dns:
- "89.32.32.32"
image: "<%= @image %><% if @tag %>:<%= @tag %><% end %>"
pull_policy: "always"
ports:
- "443:8000"
volumes:
- "/etc/satosa:/etc/satosa"
- "/etc/dehydrated:/etc/dehydrated"
alwayshttps:
environment:
- "ACME_URL=http://acme-c.sunet.se"
dns:
- "89.32.32.32"
image: "docker.sunet.se/always-https"
pull_policy: "always"
ports:
- "80:80"

View file

@ -27,21 +27,21 @@ Customer;Total GB;Users;Product"
for customer in $(${yq} -r '.fullnodes | .[]' ${commonyaml}); do
product=1 # Prisplan 1
csv="${csv}
$(rclone cat --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies "${billingbucket}/${customer}-usage/${customer}-latest.csv" |
$(rclone cat "${billingbucket}/${customer}-usage/${customer}-latest.csv" |
grep -E -v '^DATE|^Customer' |
sed 's/$/;1/')"
done
for customer in $(${yq} -r '.singlenodes | .[]' ${commonyaml}); do
product=2 # Prisplan 2
csv="${csv}
$(rclone cat --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies "${billingbucket}/${customer}-usage/${customer}-latest.csv" |
$(rclone cat "${billingbucket}/${customer}-usage/${customer}-latest.csv" |
grep -E -v '^DATE|^Customer' |
sed 's/$/;'${product}'/')"
done
echo "${csv}" >"${aggregatefile}"
rclone copy -c --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies "${aggregatefile}" "${aggregatedir}/"
rclone copy "${aggregatefile}" "${aggregatedir}/"
mv "${aggregatefile}" "latest.csv"
rclone move -c --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies "latest.csv" "${latestdir}/"
rclone move "latest.csv" "${latestdir}/"
cd "${olddir}" || (echo "Could not switch back to old dir" && exit 1)
rmdir "${tempdir}"

View file

@ -1,75 +0,0 @@
#!/bin/bash
VALID_ARGS=$(getopt -o cdghi:m:s: --long create,delete,get,help,id:,message:,subject: -- "$@")
# shellcheck disable=SC2181
if [[ ${?} -ne 0 ]]; then
exit 1;
fi
usage () {
echo "${0}: -c|--create -m|--message <'Your announcement goes here'> -s|--subject <Your subject goes here>"
echo "${0}: -d|--delete -i|--id <announcement_id>"
echo "${0}: -g|--get"
exit 1
}
eval set -- "${VALID_ARGS}"
# shellcheck disable=SC2078
while [ : ]; do
case "$1" in
-c | --create)
method='POST'
shift
;;
-d | --delete)
method='DELETE'
shift
;;
-g | --get)
method='GET'
shift
;;
-h | --help)
usage
;;
-i | --id)
argument="${2}"
shift 2
;;
-m | --message)
message="${2}"
shift 2
;;
-s | --subject)
subject="${2}"
shift 2
;;
*)
break
;;
esac
done
if [[ ${method} == 'DELETE' ]] && [[ -z ${argument} ]]; then
usage
fi
if [[ ${method} == 'POST' ]]; then
if [[ -z ${message} ]] || [[ -z ${subject} ]]; then
usage
fi
argument='{"subject":"'${subject}'","message":"'${message}'", "plainMessage":"'${message}'", "groups": [], "userId": "admin", "activities": false, "notifications": true, "emails": false, "comments": false }'
fi
curl_cmd(){
local method="${1}"
if [[ ${method} == 'POST' ]] && [[ -n ${2} ]]; then
local payload=(-d "${2}" -H "Content-Type: application/json")
elif [[ ${method} == 'DELETE' ]] && [[ -n ${2} ]]; then
local id="/${2}"
fi
local admin_app_password="<%= @admin_app_password %>"
domain="$(hostname -d)"
curl -s -X "${method}" -u "admin:${admin_app_password}" "${payload[@]}" -H 'OCS-APIRequest: true' "https://${domain}/ocs/v2.php/apps/announcementcenter/api/v1/announcements${id}"
}
curl_cmd "${method}" "${argument}"
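Example invocations matching the usage text above (the announcement id is assumed):

/root/tasks/announce.sh --create --subject 'Scheduled maintenance' --message 'Drive will be read-only at 22:00'
/root/tasks/announce.sh --get
/root/tasks/announce.sh --delete --id 42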

View file

@ -6,78 +6,63 @@ sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk
number_of_full_to_keep='<%= @full_backup_retention %>'
fork_limit=30 # in GB; if bigger than this number, we fork the backup to its own process
customer="<%= @customer %>"
environment="<%= @environment %>"
declare -A extra_backup_jobs
#<% if @extra_backup_jobs.any? %>
#<% @extra_backup_jobs.each do |client, job| %>
extra_backup_jobs["<%= client %>"]="<%= job %>"
#<% end %>
#<% end %>
#<% if @location.start_with?('common') %>
declare -a sixmonths=('mau')
if [[ " ${sixmonths[*]} " =~ " ${customer} " ]]; then
number_of_full_to_keep=6
number_of_full_to_keep=6
fi
declare -a projects
#<% @singlenodes.each do |singlenode| %>
projects+=("<%= @full_project_mapping[singlenode][@environment]['primary_project'] %> <%= @full_project_mapping[singlenode][@environment]['mirror_project'] %> <%= singlenode %>")
projects+=("<%= @full_project_mapping[singlenode][@environment]['primary_project'] %> <%= @full_project_mapping[singlenode][@environment]['mirror_project'] %>")
#<% @full_project_mapping[singlenode][@environment]['assigned'].each do |project| %>
projects+=("<%= project['project'] %> <%= project['mirror_project'] %> <%= singlenode %>")
projects+=("<%= project['project'] %> <%= project['mirror_project'] %>")
#<% end %>
#<% end %>
#<% else %>
declare -a projects=("<%= @primary_project %> <%= @mirror_project %> ${customer}")
declare -a projects=("<%= @primary_project %> <%= @mirror_project %>")
#<% @assigned_projects.each do |project| %>
projects+=("<%= project['project'] %> <%= project['mirror_project'] %> ${customer}")
projects+=("<%= project['project'] %> <%= project['mirror_project'] %>")
#<% end %>
#<% end %>
if [[ ${customer} == 'common' ]]; then
projects+=("<%= @location %> <%= @location %>-mirror ${customer}")
projects+=("<%= @location %> <%= @location %>-mirror")
fi
function do_backup {
local project="${1}"
local mirror="${2}"
local bucket="${3}"
local customer="${4}"
local mirrorbucket="${bucket}-mirror"
local mountpoint="/opt/backupmounts/${bucket}"
ps aux | grep duplicity | grep "[^a-zA-Z]${bucket}" > /dev/null
local oktorun=$? # 1 == this bucket has no other backup process in progress
if [[ ${oktorun} -ne 0 ]]; then
mkdir -p ${mountpoint}
rclone mount ${project}:${bucket} ${mountpoint}/ --daemon --allow-other --dir-cache-time 24h
rclone mkdir ${mirror}:${mirrorbucket}
duplicity --full-if-older-than 1M --asynchronous-upload --tempdir /mnt --archive-dir /mnt --no-encryption ${mountpoint} rclone://${mirror}:/${mirrorbucket}
if [[ "${extra_backup_jobs[${customer}]:+found}" == "found" ]] && [[ -f "${extra_backup_jobs[${customer}]}" ]]; then
${extra_backup_jobs[${customer}]} ${project} ${mirror} ${bucket} ${customer} ${environment}
fi
umount ${mountpoint}
rmdir ${mountpoint}
# Clean up
duplicity remove-all-but-n-full ${number_of_full_to_keep} --tempdir /mnt --archive-dir /mnt --force rclone://${mirror}:/${mirrorbucket}
fi
local project="${1}"
local mirror="${2}"
local bucket="${3}"
local mirrorbucket="${bucket}-mirror"
local mountpoint="/opt/backupmounts/${bucket}"
mkdir -p ${mountpoint}
rclone mount ${project}:${bucket} ${mountpoint}/ --daemon --allow-other --dir-cache-time 24h
rclone mkdir ${mirror}:${mirrorbucket}
duplicity --full-if-older-than 1M --asynchronous-upload --tempdir /mnt --archive-dir /mnt \
--no-encryption ${mountpoint} rclone://${mirror}:/${mirrorbucket}
umount ${mountpoint}
rmdir ${mountpoint}
# Clean up
duplicity remove-all-but-n-full ${number_of_full_to_keep} --tempdir /mnt --archive-dir /mnt \
--force rclone://${mirror}:/${mirrorbucket}
}
for entry in "${projects[@]}"; do
project=$(echo ${entry} | awk '{print $1}')
mirror=$(echo ${entry} | awk '{print $2}')
customer=$(echo ${entry} | awk '{print $3}')
for bucket in $(rclone lsd ${project}:/ | awk '{print $5}'); do
maybesize=$(timeout 30s rclone size --json ${project}:${bucket})
project=$(echo ${entry} | awk '{print $1}')
mirror=$(echo ${entry} | awk '{print $2}')
for bucket in $(rclone lsd ${project}:/ | awk '{print $5}'); do
maybesize=$(timeout 30s rclone size --json ${project}:${bucket})
if [[ ${?} -eq 124 ]]; then
size=$((${fork_limit} * 1000000001))
else
size=$(echo ${maybesize} | jq -r '.bytes' )
fi
# If bucket is above ${fork_limit} GB we fork
if [[ ${size} -gt $((${fork_limit} * 1000000000)) ]]; then
do_backup ${project} ${mirror} ${bucket} ${customer} &
else
do_backup ${project} ${mirror} ${bucket} ${customer}
fi
done
# If bucket is above ${fork_limit} GB we fork
if [[ ${size} -gt $((${fork_limit} * 1000000000)) ]]; then
do_backup ${project} ${mirror} ${bucket} &
else
do_backup ${project} ${mirror} ${bucket}
fi
done
done
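A worked example of the fork threshold (sizes assumed): with fork_limit=30 the comparison is against 30 * 1000000000 bytes, so a 45 GB bucket is backed up in a background job while a 20 GB bucket runs inline:

fork_limit=30
for size in 45000000000 20000000000; do
  if [[ ${size} -gt $((fork_limit * 1000000000)) ]]; then echo fork; else echo inline; fi
done
# -> fork
# -> inline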

View file

@ -0,0 +1,86 @@
#!/bin/bash
# Backup all buckets
# We sleep a deterministic amount of time, which will be between 0 and 128 minutes and always the same within
# a specific host, but will differ between hosts
sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk '{print $1}') / 2))m
number_of_full_to_keep="<%= @full_backup_retention %>"
fork_limit=30 # in GB; if bigger than this number, we fork the backup to its own process
split_limit=1000 # in GB; if bigger than this number, we fork the backup of each directory to its own process
declare -a projects=("<%= @primary_project %> <%= @mirror_project %>")
#<% @assigned_projects.each do |project| %>
projects+=("<%= project['project'] %> <%= project['mirror_project'] %>")
#<% end %>
function do_huge_backup {
local project="${1}"
local mirror="${2}"
local bucket="${3}"
declare -a directories
declare -a empty
for dir in $(rclone lsd ${project}:${bucket} | awk '{print $NF}'); do
directories+=("${dir}")
mountpoint="/opt/backupmounts/${bucket}-${dir}"
do_backup ${project} ${mirror} ${bucket} ${mountpoint} ${dir} ${empty} &
done
mountpoint="/opt/backupmounts/${bucket}"
do_backup ${project} ${mirror} ${bucket} ${mountpoint} none ${directories[@]} &
}
function do_backup {
local project="${1}"
shift
local mirror="${1}"
shift
local bucket="${1}"
shift
local mountpoint="${1}"
shift
local dire="${1}"
shift
declare -a exclude
exclude=( "${@}" )
suffix=""
opts=""
if [[ "${dire}" != "none" ]]; then
suffix="/${dire}"
fi
if ((${#exclude[@]})); then
for dir in "${exclude[@]}"; do
opts="${opts} --exclude /${dir}"
done
fi
local mirrorbucket="${bucket}-mirror"
mkdir -p ${mountpoint}
rclone mount ${project}:${bucket}${suffix} ${mountpoint}/ --daemon --allow-other
rclone mkdir ${mirror}:${mirrorbucket}${suffix}
duplicity --full-if-older-than 1M --asynchronous-upload --tempdir /mnt --archive-dir /mnt ${opts} \
--no-encryption ${mountpoint} rclone://${mirror}:/${mirrorbucket}${suffix}
umount ${mountpoint}
rmdir ${mountpoint}
# Clean up
duplicity remove-all-but-n-full ${number_of_full_to_keep} --tempdir /mnt --archive-dir /mnt \
--force rclone://${mirror}:/${mirrorbucket}${suffix}
}
for entry in "${projects[@]}"; do
project=$(echo ${entry} | awk '{print $1}')
mirror=$(echo ${entry} | awk '{print $2}')
declare -a empty
for bucket in $(rclone lsd ${project}:/ | awk '{print $5}'); do
size=$(rclone size --json ${project}:${bucket} | jq -r '.bytes')
mirrorbucket="${bucket}-mirror"
mountpoint="/opt/backupmounts/${bucket}"
# If bucket is above ${split_limit} we fork and do backup per directory
if [[ ${size} -gt $((${split_limit} * 1000000000)) ]]; then
do_huge_backup ${project} ${mirror} ${bucket} &
# If bucket is above ${fork_limit} we fork and do backup for bucket
elif [[ ${size} -gt $((${fork_limit} * 1000000000)) ]]; then
do_backup ${project} ${mirror} ${bucket} ${mountpoint} none ${empty} &
else
# If bucket is below ${fork_limit} we do not fork and do backup for bucket
do_backup ${project} ${mirror} ${bucket} ${mountpoint} none ${empty}
fi
done
done
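The size checks above form a three-way decision; a compact sketch with assumed sizes and the limits from this script (split_limit=1000, fork_limit=30):

for size in 1200000000000 45000000000 2000000000; do
  if [[ ${size} -gt $((1000 * 1000000000)) ]]; then echo 'huge: fork per directory'
  elif [[ ${size} -gt $((30 * 1000000000)) ]]; then echo 'big: fork whole bucket'
  else echo 'small: run inline'; fi
done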

View file

@ -1,8 +0,0 @@
#!/bin/bash
project="${1}"
mirror="${2}"
bucket="${3}"
customer="${4}"
environment="${5}"
rsync -e "ssh -i ${HOME}/.ssh/id_script" -avz "/opt/backupmounts/${bucket}" "sd-${environment}@sd-${environment}-backup.hb.se:~/sd-${environment}/${bucket}"

View file

@ -6,14 +6,11 @@ sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk
number_of_full_to_keep="<%= @full_backup_retention %>"
backup="${1}"
if [[ -z ${backup} ]]; then
backup="backup1.$(hostname -d)"
fi
if ! [[ ${backup} =~ backup1.*sunet.se$ ]]; then
echo "Usage: ${0} <fqdn of backup server>"
echo "Example: ${0} backup1.sunet.drive.sunet.se"
fi
backup_dir="/opt/backups/backup-files"
backup_dir="/opt/backups"
bucket="db-backups"
mirror="<%= @customer %>-<%= @environment %>-mirror"
if [[ ${mirror} =~ common-(test|prod)-mirror ]]; then
@ -21,15 +18,16 @@ if [[ ${mirror} =~ common-(test|prod)-mirror ]]; then
bucket="${bucket}-${suffix}"
backup_dir="${backup_dir}-${suffix}"
fi
mkdir -p ${backup_dir}
echo "Backing up database for ${backup}"
ssh ${backup} "sudo /home/script/bin/backup_db.sh"
echo "Cleaning up old backups for ${backup}"
ssh ${backup} "sudo /home/script/bin/purge_backups.sh /opt/mariadb_backup/backups/"
echo "Copying backups here"
mkdir -p ${backup_dir}
scp script@${backup}:/opt/mariadb_backup/backups/$(date +%Y/%m/%d)/*.gz ${backup_dir}
echo "Copying backups to remote bucket"
rclone mkdir ${mirror}:${bucket}
duplicity --allow-source-mismatch --full-if-older-than 1M --tempdir /mnt --archive-dir /mnt --no-encryption ${backup_dir} rclone://${mirror}:/${bucket}
duplicity --full-if-older-than 1M --tempdir /mnt --archive-dir /mnt --no-encryption ${backup_dir} rclone://${mirror}:/${bucket}
duplicity remove-all-but-n-full ${number_of_full_to_keep} --tempdir /mnt --archive-dir /mnt --force rclone://${mirror}:/${bucket}
rm -rf "${backup_dir}"
echo "cleaning up"
rm -r ${backup_dir}

View file

@ -1,22 +0,0 @@
#!/bin/bash
# Backup all databases
# We sleep a deterministic amount of time, which will be between 0 and 128 minutes and always the same within
# a specific host, but will differ between hosts
sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk '{print $1}') / 2))m
number_of_full_to_keep=7
backup="multinode-db1.$(hostname -d)"
remote_backup_dir="/etc/mariadb/backups"
backup_dir="/opt/backups/multinode"
bucket="db-backups-multinode"
mirror="common-<%= @environment %>-mirror"
echo "Backing up all databases for for multinode customer"
ssh "${backup}" "sudo /home/script/bin/backup_multinode_db.sh"
echo "Copying backups here"
mkdir -p ${backup_dir}
scp "script@${backup}:${remote_backup_dir}/mariadb-dump*.sql.gz" "${backup_dir}"
echo "Copying backups to remote bucket"
rclone mkdir "${mirror}:${bucket}"
duplicity --allow-source-mismatch --full-if-older-than 1M --tempdir /mnt --archive-dir /mnt --no-encryption "${backup_dir}" "rclone://${mirror}:/${bucket}"
duplicity remove-all-but-n-full "${number_of_full_to_keep}" --tempdir /mnt --archive-dir /mnt --force "rclone://${mirror}:/${bucket}"
echo "cleaning up"
rm -r "${backup_dir}"

View file

@ -25,7 +25,7 @@ if [[ " ${sixmonths[*]} " =~ " ${customer} " ]]; then
else
number_of_full_to_keep=1
fi
container="mariadb-${customer}_db_1"
container="mariadb${customer}_db_1"
backup_dir="/opt/backups"
bucket="db-backups"
mirror="${customer}-<%= @environment %>-mirror"

View file

@ -2,7 +2,9 @@
project="${1}"
bucket="${2}"
declare -a sixmonths=('multinode')
number_of_full_to_keep='<%= @full_backup_retention %>'
max_num_inc=$((32 * number_of_full_to_keep))
max_num_full=$((2 * number_of_full_to_keep))
output_status="OK"
exit_status=0
problems=""
@ -10,19 +12,6 @@ num_problems=0
data_dir='/opt/backups/data'
for project in $(ls ${data_dir}); do
for bucket in $(ls ${data_dir}/${project}/ | sed 's/\.dat$//'); do
issixmonths="false"
for customer in "${sixmonths[@]}"; do
if [[ "${bucket}" =~ ${customer} ]]; then
issixmonths="true"
fi
done
number_of_full_to_keep='<%= @full_backup_retention %>'
if [[ "${issixmonths}" == "true" ]]; then
number_of_full_to_keep=6
fi
max_num_inc=$((50 * number_of_full_to_keep))
max_num_full=$((2 * number_of_full_to_keep))
tabular_data=$(cat "${data_dir}/${project}/${bucket}.dat")
# We warn if there are too many old backups
num_full=$(echo "${tabular_data}" | grep -c full)
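A worked example of the limits above (retention value assumed): for a sixmonths customer, number_of_full_to_keep is 6, so up to max_num_inc=$((50 * 6))=300 incremental and max_num_full=$((2 * 6))=12 full backups are tolerated before the check flags a problem.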

View file

@ -3,7 +3,7 @@
project="${1}"
bucket="${2}"
data_dir='/opt/backups/data'
for project in $(rclone listremotes | grep -v 'mirror' | grep -v 'statistics'); do
for project in $(rclone listremotes | grep -v 'mirror'); do
for bucket in $(rclone lsd "${project}" | awk '{print $NF}' | grep -E '\-mirror|db-backups'); do
mkdir -p "${data_dir}/${project}"
duplicity collection-status --log-file /dev/stdout --no-encryption "rclone://${project}${bucket}" | grep -E '^ inc|^ full' > "${data_dir}/${project}/${bucket}.dat"

View file

@ -1,60 +0,0 @@
#!/bin/bash
customer="<%= @customer %>"
environment="<%= @environment %>"
eppn_suffix="<%= @eppn_suffix %>"
include_userbuckets="<%= @include_userbuckets %>"
container="nextcloud_app_1"
yq="/usr/local/bin/yq"
if ! [[ -x ${yq} ]]; then
pip install yq
fi
declare -a directories
if [[ -n ${1} ]]; then
directories=("${@}")
else
directories+=("Arbetsmaterial")
directories+=("Bevarande")
directories+=("Gallringsbart")
fi
olddir="${PWD}"
tempdir=$(mktemp -d)
dirty=0
primary=''
declare -a users=( 'admin' )
cd "${tempdir}" || echo "Could not cd to tempdir"
declare -a projects=( $("${yq}" -r '.project_mapping.'"${customer}"'.'"${environment}"'.assigned | "\(.[].project)"' /etc/hiera/data/common.yaml) )
if [[ "${include_userbuckets}" == "true" ]]; then
primary=$("${yq}" -r '.project_mapping.'"${customer}"'.'"${environment}"'.primary_project' /etc/hiera/data/common.yaml)
projects+=( "${primary}" )
fi
for project in "${projects[@]}"; do
for bucket in $(rclone lsd "${project}:" | awk '{print $NF}' | grep -E -v '^primary'); do
count=$(rclone size --json "${project}:${bucket}" | jq -r .count)
if [[ ${count} -gt 0 ]]; then
echo "Skipping ${project}:${bucket} because it has stuff in it already"
continue
fi
for directory in "${directories[@]}"; do
dirty=1
if [[ -n ${primary} ]] && [[ ${project} == "${primary}" ]] ; then
user=$(echo "${bucket}" | awk -F '-' '{print $1}')
users+=( "${user}@${eppn_suffix}" )
fi
echo "Creating ${project}:${bucket}/${directory} because it looks nice and empty"
temp="README.md"
echo "**${directory}**" >"${temp}"
echo "Var god lämna kvar denna fil/Please leave this file" >>"${temp}"
rclone --no-traverse move "${temp}" "${project}:${bucket}/${directory}"
done
done
done
cd "${olddir}" || echo "could not cd to home dir"
rmdir "${tempdir}"
if [[ ${dirty} -gt 0 ]]; then
for user in "${users[@]}"; do
ssh -t "node3.$(hostname -d)" -l script -i .ssh/id_script "sudo /usr/local/bin/occ ${container} files:scan ${user}"
done
fi

View file

@ -0,0 +1,44 @@
#!/bin/bash
customer="<%= @customer %>"
environment="<%= @environment %>"
container="nextcloud_app_1"
yq="/usr/local/bin/yq"
if ! [[ -x ${yq} ]]; then
pip install yq
fi
declare -a directories
if [[ -n ${1} ]]; then
directories=("${@}")
else
directories+=("Arbetsmaterial")
directories+=("Bevarande")
directories+=("Gallringsbart")
fi
olddir="${PWD}"
tempdir=$(mktemp -d)
dirty=0
cd "${tempdir}" || echo "Could not cd to tempdir"
for project in $(${yq} -r '.project_mapping.'"${customer}"'.'"${environment}"'.assigned | "\(.[].project)"' /etc/hiera/data/common.yaml); do
for bucket in $(rclone lsd "${project}:" | awk '{print $NF}'); do
count=$(rclone size --json "${project}:${bucket}" | jq -r .count)
if [[ ${count} -gt 0 ]]; then
echo "Skipping ${project}:${bucket} because it has stuff in it already"
continue
fi
for directory in "${directories[@]}"; do
dirty=1
echo "Creating ${project}:${bucket}/${directory} because it looks nice and empty"
temp="README.md"
echo "**${directory}**" >"${temp}"
echo "Var god lämna kvar denna fil/Please leave this file" >>"${temp}"
rclone --no-traverse move "${temp}" "${project}:${bucket}/${directory}"
done
done
done
cd "${olddir}" || echo "could not cd to home dir"
rmdir "${tempdir}"
if [[ ${dirty} -gt 0 ]]; then
ssh -t "node3.$(hostname -d)" -l script -i .ssh/id_script "sudo /usr/local/bin/occ ${container} files:scan admin"
fi

View file

@ -5,7 +5,7 @@ shift
include_userbuckets="${1}"
shift
environment="<%= @environment %>"
container="nextcloud-${customer}-app-1"
container="nextcloud${customer}_app_1"
yq="/usr/local/bin/yq"
if ! [[ -x ${yq} ]]; then

View file

@ -1,35 +0,0 @@
#!/bin/bash
VALID_ARGS=$(getopt -o s: --long subject: -- "$@")
# shellcheck disable=SC2181
if [[ ${?} -ne 0 ]]; then
exit 1;
fi
usage () {
echo "${0}: -s|--subject <subject>"
exit 1
}
eval set -- "${VALID_ARGS}"
# shellcheck disable=SC2078
while [ : ]; do
case "$1" in
-s | --subject)
subject="${2}"
shift 2
;;
*)
break
;;
esac
done
if [[ -z ${subject} ]]; then
usage
fi
id=$(/root/tasks/announce.sh --get | xmlstarlet sel -t -i '//subject="'"${subject}"'"' -m "/ocs/data/element/id" -v .)
if [[ -n ${id} ]]; then
/root/tasks/announce.sh --delete --id "${id}"
fi
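Hypothetical invocation (the script is assumed to live next to announce.sh under /root/tasks):

/root/tasks/delete_announcement.sh --subject 'Scheduled maintenance'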

View file

@ -1,22 +1,22 @@
#!/bin/bash
customer="${1}"
multinode="${2}"
customer="${1}"
multinode="${2}"
environment="<%= @environment %>"
location="${customer}-${environment}"
userjson=$(ssh -o StrictHostKeyChecking=no "script@${multinode}" "sudo /home/script/bin/list_users.sh nextcloud-${customer}-app-1")
userjson=$(ssh "script@${multinode}" "sudo /home/script/bin/list_users.sh nextcloud${customer}_app_1")
project="statistics"
bucket="drive-server-coms"
base_dir="${project}:${bucket}"
stat_dir="/opt/statistics"
customer_dir="${stat_dir}/${location}"
mkdir -p "${customer_dir}"
rclone mkdir --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies "${base_dir}/${location}"
rclone mkdir "${base_dir}/${location}"
echo "${userjson}" | jq . >"${customer_dir}/users.json"
status=${?}
if [[ ${status} -eq 0 ]]; then
# something is wrong if we can't copy the file in 30 seconds, so we should note that
if ! timeout 30s rclone copy -c --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies "${customer_dir}/users.json" "${base_dir}/${location}/"; then
if ! timeout 30s rclone copy --use-cookies "${customer_dir}/users.json" "${base_dir}/${location}/"; then
status=1
fi
fi

View file

@ -17,7 +17,7 @@ if [[ "${ENVIRONMENT}" == "prod" ]]; then
fi
if [[ "${CUSTOMER}" == "common" ]]; then
customer=""
types="multinode"
types="multinode gss"
fi
domain="${customer}drive.${env}sunet.se"

View file

@ -38,16 +38,12 @@ rclone="rclone --config /root/.rclone.conf"
# These are dynamic
buckets="$(${rclone} lsd "${rcp}:" | awk '{print $NF}')"
users=$(${rclone} cat --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies "statistics:drive-server-coms/${rcp}/users.json" | jq '. | with_entries( select(.key | match("@") ) )')
users=$(${rclone} cat "statistics:drive-server-coms/${rcp}/users.json" | jq '. | with_entries( select(.key | match("@") ) )')
for eppn in $(echo "${users}" | jq -r keys[]); do
username=${eppn%@*}
# Remove underscore from username
user=${username//_/-}
# convert user to lower case for bucket naming rules
user_lower=${user,,}
user=${eppn%@*}
echo "$(date) - Check bucket status for ${eppn}"
bucketname="${user_lower}-${site_name//./-}"
bucketname="${user}-${site_name//./-}"
if ! echo "${buckets}" | grep "${bucketname}" &> /dev/null; then
echo "$(date) - ${eppn} has no mounts configured, adding bucket and mounts..."
${rclone} mkdir "${rcp}:${bucketname}"
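A sketch of the bucket-name normalisation above (eppn and site values assumed):

eppn='Anna_Svensson@example.se'
site_name='drive.test.sunet.se'
username=${eppn%@*}; user=${username//_/-}; user_lower=${user,,}
echo "${user_lower}-${site_name//./-}"
# -> anna-svensson-drive-test-sunet-se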

View file

@ -27,9 +27,7 @@ fi
site_name="<%= @site_name %>"
rclone="rclone --config /root/.rclone.conf"
username=${eppn%@*}
# Remove underscore from username
user=${username//_/-}
user=${eppn%@*}
bucketname="${user}-${site_name//./-}"

Some files were not shown because too many files have changed in this diff.