Compare commits
No commits in common. "main" and "mandersson-nmhost" have entirely different histories.
main...mandersson-nmhost
66 changed files with 490 additions and 901 deletions
@@ -1,9 +1,5 @@
#!/bin/bash

if ! [[ $(hostname) =~ monitor ]]; then
exit 0
fi

repo="/var/cache/cosmos/repo"
common="${repo}/global/overlay/etc/hiera/data/common.yaml"

@@ -25,7 +21,7 @@ for environment in test prod; do
version=$(yq -r ".${key}" "${group}")
print_fact "${customer}" "${environment}" "${version}"
done
for customer in $(yq -r '.fullnodes[]' "${common}"); do
for customer in $(yq -r '.fullnodes[]' "${common}") gss; do
group="${repo}/${customer}-common/overlay/etc/hiera/data/group.yaml"
version=$(yq -r ".${key}" "${group}")
print_fact "${customer}" "${environment}" "${version}"

@@ -1,8 +1,14 @@
# Lets determin who the customer is by looking at the hostname
function sunetdrive::get_customer() >> String {
$hostnameparts = split($facts['networking']['fqdn'],'\.')
$hostnameparts = split($facts['fqdn'],'\.')
if $hostnameparts[1] == 'drive' {
if $hostnameparts[0] =~ /^gss/ {
return 'gss'
} elsif $hostnameparts[0] =~ /^lookup/ {
return 'lookup'
} else {
return 'common'
}
} elsif $hostnameparts[0] =~ /idp-proxy/ {
return 'common'
}

@@ -1,6 +1,6 @@
# Lets determin where we are by looking at the hostname
function sunetdrive::get_environment() >> String {
$hostname = $facts['networking']['fqdn']
$hostname = $facts['fqdn']
if $hostname =~ /^.*\.drive\.sunet\.se$/ {
if $hostname =~ /^.*\.pilot\.drive\.sunet\.se$/ {
return 'pilot'

@@ -1,4 +1,4 @@
# Lets determin where we are by looking at the hostname
function sunetdrive::get_node_number() >> Integer {
Integer(regsubst($facts['networking']['fqdn'], /^[a-zA-Z\-]+(\d).*$/, '\\1'))
Integer(regsubst($::fqdn, /^[a-zA-Z\-]+(\d).*$/, '\\1'))
}

@@ -5,7 +5,6 @@ define sunetdrive::app_type (
$override_config = undef,
$override_compose = undef
) {
include sunet::packages::netcat_openbsd
# Config from group.yaml and customer specific conf
$environment = sunetdrive::get_environment()
$customer = sunetdrive::get_customer()

@@ -15,6 +14,7 @@ define sunetdrive::app_type (
# The config used
$config = $override_config
# Other settings
$admin_password = $config[ 'admin_password' ]
$dbhost = $config[ 'dbhost' ]
$dbname = $config[ 'dbname' ]
$dbuser = $config[ 'dbuser' ]

@@ -30,10 +30,10 @@ define sunetdrive::app_type (
$config = hiera_hash($environment)
$skeletondirectory = $config['skeletondirectory']
# Other settings
$admin_password = safe_hiera('admin_password')
$dbhost = 'proxysql_proxysql_1'
$dbname = 'nextcloud'
$dbuser = 'nextcloud'
$full_backup_retention = hiera('full_backup_retention')
$instanceid = safe_hiera('instanceid')
$mysql_user_password = safe_hiera('mysql_user_password')
$passwordsalt = safe_hiera('passwordsalt')

@@ -43,8 +43,8 @@ define sunetdrive::app_type (
$s3_secret = safe_hiera('s3_secret')
$secret = safe_hiera('secret')
}
$twofactor_enforced_groups = $config['twofactor_enforced_groups']
$twofactor_enforced_excluded_groups = $config['twofactor_enforced_excluded_groups']
$twofactor_enforced_groups = hiera_array('twofactor_enforced_groups')
$twofactor_enforced_excluded_groups = hiera_array('twofactor_enforced_excluded_groups')
$nextcloud_version = hiera("nextcloud_version_${environment}")
$nextcloud_version_string = split($nextcloud_version, '[-]')[0]
# Common settings for multinode and full nodes

@@ -61,16 +61,20 @@ define sunetdrive::app_type (
$php_memory_limit_mb = 512
}
if $::facts['dockerhost2'] == 'yes' {
$hostnet = true
$hostnet = 'yes'
}

# These are encrypted values from local.eyaml
$gss_jwt_key = safe_hiera('gss_jwt_key')
$smtppassword = safe_hiera('smtp_password')

#These are global values from common.yaml
$gs_enabled = hiera('gs_enabled')
$gs_federation = hiera('gs_federation')
$gss_master_admin = hiera_array('gss_master_admin')
$gss_master_url = hiera("gss_master_url_${environment}")
$lookup_server = hiera("lookup_server_${environment}")
$mail_domain = hiera("mail_domain_${environment}")
$mail_smtphost = hiera("mail_smtphost_${environment}")
$mail_from_address = hiera("mail_from_address_${environment}")

@@ -78,6 +82,9 @@ define sunetdrive::app_type (
$smtpuser = hiera("smtp_user_${environment}")
$tug_office = hiera_array('tug_office')

# This is a global value from common.yaml but overridden in the gss-servers local.yaml
$gss_mode = hiera('gss_mode')

# These are global values from common.yaml but can be overridden in group.yaml
$drive_email_template_text_left = $config['drive_email_template_text_left']
$drive_email_template_plain_text_left = $config['drive_email_template_plain_text_left']

@@ -85,39 +92,9 @@ define sunetdrive::app_type (
$lb_servers = hiera_hash($environment)['lb_servers']
$document_servers = hiera_hash($environment)['document_servers']

# Calculate some values
$expiration_months = max(12, $full_backup_retention)
$expiration_days_min = $expiration_months * 31
$expiration_days_max = $expiration_months * 31 + 93

unless $is_multinode{
user { 'www-data': ensure => present, system => true }

file { '/usr/local/bin/get_containers':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/get_containers'),
mode => '0744',
}
if ($nodenumber == 3) {
file { '/usr/local/bin/scan_external_mounts':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/scan_external_mounts.sh'),
mode => '0744',
}
sunet::scriptherder::cronjob { 'scriptherder_scan_external_mounts':
cmd => '/usr/local/bin/scan_external_mounts',
hour => '1',
minute => '20',
ok_criteria => ['exit_status=0','max_age=2d'],
warn_criteria => ['exit_status=1','max_age=3d'],
}
}
file { '/opt/nextcloud/cron.sh':
ensure => file,
owner => 'root',

@@ -131,22 +108,16 @@ define sunetdrive::app_type (
minute => '*/5',
}
file { '/opt/nextcloud/user-sync.sh':
ensure => absent,
}
file { '/usr/local/bin/nocc':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/nocc.erb'),
mode => '0740',
}
file { '/etc/sudoers.d/99-nocc':
ensure => file,
content => "script ALL=(root) NOPASSWD: /usr/local/bin/nocc\n",
mode => '0440',
owner => 'root',
group => 'root',
mode => '0700',
content => template('sunetdrive/application/user-sync.erb.sh'),
}
-> cron { 'gss_user_sync':
command => '/opt/nextcloud/user-sync.sh',
user => 'root',
minute => '*/5',
}
file { '/usr/local/bin/occ':
ensure => present,

@@ -171,21 +142,12 @@ define sunetdrive::app_type (
content => template('sunetdrive/application/upgrade23-25.erb.sh'),
mode => '0744',
}
file { '/usr/local/bin/remount_user_bucket_as_project.sh':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/remount_user_bucket_as_project.sh'),
mode => '0744',
}
file { '/opt/rotate/conf.d/nextcloud.conf':
ensure => file,
force => true,
owner => 'root',
group => 'root',
content => "#This file is managed by puppet
#filename:retention days:maxsize mb\n/opt/nextcloud/nextcloud.log:180:256\n/opt/nextcloud/audit.log:180:256\n",
content => "#This file is managed by puppet\n#filename:retention days:maxsize mb\n/opt/nextcloud/nextcloud.log:180:256\n",
mode => '0644',
}
file { '/opt/rotate/conf.d/redis.conf':

@@ -234,14 +196,7 @@ define sunetdrive::app_type (
force => true,
owner => 'www-data',
group => 'root',
mode => '0640',
}
file { '/opt/nextcloud/audit.log':
ensure => file,
force => true,
owner => 'www-data',
group => 'root',
mode => '0640',
mode => '0644',
}
file { '/opt/nextcloud/rclone.conf':
ensure => file,

@@ -338,7 +293,23 @@ define sunetdrive::app_type (
mode => '0744',
}
}
if $location =~ /^kau/ {
if $location =~ /^gss-test/ {
file { '/opt/nextcloud/mappingfile.json':
ensure => present,
owner => 'www-data',
group => 'root',
content => template('sunetdrive/application/mappingfile-test.json.erb'),
mode => '0644',
}
} elsif $location =~ /^gss/ {
file { '/opt/nextcloud/mappingfile.json':
ensure => present,
owner => 'www-data',
group => 'root',
content => template('sunetdrive/application/mappingfile-prod.json.erb'),
mode => '0644',
}
} elsif $location =~ /^kau/ {
file { '/mnt':
ensure => directory,
owner => 'www-data',

@@ -38,8 +38,8 @@ define sunetdrive::cloudimage(
resolver => ['130.242.80.14', '130.242.80.99'],
search => $search,
#
repo => $facts['cosmos_repo_origin_url'],
tagpattern => $facts['cosmos_tag_pattern'],
repo => $::cosmos_repo_origin_url,
tagpattern => $::cosmos_tag_pattern,
#
cpus => $cpus,
memory => $memory,

@@ -1,53 +0,0 @@
# Common class
class sunetdrive::common {
include sunet::tools
include sunet::motd
include apt
include apparmor
include sunet::packages::jq
if $::facts['sunet_nftables_enabled'] != 'yes' {
warning('Enabling UFW')
include ufw
} else {
if $facts['networking']['hostname'] =~ /^kube[wc]/ {
warning('Setting nftables to installed but disabled')
ensure_resource ('class','sunet::nftables::init', { enabled => false })
} else {
warning('Enabling nftables')
ensure_resource ('class','sunet::nftables::init', { })
}
}
package {'sysstat': ensure => 'latest'}
package {'needrestart': ensure => installed}
service {'sysstat': provider => 'systemd'}
file_line { 'enable_sa':
ensure => 'present',
line => 'ENABLED="true"',
path => '/etc/default/sysstat',
match => 'ENABLED="false"',
require => Package['sysstat'],
}
file_line { 'sa_cron_comment':
ensure => 'present',
line => '# Activity reports every 2 minutes everyday',
path => '/etc/cron.d/sysstat',
match => '^#\ Activity\ reports\ every\ 10\ minutes\ everyday',
require => Package['sysstat'],
notify => Service['sysstat'],
}
file_line { 'sa_cron':
ensure => 'present',
line => '*/2 * * * * root command -v debian-sa1 > /dev/null && debian-sa1 1 1',
path => '/etc/cron.d/sysstat',
match => '^5-55/10',
require => Package['sysstat'],
notify => Service['sysstat'],
}
if $::facts['os']['distro']['id'] == 'Debian' {
exec { 'sysstat_systemd_timer':
command => 'systemctl enable --now sysstat-collect.timer',
unless => 'systemctl is-enabled --quiet sysstat-collect.timer',
}
}
}

@@ -18,7 +18,7 @@ define sunetdrive::db_type(
$mariadb_dir = '/etc/mariadb'
$mycnf_path = 'sunetdrive/mariadb/my.cnf.erb'
$server_id = 1000 + Integer($facts['networking']['hostname'][-1])
ensure_resource('file',$mariadb_dir, { ensure => directory } )
ensure_resource('file',$mariadb_dir, { ensure => directory, recurse => true } )
$dirs = ['datadir', 'init', 'conf', 'backups', 'scripts' ]
$dirs.each |$dir| {
ensure_resource('file',"${mariadb_dir}/${dir}", { ensure => directory, recurse => true } )

@@ -33,7 +33,7 @@ define sunetdrive::db_type(
$ports = [3306, 4444, 4567, 4568]
if $location =~ /^multinode/ {
$from = $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6 + $config['kube'] + $config['kube_v6']
} elsif $location == 'sunet-test' or $location == 'sunet-prod' {
} elsif $location == 'sunet-test' {
$from = $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6 + $config['imap'] + $config['imap_v6'] + $config['smtp'] + $config['smtp_v6']
} else {
$from = $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6

@@ -46,7 +46,11 @@ define sunetdrive::db_type(
sunet::system_user {'mysql': username => 'mysql', group => 'mysql' }

if $location =~ /^lookup/ {
$sql_files = ['02-backup_user.sql', '03-proxysql.sql', '05-lookup.sql']
} else {
$sql_files = ['02-backup_user.sql', '03-proxysql.sql', '04-nextcloud.sql']
}
$sql_files.each |$sql_file|{
file { "${mariadb_dir}/init/${sql_file}":
ensure => present,

@@ -86,11 +90,6 @@ define sunetdrive::db_type(
ok_criteria => ['exit_status=0','max_age=2d'],
warn_criteria => ['exit_status=1','max_age=3d'],
}
file { '/usr/local/bin/mysql':
ensure => present,
content => template('sunetdrive/mariadb/mysql.erb.sh'),
mode => '0744',
}
file { '/usr/local/bin/size-test':
ensure => present,
content => template('sunetdrive/mariadb/size-test.erb'),

@@ -1,3 +1,4 @@
include apt
# Wrapper for sunet::dockerhost to do thiss specific things
class sunetdrive::dockerhost(
String $version = safe_hiera('docker_version'),

@@ -4,8 +4,10 @@ class sunetdrive::infra_script (
$location = undef
) {
$environment = sunetdrive::get_environment()
$customer = 'common'
$customer = "common"
$config = hiera_hash($environment)
$gss_backup_server = $config['gss_backup_server']
$lookup_backup_server = $config['lookup_backup_server']
$ssh_config = "Host *.sunet.se
User script
IdentityFile /root/.ssh/id_script"

@@ -57,16 +59,14 @@ class sunetdrive::infra_script (
mode => '0700',
}
sunet::scriptherder::cronjob { 'backupgssdb':
ensure => 'absent',
cmd => 'true',
cmd => "/root/tasks/backupdb.sh ${gss_backup_server}",
hour => '2',
minute => '0',
ok_criteria => ['exit_status=0','max_age=2d'],
warn_criteria => ['exit_status=1','max_age=3d'],
}
sunet::scriptherder::cronjob { 'backuplookupdb':
ensure => 'absent',
cmd => 'true',
cmd => "/root/tasks/backupdb.sh ${lookup_backup_server}",
hour => '2',
minute => '0',
ok_criteria => ['exit_status=0','max_age=2d'],

manifests/lookup.pp (Normal file, 42 lines)
@@ -0,0 +1,42 @@
#Class for SUNET-Drive-Lookup-Server
class sunetdrive::lookup (
$bootstrap = undef,
$location = undef
) {

$environment = sunetdrive::get_environment()

# Firewall settings
$nextcloud_ip = hiera_array("${location}_app", [])
$tug_office = hiera_array('tug_office')

$dbhost = 'proxysql_proxysql_1'
$gss_jwt_key = safe_hiera('gss_jwt_key')
$mysql_user_password = safe_hiera('mysql_user_password')
$lookup_version = hiera("lookup_version_${environment}")

#Create users
user { 'www-data': ensure => present, system => true }

file { '/opt/lookup/config.php':
ensure => present,
owner => 'www-data',
group => 'root',
content => template('sunetdrive/lookup/config.php.erb'),
mode => '0644',
}

sunet::docker_compose { 'drive_lookup_docker_compose':
content => template('sunetdrive/lookup/docker-compose_lookup.yml.erb'),
service_name => 'lookup',
compose_dir => '/opt/',
compose_filename => 'docker-compose.yml',
description => 'Lookup server',
}

sunet::misc::ufw_allow { 'https':
from => '0.0.0.0/0',
port => 443,
}
}

@@ -1,13 +1,11 @@
# This is a asyncronous replica of the Maria DB Cluster for SUNET Drive
class sunetdrive::mariadb_backup($tag_mariadb=undef, $location=undef) {
include sunet::packages::netcat_openbsd
$dirs = [ 'datadir', 'init', 'conf', 'backups' ]
$dirs.each | $dir | {
ensure_resource('file',"/opt/mariadb_backup/${dir}", { ensure => directory, recurse => true } )
}
# Config from group.yaml
$environment = sunetdrive::get_environment()
$mariadb_version = hiera("mariadb_version_${environment}")
$config = hiera_hash($environment)
$first_db = $config['first_db']

@@ -5,7 +5,7 @@ class sunetdrive::multinode (
)
{
include sunet::packages::yq
$myname = $facts['networking']['hostname']
$myname = $facts['hostname']
$is_multinode = true;
$environment = sunetdrive::get_environment()
$lb_servers = hiera_hash($environment)['lb_servers']

@@ -14,6 +14,8 @@ class sunetdrive::multinode (
$db_ip = hiera_hash($environment)['db']
$admin_password = hiera('admin_password')
$cluster_admin_password = hiera('cluster_admin_password')
# This is a global value from common.yaml but overridden in the gss-servers local.yaml
$gss_mode = hiera('gss_mode')

$twofactor_enforced_groups = []
$twofactor_enforced_excluded_groups = []

@@ -38,44 +40,6 @@ class sunetdrive::multinode (
user { 'www-data': ensure => present, system => true }
sunet::system_user {'mysql': username => 'mysql', group => 'mysql' }
ensure_resource('file', '/opt/nextcloud' , { ensure => directory, recurse => true } )
file { '/usr/local/bin/get_containers':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/get_containers'),
mode => '0744',
}
file { '/usr/local/bin/scan_external_mounts':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/scan_external_mounts.sh'),
mode => '0744',
}
sunet::scriptherder::cronjob { 'scriptherder_scan_external_mounts':
cmd => '/usr/local/bin/scan_external_mounts',
hour => '1',
minute => '20',
ok_criteria => ['exit_status=0','max_age=2d'],
warn_criteria => ['exit_status=1','max_age=3d'],
}
file { '/usr/local/bin/nocc':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/nocc.erb'),
mode => '0740',
}
file { '/etc/sudoers.d/99-nocc':
ensure => file,
content => "script ALL=(root) NOPASSWD: /usr/local/bin/nocc\n",
mode => '0440',
owner => 'root',
group => 'root',
}
file { '/usr/local/bin/occ':
ensure => present,
force => true,

@@ -110,6 +74,14 @@ class sunetdrive::multinode (
content => template('sunetdrive/multinode/get_non_paying_customers.erb.sh'),
mode => '0744',
}
file { '/usr/local/bin/get_containers':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/multinode/get_containers'),
mode => '0744',
}
file { '/usr/local/bin/restart_and_prune':
ensure => present,
force => true,

@@ -239,36 +211,26 @@ MACAddressPolicy=none'
hour => '0',
weekday => '0',
}
# if $nodenumber == '2' {
# cron { 'add_back_bucket_for_karin_nordgren':
# command => '(/usr/local/bin/occ nextcloud-kmh-app-1 files_external:list karin_nordgren@kmh.se && /home/script/bin/create_bucket.sh nextcloud-kmh-app-1 karin_nordgren@kmh.se karin-nordgren-drive-sunet-se) || /bin/true',
# user => 'root',
# minute => '*/10',
# }
# }
if $nodenumber == '2' {
cron { 'add_back_bucket_for_karin_nordgren':
command => '(/usr/local/bin/occ nextcloud-kmh_app_1 files_external:list karin_nordgren@kmh.se && /home/script/bin/create_bucket.sh nextcloud-kmh_app_1 karin_nordgren@kmh.se karin-nordgren-drive-sunet-se) || /bin/true',
user => 'root',
minute => '*/10',
}
}
$customers.each | $index, $customer | {
$customer_config_full = hiera_hash($customer)
$customer_config = $customer_config_full[$environment]
cron { "multinode_cron_${customer}":
command => "/opt/nextcloud/cron.sh nextcloud-${customer}-app-1",
command => "/opt/nextcloud/cron.sh nextcloud-${customer}_app_1",
require => File['/opt/nextcloud/cron.sh'],
user => 'root',
minute => '*/10',
}
if $environment == 'prod' {
if 'primary_bucket' in $customer_config.keys() {
$s3_bucket = $customer_config['primary_bucket']
} else {
$s3_bucket = "primary-${customer}-drive.sunet.se"
}
$site_name = "${customer}.drive.sunet.se"
$trusted_proxies = ['lb1.drive.sunet.se','lb2.drive.sunet.se', 'lb3.drive.sunet.se', 'lb4.drive.sunet.se']
} else {
if 'primary_bucket' in $customer_config.keys() {
$s3_bucket = $customer_config['primary_bucket']
} else {
$s3_bucket = "primary-${customer}-${environment}.sunet.se"
}
$site_name = "${customer}.drive.${environment}.sunet.se"
$trusted_proxies = ["lb1.drive.${environment}.sunet.se","lb2.drive.${environment}.sunet.se",
"lb3.drive.${environment}.sunet.se","lb4.drive.${environment}.sunet.se"]

@@ -277,6 +239,8 @@ MACAddressPolicy=none'
$apache_error_path = "/opt/multinode/${customer}/404.html"
$config_php_path = "/opt/multinode/${customer}/config.php"
$cron_log_path ="/opt/multinode/${customer}/cron.log"
$customer_config_full = hiera_hash($customer)
$customer_config = $customer_config_full[$environment]

$dbhost = 'proxysql_proxysql_1'
$dbname = "nextcloud_${customer}"

@@ -284,12 +248,14 @@ MACAddressPolicy=none'
$gs_enabled = hiera('gs_enabled')
$gs_federation = hiera('gs_federation')
$gss_master_admin = hiera_array('gss_master_admin')
$gss_master_url = hiera("gss_master_url_${environment}")
$https_port = hiera_hash('multinode_mapping')[$customer]['port']
$lookup_server = hiera("lookup_server_${environment}")
$mail_domain = hiera("mail_domain_${environment}")
$mail_from_address = hiera("mail_from_address_${environment}")
$mail_smtphost = hiera("mail_smtphost_${environment}")
$nextcloud_log_path ="/opt/multinode/${customer}/nextcloud.log"
$audit_log_path ="/opt/multinode/${customer}/audit.log"
if $customer_config['nextcloud_version'] {
$nextcloud_version = $customer_config['nextcloud_version']
} else {

@@ -299,16 +265,11 @@ MACAddressPolicy=none'
$rclone_conf_path = "/opt/multinode/${customer}/rclone.conf"
$redis_conf_dir = "/opt/multinode/${customer}/server"
$redis_conf_path = "${redis_conf_dir}/redis.conf"
if $::facts['sunet_nftables_enabled'] == 'yes' {
$redis_host= "redis-${customer}-redis-server-1"
} else {
$redis_host= "redis-${customer}_redis-server_1"
}

$s3_host = $customer_config['s3_host']
$s3_usepath = hiera('s3_usepath')
$smtpuser = hiera("smtp_user_${environment}")
$trusted_domains = [$site_name, $facts['networking']['fqdn'], 'localhost']
$trusted_domains = [$site_name, $facts['fqdn'], 'localhost']
$tug_office = hiera_array('tug_office')
if $customer_config['twofactor_enforced_groups'] {
$twofactor_enforced_groups = $customer_config['twofactor_enforced_groups']

@@ -316,15 +277,6 @@ MACAddressPolicy=none'
if $customer_config['twofactor_enforced_excluded_groups'] {
$twofactor_enforced_excluded_groups = $customer_config['twofactor_enforced_excluded_groups']
}
if $customer_config['full_backup_retention'] {
$full_backup_retention = $customer_config['full_backup_retention']
} else {
$full_backup_retention = hiera('full_backup_retention')
}
# Calculate some values
$expiration_months = max(12, $full_backup_retention)
$expiration_days_min = $expiration_months * 31
$expiration_days_max = $expiration_months * 31 + 93

# Secrets from local.eyaml
$admin_password = safe_hiera("${customer}_admin_password")

@@ -337,6 +289,7 @@ MACAddressPolicy=none'
$secret = safe_hiera("${customer}_secret")
$passwordsalt= safe_hiera("${customer}_passwordsalt")
$redis_host_password = safe_hiera("${customer}_redis_host_password")
$gss_jwt_key = safe_hiera('gss_jwt_key')
$smtppassword = safe_hiera('smtp_password')

$extra_config = {

@@ -423,13 +376,6 @@ MACAddressPolicy=none'
group => 'root',
mode => '0644',
}
file { $audit_log_path:
ensure => file,
force => true,
owner => 'www-data',
group => 'root',
mode => '0644',
}
file { $rclone_conf_path:
ensure => present,
owner => 'www-data',

@@ -445,13 +391,6 @@ MACAddressPolicy=none'
content => template('sunetdrive/multinode/complete_reinstall.erb.sh'),
mode => '0744',
}
if $::facts['sunet_nftables_enabled'] == 'yes' {
$name = "https_port_${customer}"
ensure_resource('sunet::nftables::ufw_allow_compat', $name, {
from => ['0.0.0.0/0', '::/0'],
port => $https_port,
})
} else {
# Open ports
sunet::misc::ufw_allow { "https_port_${customer}":
from => '0.0.0.0',

@@ -459,4 +398,3 @@ MACAddressPolicy=none'
}
}
}
}

@ -14,61 +14,4 @@ class sunetdrive::multinode_db(){
|
|||
mode => '0744',
|
||||
}
|
||||
}
|
||||
if $facts["networking"]["fqdn"] =~ /^multinode-db1\.drive\.(test\.){1}sunet\.se$/ {
|
||||
$statistics_secret = safe_hiera('statistics_secret')
|
||||
notify { 'hostmessage':
|
||||
message => 'We are on multinode-db1. Set up statistics environment.',
|
||||
}
|
||||
$custdata=$customers.reduce({}) |$memo, $value| {
|
||||
$memo + {$value => lookup($value)}
|
||||
}
|
||||
$rclone_url = 'https://downloads.rclone.org/rclone-current-linux-amd64.deb'
|
||||
$local_path = '/tmp/rclone-current-linux-amd64.deb'
|
||||
exec { 'rclone_deb':
|
||||
command => "/usr/bin/wget -q ${rclone_url} -O ${local_path}",
|
||||
creates => $local_path,
|
||||
}
|
||||
package { 'rclone':
|
||||
ensure => installed,
|
||||
provider => dpkg,
|
||||
source => $local_path,
|
||||
require => Exec['rclone_deb'],
|
||||
}
|
||||
file { '/root/.rclone.conf':
|
||||
ensure => file,
|
||||
content => template('sunetdrive/mariadb_backup/rclone.conf.erb'),
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0600',
|
||||
}
|
||||
file { '/root/tasks/listusersbydep.sh':
|
||||
ensure => file,
|
||||
content => template('sunetdrive/mariadb/listusersdep.sh.erb'),
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0700',
|
||||
}
|
||||
file { '/root/tasks/genusersondepartmentlists.sh':
|
||||
ensure => file,
|
||||
content => template('sunetdrive/mariadb/genuserdeplists.sh.erb'),
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0700',
|
||||
}
|
||||
file {'/opt/mariadb/statistics/custdata.json':
|
||||
ensure => file,
|
||||
content => template('sunetdrive/mariadb/custconfig.json.erb'),
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0600',
|
||||
}
|
||||
sunet::scriptherder::cronjob { 'genuserdeplists':
|
||||
cmd => '/root/tasks/genusersondepartmentlists.sh',
|
||||
hour => '2',
|
||||
minute => '5',
|
||||
ok_criteria => ['exit_status=0','max_age=30h'],
|
||||
warn_criteria => ['exit_status=1', 'max_age=60h'],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -1,79 +0,0 @@
|
|||
|
||||
# NRPE class
|
||||
class sunetdrive::nrpe(
|
||||
$loadw = '15,10,5',
|
||||
$loadc = '30,25,20',
|
||||
$procsw = 150,
|
||||
$procsc = 200,
|
||||
) {
|
||||
|
||||
require apt
|
||||
class { 'sunet::nagios':
|
||||
command_timeout => 600,
|
||||
loadw => $loadw,
|
||||
loadc => $loadc,
|
||||
procsw => $procsw,
|
||||
procsc => $procsc,
|
||||
}
|
||||
package {'nagios-plugins-contrib': ensure => latest}
|
||||
if ($facts['os']['name'] == 'Ubuntu' and versioncmp($facts['os']['release']['full'], '22.04') >= 0 ){
|
||||
$mem_w = '90'
|
||||
$mem_c = '95'
|
||||
} else {
|
||||
$mem_w = '10'
|
||||
$mem_c = '5'
|
||||
}
|
||||
$checks = ['nrpe_check_memory']
|
||||
$checks.each |$check| {
|
||||
ensure_resource("sunet::nagios::${check}", "nagios-nrpe-${check}")
|
||||
}
|
||||
sunet::nagios::nrpe_command {'check_entropy':
|
||||
command_line => '/usr/lib/nagios/plugins/check_entropy -w 256'
|
||||
}
|
||||
sunet::nagios::nrpe_command {'check_ntp_time':
|
||||
command_line => '/usr/lib/nagios/plugins/check_ntp_time -H ntp.se'
|
||||
}
|
||||
sunet::nagios::nrpe_command {'check_scriptherder':
|
||||
command_line => '/usr/local/bin/scriptherder --mode check'
|
||||
}
|
||||
sunet::sudoer {'nagios_run_needrestart_command':
|
||||
user_name => 'nagios',
|
||||
collection => 'nrpe_needrestart_check',
|
||||
command_line => '/usr/sbin/needrestart -p -l'
|
||||
}
|
||||
sunet::sudoer {'nagios_run_galera_command':
|
||||
user_name => 'nagios',
|
||||
collection => 'nrpe_galera_check',
|
||||
command_line => '/usr/lib/nagios/plugins/check_galera_cluster -w 2 -c 1 -0'
|
||||
}
|
||||
sunet::nagios::nrpe_command {'check_galera_cluster':
|
||||
command_line => '/usr/bin/sudo /usr/lib/nagios/plugins/check_galera_cluster -w 2 -c 1 -0'
|
||||
}
|
||||
sunet::sudoer {'nagios_run_proxysql_command':
|
||||
user_name => 'nagios',
|
||||
collection => 'nrpe_proxysql_check',
|
||||
command_line => '/usr/lib/nagios/plugins/check_proxysql_server, /usr/lib/nagios/plugins/check_mysql_server_status'
|
||||
}
|
||||
sunet::nagios::nrpe_command {'check_proxysql_server':
|
||||
command_line => '/usr/bin/sudo /usr/lib/nagios/plugins/check_proxysql_server'
|
||||
}
|
||||
sunet::sudoer {'nagios_run_replication_command':
|
||||
user_name => 'nagios',
|
||||
collection => 'nrpe_replication_check',
|
||||
command_line => '/usr/local/bin/check_replication'
|
||||
}
|
||||
sunet::nagios::nrpe_command {'check_async_replication':
|
||||
command_line => '/usr/bin/sudo /usr/local/bin/check_replication'
|
||||
}
|
||||
sunet::sudoer {'nagios_run_backup_command':
|
||||
user_name => 'nagios',
|
||||
collection => 'nrpe_backup_check',
|
||||
command_line => '/usr/local/bin/check_backups'
|
||||
}
|
||||
sunet::nagios::nrpe_command {'check_backups':
|
||||
command_line => '/usr/bin/sudo /usr/local/bin/check_backups'
|
||||
}
|
||||
sunet::nagios::nrpe_command {'check_mysql_server_status':
|
||||
command_line => '/usr/bin/sudo /usr/lib/nagios/plugins/check_mysql_server_status'
|
||||
}
|
||||
}
|
|
@ -4,7 +4,6 @@ class sunetdrive::proxysql (
|
|||
$location = undef,
|
||||
$proxysql_container_name = 'proxysql_proxysql_1',
|
||||
$manage_config = true,
|
||||
$manage_network = true,
|
||||
) {
|
||||
|
||||
# Config from group.yaml
|
||||
|
@ -12,8 +11,6 @@ class sunetdrive::proxysql (
|
|||
$config = hiera_hash($environment)
|
||||
$db_ip = $config['db']
|
||||
$nextcloud_ip = $config['app']
|
||||
$nextcloud_ipv6 = $config['app_v6']
|
||||
$nextcloud_ip_all = $nextcloud_ip + $nextcloud_ipv6
|
||||
$proxysql_ok_num = length($nextcloud_ip)
|
||||
$proxysql_warn_num = $proxysql_ok_num - 1
|
||||
|
||||
|
@ -29,8 +26,8 @@ class sunetdrive::proxysql (
|
|||
$mysql_user = safe_hiera('mysql_user')
|
||||
|
||||
$transaction_persistent = 1
|
||||
if $::facts['dockerhost2'] == 'yes' and $manage_network {
|
||||
$hostnet = true
|
||||
if $::facts['dockerhost2'] == 'yes' {
|
||||
$hostnet = 'yes'
|
||||
}
|
||||
|
||||
file { '/usr/local/bin/proxysql':
|
||||
|
@ -81,7 +78,7 @@ class sunetdrive::proxysql (
|
|||
iif => 'ens3',
|
||||
}
|
||||
sunet::nftables::docker_expose { 'proxysql':
|
||||
allow_clients => $nextcloud_ip_all,
|
||||
allow_clients => ['any'],
|
||||
port => 6032,
|
||||
iif => 'ens3',
|
||||
}
|
||||
|
|
|
@ -17,10 +17,4 @@ class sunetdrive::redis_cluster (
|
|||
content => template('sunetdrive/redis_cluster/reset_cluster.erb.sh'),
|
||||
mode => '0700',
|
||||
}
|
||||
exec { 'set_permissions_directory':
|
||||
command => 'chown -R 999:root /opt/redis/node-*'
|
||||
}
|
||||
exec { 'set_permissions_files':
|
||||
command => 'chown -R 999:999 /opt/redis/node-*/*'
|
||||
}
|
||||
}
|
||||
|
|
|
@ -29,23 +29,17 @@ class sunetdrive::satosa($dehydrated_name=undef,$image='docker.sunet.se/satosa',
|
|||
}
|
||||
}
|
||||
}
|
||||
$dehydrated_status = $dehydrated_name ? {
|
||||
undef => 'absent',
|
||||
default => 'present'
|
||||
}
|
||||
file {'/opt/docker_run':
|
||||
ensure => 'absent',
|
||||
}
|
||||
sunet::docker_compose { 'satosa':
|
||||
content => template('sunetdrive/satosa/docker-compose.yml.erb'),
|
||||
service_name => 'satosa',
|
||||
compose_dir => '/opt/',
|
||||
compose_filename => 'docker-compose.yml',
|
||||
description => 'Satosa',
|
||||
sunet::docker_run {'satosa':
|
||||
image => $image,
|
||||
imagetag => $tag,
|
||||
dns => ['89.32.32.32'],
|
||||
volumes => ['/etc/satosa:/etc/satosa','/etc/dehydrated:/etc/dehydrated'],
|
||||
ports => ['443:8000'],
|
||||
env => ['METADATA_DIR=/etc/satosa/metadata', 'WORKER_TIMEOUT=120']
|
||||
}
|
||||
file {'/etc/satosa/proxy_conf.yaml':
|
||||
content => inline_template("<%= @merged_conf.to_yaml %>\n"),
|
||||
notify => Sunet::Docker_compose['satosa']
|
||||
notify => Sunet::Docker_run['satosa']
|
||||
}
|
||||
$plugins = hiera('satosa_config')
|
||||
sort(keys($plugins)).each |$n| {
|
||||
|
@ -53,13 +47,23 @@ class sunetdrive::satosa($dehydrated_name=undef,$image='docker.sunet.se/satosa',
|
|||
$fn = $plugins[$n]
|
||||
file { $fn:
|
||||
content => inline_template("<%= @conf.to_yaml %>\n"),
|
||||
notify => Sunet::Docker_compose['satosa']
|
||||
notify => Sunet::Docker_run['satosa']
|
||||
}
|
||||
}
|
||||
sunet::misc::ufw_allow { 'satosa-allow-https':
|
||||
from => 'any',
|
||||
port => '443'
|
||||
}
|
||||
$dehydrated_status = $dehydrated_name ? {
|
||||
undef => 'absent',
|
||||
default => 'present'
|
||||
}
|
||||
sunet::docker_run {'alwayshttps':
|
||||
ensure => $dehydrated_status,
|
||||
image => 'docker.sunet.se/always-https',
|
||||
ports => ['80:80'],
|
||||
env => ['ACME_URL=http://acme-c.sunet.se']
|
||||
}
|
||||
sunet::misc::ufw_allow { 'satosa-allow-http':
|
||||
ensure => $dehydrated_status,
|
||||
from => 'any',
|
||||
|
@ -74,6 +78,12 @@ class sunetdrive::satosa($dehydrated_name=undef,$image='docker.sunet.se/satosa',
|
|||
cert_file => '/etc/satosa/https.crt'
|
||||
}
|
||||
}
|
||||
file { '/opt/satosa':
|
||||
ensure => directory,
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
}
|
||||
-> file { '/opt/satosa/restart.sh':
|
||||
ensure => file,
|
||||
owner => 'root',
|
||||
|
|
|
@ -4,7 +4,6 @@ class sunetdrive::script (
|
|||
$location = undef
|
||||
) {
|
||||
include sunet::packages::python3_pip
|
||||
include sunet::packages::kopia
|
||||
$environment = sunetdrive::get_environment()
|
||||
$customer = sunetdrive::get_customer()
|
||||
$apikey_test = safe_hiera('monitor_apikey_test')
|
||||
|
@ -21,7 +20,6 @@ class sunetdrive::script (
|
|||
$local_path = '/tmp/rclone-current-linux-amd64.deb'
|
||||
$singlenodes = lookup('singlenodes')
|
||||
$multinodes = keys(lookup('multinode_mapping'))
|
||||
$extra_backup_jobs = pick($config['extra_backup_jobs'], {})
|
||||
|
||||
if $customer == 'mdu' {
|
||||
$eppn_suffix = 'mdh.se'
|
||||
|
@ -57,17 +55,6 @@ class sunetdrive::script (
|
|||
}
|
||||
$site_name = $config['site_name']
|
||||
$user_bucket_name = $config['user_bucket_name']
|
||||
if $config['user_scans'] {
|
||||
$config['user_scans'].each |$job| {
|
||||
sunet::scriptherder::cronjob { $job['name']:
|
||||
cmd => "ssh -t -l script ${job['server']} /usr/bin/sudo /usr/local/bin/occ ${job['container']} files:scan ${job['user']}",
|
||||
hour => $job['hour'],
|
||||
minute => $job['minute'],
|
||||
ok_criteria => ['exit_status=0','max_age=1d'],
|
||||
warn_criteria => ['exit_status=1','max_age=2d'],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# It is a start that will get us user buckets and primary buckets
|
||||
$backup_projects = $location
|
||||
|
@ -109,14 +96,6 @@ class sunetdrive::script (
|
|||
unless => "python3 -m pip list | grep drive-utils | grep ${drive_version}",
|
||||
require => Package['python3'],
|
||||
}
|
||||
file { '/opt/backups':
|
||||
ensure => directory,
|
||||
mode => '0700'
|
||||
}
|
||||
file { '/opt/backups/scripts':
|
||||
ensure => directory,
|
||||
mode => '0700'
|
||||
}
|
||||
file { '/root/.ssh/':
|
||||
ensure => directory,
|
||||
mode => '0700',
|
||||
|
@ -254,11 +233,8 @@ class sunetdrive::script (
|
|||
group => 'root',
|
||||
mode => '0700',
|
||||
}
|
||||
file_line { 'FIXME_remove_when_s3_migration_done_in_sto3':
|
||||
ensure => 'present',
|
||||
line => '37.156.195.53 s3.sto3.safedc.net',
|
||||
path => '/etc/hosts',
|
||||
match => '^37.156.195.53',
|
||||
file { '/root/tasks/backupsinglenodedb.sh':
|
||||
ensure => absent,
|
||||
}
|
||||
if $environment == 'test' {
|
||||
sunet::scriptherder::cronjob { 'reboot-customer':
|
||||
|
@ -358,13 +334,6 @@ class sunetdrive::script (
|
|||
group => 'root',
|
||||
mode => '0700',
|
||||
}
|
||||
file { '/opt/backups/scripts/hb.sh':
|
||||
ensure => file,
|
||||
content => template('sunetdrive/script/backup-hb.erb.sh'),
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0700',
|
||||
}
|
||||
sunet::scriptherder::cronjob { 'backupmultinodedb':
|
||||
cmd => '/root/tasks/backupmultinodedb.sh',
|
||||
hour => '2',
|
||||
|
@ -391,7 +360,7 @@ class sunetdrive::script (
|
|||
$singlenodes.each | $singlenode| {
|
||||
$multinode = hiera_hash('multinode_mapping')[$singlenode]['server']
|
||||
$multinodeserver = "${multinode}.${site_name}"
|
||||
$nccontainer = "nextcloud-${singlenode}-app-1"
|
||||
$nccontainer = "nextcloud-${singlenode}_app_1"
|
||||
|
||||
sunet::scriptherder::cronjob { "backup${singlenode}db":
|
||||
ensure => absent,
|
||||
|
@ -421,17 +390,17 @@ class sunetdrive::script (
|
|||
}
|
||||
}
|
||||
}
|
||||
$gss_backup_server = $config['gss_backup_server']
|
||||
$lookup_backup_server = $config['lookup_backup_server']
|
||||
sunet::scriptherder::cronjob { 'backupgssdb':
|
||||
ensure => 'absent',
|
||||
cmd => 'true',
|
||||
cmd => "/root/tasks/backupdb.sh ${gss_backup_server}",
|
||||
hour => '2',
|
||||
minute => '0',
|
||||
ok_criteria => ['exit_status=0','max_age=2d'],
|
||||
warn_criteria => ['exit_status=1','max_age=3d'],
|
||||
}
|
||||
sunet::scriptherder::cronjob { 'backuplookupdb':
|
||||
ensure => 'absent',
|
||||
cmd => 'true',
|
||||
cmd => "/root/tasks/backupdb.sh ${lookup_backup_server}",
|
||||
hour => '2',
|
||||
minute => '0',
|
||||
ok_criteria => ['exit_status=0','max_age=2d'],
|
||||
|
|
|
@ -64,12 +64,12 @@ class sunetdrive::scriptreceiver()
|
|||
}
|
||||
file { '/root/.bashrc':
|
||||
ensure => file,
|
||||
content => template('sunetdrive/scriptreceiver/bashrc.erb.sh'),
|
||||
content => template('sunetdrive/scriptreceiver/baschrc.erb.sh'),
|
||||
mode => '0644',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
}
|
||||
file { '/etc/sudoers.d/99-safer_reboot':
|
||||
file { "/etc/sudoers.d/99-safer_reboot":
|
||||
ensure => file,
|
||||
content => "script ALL=(root) NOPASSWD: /usr/local/bin/safer_reboot\n",
|
||||
mode => '0440',
|
||||
|
|
|
@ -7,7 +7,7 @@ class sunetdrive::sitemonitornaemon() {
|
|||
$tls_servers_with_port = hiera_array('tls_servers_with_port')
|
||||
$nextcloud_version_prod = split(hiera('nextcloud_version_prod'),'[-]')[0]
|
||||
$nextcloud_version_test = split(hiera('nextcloud_version_test'),'[-]')[0]
|
||||
$monitorhost = $facts['networking']['fqdn']
|
||||
$monitorhost = $::fqdn
|
||||
$environment = sunetdrive::get_environment()
|
||||
$influx_passwd = safe_hiera('influx_passwd')
|
||||
$slack_url = safe_hiera('slack_url')
|
||||
|
@ -42,15 +42,11 @@ class sunetdrive::sitemonitornaemon() {
|
|||
}
|
||||
file { '/etc/naemon/conf.d/sunetdrive_thruk_templates.conf':
|
||||
ensure => present,
|
||||
owner => 'naemon',
|
||||
group => 'naemon',
|
||||
content => template('sunetdrive/monitor/sunetdrive_thruk_templates.conf.erb'),
|
||||
mode => '0644',
|
||||
}
|
||||
nagioscfg::service {'check_scriptherder':
|
||||
hostgroup_name => ['sunetdrive::nrpe'],
|
||||
check_command => 'check_nrpe_1arg_to300!check_scriptherder',
|
||||
description => 'Scriptherder Status',
|
||||
contact_groups => ['naemon-admins'],
|
||||
}
|
||||
nagioscfg::service {'check_galera_cluster':
|
||||
hostgroup_name => ['galera_monitor'],
|
||||
check_command => 'check_nrpe_1arg!check_galera_cluster',
|
||||
|
|
|
@ -3,6 +3,7 @@
|
|||
config_php='/var/www/html/config/config.php'
|
||||
dbhost="<%= @dbhost %>"
|
||||
mysql_user_password="<%= @mysql_user_password %>"
|
||||
admin_password="<%= @admin_password %>"
|
||||
location="<%= @location %>"
|
||||
bucket="<%= @s3_bucket %>"
|
||||
|
||||
|
@ -13,9 +14,6 @@ if [[ "${user_input}" == "IKnowWhatIAmDoing" ]]; then
|
|||
echo "WARNING: This will delete everything in the database and reinstall Nextcloud."
|
||||
echo "You have 10 seconds to abort by hitting CTRL/C"
|
||||
sleep 10s
|
||||
echo "Setting temp admin password"
|
||||
apt update && apt install -y apg
|
||||
admin_password="$(apg -m 40 | head -1)"
|
||||
echo "Ok, proceeding."
|
||||
echo "Dropping database in 3 seconds"
|
||||
sleep 3s
|
||||
|
@ -50,11 +48,6 @@ EOF
|
|||
instanceid=$(grep -E "^ 'instanceid'" ${config_php} | awk -F "'" '{print $4}')
|
||||
secret=$(grep -E "^ 'secret'" ${config_php} | awk -F "'" '{print $4}')
|
||||
passwordsalt=$(grep -E "^ 'passwordsalt'" ${config_php} | awk -F "'" '{print $4}')
|
||||
echo "Now delete the admin user:"
|
||||
echo " occ user:delete admin"
|
||||
echo "and then create a new admin user:"
|
||||
echo " /usr/local/bin/add_admin_user <username> <email address>"
|
||||
echo ""
|
||||
echo "Please use edit-secrets to add these variables to all Nextcloud servers:"
|
||||
echo "instanceid: DEC::PKCS7[${instanceid}]!"
|
||||
echo "secret: DEC::PKCS7[${secret}]!"
|
||||
|
|
|
@ -1,5 +1,10 @@
|
|||
<?php
|
||||
$CONFIG = array (
|
||||
'app_install_overwrite' =>
|
||||
array (
|
||||
0 => 'globalsiteselector',
|
||||
),
|
||||
|
||||
'apps_paths' =>
|
||||
array (
|
||||
0 =>
|
||||
|
@ -43,12 +48,28 @@ $CONFIG = array (
|
|||
'gs.enabled' => '<%= @gs_enabled %>',
|
||||
'gs.federation' => '<%= @gs_federation %>',
|
||||
'gs.trustedHosts' => ['*.sunet.se'],
|
||||
'htaccess.RewriteBase' => '/',
|
||||
'gss.discovery.manual.mapping.file' => '/var/www/html/mappingfile.json',
|
||||
'gss.discovery.manual.mapping.parameter' => 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6',
|
||||
'gss.discovery.manual.mapping.regex' => true,
|
||||
'gss.jwt.key' => '<%= @gss_jwt_key %>',
|
||||
'gss.master.admin' =>
|
||||
array (
|
||||
<%- index = 0 -%>
|
||||
<%- @gss_master_admin.each do |item| -%>
|
||||
<%= index %> => '<%= item %>',
|
||||
<%- index += 1 -%>
|
||||
<%- end -%>
|
||||
),
|
||||
'gss.master.url' => '<%= @gss_master_url %>',
|
||||
'gss.mode' => '<%= @gss_mode %>',
|
||||
'gss.user.discovery.module' => '\\OCA\\GlobalSiteSelector\\UserDiscoveryModules\\ManualUserMapping',
|
||||
'gss.username_format' => 'sanitize',
|
||||
'installed' => true,
|
||||
'instanceid' => '<%= @instanceid %>',
|
||||
'integrity.check.disabled' => true,
|
||||
'log_type' => 'file',
|
||||
'loglevel' => 1,
|
||||
'loglevel' => 0,
|
||||
'lookup_server' => '<%= @lookup_server %>',
|
||||
'mail_domain' => '<%= @mail_domain %>',
|
||||
'mail_from_address' => '<%= @mail_from_address %>',
|
||||
'mail_sendmailmode' => 'smtp',
|
||||
|
@ -76,7 +97,6 @@ $CONFIG = array (
|
|||
'region' => 'us-east-1',
|
||||
'hostname' => '<%= @s3_host %>',
|
||||
'port' => '',
|
||||
'useMultipartCopy' => false,
|
||||
'objectPrefix' => 'urn:oid:',
|
||||
'autocreate' => false,
|
||||
'use_ssl' => true,
|
||||
|
@ -84,11 +104,29 @@ $CONFIG = array (
|
|||
'legacy_auth' => false,
|
||||
),
|
||||
),
|
||||
'overwrite.cli.url' => 'https://<%= @site_name %>/',
|
||||
'overwrite.cli.url' => 'https://<%= @site_name %>',
|
||||
'overwritehost' => '<%= @site_name %>',
|
||||
'overwriteprotocol' => 'https',
|
||||
'passwordsalt' => '<%= @passwordsalt %>',
|
||||
<% if @environment == 'test' && ! @is_multinode -%>
|
||||
<% if @location == 'gss-test' -%>
|
||||
'redis.cluster' => [
|
||||
'failover_mode' => \RedisCluster::FAILOVER_ERROR,
|
||||
'password' => '<%= @redis_cluster_password %>',
|
||||
'read_timeout' => 0.0,
|
||||
'seeds' => [
|
||||
'redis1.drive.test.sunet.se:6379',
|
||||
'redis2.drive.test.sunet.se:6379',
|
||||
'redis3.drive.test.sunet.se:6379',
|
||||
'redis1.drive.test.sunet.se:6380',
|
||||
'redis2.drive.test.sunet.se:6380',
|
||||
'redis3.drive.test.sunet.se:6380',
|
||||
'redis1.drive.test.sunet.se:6381',
|
||||
'redis2.drive.test.sunet.se:6381',
|
||||
'redis3.drive.test.sunet.se:6381'
|
||||
],
|
||||
'timeout' => 1.1
|
||||
],
|
||||
<% elsif @environment == 'test' && ! @is_multinode -%>
|
||||
'redis.cluster' => [
|
||||
'failover_mode' => \RedisCluster::FAILOVER_ERROR,
|
||||
'password' => '<%= @redis_cluster_password %>',
|
||||
|
@ -106,7 +144,7 @@ $CONFIG = array (
|
|||
],
|
||||
'timeout' => 1.1
|
||||
],
|
||||
<% elsif @environment == 'prod' && ! @is_multinode -%>
|
||||
<% elsif @environment == 'prod' && ! @is_multinode && @location != 'gss-prod' -%>
|
||||
'redis.cluster' => [
|
||||
'failover_mode' => \RedisCluster::FAILOVER_ERROR,
|
||||
'password' => '<%= @redis_cluster_password %>',
|
||||
|
@ -135,9 +173,6 @@ $CONFIG = array (
|
|||
'secret' => '<%= @secret %>',
|
||||
'skeletondirectory' => '<%= @skeletondirectory %>',
|
||||
'templatedirectory' => '',
|
||||
<% if @environment == 'test' -%>
|
||||
'trashbin_retention_obligation' => 'auto, 30',
|
||||
<% end -%>
|
||||
'trusted_domains' =>
|
||||
array (
|
||||
<%- index = 0 -%>
|
||||
|
@ -160,27 +195,20 @@ $CONFIG = array (
|
|||
array (
|
||||
0 => 'admin',
|
||||
1 => 'forcemfa',
|
||||
<%- if @twofactor_enforced_groups -%>
|
||||
<%- index = 2 -%>
|
||||
<%- @twofactor_enforced_groups.each do |item| -%>
|
||||
<%= index %> => '<%= item %>',
|
||||
<%- index += 1 -%>
|
||||
<%- end -%>
|
||||
<%- end -%>
|
||||
),
|
||||
'twofactor_enforced_excluded_groups' =>
|
||||
array (
|
||||
<%- if @twofactor_enforced_excluded_groups -%>
|
||||
<%- index = 0 -%>
|
||||
<%- @twofactor_enforced_excluded_groups.each do |item| -%>
|
||||
<%= index %> => '<%= item %>',
|
||||
<%- index += 1 -%>
|
||||
<%- end -%>
|
||||
<%- end -%>
|
||||
),
|
||||
'updatechecker' => false,
|
||||
'version' => '<%= @nextcloud_version_string %>',
|
||||
<% if @environment == 'test' -%>
|
||||
'versions_retention_obligation' => '<%= @expiration_days_min %>, <%= @expiration_days_max %>',
|
||||
<% end -%>
|
||||
);
|
||||
|
|
|
@ -9,6 +9,8 @@ services:
|
|||
<%- if @hostnet -%>
|
||||
network_mode: host
|
||||
<%- end -%>
|
||||
environment:
|
||||
- NC_PASS=<%= @admin_password%>
|
||||
volumes:
|
||||
- /opt/nextcloud/000-default.conf:/etc/apache2/sites-enabled/000-default.conf
|
||||
- /opt/nextcloud/mpm_prefork.conf:/etc/apache2/mods-available/mpm_prefork.conf
|
||||
|
@ -19,11 +21,13 @@ services:
|
|||
- /opt/nextcloud/complete_reinstall.sh:/complete_reinstall.sh
|
||||
- /opt/nextcloud/config.php:/var/www/html/config/config.php
|
||||
- /opt/nextcloud/nextcloud.log:/var/www/html/data/nextcloud.log
|
||||
- /opt/nextcloud/audit.log:/var/www/html/data/audit.log
|
||||
- /opt/nextcloud/rclone.conf:/rclone.conf
|
||||
<%- if @skeletondirectory -%>
|
||||
- /opt/nextcloud/skeleton:<%= @skeletondirectory %>
|
||||
<%- end -%>
|
||||
<%- if @location =~ /^gss/ -%>
|
||||
- /opt/nextcloud/mappingfile.json:/var/www/html/mappingfile.json
|
||||
<%- end -%>
|
||||
<% if @location =~ /^kau/ -%>
|
||||
- /mnt:/opt/tmp/
|
||||
<%- end -%>
|
||||
|
@ -33,12 +37,14 @@ services:
|
|||
- proxysql_proxysql
|
||||
<%- end -%>
|
||||
dns:
|
||||
- 89.46.20.75
|
||||
- 89.46.21.29
|
||||
- 89.32.32.32
|
||||
<%- if !@hostnet -%>
|
||||
ports:
|
||||
- 443:443
|
||||
<%- end -%>
|
||||
command: sh -c 'tail -F /var/www/html/data/nextcloud.log /var/www/html/data/audit.log| tee -a /proc/1/fd/2 & apachectl -D FOREGROUND'
|
||||
command: sh -c 'tail -f /var/www/html/data/nextcloud.log | tee -a /proc/1/fd/2 & apachectl -D FOREGROUND'
|
||||
tty: true
|
||||
|
||||
<%- if !@hostnet -%>
|
||||
|
|
|
@ -1,21 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
if [[ "${1}" =~ ^nextcloud ]]; then
|
||||
container=${1}
|
||||
shift
|
||||
else
|
||||
container="nextcloud_app_1"
|
||||
fi
|
||||
|
||||
|
||||
oc_list=$(env| grep 'OC_')
|
||||
if [[ "x${oc_list}" != "x" ]]; then
|
||||
for row in $(echo "${oc_list}"); do
|
||||
MY_VARS="${MY_VARS} -e ${row}"
|
||||
done
|
||||
fi
|
||||
|
||||
docker exec -ti ${MY_VARS} -u www-data ${container} php --define apc.enable_cli=1 /var/www/html/occ "$@"
|
||||
exit 0
|
||||
|
||||
|
|
@ -1,79 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
mountid="${1}"
|
||||
user="${2}"
|
||||
container="${3}"
|
||||
if [[ -z ${mountid} ]] || [[ -z ${user} ]]; then
|
||||
echo "We need a valid mount id and user to proceed"
|
||||
echo "Usage: ${0} <mountid> <user> [<container>]"
|
||||
exit
|
||||
fi
|
||||
|
||||
if [[ -z ${container} ]]; then
|
||||
container="nextcloud_app_1"
|
||||
fi
|
||||
|
||||
occ="/usr/local/bin/occ ${container}"
|
||||
function get_config {
|
||||
${occ} files_external:config ${mountid} ${1} | tr -d '\n\t\r'
|
||||
}
|
||||
|
||||
echo "Gathering information, hang tight."
|
||||
|
||||
echo -n "."
|
||||
bucket="$(get_config bucket)"
|
||||
echo -n "."
|
||||
hostname="$(get_config hostname)"
|
||||
echo -n "."
|
||||
key="$(get_config key)"
|
||||
echo -n "."
|
||||
region="$(get_config region)"
|
||||
echo -n "."
|
||||
secret="$(get_config secret)"
|
||||
jsonfile="/tmp/${user}-user-bucket.json"
|
||||
mount_point="${user/@/-}"
|
||||
mount_point="${mount_point/./-}-user-bucket"
|
||||
|
||||
echo "This will remount the user bucket with mountid ${mountid} for ${user} as project bucket with mountpoint ${mount_point}."
|
||||
read -r -p "Press enter to continue"
|
||||
|
||||
echo '
|
||||
[
|
||||
{
|
||||
"mount_point": "\/'${mount_point}'",
|
||||
"storage": "\\OCA\\Files_External\\Lib\\Storage\\AmazonS3",
|
||||
"authentication_type": "amazons3::accesskey",
|
||||
"configuration": {
|
||||
"bucket": "'${bucket}'",
|
||||
"hostname": "'${hostname}'",
|
||||
"key": "'${key}'",
|
||||
"legacy_auth": false,
|
||||
"port": "443",
|
||||
"region": "'${region}'",
|
||||
"secret": "'${secret}'",
|
||||
"storageClass": "",
|
||||
"useMultipartCopy": false,
|
||||
"use_path_style": true,
|
||||
"use_ssl": true
|
||||
},
|
||||
"options": {
|
||||
"encrypt": true,
|
||||
"previews": true,
|
||||
"enable_sharing": true,
|
||||
"filesystem_check_changes": 0,
|
||||
"encoding_compatibility": false,
|
||||
"readonly": false
|
||||
},
|
||||
"applicable_users": [
|
||||
],
|
||||
"applicable_groups": ["admin"]
|
||||
}
|
||||
]
|
||||
' > "${jsonfile}"
|
||||
|
||||
|
||||
docker cp ${jsonfile} ${container}:/${jsonfile}
|
||||
${occ} files_external:import /${jsonfile}
|
||||
docker exec ${container} rm /${jsonfile}
|
||||
rm ${jsonfile}
|
||||
${occ} files_external:delete ${mountid}
|
|
@ -1,34 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
error_ids=""
|
||||
# Only run if this is the only instance of this script running
|
||||
# note: since this script forks to run pgrep, we need -eq 2 here
|
||||
# shellcheck disable=SC2126
|
||||
if [[ $(pgrep -a -f "${0}" | grep -v scriptherder | wc -l) -eq 2 ]]; then
|
||||
# We sleep a deterministic amount of time, which will be between 0 an 128 m and allways the same within
|
||||
# a specific host, but will differ between hosts
|
||||
sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk '{print $1}') / 2))m
|
||||
errors=''
|
||||
for container in $(get_containers); do
|
||||
error_ids="${error_ids} ${container}: "
|
||||
for id in $(nocc "${container}" files_external:list --all --output json | jq '.[].mount_id' | jq .); do
|
||||
nocc "${container}" files_external:scan "${id}" | grep Error
|
||||
# shellcheck disable=SC2181
|
||||
if [[ ${?} -eq 0 ]]; then
|
||||
errors="${errors} ${id}"
|
||||
error_ids="${error_ids} ${id}"
|
||||
fi
|
||||
done
|
||||
done
|
||||
else
|
||||
echo "Another instance of this script is already running, exiting"
|
||||
pgrep -a -f "${0}" | grep -v scriptherder
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [[ -n "${errors}" ]]; then
|
||||
echo "Errors found in the following mounts: ${error_ids}"
|
||||
exit 1
|
||||
fi
|
||||
echo "No errors found"
|
||||
exit 0
|
templates/lookup/config.php.erb (Normal file, 16 lines)
|
@ -0,0 +1,16 @@
|
|||
|
||||
<?php
|
||||
|
||||
|
||||
$CONFIG = [
|
||||
'DB' => [
|
||||
'host' => "<%= @dbhost %>",
|
||||
'db' => "lookup" ,
|
||||
'user' => "lookup",
|
||||
'pass' => "<%= @mysql_user_password %>",
|
||||
],
|
||||
|
||||
'GLOBAL_SCALE' => true,
|
||||
|
||||
'AUTH_KEY' => "<%= @gss_jwt_key %>",
|
||||
];
|
templates/lookup/docker-compose_lookup.yml.erb (Normal file, 24 lines)
|
@ -0,0 +1,24 @@
|
|||
version: '3.2'
|
||||
|
||||
services:
|
||||
|
||||
app:
|
||||
image: docker.sunet.se/drive/nextcloud-lookup:<%= @lookup_version %>
|
||||
restart: always
|
||||
volumes:
|
||||
- /opt/lookup/config.php:/var/www/html/config/config.php
|
||||
networks:
|
||||
- default
|
||||
- proxysql_proxysql
|
||||
dns:
|
||||
- 89.46.20.75
|
||||
- 89.46.21.29
|
||||
- 89.32.32.32
|
||||
ports:
|
||||
- 443:443
|
||||
command: apachectl -D FOREGROUND
|
||||
tty: true
|
||||
|
||||
networks:
|
||||
proxysql_proxysql:
|
||||
external: true
|
templates/mariadb/05-lookup.sql.erb (Normal file, 62 lines)
|
@ -0,0 +1,62 @@
|
|||
SET SQL_MODE = "NO_AUTO_VALUE_ON_ZERO";
|
||||
SET time_zone = "+00:00";
|
||||
|
||||
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
|
||||
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
|
||||
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
|
||||
/*!40101 SET NAMES utf8mb4 */;
|
||||
|
||||
CREATE DATABASE IF NOT EXISTS `lookup` DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
|
||||
USE `lookup`;
|
||||
|
||||
CREATE USER 'lookup'@'%' IDENTIFIED BY '<%= @mysql_user_password %>';
|
||||
GRANT ALL PRIVILEGES ON lookup.* TO 'lookup'@'%' IDENTIFIED BY '<%= @mysql_user_password %>';
|
||||
|
||||
|
||||
DROP TABLE IF EXISTS `emailValidation`;
|
||||
CREATE TABLE IF NOT EXISTS `emailValidation` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`storeId` int(11) NOT NULL,
|
||||
`token` varchar(16) COLLATE utf8mb4_unicode_ci NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `token` (`token`),
|
||||
KEY `storeId` (`storeId`)
|
||||
) ENGINE=InnoDB AUTO_INCREMENT=16 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
|
||||
|
||||
DROP TABLE IF EXISTS `store`;
|
||||
CREATE TABLE IF NOT EXISTS `store` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`userId` int(11) NOT NULL,
|
||||
`k` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
|
||||
`v` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
|
||||
`valid` tinyint(1) NOT NULL DEFAULT '0',
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `key` (`k`(191)),
|
||||
KEY `value` (`v`(191)),
|
||||
KEY `userId` (`userId`)
|
||||
) ENGINE=InnoDB AUTO_INCREMENT=51 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
|
||||
|
||||
DROP TABLE IF EXISTS `users`;
|
||||
CREATE TABLE IF NOT EXISTS `users` (
|
||||
`id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT,
|
||||
`federationId` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
|
||||
`timestamp` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `federationId` (`federationId`(191))
|
||||
) ENGINE=InnoDB AUTO_INCREMENT=15 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
|
||||
|
||||
DROP TABLE IF EXISTS `toVerify`;
|
||||
CREATE TABLE IF NOT EXISTS `toVerify` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`userId` int(11) NOT NULL,
|
||||
`storeId` int(11) NOT NULL,
|
||||
`property` varchar(512) COLLATE utf8mb4_unicode_ci NOT NULL,
|
||||
`location` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
|
||||
`tries` int(11) NOT NULL,
|
||||
PRIMARY KEY (`id`)
|
||||
) ENGINE=InnoDB AUTO_INCREMENT=16 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
|
||||
|
||||
|
||||
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
|
||||
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
|
||||
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
|
|
@@ -1 +0,0 @@
<%= @custdata.to_json %>
@@ -21,6 +21,6 @@ services:
      - MYSQL_ROOT_PASSWORD=<%= @mysql_root_password %>
      - BOOTSTRAP=<%= @bootstrap %>
      - FORCE_BOOTSTRAP=0
    command: "--wsrep_cluster_address=gcomm://<%= @db_ip.join(',') %>"
    command: "--wsrep_cluster_address=gcomm://<%= @db_ip[0] %>,<%= @db_ip[1] %>,<%= @db_ip[2] %>"
    tty: true
@@ -1,32 +0,0 @@
#!/bin/bash

<% basedir="statistics:drive-server-coms" -%>
<% cupath="/opt/mariadb/statistics/users/" -%>
<% custdata="/opt/mariadb/statistics/custdata.json" -%>
status=0

<% @custdata.each do |cust,data| -%>
#Customer <%= cust %> has no billing departments.
<% if defined?(data[@environment]["billdomains"]) && data[@environment]["billdomains"] -%>
mkdir -p /opt/mariadb/statistics/users/<%= cust %>
chmod '0700' /opt/mariadb/statistics/users/<%= cust %>
<% data[@environment]["billdomains"].each do |dom| -%>
/root/tasks/listusersbydep.sh <%= cust %> <%= dom %> > /opt/mariadb/statistics/users/<%= cust %>/users-<%= dom.gsub(/[.]/, '-') %>.json
if jq . <%= cupath + cust %>/users-<%= dom.gsub(/[.]/, '-') %>.json &>/dev/null
then
  timeout 30s rclone copy -c --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies <%= cupath + cust %>/users-<%= dom.gsub(/[.]/, '-') %>.json <%= basedir%>/<%= cust %>-<%= @environment%>/
  [[ $? -eq 0 ]] || { status=1 ; echo "Error: Upload of user data failed." ; }
else
  echo "Error in json data"
  status=1
fi
<% end -%>
<% end -%>
<% end -%>

if [[ -f <%= custdata %> ]]
then
  timeout 30s rclone copy -c --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies <%= custdata %> <%= basedir%>/
fi

exit ${status}
@@ -1,24 +0,0 @@
#!/bin/bash

function usage () {
    printf "Usage:\t%s <customer> <department email domain>\n" "${0##*/}"
    printf "Example:\t%s uu int.uu.se\n" "${0##*/}"
}

[[ -z "${1}" || -z "${2}" || ! "${1}" =~ ^[a-zA-Z0-9.]{1,200}$ || ! "${2}" =~ ^[a-zA-Z0-9.]{1,200}$ ]] && usage && exit 1

depdom="${2}"
customer="${1}"

docker exec mariadb_db_1 /bin/bash -c 'mysql -p${MYSQL_ROOT_PASSWORD} -NB -e '\
$'\'select JSON_PRETTY(JSON_OBJECTAGG(uid,displayname)) from ('\
$' select nextcloud_'"${customer}"$'.oc_accounts.uid,nextcloud_'"${customer}"$'.oc_global_scale_users.displayname'\
$' from nextcloud_'"${customer}"$'.oc_accounts,nextcloud_'"${customer}"$'.oc_global_scale_users'\
$' where nextcloud_'"${customer}"$'.oc_accounts.uid = nextcloud_'"${customer}"$'.oc_global_scale_users.uid'\
$' AND JSON_EXTRACT(nextcloud_'"${customer}"$'.oc_accounts.data, "$.email.value") like "%@'"${depdom}"$'\\\""'\
$' UNION'\
$' select nextcloud_'"${customer}"$'.oc_accounts.uid,nextcloud_'"${customer}"$'.oc_user_saml_users.displayname'\
$' from nextcloud_'"${customer}"$'.oc_accounts,nextcloud_'"${customer}"$'.oc_user_saml_users'\
$' where nextcloud_'"${customer}"$'.oc_accounts.uid = nextcloud_'"${customer}"$'.oc_user_saml_users.uid'\
$' AND JSON_EXTRACT(nextcloud_'"${customer}"$'.oc_accounts.data, "$.email.value") like "%@'"${depdom}"$'\\\"") as users\'' \
| sed 's/\\n/\n/g'
@@ -1,4 +0,0 @@
#!/bin/bash
pw=$(yq -r '.services.db.environment[0]' /opt/mariadb/docker-compose.yml | awk -F '=' '{print $2}')

docker exec -ti mariadb_db_1 mysql -u root -p"${pw}" "${@}"
@@ -1,6 +1,6 @@
#!/bin/bash

result="$(docker exec -u root mariadb_backup_mariadb_backup_1 mysql -p<%= @mysql_root_password %> -BN -e 'show status like "slave_running"')"
result="$(docker exec mariadb_backup_mariadb_backup_1 mysql -p<%= @mysql_root_password %> -BN -e 'show status like "slave_running"')"
if [[ "${result}" == "Slave_running ON" ]]; then
    echo "OK: Replica running"
    exit 0
@@ -8,12 +8,10 @@ mkdir -p "${backup_dir}"
if [[ -z ${customer} ]]; then
    buopts="--slave-info --safe-slave-backup"
    dumpopts="--dump-slave"
    mysql -p"${MYSQL_ROOT_PASSWORD}" -u root -e "stop slave"
    mysql -p${MYSQL_ROOT_PASSWORD} -e "stop slave"
fi
# shellcheck disable=SC2086
mariadb-backup --backup ${buopts} -u root -p"${MYSQL_ROOT_PASSWORD}" --stream=xbstream | gzip >"${backup_dir}/${stream_name}"
# shellcheck disable=SC2086
mysqldump --all-databases --single-transaction ${dumpopts} -u root -p"${MYSQL_ROOT_PASSWORD}" | gzip >"${backup_dir}/${dump_name}"
mariadb-backup --backup ${buopts} -u root -p${MYSQL_ROOT_PASSWORD} --stream=xbstream | gzip >"${backup_dir}/${stream_name}"
mysqldump --all-databases --single-transaction ${dumpopts} -u root -p${MYSQL_ROOT_PASSWORD} | gzip >"${backup_dir}/${dump_name}"
if [[ -z ${customer} ]]; then
    mysql -p"${MYSQL_ROOT_PASSWORD}" -u root -e "start slave"
    mysql -p${MYSQL_ROOT_PASSWORD} -e "start slave"
fi
@@ -3,7 +3,7 @@ version: '3.2'
services:

  mariadb_backup:
    image: docker.sunet.se/drive/mariadb:<%= @mariadb_version %>
    image: docker.sunet.se/drive/mariadb
    container_name: mariadb_backup_mariadb_backup_1
    dns:
      - 89.46.20.75
@@ -13,18 +13,11 @@ dexec="docker exec ${container}"

password=$(${dexec} env | grep MYSQL_ROOT_PASSWORD | awk -F '=' '{print $2}')

mysql="${dexec} mysql -p${password} -u root"
mysql="${dexec} mysql -p${password}"

if [[ "$(${mysql} -NB -e 'select exists(select * from information_schema.TABLES where TABLE_SCHEMA = "nextcloud" and TABLE_NAME = "oc_global_scale_users")')" == "1" ]]
then
    users="$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_global_scale_users')"
fi
users="${users}"$'\n'"$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_users')"
if [[ "$(${mysql} -NB -e 'select exists(select * from information_schema.TABLES where TABLE_SCHEMA = "nextcloud" and TABLE_NAME = "oc_user_saml_users")')" == "1" ]]
then
    users="${users}"$'\n'"$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_user_saml_users')"
fi
users="$(echo "${users}" | sort | uniq)"
users="${users}
$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_users')"

project="statistics"
bucket="drive-server-coms"
@@ -1,6 +1,6 @@
[statistics]
type = webdav
url = https://89.45.236.246/remote.php/dav/files/_script/
url = https://89.46.20.42/remote.php/dav/files/_script/
vendor = nextcloud
user = _script
pass = <%= @statistics_secret %>
@@ -131,6 +131,32 @@ define host {
<% end -%>
  use monitor-site
}
<% if site.match('lookup') %>
define service {
  notes_url             https://<%= site %>
  action_url            /grafana/dashboard/script/histou.js?host=$HOSTNAME$&service=$SERVICEDISPLAYNAME$&theme=light&annotations=true
  check_command         check_https
  check_interval        5
  check_period          24x7
<% if @environment == 'prod' %>
  contacts              slack
<% else -%>
  contact_groups        naemon-admins
<% end -%>
  host_name             <%= site %>
  max_check_attempts    3
  notification_interval 60
  notification_period   24x7
  retry_interval        1
  service_description   HTTPS
<% if site.match('test') -%>
  servicegroups         test-sites
<% else -%>
  servicegroups         prod-sites
<% end -%>
}
<% end -%>
<% unless site.match('lookup') %>
define service {
  notes_url             https://<%= site %>/status.php
  action_url            /grafana/dashboard/script/histou.js?host=$HOSTNAME$&service=$SERVICEDISPLAYNAME$&theme=light&annotations=true
@@ -138,6 +164,7 @@ define service {
  check_interval        5
  check_period          24x7
<% cur_cust = site.gsub(/\.drive.*/,'') %>
<% cur_cust = cur_cust.gsub(/drive.*/,'gss') %>
# 'check_nextcloud' command definition
<% if @environment == 'prod' and not site.match('test') and @fullnodes.include?(cur_cust) %>
  contacts              slack
@@ -182,3 +209,4 @@ define service {
<% end -%>
}
<% end -%>
<% end -%>
@@ -3,12 +3,10 @@
config_php='/var/www/html/config/config.php'
dbhost="<%= @dbhost %>"
mysql_user_password="<%= @mysql_user_password %>"
admin_password="<%= @admin_password %>"
location="<%= @location %>"
bucket="<%= @s3_bucket %>"
customer="<%= @customer %>"
echo "Setting temp admin password"
apt update && apt install -y apg
admin_password="$(apg -m 40 | head -1)"

/usr/bin/mysql -e "drop database nextcloud" -u nextcloud -p"${mysql_user_password}" -h "${dbhost}" >/dev/null 2>&1
/usr/bin/mysql -e "create database nextcloud" -u nextcloud -p"${mysql_user_password}" -h "${dbhost}" >/dev/null 2>&1
@@ -37,11 +35,6 @@ EOF
instanceid=$(grep -E "^ 'instanceid'" ${config_php} | awk -F "'" '{print $4}')
secret=$(grep -E "^ 'secret'" ${config_php} | awk -F "'" '{print $4}')
passwordsalt=$(grep -E "^ 'passwordsalt'" ${config_php} | awk -F "'" '{print $4}')
echo "Now delete the admin user:"
echo "  occ <container> user:delete admin"
echo "and then create a new admin user:"
echo "  /usr/local/bin/add_admin_user <username> <email address> <container>"
echo ""
echo "${customer}_instanceid: DEC::PKCS7[${instanceid}]!"
echo "${customer}_secret: DEC::PKCS7[${secret}]!"
echo "${customer}_passwordsalt: DEC::PKCS7[${passwordsalt}]!"
@@ -5,7 +5,7 @@ no_files=30 # Keep this many files as an archive, script is run once a week
# a specific host, but will differ between hosts
sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk '{print $1}') / 2))m

for logfile in $(ls /opt/multinode/*/{nextcloud.log,audit.log,server/server.log}); do
for logfile in $(ls /opt/multinode/*/{nextcloud.log,server/server.log}); do
    if [[ -f ${logfile}.gz.${no_files} ]]; then
        rm ${logfile}.gz.${no_files}
    fi
@@ -11,10 +11,11 @@ services:
    volumes:
      - <%= @redis_conf_dir %>:/data
    command: redis-server /data/redis.conf --loglevel verbose
    restart: always
    networks:
      - proxysql_proxysql
    restart: always

networks:
  proxysql_proxysql:
    external: true
@@ -13,9 +13,9 @@ services:
      - /opt/nextcloud/cli.php.ini:/etc/php/8.0/cli/php.ini
      - <%= @config_php_path %>:/var/www/html/config/config.php
      - <%= @nextcloud_log_path %>:/var/www/html/data/nextcloud.log
      - <%= @audit_log_path %>:/var/www/html/data/audit.log
      - <%= @rclone_conf_path %>:/rclone.conf

    environment:
      - NC_PASS=<%= @admin_password%>
    networks:
      - default
      - proxysql_proxysql
@@ -25,7 +25,7 @@ services:
      - 89.32.32.32
    ports:
      - <%= @https_port %>:443
    command: sh -c 'tail -F /var/www/html/data/nextcloud.log /var/www/html/data/audit.log| tee -a /proc/1/fd/2 & apachectl -D FOREGROUND'
    command: apachectl -D FOREGROUND
    tty: true

networks:
@@ -1,6 +1,6 @@
[statistics]
type = webdav
url = https://89.45.236.246/remote.php/dav/files/_script/
url = https://89.46.20.42/remote.php/dav/files/_script/
vendor = nextcloud
user = _script
pass = <%= @statistics_secret %>
@@ -1,23 +0,0 @@
services:
  satosa:
    environment:
      - "METADATA_DIR=/etc/satosa/metadata"
      - "WORKER_TIMEOUT=120"
    dns:
      - "89.32.32.32"
    image: "<%= @image %><% if @tag %>:<%= @tag %><% end %>"
    pull_policy: "always"
    ports:
      - "443:8000"
    volumes:
      - "/etc/satosa:/etc/satosa"
      - "/etc/dehydrated:/etc/dehydrated"
  alwayshttps:
    environment:
      - "ACME_URL=http://acme-c.sunet.se"
    dns:
      - "89.32.32.32"
    image: "docker.sunet.se/always-https"
    pull_policy: "always"
    ports:
      - "80:80"
@@ -6,14 +6,6 @@ sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk
number_of_full_to_keep='<%= @full_backup_retention %>'
fork_limit=30 # in GB, if bigger than this number, we fork the backup to its own process
customer="<%= @customer %>"
environment="<%= @environment %>"
declare -A extra_backup_jobs

#<% if @extra_backup_jobs.any? %>
#<% @extra_backup_jobs.each do |client, job| %>
extra_backup_jobs["<%= client %>"]="<%= job %>"
#<% end %>
#<% end %>
#<% if @location.start_with?('common') %>
declare -a sixmonths=('mau')
if [[ " ${sixmonths[*]} " =~ " ${customer} " ]]; then
@@ -21,20 +13,20 @@ if [[ " ${sixmonths[*]} " =~ " ${customer} " ]]; then
fi
declare -a projects
#<% @singlenodes.each do |singlenode| %>
projects+=("<%= @full_project_mapping[singlenode][@environment]['primary_project'] %> <%= @full_project_mapping[singlenode][@environment]['mirror_project'] %> <%= singlenode %>")
projects+=("<%= @full_project_mapping[singlenode][@environment]['primary_project'] %> <%= @full_project_mapping[singlenode][@environment]['mirror_project'] %>")
#<% @full_project_mapping[singlenode][@environment]['assigned'].each do |project| %>
projects+=("<%= project['project'] %> <%= project['mirror_project'] %> <%= singlenode %>")
projects+=("<%= project['project'] %> <%= project['mirror_project'] %>")
#<% end %>
#<% end %>
#<% else %>
declare -a projects=("<%= @primary_project %> <%= @mirror_project %> ${customer}")
declare -a projects=("<%= @primary_project %> <%= @mirror_project %>")
#<% @assigned_projects.each do |project| %>
projects+=("<%= project['project'] %> <%= project['mirror_project'] %> ${customer}")
projects+=("<%= project['project'] %> <%= project['mirror_project'] %>")
#<% end %>
#<% end %>

if [[ ${customer} == 'common' ]]; then
    projects+=("<%= @location %> <%= @location %>-mirror ${customer}")
    projects+=("<%= @location %> <%= @location %>-mirror")
fi
@@ -42,30 +34,25 @@ function do_backup {
    local project="${1}"
    local mirror="${2}"
    local bucket="${3}"
    local customer="${4}"
    local mirrorbucket="${bucket}-mirror"
    local mountpoint="/opt/backupmounts/${bucket}"
    ps aux | grep duplicity | grep "[^a-zA-Z]${bucket}" > /dev/null
    local oktorun=$? # 1 == this bucket has no other backup process in progress
    if [[ ${oktorun} -ne 0 ]]; then
        mkdir -p ${mountpoint}
        rclone mount ${project}:${bucket} ${mountpoint}/ --daemon --allow-other --dir-cache-time 24h
    [ ${oktorun} -ne 0 ] && rclone mount ${project}:${bucket} ${mountpoint}/ --daemon --allow-other --dir-cache-time 24h
        rclone mkdir ${mirror}:${mirrorbucket}
        duplicity --full-if-older-than 1M --asynchronous-upload --tempdir /mnt --archive-dir /mnt --no-encryption ${mountpoint} rclone://${mirror}:/${mirrorbucket}
        if [[ "${extra_backup_jobs[${customer}]:+found}" == "found" ]] && [[ -f "${extra_backup_jobs[${customer}]}" ]]; then
            ${extra_backup_jobs[${customer}]} ${project} ${mirror} ${bucket} ${customer} ${environment}
        fi
    [ ${oktorun} -ne 0 ] && duplicity --full-if-older-than 1M --asynchronous-upload --tempdir /mnt --archive-dir /mnt \
        --no-encryption ${mountpoint} rclone://${mirror}:/${mirrorbucket}
    umount ${mountpoint}
    rmdir ${mountpoint}
    # Clean up
        duplicity remove-all-but-n-full ${number_of_full_to_keep} --tempdir /mnt --archive-dir /mnt --force rclone://${mirror}:/${mirrorbucket}
    fi
    [ ${oktorun} -ne 0 ] && duplicity remove-all-but-n-full ${number_of_full_to_keep} --tempdir /mnt --archive-dir /mnt \
        --force rclone://${mirror}:/${mirrorbucket}
}

for entry in "${projects[@]}"; do
    project=$(echo ${entry} | awk '{print $1}')
    mirror=$(echo ${entry} | awk '{print $2}')
    customer=$(echo ${entry} | awk '{print $3}')
    for bucket in $(rclone lsd ${project}:/ | awk '{print $5}'); do
        maybesize=$(timeout 30s rclone size --json ${project}:${bucket})
        if [[ ${?} -eq 124 ]]; then
@@ -75,9 +62,9 @@ for entry in "${projects[@]}"; do
        fi
        # If bucket is above 50 GB we fork
        if [[ ${size} -gt $((${fork_limit} * 1000000000)) ]]; then
            do_backup ${project} ${mirror} ${bucket} ${customer} &
            do_backup ${project} ${mirror} ${bucket} &
        else
            do_backup ${project} ${mirror} ${bucket} ${customer}
            do_backup ${project} ${mirror} ${bucket}
        fi
    done
done
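In the loop above, a bucket larger than fork_limit (expressed in GB and compared against the byte count that rclone reports) is backed up in a backgrounded call to do_backup, while smaller buckets are processed inline. A minimal sketch of that decision, with a hypothetical size value purely for illustration:

#!/bin/bash
fork_limit=30          # GB, as in the template above
size=42000000000       # hypothetical byte count from "rclone size --json"
if [[ ${size} -gt $((fork_limit * 1000000000)) ]]; then
    echo "large bucket: back it up in a forked background job"
else
    echo "small bucket: back it up inline"
fi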
@@ -1,8 +0,0 @@
#!/bin/bash
project="${1}"
mirror="${2}"
bucket="${3}"
customer="${4}"
environment="${5}"

rsync -e "ssh -i ${HOME}/.ssh/id_script" -avz "/opt/backupmounts/${bucket}" "sd-${environment}@sd-${environment}-backup.hb.se:~/sd-${environment}/${bucket}"
@@ -20,7 +20,7 @@ for project in $(ls ${data_dir}); do
        if [[ "${issixmonths}" == "true" ]]; then
            number_of_full_to_keep=6
        fi
        max_num_inc=$((50 * number_of_full_to_keep))
        max_num_inc=$((32 * number_of_full_to_keep))
        max_num_full=$((2 * number_of_full_to_keep))

        tabular_data=$(cat "${data_dir}/${project}/${bucket}.dat")
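The thresholds above are plain arithmetic on the retention setting: the allowed number of incrementals is a multiple of the number of full backups kept (32 per full after this change, 50 before), and twice as many fulls as the retention target are tolerated. A small illustration of the resulting values, assuming the six-month retention case where 6 fulls are kept:

#!/bin/bash
number_of_full_to_keep=6                        # hypothetical sixmonths customer
max_num_inc=$((32 * number_of_full_to_keep))    # 192 incrementals allowed
max_num_full=$((2 * number_of_full_to_keep))    # 12 fulls allowed
echo "inc limit: ${max_num_inc}, full limit: ${max_num_full}"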
@@ -25,7 +25,7 @@ dirty=0
primary=''
declare -a users=( 'admin' )
cd "${tempdir}" || echo "Could not cd to tempdir"
declare -a projects=( $("${yq}" -r '.project_mapping.'"${customer}"'.'"${environment}"'.assigned | "\(.[].project)"' /etc/hiera/data/common.yaml) )
declare -a projects=( "${yq}" -r '.project_mapping.'"${customer}"'.'"${environment}"'.assigned | "\(.[].project)"' /etc/hiera/data/common.yaml )
if [[ "${include_userbuckets}" == "true" ]]; then
    primary=$("${yq}" -r '.project_mapping.'"${customer}"'.'"${environment}"'.primary_project' /etc/hiera/data/common.yaml)
    projects+=( "${primary}" )
@@ -5,7 +5,7 @@ shift
include_userbuckets="${1}"
shift
environment="<%= @environment %>"
container="nextcloud-${customer}-app-1"
container="nextcloud-${customer}_app_1"

yq="/usr/local/bin/yq"
if ! [[ -x ${yq} ]]; then
@@ -4,14 +4,14 @@ customer="${1}"
multinode="${2}"
environment="<%= @environment %>"
location="${customer}-${environment}"
userjson=$(ssh "script@${multinode}" "sudo /home/script/bin/list_users.sh nextcloud-${customer}-app-1")
userjson=$(ssh "script@${multinode}" "sudo /home/script/bin/list_users.sh nextcloud-${customer}_app_1")
project="statistics"
bucket="drive-server-coms"
base_dir="${project}:${bucket}"
stat_dir="/opt/statistics"
customer_dir="${stat_dir}/${location}"
mkdir -p "${customer_dir}"
rclone mkdir --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies "${base_dir}/${location}"
rclone mkdir "${base_dir}/${location}"
echo "${userjson}" | jq . >"${customer_dir}/users.json"
status=${?}
if [[ ${status} -eq 0 ]]; then
@@ -17,7 +17,7 @@ if [[ "${ENVIRONMENT}" == "prod" ]]; then
fi
if [[ "${CUSTOMER}" == "common" ]]; then
    customer=""
    types="multinode"
    types="multinode gss"
fi

domain="${customer}drive.${env}sunet.se"
@@ -14,7 +14,7 @@ endpoint = <%= @s3_host_mirror %>
acl = private
[statistics]
type = webdav
url = https://89.45.236.246/remote.php/dav/files/_script/
url = https://89.46.20.42/remote.php/dav/files/_script/
vendor = nextcloud
user = _script
pass = <%= @statistics_secret %>
@@ -30,10 +30,7 @@ def add_downtime(fqdn: str,
    post_url = 'https://{}/thruk/r/hosts/{}/cmd/{}'.format(
        monitor_host, fqdn, action)
    headers = {'X-Thruk-Auth-Key': apikey}
    try:
        requests.post(post_url, data=data, headers=headers)
    except Exception:
        print("Failed to add downtime for {}".format(fqdn))


def remove_downtime(fqdn: str,
@@ -45,15 +42,12 @@ def remove_downtime(fqdn: str,
    get_url = 'https://{}/thruk/r/hosts?name={}&columns=services'.format(
        monitor_host, fqdn)
    headers = {'X-Thruk-Auth-Key': apikey}
    try:
        req = requests.get(get_url, headers=headers)
        action = 'del_active_service_downtimes'
        for service in req.json()[0]['services']:
            post_url = 'https://{}/thruk/r/services/{}/{}/cmd/{}'.format(
                monitor_host, fqdn, urllib.parse.quote(service), action)
            requests.post(post_url, headers=headers)
    except Exception:
        print("Failed to remove downtime for {}".format(fqdn))


def main() -> int:
|
|||
reboot_command = ['sudo /usr/local/bin/safer_reboot']
|
||||
|
||||
if customers[0] == "common":
|
||||
customers = ["multinode"]
|
||||
customers = ["gss", "lookup", "multinode"]
|
||||
for customer in customers:
|
||||
backup_type = "backup"
|
||||
if customer == "multinode":
|
||||
if customer == "gss":
|
||||
backup_type = "gssbackup"
|
||||
elif customer == "lookup":
|
||||
backup_type = "lookupbackup"
|
||||
elif customer == "multinode":
|
||||
backup_command = ['sudo /home/script/bin/backup_multinode_db.sh']
|
||||
backup_type = "multinode-db"
|
||||
|
||||
|
|
|
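The add_downtime and remove_downtime helpers above drive Thruk's REST API with the requests library; when debugging, the same endpoint can be exercised by hand. A minimal sketch of the service-listing call used by remove_downtime, assuming placeholder values for the monitor host, target FQDN and API key:

#!/bin/bash
# Hypothetical values; substitute a real monitor host, target FQDN and Thruk API key.
monitor_host="monitor.example.sunet.se"
fqdn="node1.sunet.drive.sunet.se"
apikey="REPLACE_ME"
curl -s -H "X-Thruk-Auth-Key: ${apikey}" \
    "https://${monitor_host}/thruk/r/hosts?name=${fqdn}&columns=services"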
@@ -30,10 +30,7 @@ def add_downtime(fqdn: str,
    post_url = 'https://{}/thruk/r/hosts/{}/cmd/{}'.format(
        monitor_host, fqdn, action)
    headers = {'X-Thruk-Auth-Key': apikey}
    try:
        requests.post(post_url, data=data, headers=headers)
    except Exception:
        print("Failed to add downtime for: {}".format(fqdn))


def remove_downtime(fqdn: str,
@@ -45,15 +42,12 @@ def remove_downtime(fqdn: str,
    get_url = 'https://{}/thruk/r/hosts?name={}&columns=services'.format(
        monitor_host, fqdn)
    headers = {'X-Thruk-Auth-Key': apikey}
    try:
        req = requests.get(get_url, headers=headers)
        action = 'del_active_service_downtimes'
        for service in req.json()[0]['services']:
            post_url = 'https://{}/thruk/r/services/{}/{}/cmd/{}'.format(
                monitor_host, fqdn, urllib.parse.quote(service), action)
            requests.post(post_url, headers=headers)
    except Exception:
        print("Failed to remove downtime for: {}".format(fqdn))


def run_command(command: list) -> tuple:
|
|||
server_type = "node"
|
||||
backup_type = "backup"
|
||||
if customer == "common":
|
||||
print("GSS no longer exists, bailing out.")
|
||||
sys.exit(0)
|
||||
customer = "gss"
|
||||
server_type = "gss"
|
||||
backup_type = "gssbackup"
|
||||
|
||||
backup = build_fqdn(customer, environment, 1, backup_type)
|
||||
print("\tRunning backup command at {}".format(backup))
|
||||
|
|
|
@@ -83,11 +83,7 @@ alias la='ls -A'
alias l='ls -CF'

if [[ $(hostname) =~ redis ]]; then
    if [ -f /opt/redis/node-0/server.conf ]; then
        alias redis-cli="redis-cli -a $(grep requirepass /opt/redis/node-0/server.conf | awk '{print $2}' | sed 's/"//g') --no-auth-warning"
    else
        alias redis-cli="redis-cli -a $(grep requirepass /opt/redict/node-0/server.conf | awk '{print $2}' | sed 's/"//g') --no-auth-warning"
    fi
fi

# Alias definitions.
@@ -1,31 +0,0 @@
#!/usr/bin/env python3
import argparse
import sys

parser = argparse.ArgumentParser()
parser.add_argument('-w', '--warning',
                    help='Warning threshold',
                    required=True)
parser.add_argument('-c', '--critical',
                    help='Critical threshold',
                    required=True)

args = parser.parse_args()
warning = int(args.warning)
critical = int(args.critical)

with open('/proc/uptime', 'r') as f:
    uptime_seconds = float(f.readline().split()[0])
    days = int(uptime_seconds / 86400)

status = "OK"
exit = 0
if days > warning:
    status = "WARNING"
    exit = 1
if days > critical:
    status = "CRITICAL"
    exit = 2

print(f"{status}: uptime {days} days | uptime={days};{warning};{critical};")
sys.exit(exit)
@@ -11,7 +11,7 @@ function usage {
    exit 1
}

if ! [[ ${container} == 'nextcloud_app_1' ]] && ! [[ ${container} =~ ^nextcloud-[a-z]*-app-1$ ]]; then
if ! [[ ${container} == 'nextcloud_app_1' ]] && ! [[ ${container} =~ ^nextcloud-[a-z]*_app_1$ ]]; then
    usage
fi
if ! [[ ${bucket} =~ ^[a-zA-Z0-9]+ ]]; then
@@ -26,7 +26,7 @@ echo "$(date): Start executing create_bucket.sh ${1} ${2} ${3}"

rclone_config="/opt/nextcloud/rclone.conf"
if [[ "${container}" != "nextcloud_app_1" ]]; then
    customer=$(echo "${container}" | sed -e 's/^nextcloud-//' -e 's/-app-1$//')
    customer=$(echo "${container}" | sed -e 's/^nextcloud-//' -e 's/_app_1$//')
    rclone_config="/opt/multinode/${customer}/rclone.conf"
fi
@@ -38,7 +38,7 @@ preexisting="$(docker exec -u www-data -i "${container}" php --define apc.enable
if [[ -z ${preexisting} ]]; then
    docker exec -u www-data -i "${container}" php --define apc.enable_cli=1 /var/www/html/occ files_external:create "${user_bucket_name}" \
        amazons3 -c bucket="${bucket}" -c key="${key}" -c secret="${secret}" -c hostname="${endpoint}" -c use_ssl=true -c use_path_style=true -c region=us-east-1 \
        -c useMultipartCopy=false amazons3::accesskey --user ${user}
        amazons3::accesskey --user ${user}
    for shareid in $(docker exec -u www-data -i ${container} php --define apc.enable_cli=1 /var/www/html/occ files_external:export ${user} | jq -r '.[].mount_id'); do
        docker exec -u www-data -i ${container} php --define apc.enable_cli=1 /var/www/html/occ files_external:option ${shareid} enable_sharing true
    done
|
@ -7,7 +7,7 @@ bucket=${4}
|
|||
user=${5}
|
||||
/usr/local/bin/occ files_external:create "${bucket}" \
|
||||
amazons3 -c bucket="${bucket}" -c key="${key}" -c secret="${secret}" -c hostname="${endpoint}" -c use_ssl=true -c use_path_style=true -c region=us-east-1 \
|
||||
-c useMultipartCopy=false amazons3::accesskey --user "${user}"
|
||||
amazons3::accesskey --user "${user}"
|
||||
for shareid in $(/usr/local/bin/occ files_external:export "${user}" | jq -r '.[].mount_id'); do
|
||||
/usr/local/bin/occ files_external:option "${shareid}" enable_sharing true
|
||||
done
|
||||
|
|
|
@@ -9,7 +9,7 @@ function usage {
    exit 1
}

if ! [[ ${container} == 'nextcloud_app_1' ]] && ! [[ ${container} =~ ^nextcloud-[a-z]*-app-1$ ]]; then
if ! [[ ${container} == 'nextcloud_app_1' ]] && ! [[ ${container} =~ ^nextcloud-[a-z]*_app_1$ ]]; then
    usage
fi
|
@ -2,7 +2,7 @@
|
|||
|
||||
container=${1}
|
||||
|
||||
if ! [[ ${container} == 'nextcloud_app_1' ]] && ! [[ ${container} =~ ^nextcloud-[a-z]*-app-1$ ]]; then
|
||||
if ! [[ ${container} == 'nextcloud_app_1' ]] && ! [[ ${container} =~ ^nextcloud-[a-z]*_app_1$ ]]; then
|
||||
echo "Usage: ${0} <nextcloud container name>"
|
||||
echo "Example : ${0} nextcloud_app_1"
|
||||
exit 1
|
||||
|
|