diff --git a/manifests/script.pp b/manifests/script.pp
index 6547a0a..f4a9e84 100644
--- a/manifests/script.pp
+++ b/manifests/script.pp
@@ -20,6 +20,7 @@ class sunetdrive::script (
   $local_path = '/tmp/rclone-current-linux-amd64.deb'
   $singlenodes = lookup('singlenodes')
   $multinodes = keys(lookup('multinode_mapping'))
+  $extra_backup_jobs = pick($config['extra_backup_jobs'], {})

   if $customer == 'mdu' {
     $eppn_suffix = 'mdh.se'
diff --git a/manifests/scriptreceiver.pp b/manifests/scriptreceiver.pp
index 825fb1f..9c8e319 100644
--- a/manifests/scriptreceiver.pp
+++ b/manifests/scriptreceiver.pp
@@ -64,7 +64,7 @@ class sunetdrive::scriptreceiver()
   }
   file { '/root/.bashrc':
     ensure  => file,
-    content => template('sunetdrive/scriptreceiver/baschrc.erb.sh'),
+    content => template('sunetdrive/scriptreceiver/bashrc.erb.sh'),
     mode    => '0644',
     owner   => 'root',
     group   => 'root',
diff --git a/manifests/sitemonitornaemon.pp b/manifests/sitemonitornaemon.pp
index 0739be6..a1b08c4 100644
--- a/manifests/sitemonitornaemon.pp
+++ b/manifests/sitemonitornaemon.pp
@@ -47,6 +47,12 @@ class sunetdrive::sitemonitornaemon() {
     content => template('sunetdrive/monitor/sunetdrive_thruk_templates.conf.erb'),
     mode    => '0644',
   }
+  nagioscfg::service {'check_scriptherder':
+    hostgroup_name => ['nrpe'],
+    check_command  => 'check_nrpe_1arg_to30!check_scriptherder',
+    description    => 'Scriptherder Status',
+    contact_groups => ['naemon-admins'],
+  }
   nagioscfg::service {'check_galera_cluster':
     hostgroup_name => ['galera_monitor'],
     check_command  => 'check_nrpe_1arg!check_galera_cluster',
diff --git a/templates/mariadb_backup/listusers.erb.sh b/templates/mariadb_backup/listusers.erb.sh
index 115115a..ec8d794 100644
--- a/templates/mariadb_backup/listusers.erb.sh
+++ b/templates/mariadb_backup/listusers.erb.sh
@@ -15,9 +15,16 @@ password=$(${dexec} env | grep MYSQL_ROOT_PASSWORD | awk -F '=' '{print $2}')

 mysql="${dexec} mysql -p${password}"

-users="$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_global_scale_users')"
-users="${users}
-$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_users')"
+if [[ "$(${mysql} -NB -e 'select exists(select * from information_schema.TABLES where TABLE_SCHEMA = "nextcloud" and TABLE_NAME = "oc_global_scale_users")')" == "1" ]]
+then
+    users="$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_global_scale_users')"
+fi
+users="${users}"$'\n'"$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_users')"
+if [[ "$(${mysql} -NB -e 'select exists(select * from information_schema.TABLES where TABLE_SCHEMA = "nextcloud" and TABLE_NAME = "oc_user_saml_users")')" == "1" ]]
+then
+    users="${users}"$'\n'"$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_user_saml_users')"
+fi
+users="$(echo "${users}" | sort | uniq)"

 project="statistics"
 bucket="drive-server-coms"
diff --git a/templates/script/backup-all-buckets.erb.sh b/templates/script/backup-all-buckets.erb.sh
index 200a5a1..d808c42 100644
--- a/templates/script/backup-all-buckets.erb.sh
+++ b/templates/script/backup-all-buckets.erb.sh
@@ -6,6 +6,12 @@ sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk
 number_of_full_to_keep='<%= @full_backup_retention %>'
 fork_limit=30 #in GB, if bigger than this number, we fork the backup to it's own process
 customer="<%= @customer %>"
+declare -A extra_backup_jobs
+#<% if @extra_backup_jobs.any? %>
+#<% @extra_backup_jobs.each do |client, job| %>
+extra_backup_jobs["<%= client %>"]="<%= job %>"
+#<% end %>
+#<% end %>
 #<% if @location.start_with?('common') %>
 declare -a sixmonths=('mau')
 if [[ " ${sixmonths[*]} " =~ " ${customer} " ]]; then
@@ -13,20 +19,20 @@ if [[ " ${sixmonths[*]} " =~ " ${customer} " ]]; then
 fi
 declare -a projects
 #<% @singlenodes.each do |singlenode| %>
-projects+=("<%= @full_project_mapping[singlenode][@environment]['primary_project'] %> <%= @full_project_mapping[singlenode][@environment]['mirror_project'] %>")
+projects+=("<%= @full_project_mapping[singlenode][@environment]['primary_project'] %> <%= @full_project_mapping[singlenode][@environment]['mirror_project'] %> <%= singlenode %>")
 #<% @full_project_mapping[singlenode][@environment]['assigned'].each do |project| %>
-projects+=("<%= project['project'] %> <%= project['mirror_project'] %>")
+projects+=("<%= project['project'] %> <%= project['mirror_project'] %> <%= singlenode %>")
 #<% end %>
 #<% end %>
 #<% else %>
-declare -a projects=("<%= @primary_project %> <%= @mirror_project %>")
+declare -a projects=("<%= @primary_project %> <%= @mirror_project %> <%= @customer %>")
 #<% @assigned_projects.each do |project| %>
-projects+=("<%= project['project'] %> <%= project['mirror_project'] %>")
+projects+=("<%= project['project'] %> <%= project['mirror_project'] %> <%= @customer %>")
 #<% end %>
 #<% end %>

 if [[ ${customer} == 'common' ]]; then
-    projects+=("<%= @location %> <%= @location %>-mirror")
+    projects+=("<%= @location %> <%= @location %>-mirror ${customer}")
 fi


@@ -34,15 +40,17 @@ function do_backup {
     local project="${1}"
     local mirror="${2}"
     local bucket="${3}"
+    local customer="${4}"
     local mirrorbucket="${bucket}-mirror"
     local mountpoint="/opt/backupmounts/${bucket}"
     ps aux | grep duplicity | grep "[^a-zA-Z]${bucket}" > /dev/null
     local oktorun=$? # 1 == this bucket has no other bakup process in progress
     mkdir -p ${mountpoint}
-    [ ${oktorun} -ne 0 ] && rclone mount ${project}:${bucket} ${mountpoint}/ --daemon --allow-other --dir-cache-time 24h
+    [[ ${oktorun} -ne 0 ]] && rclone mount ${project}:${bucket} ${mountpoint}/ --daemon --allow-other --dir-cache-time 24h
     rclone mkdir ${mirror}:${mirrorbucket}
-    [ ${oktorun} -ne 0 ] && duplicity --full-if-older-than 1M --asynchronous-upload --tempdir /mnt --archive-dir /mnt \
-        --no-encryption ${mountpoint} rclone://${mirror}:/${mirrorbucket}
+    [[ ${oktorun} -ne 0 ]] && duplicity --full-if-older-than 1M --asynchronous-upload --tempdir /mnt --archive-dir /mnt \
+        --no-encryption ${mountpoint} rclone://${mirror}:/${mirrorbucket} && \
+        [[ -n "${extra_backup_jobs[${customer}]}" ]] && [[ -f "${extra_backup_jobs[${customer}]}" ]] && ${extra_backup_jobs[${customer}]}
     umount ${mountpoint}
     rmdir ${mountpoint}
     # Clean up
@@ -53,6 +61,7 @@ function do_backup {
 for entry in "${projects[@]}"; do
     project=$(echo ${entry} | awk '{print $1}')
     mirror=$(echo ${entry} | awk '{print $2}')
+    customer=$(echo ${entry} | awk '{print $3}')
     for bucket in $(rclone lsd ${project}:/ | awk '{print $5}'); do
         maybesize=$(timeout 30s rclone size --json ${project}:${bucket})
         if [[ ${?} -eq 124 ]]; then
@@ -62,9 +71,9 @@ for entry in "${projects[@]}"; do
         fi
         # If bucket is above 50 GB we fork
         if [[ ${size} -gt $((${fork_limit} * 1000000000)) ]]; then
-            do_backup ${project} ${mirror} ${bucket} &
+            do_backup ${project} ${mirror} ${bucket} ${customer} &
         else
-            do_backup ${project} ${mirror} ${bucket}
+            do_backup ${project} ${mirror} ${bucket} ${customer}
         fi
     done
 done
diff --git a/templates/scriptreceiver/baschrc.erb.sh b/templates/scriptreceiver/bashrc.erb.sh
similarity index 100%
rename from templates/scriptreceiver/baschrc.erb.sh
rename to templates/scriptreceiver/bashrc.erb.sh