Merge branch 'main' of git+ssh://platform.sunet.se:22022/Drive/sunetdrive

This commit is contained in:
Micke Nordin 2024-08-14 14:48:47 +02:00
commit c8051b065a
8 changed files with 104 additions and 62 deletions

View file

@@ -56,6 +56,17 @@ class sunetdrive::script (
} }
$site_name = $config['site_name'] $site_name = $config['site_name']
$user_bucket_name = $config['user_bucket_name'] $user_bucket_name = $config['user_bucket_name']
if $config['user_scans'] {
$config['user_scans'].each |$job| {
sunet::scriptherder::cronjob { $job['name']:
cmd => "ssh -t -l script ${job['server']} /usr/bin/sudo /usr/local/bin/occ ${job['container']} files:scan ${job['user']}",
hour => $job['hour'],
minute => $job['minute'],
ok_criteria => ['exit_status=0','max_age=1d'],
warn_criteria => ['exit_status=1','max_age=2d'],
}
}
}
# It is a start that will get us user buckets and primary buckets # It is a start that will get us user buckets and primary buckets
$backup_projects = $location $backup_projects = $location
@@ -97,6 +108,10 @@ class sunetdrive::script (
unless => "python3 -m pip list | grep drive-utils | grep ${drive_version}", unless => "python3 -m pip list | grep drive-utils | grep ${drive_version}",
require => Package['python3'], require => Package['python3'],
} }
file { '/opt/backups/scripts':
ensure => directory,
mode => '0700'
}
file { '/root/.ssh/': file { '/root/.ssh/':
ensure => directory, ensure => directory,
mode => '0700', mode => '0700',
@@ -250,7 +265,7 @@ class sunetdrive::script (
if $customer in ['extern', 'gih', 'hkr', 'suni', 'common', 'su', 'lnu'] { if $customer in ['extern', 'gih', 'hkr', 'suni', 'common', 'su', 'lnu'] {
sunet::scriptherder::cronjob { 'makebuckets': sunet::scriptherder::cronjob { 'makebuckets':
ensure => absent, ensure => absent,
cmd => 'bin/true', cmd => 'bin/true',
} }
} else { } else {
sunet::scriptherder::cronjob { 'makebuckets': sunet::scriptherder::cronjob { 'makebuckets':
@@ -335,6 +350,13 @@ class sunetdrive::script (
group => 'root', group => 'root',
mode => '0700', mode => '0700',
} }
file { '/opt/backups/scripts/hb.sh':
ensure => file,
content => template('sunetdrive/script/backup-hb.erb.sh'),
owner => 'root',
group => 'root',
mode => '0700',
}
sunet::scriptherder::cronjob { 'backupmultinodedb': sunet::scriptherder::cronjob { 'backupmultinodedb':
cmd => '/root/tasks/backupmultinodedb.sh', cmd => '/root/tasks/backupmultinodedb.sh',
hour => '2', hour => '2',

View file

@@ -45,6 +45,12 @@ class sunetdrive::sitemonitornaemon() {
content => template('sunetdrive/monitor/sunetdrive_thruk_templates.conf.erb'), content => template('sunetdrive/monitor/sunetdrive_thruk_templates.conf.erb'),
mode => '0644', mode => '0644',
} }
nagioscfg::service {'check_scriptherder':
hostgroup_name => ['nrpe'],
check_command => 'check_nrpe_1arg_to30!check_scriptherder',
description => 'Scriptherder Status',
contact_groups => ['naemon-admins'],
}
nagioscfg::service {'check_galera_cluster': nagioscfg::service {'check_galera_cluster':
hostgroup_name => ['galera_monitor'], hostgroup_name => ['galera_monitor'],
check_command => 'check_nrpe_1arg!check_galera_cluster', check_command => 'check_nrpe_1arg!check_galera_cluster',

View file

@@ -68,7 +68,7 @@ $CONFIG = array (
'instanceid' => '<%= @instanceid %>', 'instanceid' => '<%= @instanceid %>',
'integrity.check.disabled' => true, 'integrity.check.disabled' => true,
'log_type' => 'file', 'log_type' => 'file',
'loglevel' => 0, 'loglevel' => 1,
'lookup_server' => '<%= @lookup_server %>', 'lookup_server' => '<%= @lookup_server %>',
'mail_domain' => '<%= @mail_domain %>', 'mail_domain' => '<%= @mail_domain %>',
'mail_from_address' => '<%= @mail_from_address %>', 'mail_from_address' => '<%= @mail_from_address %>',

View file

@@ -13,7 +13,5 @@ services:
- 89.46.20.75 - 89.46.20.75
- 89.46.21.29 - 89.46.21.29
- 89.32.32.32 - 89.32.32.32
ports:
- 443:443
command: apachectl -D FOREGROUND command: apachectl -D FOREGROUND
tty: true tty: true

View file

@@ -6,7 +6,9 @@ sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk
number_of_full_to_keep='<%= @full_backup_retention %>' number_of_full_to_keep='<%= @full_backup_retention %>'
fork_limit=30 #in GB, if bigger than this number, we fork the backup to it's own process fork_limit=30 #in GB, if bigger than this number, we fork the backup to it's own process
customer="<%= @customer %>" customer="<%= @customer %>"
environment="<%= @environment %>"
declare -A extra_backup_jobs declare -A extra_backup_jobs
#<% if @extra_backup_jobs.any? %> #<% if @extra_backup_jobs.any? %>
#<% @extra_backup_jobs.each do |client, job| %> #<% @extra_backup_jobs.each do |client, job| %>
extra_backup_jobs["<%= client %>"]="<%= job %>" extra_backup_jobs["<%= client %>"]="<%= job %>"
@@ -15,7 +17,7 @@ extra_backup_jobs["<%= client %>"]="<%= job %>"
#<% if @location.start_with?('common') %> #<% if @location.start_with?('common') %>
declare -a sixmonths=('mau') declare -a sixmonths=('mau')
if [[ " ${sixmonths[*]} " =~ " ${customer} " ]]; then if [[ " ${sixmonths[*]} " =~ " ${customer} " ]]; then
number_of_full_to_keep=6 number_of_full_to_keep=6
fi fi
declare -a projects declare -a projects
#<% @singlenodes.each do |singlenode| %> #<% @singlenodes.each do |singlenode| %>
@@ -25,9 +27,9 @@ projects+=("<%= project['project'] %> <%= project['mirror_project'] %> <%= singl
#<% end %> #<% end %>
#<% end %> #<% end %>
#<% else %> #<% else %>
declare -a projects=("<%= @primary_project %> <%= @mirror_project %> <%= customer %>") declare -a projects=("<%= @primary_project %> <%= @mirror_project %> ${customer}")
#<% @assigned_projects.each do |project| %> #<% @assigned_projects.each do |project| %>
projects+=("<%= project['project'] %> <%= project['mirror_project'] %> <%= customer %>") projects+=("<%= project['project'] %> <%= project['mirror_project'] %> ${customer}")
#<% end %> #<% end %>
#<% end %> #<% end %>
@@ -37,43 +39,45 @@ fi
function do_backup { function do_backup {
local project="${1}" local project="${1}"
local mirror="${2}" local mirror="${2}"
local bucket="${3}" local bucket="${3}"
local customer="${4}" local customer="${4}"
local mirrorbucket="${bucket}-mirror" local mirrorbucket="${bucket}-mirror"
local mountpoint="/opt/backupmounts/${bucket}" local mountpoint="/opt/backupmounts/${bucket}"
ps aux | grep duplicity | grep "[^a-zA-Z]${bucket}" > /dev/null ps aux | grep duplicity | grep "[^a-zA-Z]${bucket}" > /dev/null
local oktorun=$? # 1 == this bucket has no other backup process in progress local oktorun=$? # 1 == this bucket has no other backup process in progress
mkdir -p ${mountpoint} if [[ ${oktorun} -ne 0 ]]; then
[[ ${oktorun} -ne 0 ]] && rclone mount ${project}:${bucket} ${mountpoint}/ --daemon --allow-other --dir-cache-time 24h mkdir -p ${mountpoint}
rclone mkdir ${mirror}:${mirrorbucket} rclone mount ${project}:${bucket} ${mountpoint}/ --daemon --allow-other --dir-cache-time 24h
[[ ${oktorun} -ne 0 ]] && duplicity --full-if-older-than 1M --asynchronous-upload --tempdir /mnt --archive-dir /mnt \ rclone mkdir ${mirror}:${mirrorbucket}
--no-encryption ${mountpoint} rclone://${mirror}:/${mirrorbucket} && \ duplicity --full-if-older-than 1M --asynchronous-upload --tempdir /mnt --archive-dir /mnt --no-encryption ${mountpoint} rclone://${mirror}:/${mirrorbucket}
[[ -n "${extra_backup_jobs[${customer}]}" ]] && [[ -f ${extra_backup_jobs[${customer} ]] && ${extra_backup_jobs[${customer}]} if [[ "${extra_backup_jobs[${customer}]:+found}" == "found" ]] && [[ -f "${extra_backup_jobs[${customer}]}" ]]; then
umount ${mountpoint} ${extra_backup_jobs[${customer}]} ${project} ${mirror} ${bucket} ${customer} ${environment}
rmdir ${mountpoint} fi
# Clean up umount ${mountpoint}
[ ${oktorun} -ne 0 ] && duplicity remove-all-but-n-full ${number_of_full_to_keep} --tempdir /mnt --archive-dir /mnt \ rmdir ${mountpoint}
--force rclone://${mirror}:/${mirrorbucket} # Clean up
duplicity remove-all-but-n-full ${number_of_full_to_keep} --tempdir /mnt --archive-dir /mnt --force rclone://${mirror}:/${mirrorbucket}
fi
} }
for entry in "${projects[@]}"; do for entry in "${projects[@]}"; do
project=$(echo ${entry} | awk '{print $1}') project=$(echo ${entry} | awk '{print $1}')
mirror=$(echo ${entry} | awk '{print $2}') mirror=$(echo ${entry} | awk '{print $2}')
customer=$(echo ${entry} | awk '{print $3}') customer=$(echo ${entry} | awk '{print $3}')
for bucket in $(rclone lsd ${project}:/ | awk '{print $5}'); do for bucket in $(rclone lsd ${project}:/ | awk '{print $5}'); do
maybesize=$(timeout 30s rclone size --json ${project}:${bucket}) maybesize=$(timeout 30s rclone size --json ${project}:${bucket})
if [[ ${?} -eq 124 ]]; then if [[ ${?} -eq 124 ]]; then
size=$((${fork_limit} * 1000000001)) size=$((${fork_limit} * 1000000001))
else else
size=$(echo ${maybesize} | jq -r '.bytes' ) size=$(echo ${maybesize} | jq -r '.bytes' )
fi fi
# If bucket is above 50 GB we fork # If bucket is above 50 GB we fork
if [[ ${size} -gt $((${fork_limit} * 1000000000)) ]]; then if [[ ${size} -gt $((${fork_limit} * 1000000000)) ]]; then
do_backup ${project} ${mirror} ${bucket} ${customer} & do_backup ${project} ${mirror} ${bucket} ${customer} &
else else
do_backup ${project} ${mirror} ${bucket} ${customer} do_backup ${project} ${mirror} ${bucket} ${customer}
fi fi
done done
done done

View file

@@ -0,0 +1,8 @@
#!/bin/bash
# Extra backup job for the hb.se off-site target: push an already-mounted
# backup bucket to the remote host over rsync/ssh.
#
# Called from do_backup in the main backup script with the positional args
# "project mirror bucket customer environment"; the mount under
# /opt/backupmounts/<bucket> is expected to exist at call time.
project="${1}"      # source rclone project (unused here; kept for the uniform job interface)
mirror="${2}"       # mirror project (unused here; kept for the uniform job interface)
bucket="${3}"       # bucket name; its tree is mounted at /opt/backupmounts/<bucket>
customer="${4}"     # customer name (unused here; kept for the uniform job interface)
environment="${5}"  # deployment environment; selects remote user, host and path
# Sync the mounted bucket tree to sd-<env>-backup.hb.se using the dedicated key.
rsync -e "ssh -i ${HOME}/.ssh/id_script" -avz "/opt/backupmounts/${bucket}" "sd-${environment}@sd-${environment}-backup.hb.se:~/sd-${environment}/${bucket}"

View file

@@ -7,16 +7,16 @@ include_userbuckets="<%= @include_userbuckets %>"
container="nextcloud_app_1" container="nextcloud_app_1"
yq="/usr/local/bin/yq" yq="/usr/local/bin/yq"
if ! [[ -x ${yq} ]]; then if ! [[ -x ${yq} ]]; then
pip install yq pip install yq
fi fi
declare -a directories declare -a directories
if [[ -n ${1} ]]; then if [[ -n ${1} ]]; then
directories=("${@}") directories=("${@}")
else else
directories+=("Arbetsmaterial") directories+=("Arbetsmaterial")
directories+=("Bevarande") directories+=("Bevarande")
directories+=("Gallringsbart") directories+=("Gallringsbart")
fi fi
olddir="${PWD}" olddir="${PWD}"
@@ -25,31 +25,31 @@ dirty=0
primary='' primary=''
declare -a users=( 'admin' ) declare -a users=( 'admin' )
cd "${tempdir}" || echo "Could not cd to tempdir" cd "${tempdir}" || echo "Could not cd to tempdir"
declare -a projects=( "${yq}" -r '.project_mapping.'"${customer}"'.'"${environment}"'.assigned | "\(.[].project)"' /etc/hiera/data/common.yaml ) declare -a projects=( $("${yq}" -r '.project_mapping.'"${customer}"'.'"${environment}"'.assigned | "\(.[].project)"' /etc/hiera/data/common.yaml) )
if [[ "${include_userbuckets}" == "true" ]]; then if [[ "${include_userbuckets}" == "true" ]]; then
primary=$("${yq}" -r '.project_mapping.'"${customer}"'.'"${environment}"'.primary_project' /etc/hiera/data/common.yaml) primary=$("${yq}" -r '.project_mapping.'"${customer}"'.'"${environment}"'.primary_project' /etc/hiera/data/common.yaml)
projects+=( "${primary}" ) projects+=( "${primary}" )
fi fi
for project in "${projects[@]}"; do for project in "${projects[@]}"; do
for bucket in $(rclone lsd "${project}:" | awk '{print $NF}' | grep -E -v '^primary'); do for bucket in $(rclone lsd "${project}:" | awk '{print $NF}' | grep -E -v '^primary'); do
count=$(rclone size --json "${project}:${bucket}" | jq -r .count) count=$(rclone size --json "${project}:${bucket}" | jq -r .count)
if [[ ${count} -gt 0 ]]; then if [[ ${count} -gt 0 ]]; then
echo "Skipping ${project}:${bucket} because it has stuff in it already" echo "Skipping ${project}:${bucket} because it has stuff in it already"
continue continue
fi fi
for directory in "${directories[@]}"; do for directory in "${directories[@]}"; do
dirty=1 dirty=1
if [[ -n ${primary} ]] && [[ ${project} == "${primary}" ]] ; then if [[ -n ${primary} ]] && [[ ${project} == "${primary}" ]] ; then
user=$(echo "${bucket}" | awk -F '-' '{print $1}') user=$(echo "${bucket}" | awk -F '-' '{print $1}')
users+=( "${user}@${eppn_suffix}" ) users+=( "${user}@${eppn_suffix}" )
fi fi
echo "Creating ${project}:${bucket}/${directory} because it looks nice and empty" echo "Creating ${project}:${bucket}/${directory} because it looks nice and empty"
temp="README.md" temp="README.md"
echo "**${directory}**" >"${temp}" echo "**${directory}**" >"${temp}"
echo "Var god lämna kvar denna fil/Please leave this file" >>"${temp}" echo "Var god lämna kvar denna fil/Please leave this file" >>"${temp}"
rclone --no-traverse move "${temp}" "${project}:${bucket}/${directory}" rclone --no-traverse move "${temp}" "${project}:${bucket}/${directory}"
done done
done done
done done
cd "${olddir}" || echo "could not cd to home dir" cd "${olddir}" || echo "could not cd to home dir"
rmdir "${tempdir}" rmdir "${tempdir}"

View file

@@ -83,7 +83,11 @@ alias la='ls -A'
alias l='ls -CF' alias l='ls -CF'
if [[ $(hostname) =~ redis ]]; then if [[ $(hostname) =~ redis ]]; then
alias redis-cli="redis-cli -a $(grep requirepass /opt/redis/node-0/server.conf | awk '{print $2}' | sed 's/"//g') --no-auth-warning" if [ -f /opt/redis/node-0/server.conf ]; then
alias redis-cli="redis-cli -a $(grep requirepass /opt/redis/node-0/server.conf | awk '{print $2}' | sed 's/"//g') --no-auth-warning"
else
alias redis-cli="redis-cli -a $(grep requirepass /opt/redict/node-0/server.conf | awk '{print $2}' | sed 's/"//g') --no-auth-warning"
fi
fi fi
# Alias definitions. # Alias definitions.