Compare commits


3 commits

Author        SHA1        Message            Date
Micke Nordin  adeaea5a37  Fix bug in script  2024-06-26 10:04:41 +02:00
Micke Nordin  4a45670bec  Fix syntax error   2024-06-26 10:04:41 +02:00
Micke Nordin  3738e703b6  Clean up logic     2024-06-26 10:04:41 +02:00
3 changed files with 58 additions and 56 deletions

File 1 of 3: Puppet manifest (class sunetdrive::script)

@@ -250,7 +250,7 @@ class sunetdrive::script (
  if $customer in ['extern', 'gih', 'hkr', 'suni', 'common', 'su', 'lnu'] {
    sunet::scriptherder::cronjob { 'makebuckets':
      ensure => absent,
      cmd    => 'bin/true',
    }
  } else {
    sunet::scriptherder::cronjob { 'makebuckets':

File 2 of 3: backup script template (Bash/ERB)

@@ -15,7 +15,7 @@ extra_backup_jobs["<%= client %>"]="<%= job %>"
#<% if @location.start_with?('common') %>
declare -a sixmonths=('mau')
if [[ " ${sixmonths[*]} " =~ " ${customer} " ]]; then
    number_of_full_to_keep=6
fi
declare -a projects
#<% @singlenodes.each do |singlenode| %>
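The retention override above uses a common Bash membership idiom: the array is joined with spaces and both the joined list and the candidate are padded with spaces, so a customer name only matches as a whole word. A minimal, self-contained sketch of the idiom (the customer list here is invented for illustration):

    #!/usr/bin/env bash
    # Whole-word membership test: pad the joined array and the candidate with spaces.
    declare -a sixmonths=('mau' 'su')
    for customer in mau maud su lnu; do
        if [[ " ${sixmonths[*]} " =~ " ${customer} " ]]; then
            echo "${customer}: keep six full backups"
        else
            echo "${customer}: keep the default number of full backups"
        fi
    done

Because the right-hand side of =~ is quoted, it is matched literally rather than as a regular expression, so "maud" does not match the "mau" entry.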
@@ -37,43 +37,45 @@ fi
function do_backup {
    local project="${1}"
    local mirror="${2}"
    local bucket="${3}"
    local customer="${4}"
    local mirrorbucket="${bucket}-mirror"
    local mountpoint="/opt/backupmounts/${bucket}"
    ps aux | grep duplicity | grep "[^a-zA-Z]${bucket}" > /dev/null
    local oktorun=$? # 1 == this bucket has no other bakup process in progress
-    mkdir -p ${mountpoint}
-    [[ ${oktorun} -ne 0 ]] && rclone mount ${project}:${bucket} ${mountpoint}/ --daemon --allow-other --dir-cache-time 24h
-    rclone mkdir ${mirror}:${mirrorbucket}
-    [[ ${oktorun} -ne 0 ]] && duplicity --full-if-older-than 1M --asynchronous-upload --tempdir /mnt --archive-dir /mnt \
-        --no-encryption ${mountpoint} rclone://${mirror}:/${mirrorbucket} && \
-        [[ -n "${extra_backup_jobs[${customer}]}" ]] && [[ -f ${extra_backup_jobs[${customer} ]] && ${extra_backup_jobs[${customer}]}
-    umount ${mountpoint}
-    rmdir ${mountpoint}
-    # Clean up
-    [ ${oktorun} -ne 0 ] && duplicity remove-all-but-n-full ${number_of_full_to_keep} --tempdir /mnt --archive-dir /mnt \
-        --force rclone://${mirror}:/${mirrorbucket}
+    if [[ ${oktorun} -ne 0 ]]; then
+        mkdir -p ${mountpoint}
+        rclone mount ${project}:${bucket} ${mountpoint}/ --daemon --allow-other --dir-cache-time 24h
+        rclone mkdir ${mirror}:${mirrorbucket}
+        duplicity --full-if-older-than 1M --asynchronous-upload --tempdir /mnt --archive-dir /mnt --no-encryption ${mountpoint} rclone://${mirror}:/${mirrorbucket}
+        if [[ "${extra_backup_jobs[${customer}]:+none}" != "none" ]] && [[ -f "${extra_backup_jobs[${customer}]}" ]]; then
+            ${extra_backup_jobs[${customer}]}
+        fi
+        umount ${mountpoint}
+        rmdir ${mountpoint}
+        # Clean up
+        duplicity remove-all-but-n-full ${number_of_full_to_keep} --tempdir /mnt --archive-dir /mnt --force rclone://${mirror}:/${mirrorbucket}
+    fi
}
for entry in "${projects[@]}"; do
    project=$(echo ${entry} | awk '{print $1}')
    mirror=$(echo ${entry} | awk '{print $2}')
    customer=$(echo ${entry} | awk '{print $3}')
    for bucket in $(rclone lsd ${project}:/ | awk '{print $5}'); do
        maybesize=$(timeout 30s rclone size --json ${project}:${bucket})
        if [[ ${?} -eq 124 ]]; then
            size=$((${fork_limit} * 1000000001))
        else
            size=$(echo ${maybesize} | jq -r '.bytes' )
        fi
        # If bucket is above 50 GB we fork
        if [[ ${size} -gt $((${fork_limit} * 1000000000)) ]]; then
            do_backup ${project} ${mirror} ${bucket} ${customer} &
        else
            do_backup ${project} ${mirror} ${bucket} ${customer}
        fi
    done
done
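The extra-backup-job line is where the "Fix syntax error" commit lands: the old chained expression was missing a closing ]} and, because of the && chaining, only ran when the preceding duplicity call succeeded. The new version moves the job into its own if block and tests the array entry with the ${var:+word} expansion, which yields word only when the variable is set and non-empty. A small sketch of that expansion, with a made-up customer and job path:

    #!/usr/bin/env bash
    # ${var:+word} expands to "word" only when var is set and non-empty.
    declare -A extra_backup_jobs
    extra_backup_jobs['mau']='/opt/backup/extra-mau.sh'   # hypothetical job path
    for customer in mau lnu; do
        if [[ "${extra_backup_jobs[${customer}]:+set}" == "set" ]]; then
            echo "${customer}: extra job configured at ${extra_backup_jobs[${customer}]}"
        else
            echo "${customer}: no extra job configured"
        fi
    done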

File 3 of 3: bucket folder-creation script template (Bash/ERB)

@@ -7,16 +7,16 @@ include_userbuckets="<%= @include_userbuckets %>"
container="nextcloud_app_1"
yq="/usr/local/bin/yq"
if ! [[ -x ${yq} ]]; then
    pip install yq
fi
declare -a directories
if [[ -n ${1} ]]; then
    directories=("${@}")
else
    directories+=("Arbetsmaterial")
    directories+=("Bevarande")
    directories+=("Gallringsbart")
fi
olddir="${PWD}"
@@ -25,31 +25,31 @@ dirty=0
primary=''
declare -a users=( 'admin' )
cd "${tempdir}" || echo "Could not cd to tempdir"
-declare -a projects=( "${yq}" -r '.project_mapping.'"${customer}"'.'"${environment}"'.assigned | "\(.[].project)"' /etc/hiera/data/common.yaml )
+declare -a projects=( $("${yq}" -r '.project_mapping.'"${customer}"'.'"${environment}"'.assigned | "\(.[].project)"' /etc/hiera/data/common.yaml) )
if [[ "${include_userbuckets}" == "true" ]]; then
    primary=$("${yq}" -r '.project_mapping.'"${customer}"'.'"${environment}"'.primary_project' /etc/hiera/data/common.yaml)
    projects+=( "${primary}" )
fi
for project in "${projects[@]}"; do
    for bucket in $(rclone lsd "${project}:" | awk '{print $NF}' | grep -E -v '^primary'); do
        count=$(rclone size --json "${project}:${bucket}" | jq -r .count)
        if [[ ${count} -gt 0 ]]; then
            echo "Skipping ${project}:${bucket} because it has stuff in it already"
            continue
        fi
        for directory in "${directories[@]}"; do
            dirty=1
            if [[ -n ${primary} ]] && [[ ${project} == "${primary}" ]] ; then
                user=$(echo "${bucket}" | awk -F '-' '{print $1}')
                users+=( "${user}@${eppn_suffix}" )
            fi
            echo "Creating ${project}:${bucket}/${directory} because it looks nice and empty"
            temp="README.md"
            echo "**${directory}**" >"${temp}"
            echo "Var god lämna kvar denna fil/Please leave this file" >>"${temp}"
            rclone --no-traverse move "${temp}" "${project}:${bucket}/${directory}"
        done
    done
done
cd "${olddir}" || echo "could not cd to home dir"
rmdir "${tempdir}"
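The one visible content change in this last file wraps the yq call in $( ... ). Without command substitution, declare -a projects=( "${yq}" -r ... ) stores the literal words of the command line in the array instead of running the command; with $( ... ) the array is filled from the command's output, one element per whitespace-separated word. A minimal sketch of the difference, with a stand-in function in place of the yq query:

    #!/usr/bin/env bash
    # Stand-in for the yq query: prints one project name per line.
    list_projects() {
        printf '%s\n' project-a project-b project-c
    }
    # Without $(...): the array holds the words of the command line itself.
    declare -a literal=( list_projects )
    echo "literal: ${#literal[@]} element(s): ${literal[*]}"
    # With $(...): the array holds the words of the command's output.
    declare -a projects=( $(list_projects) )
    echo "substituted: ${#projects[@]} element(s): ${projects[*]}"

As in the change above, the substitution is deliberately left unquoted inside the array so the output is word-split into separate elements.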