diff --git a/templates/script/backup-all-buckets.erb.sh b/templates/script/backup-all-buckets.erb.sh
index c15b605..96ccd29 100644
--- a/templates/script/backup-all-buckets.erb.sh
+++ b/templates/script/backup-all-buckets.erb.sh
@@ -15,7 +15,7 @@ extra_backup_jobs["<%= client %>"]="<%= job %>"
 #<% if @location.start_with?('common') %>
 declare -a sixmonths=('mau')
 if [[ " ${sixmonths[*]} " =~ " ${customer} " ]]; then
-    number_of_full_to_keep=6
+  number_of_full_to_keep=6
 fi
 declare -a projects
 #<% @singlenodes.each do |singlenode| %>
@@ -37,47 +37,45 @@ fi
 
 function do_backup {
-    local project="${1}"
-    local mirror="${2}"
-    local bucket="${3}"
-    local customer="${4}"
-    local mirrorbucket="${bucket}-mirror"
-    local mountpoint="/opt/backupmounts/${bucket}"
-    ps aux | grep duplicity | grep "[^a-zA-Z]${bucket}" > /dev/null
-    local oktorun=$? # 1 == this bucket has no other bakup process in progress
-    mkdir -p ${mountpoint}
-    if [[ ${oktorun} -ne 0 ]]; then
+  local project="${1}"
+  local mirror="${2}"
+  local bucket="${3}"
+  local customer="${4}"
+  local mirrorbucket="${bucket}-mirror"
+  local mountpoint="/opt/backupmounts/${bucket}"
+  ps aux | grep duplicity | grep "[^a-zA-Z]${bucket}" > /dev/null
+  local oktorun=$? # 1 == this bucket has no other bakup process in progress
+  if [[ ${oktorun} -ne 0 ]]; then
+    mkdir -p ${mountpoint}
     rclone mount ${project}:${bucket} ${mountpoint}/ --daemon --allow-other --dir-cache-time 24h
     rclone mkdir ${mirror}:${mirrorbucket}
-        duplicity --full-if-older-than 1M --asynchronous-upload --tempdir /mnt --archive-dir /mnt \
-          --no-encryption ${mountpoint} rclone://${mirror}:/${mirrorbucket}
-        if [[ -n "${extra_backup_jobs[${customer}]}" ]] && [[ -f ${extra_backup_jobs[${customer} ]]; then
+    duplicity --full-if-older-than 1M --asynchronous-upload --tempdir /mnt --archive-dir /mnt --no-encryption ${mountpoint} rclone://${mirror}:/${mirrorbucket}
+    if [[ "${extra_backup_jobs[${customer}]:+none}" != "none" ]] && [[ -f "${extra_backup_jobs[${customer}]}" ]]; then
      ${extra_backup_jobs[${customer}]}
    fi
    umount ${mountpoint}
    rmdir ${mountpoint}
-        # Clean up
-        duplicity remove-all-but-n-full ${number_of_full_to_keep} --tempdir /mnt --archive-dir /mnt \
-          --force rclone://${mirror}:/${mirrorbucket}
+    # Clean up
+    duplicity remove-all-but-n-full ${number_of_full_to_keep} --tempdir /mnt --archive-dir /mnt --force rclone://${mirror}:/${mirrorbucket}
  fi
 }
 
 for entry in "${projects[@]}"; do
-    project=$(echo ${entry} | awk '{print $1}')
-    mirror=$(echo ${entry} | awk '{print $2}')
-    customer=$(echo ${entry} | awk '{print $3}')
-    for bucket in $(rclone lsd ${project}:/ | awk '{print $5}'); do
-        maybesize=$(timeout 30s rclone size --json ${project}:${bucket})
+  project=$(echo ${entry} | awk '{print $1}')
+  mirror=$(echo ${entry} | awk '{print $2}')
+  customer=$(echo ${entry} | awk '{print $3}')
+  for bucket in $(rclone lsd ${project}:/ | awk '{print $5}'); do
+    maybesize=$(timeout 30s rclone size --json ${project}:${bucket})
    if [[ ${?} -eq 124 ]]; then
      size=$((${fork_limit} * 1000000001))
    else
      size=$(echo ${maybesize} | jq -r '.bytes' )
    fi
-        # If bucket is above 50 GB we fork
-        if [[ ${size} -gt $((${fork_limit} * 1000000000)) ]]; then
-            do_backup ${project} ${mirror} ${bucket} ${customer} &
-        else
-            do_backup ${project} ${mirror} ${bucket} ${customer}
-        fi
-    done
+    # If bucket is above 50 GB we fork
+    if [[ ${size} -gt $((${fork_limit} * 1000000000)) ]]; then
+      do_backup ${project} ${mirror} ${bucket} ${customer} &
+    else
+      do_backup ${project} ${mirror} ${bucket} ${customer}
+    fi
+  done
 done
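
For reference, outside the patch itself: alongside the re-indentation, the relocation of `mkdir -p` inside the `oktorun` check, and the joining of the continued `duplicity` lines, the patch rewrites the guard around the per-customer extra backup job to probe the `extra_backup_jobs` associative array with bash's `${var:+word}` alternate-value expansion. Below is a minimal, self-contained sketch of that style of "set and present" check; `extra_backup_jobs` and `customer` come from the script, while the sample customer, the job path, and the `== "set"` spelling are illustrative assumptions, not a claim about the exact expression the script uses.

    #!/usr/bin/env bash
    # Sketch only: a set-and-present check on an associative-array entry,
    # in the spirit of the patched extra-job guard. The customer value and
    # the job path are invented for illustration.
    set -u

    declare -A extra_backup_jobs
    extra_backup_jobs["mau"]="/usr/local/bin/extra-backup-mau.sh"  # hypothetical path
    customer="mau"

    # ${arr[key]:+word} expands to "word" only when the entry exists and is
    # non-empty; for a missing key it expands to an empty string instead of
    # tripping nounset the way a bare ${arr[key]} would.
    if [[ "${extra_backup_jobs[${customer}]:+set}" == "set" ]] \
        && [[ -f "${extra_backup_jobs[${customer}]}" ]]; then
        echo "would run: ${extra_backup_jobs[${customer}]}"
    else
        echo "no runnable extra backup job for ${customer}"
    fi

The `:+` form matters if the script runs under `set -u`: a plain `[[ -n "${extra_backup_jobs[${customer}]}" ]]` aborts with an unbound-variable error for customers that have no extra job configured, whereas the alternate-value expansion degrades to an empty string and lets the guard fall through quietly.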