2023-02-13 09:44:56 +00:00
|
|
|
#!/bin/bash

# Backup all buckets

# We sleep a deterministic amount of time, which will be between 0 and 127 minutes and always the same within
# a specific host, but will differ between hosts.
# (The delay is the last octet of the host's first MAC address, read as hex, divided by 2 — max 0xFF/2 = 127.)
sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk '{print $1}') / 2))m

# ERB-templated: how many full duplicity backup chains to keep per bucket
number_of_full_to_keep='<%= @full_backup_retention %>'

fork_limit=30 #in GB, if bigger than this number, we fork the backup to its own process

# ERB-templated: customer name for this host
customer="<%= @customer %>"
|
2024-06-12 08:55:11 +00:00
|
|
|
# Optional per-customer extra backup jobs, keyed by customer name.
# The value is used by do_backup as an executable to run after a successful
# duplicity backup (it is tested with -f before execution).
declare -A extra_backup_jobs
#<% if @extra_backup_jobs.any? %>
#<% @extra_backup_jobs.each do |client, job| %>
extra_backup_jobs["<%= client %>"]="<%= job %>"
#<% end %>
#<% end %>
|
2023-02-13 09:44:56 +00:00
|
|
|
# Build the list of "primary_project mirror_project customer" triplets to back up.
#<% if @location.start_with?('common') %>
# Customers listed in sixmonths keep 6 full backups instead of the templated default.
declare -a sixmonths=('mau')
if [[ " ${sixmonths[*]} " =~ " ${customer} " ]]; then
number_of_full_to_keep=6
fi
declare -a projects
# Common locations: one entry per singlenode (primary + mirror project),
# plus an entry for each of that singlenode's assigned projects.
#<% @singlenodes.each do |singlenode| %>
projects+=("<%= @full_project_mapping[singlenode][@environment]['primary_project'] %> <%= @full_project_mapping[singlenode][@environment]['mirror_project'] %> <%= singlenode %>")
#<% @full_project_mapping[singlenode][@environment]['assigned'].each do |project| %>
projects+=("<%= project['project'] %> <%= project['mirror_project'] %> <%= singlenode %>")
#<% end %>
#<% end %>
#<% else %>
# Non-common locations: this host's own primary project plus assigned projects.
declare -a projects=("<%= @primary_project %> <%= @mirror_project %> <%= customer %>")
#<% @assigned_projects.each do |project| %>
projects+=("<%= project['project'] %> <%= project['mirror_project'] %> <%= customer %>")
#<% end %>
#<% end %>

# The 'common' customer additionally backs up the location-wide project.
if [[ ${customer} == 'common' ]]; then
projects+=("<%= @location %> <%= @location %>-mirror <%= customer %>")
fi
|
|
|
|
|
|
|
|
|
|
|
|
function do_backup {
  # Back up one bucket: mount it read-through with rclone, replicate it to the
  # mirror bucket with duplicity, optionally run a per-customer extra job,
  # then prune old full backup chains.
  #
  # Arguments:
  #   $1 - rclone remote name of the primary project
  #   $2 - rclone remote name of the mirror project
  #   $3 - bucket name to back up
  #   $4 - customer name (used to look up extra_backup_jobs)
  #
  # Globals read: extra_backup_jobs, number_of_full_to_keep
  local project="${1}"
  local mirror="${2}"
  local bucket="${3}"
  local customer="${4}"
  local mirrorbucket="${bucket}-mirror"
  local mountpoint="/opt/backupmounts/${bucket}"
  # Detect a concurrent duplicity run for this bucket; grep exits 0 when one
  # is found, so oktorun==0 means "skip mount/backup/prune for this bucket".
  ps aux | grep duplicity | grep "[^a-zA-Z]${bucket}" > /dev/null
  local oktorun=$? # 1 == this bucket has no other backup process in progress
  mkdir -p "${mountpoint}"
  [[ ${oktorun} -ne 0 ]] && rclone mount "${project}:${bucket}" "${mountpoint}/" --daemon --allow-other --dir-cache-time 24h
  rclone mkdir "${mirror}:${mirrorbucket}"
  # BUGFIX: the -f test previously read ${extra_backup_jobs[${customer} with
  # no closing ]}, which is a bad substitution at runtime. The extra job is
  # run unquoted on purpose — presumably it may carry arguments; confirm.
  [[ ${oktorun} -ne 0 ]] && duplicity --full-if-older-than 1M --asynchronous-upload --tempdir /mnt --archive-dir /mnt \
    --no-encryption "${mountpoint}" "rclone://${mirror}:/${mirrorbucket}" && \
    [[ -n "${extra_backup_jobs[${customer}]}" ]] && [[ -f "${extra_backup_jobs[${customer}]}" ]] && ${extra_backup_jobs[${customer}]}
  umount "${mountpoint}"
  rmdir "${mountpoint}"
  # Clean up: drop full chains beyond the retention count on the mirror.
  [ ${oktorun} -ne 0 ] && duplicity remove-all-but-n-full "${number_of_full_to_keep}" --tempdir /mnt --archive-dir /mnt \
    --force "rclone://${mirror}:/${mirrorbucket}"
}
|
|
|
|
|
|
|
|
# Iterate over every "project mirror customer" triplet and back up each bucket
# in the project. Buckets larger than fork_limit GB are backed up in a forked
# background process so one huge bucket does not serialize the whole run.
for entry in "${projects[@]}"; do
  project=$(echo "${entry}" | awk '{print $1}')
  mirror=$(echo "${entry}" | awk '{print $2}')
  customer=$(echo "${entry}" | awk '{print $3}')
  for bucket in $(rclone lsd "${project}:/" | awk '{print $5}'); do
    maybesize=$(timeout 30s rclone size --json "${project}:${bucket}")
    if [[ ${?} -eq 124 ]]; then
      # Sizing timed out: assume the bucket is huge and force a fork by
      # setting size just above the fork threshold.
      size=$((fork_limit * 1000000001))
    else
      size=$(echo "${maybesize}" | jq -r '.bytes')
    fi
    # If the bucket is above fork_limit GB we fork (comment previously said
    # "50 GB", which contradicted fork_limit=30).
    if [[ ${size} -gt $((fork_limit * 1000000000)) ]]; then
      do_backup "${project}" "${mirror}" "${bucket}" "${customer}" &
    else
      do_backup "${project}" "${mirror}" "${bucket}" "${customer}"
    fi
  done
done
|