Compare commits
No commits in common. "a836417db2e0eac5486cc05db0da369c42a4563d" and "5c46f5bc38b3c393ef8e0b0e6cfa88d49b29bc0c" have entirely different histories.
a836417db2...5c46f5bc38
@@ -6,12 +6,6 @@ sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk
 number_of_full_to_keep='<%= @full_backup_retention %>'
 fork_limit=30 #in GB, if bigger than this number, we fork the backup to it's own process
 customer="<%= @customer %>"
-declare -A extra_backup_jobs
-#<% if @extra_backup_jobs.any? %>
-#<% @extra_backup_jobs.each do |client, job| %>
-extra_backup_jobs["<%= client %>"]="<%= job %>"
-#<% end %>
-#<% end %>
 #<% if @location.start_with?('common') %>
 declare -a sixmonths=('mau')
 if [[ " ${sixmonths[*]} " =~ " ${customer} " ]]; then
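The lines dropped in this hunk declared a bash associative array that the ERB template filled with one optional post-backup command per customer. As a rough illustration of that pattern only (the path and customer name below are made-up stand-ins, not rendered template output):

    #!/bin/bash
    # Sketch of the per-customer hook pattern the removed block implemented;
    # the path is a hypothetical stand-in for whatever the ERB template rendered.
    declare -A extra_backup_jobs
    extra_backup_jobs["examplecustomer"]="/usr/local/bin/extra-backup-examplecustomer.sh"  # hypothetical

    customer="examplecustomer"
    # Run the hook only if one is defined for this customer and the file exists.
    if [[ -n "${extra_backup_jobs[${customer}]}" ]] && [[ -f "${extra_backup_jobs[${customer}]}" ]]; then
        "${extra_backup_jobs[${customer}]}"
    fi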
@@ -19,20 +13,20 @@ if [[ " ${sixmonths[*]} " =~ " ${customer} " ]]; then
 fi
 declare -a projects
 #<% @singlenodes.each do |singlenode| %>
-projects+=("<%= @full_project_mapping[singlenode][@environment]['primary_project'] %> <%= @full_project_mapping[singlenode][@environment]['mirror_project'] %> <%= singlenode %>")
+projects+=("<%= @full_project_mapping[singlenode][@environment]['primary_project'] %> <%= @full_project_mapping[singlenode][@environment]['mirror_project'] %>")
 #<% @full_project_mapping[singlenode][@environment]['assigned'].each do |project| %>
-projects+=("<%= project['project'] %> <%= project['mirror_project'] %> <%= singlenode %>")
+projects+=("<%= project['project'] %> <%= project['mirror_project'] %>")
 #<% end %>
 #<% end %>
 #<% else %>
-declare -a projects=("<%= @primary_project %> <%= @mirror_project %> <%= customer %>")
+declare -a projects=("<%= @primary_project %> <%= @mirror_project %>")
 #<% @assigned_projects.each do |project| %>
-projects+=("<%= project['project'] %> <%= project['mirror_project'] %> <%= customer %>")
+projects+=("<%= project['project'] %> <%= project['mirror_project'] %>")
 #<% end %>
 #<% end %>

 if [[ ${customer} == 'common' ]]; then
-projects+=("<%= @location %> <%= @location %>-mirror ${customer}")
+projects+=("<%= @location %> <%= @location %>-mirror")
 fi

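Each projects entry is a single space-separated string that the loop further down splits with awk, so dropping the trailing <%= singlenode %> / ${customer} field leaves two fields per entry. A minimal sketch of that split, using hard-coded stand-in values instead of rendered template output:

    #!/bin/bash
    # Sketch of how a rendered projects entry is consumed downstream (example values only).
    declare -a projects=("primary-proj mirror-proj")   # new format: two fields per entry
    for entry in "${projects[@]}"; do
        project=$(echo ${entry} | awk '{print $1}')
        mirror=$(echo ${entry} | awk '{print $2}')
        echo "would back up buckets in ${project} and mirror them to ${mirror}"
    done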
@@ -40,17 +34,15 @@ function do_backup {
 local project="${1}"
 local mirror="${2}"
 local bucket="${3}"
-local customer="${4}"
 local mirrorbucket="${bucket}-mirror"
 local mountpoint="/opt/backupmounts/${bucket}"
 ps aux | grep duplicity | grep "[^a-zA-Z]${bucket}" > /dev/null
 local oktorun=$? # 1 == this bucket has no other backup process in progress
 mkdir -p ${mountpoint}
-[[ ${oktorun} -ne 0 ]] && rclone mount ${project}:${bucket} ${mountpoint}/ --daemon --allow-other --dir-cache-time 24h
+[ ${oktorun} -ne 0 ] && rclone mount ${project}:${bucket} ${mountpoint}/ --daemon --allow-other --dir-cache-time 24h
 rclone mkdir ${mirror}:${mirrorbucket}
-[[ ${oktorun} -ne 0 ]] && duplicity --full-if-older-than 1M --asynchronous-upload --tempdir /mnt --archive-dir /mnt \
---no-encryption ${mountpoint} rclone://${mirror}:/${mirrorbucket} && \
-[[ -n "${extra_backup_jobs[${customer}]}" ]] && [[ -f ${extra_backup_jobs[${customer}]} ]] && ${extra_backup_jobs[${customer}]}
+[ ${oktorun} -ne 0 ] && duplicity --full-if-older-than 1M --asynchronous-upload --tempdir /mnt --archive-dir /mnt \
+--no-encryption ${mountpoint} rclone://${mirror}:/${mirrorbucket}
 umount ${mountpoint}
 rmdir ${mountpoint}
 # Clean up
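do_backup only mounts the bucket and starts duplicity when no other duplicity process already mentions the bucket: grep exits 0 when it finds a match, so a non-zero oktorun means the bucket is free. A small self-contained sketch of that gate, with an example bucket name and echo in place of the real mount and backup commands:

    #!/bin/bash
    # Sketch of the "is duplicity already working on this bucket?" gate.
    bucket="example-bucket"                       # example value
    ps aux | grep duplicity | grep "[^a-zA-Z]${bucket}" > /dev/null
    oktorun=$?                                    # grep exits 0 on a match, 1 when nothing matched
    if [ ${oktorun} -ne 0 ]; then
        echo "no duplicity process mentions ${bucket}; safe to mount and back up"
    else
        echo "a duplicity run already references ${bucket}; skipping"
    fi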
@@ -61,7 +53,6 @@ function do_backup {
 for entry in "${projects[@]}"; do
 project=$(echo ${entry} | awk '{print $1}')
 mirror=$(echo ${entry} | awk '{print $2}')
-customer=$(echo ${entry} | awk '{print $3}')
 for bucket in $(rclone lsd ${project}:/ | awk '{print $5}'); do
 maybesize=$(timeout 30s rclone size --json ${project}:${bucket})
 if [[ ${?} -eq 124 ]]; then
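The context here probes each bucket's size with `timeout 30s rclone size --json ...`; GNU timeout exits with status 124 when the wrapped command is killed for running too long, which is what the `-eq 124` check detects. A tiny sketch of that pattern, with sleep standing in for the slow rclone call:

    #!/bin/bash
    # Sketch of detecting a timed-out probe via GNU timeout's exit status 124.
    maybesize=$(timeout 1s sleep 5)   # stand-in for a slow `rclone size --json` call
    if [[ ${?} -eq 124 ]]; then
        echo "probe timed out; the bucket is too big or too slow to size quickly"
    else
        echo "probe finished in time: ${maybesize}"
    fi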
@@ -71,9 +62,9 @@ for entry in "${projects[@]}"; do
 fi
 # If bucket is above 50 GB we fork
 if [[ ${size} -gt $((${fork_limit} * 1000000000)) ]]; then
-do_backup ${project} ${mirror} ${bucket} ${customer} &
+do_backup ${project} ${mirror} ${bucket} &
 else
-do_backup ${project} ${mirror} ${bucket} ${customer}
+do_backup ${project} ${mirror} ${bucket}
 fi
 done
 done
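fork_limit is expressed in GB and compared against a byte count, hence the multiplication by 1000000000; buckets above the threshold are backed up in a backgrounded job with `&` while smaller ones run inline. A minimal sketch of that decision with example values and a stubbed do_backup (in the real script, size comes from the rclone size probe above):

    #!/bin/bash
    # Sketch of the size-based fork decision; do_backup is a stub and the values are examples.
    fork_limit=30                       # GB, as in the script
    size=45000000000                    # bytes, e.g. taken from the size probe
    do_backup() { echo "backing up $*"; }

    if [[ ${size} -gt $((${fork_limit} * 1000000000)) ]]; then
        do_backup exampleproject examplemirror examplebucket &    # big bucket: own background job
    else
        do_backup exampleproject examplemirror examplebucket      # small bucket: run inline
    fi
    wait    # don't exit before a backgrounded job finishes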
@@ -11,7 +11,7 @@ base_dir="${project}:${bucket}"
 stat_dir="/opt/statistics"
 customer_dir="${stat_dir}/${location}"
 mkdir -p "${customer_dir}"
-rclone mkdir --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies "${base_dir}/${location}"
+rclone mkdir "${base_dir}/${location}"
 echo "${userjson}" | jq . >"${customer_dir}/users.json"
 status=${?}
 if [[ ${status} -eq 0 ]]; then
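In this second file the change drops the WebDAV-specific rclone flags (--no-check-certificate, --webdav-headers, --use-cookies) and relies on the remote's own configuration. The surrounding context also shows a handy pattern: pipe the JSON through `jq .` and only trust the output file if jq exited 0. A small sketch of that validation step with an example payload and directory:

    #!/bin/bash
    # Sketch of the "pretty-print with jq and only trust the file if jq succeeded" pattern.
    userjson='{"users": 3}'                       # example payload; the script builds this earlier
    customer_dir="/tmp/statistics-example"        # example directory
    mkdir -p "${customer_dir}"
    echo "${userjson}" | jq . > "${customer_dir}/users.json"
    status=${?}                                   # exit status of jq, the last command in the pipe
    if [[ ${status} -eq 0 ]]; then
        echo "users.json written and is valid JSON"
    else
        echo "jq rejected the payload; users.json should not be trusted" >&2
    fi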