Compare commits
3 commits: 6e5831367e ... adeaea5a37

| Author | SHA1 | Date |
|---|---|---|
| Micke Nordin | adeaea5a37 | |
| Micke Nordin | 4a45670bec | |
| Micke Nordin | 3738e703b6 | |
```diff
@@ -250,7 +250,7 @@ class sunetdrive::script (
   if $customer in ['extern', 'gih', 'hkr', 'suni', 'common', 'su', 'lnu'] {
     sunet::scriptherder::cronjob { 'makebuckets':
       ensure => absent,
-      cmd    => 'bin/true',
+      cmd    => 'bin/true',
     }
   } else {
     sunet::scriptherder::cronjob { 'makebuckets':
```
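The Puppet hunk above absents the `makebuckets` cron job for the listed customers. A quick, hedged way to confirm the job is actually gone on an affected node, assuming scriptherder places its jobs under the usual system cron locations (the paths below are assumptions, not taken from this repository):

```bash
#!/usr/bin/env bash
# Hypothetical check: look for any remaining 'makebuckets' cron entry.
# The searched paths are assumptions; adjust them to where scriptherder
# installs jobs on your nodes.
if grep -rqs 'makebuckets' /etc/cron.d /etc/crontab /var/spool/cron 2>/dev/null; then
    echo "makebuckets cron entry still present"
else
    echo "no makebuckets cron entry found"
fi
```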
```diff
@@ -15,7 +15,7 @@ extra_backup_jobs["<%= client %>"]="<%= job %>"
 #<% if @location.start_with?('common') %>
 declare -a sixmonths=('mau')
 if [[ " ${sixmonths[*]} " =~ " ${customer} " ]]; then
-  number_of_full_to_keep=6
+  number_of_full_to_keep=6
 fi
 declare -a projects
 #<% @singlenodes.each do |singlenode| %>
```
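The `sixmonths` check relies on a common Bash idiom: both the expanded array and the candidate value are wrapped in spaces so the regex only matches whole words. A minimal standalone sketch of the same idiom (the `'example'` element is illustrative, not from the repository):

```bash
#!/usr/bin/env bash
# Membership test: does ${customer} appear as a whole word in the array?
declare -a sixmonths=('mau' 'example')   # 'example' is illustrative
customer="mau"

if [[ " ${sixmonths[*]} " =~ " ${customer} " ]]; then
    echo "${customer}: keep 6 full backups"
else
    echo "${customer}: keep the default number of full backups"
fi
```

The surrounding spaces matter: without them, a customer named `ma` would also match `mau`.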
```diff
@@ -37,43 +37,45 @@ fi
 
 
 function do_backup {
-  local project="${1}"
-  local mirror="${2}"
-  local bucket="${3}"
-  local customer="${4}"
-  local mirrorbucket="${bucket}-mirror"
-  local mountpoint="/opt/backupmounts/${bucket}"
-  ps aux | grep duplicity | grep "[^a-zA-Z]${bucket}" > /dev/null
-  local oktorun=$? # 1 == this bucket has no other bakup process in progress
-  mkdir -p ${mountpoint}
-  [[ ${oktorun} -ne 0 ]] && rclone mount ${project}:${bucket} ${mountpoint}/ --daemon --allow-other --dir-cache-time 24h
-  rclone mkdir ${mirror}:${mirrorbucket}
-  [[ ${oktorun} -ne 0 ]] && duplicity --full-if-older-than 1M --asynchronous-upload --tempdir /mnt --archive-dir /mnt \
-    --no-encryption ${mountpoint} rclone://${mirror}:/${mirrorbucket} && \
-    [[ -n "${extra_backup_jobs[${customer}]}" ]] && [[ -f ${extra_backup_jobs[${customer} ]] && ${extra_backup_jobs[${customer}]}
-  umount ${mountpoint}
-  rmdir ${mountpoint}
-  # Clean up
-  [ ${oktorun} -ne 0 ] && duplicity remove-all-but-n-full ${number_of_full_to_keep} --tempdir /mnt --archive-dir /mnt \
-    --force rclone://${mirror}:/${mirrorbucket}
+  local project="${1}"
+  local mirror="${2}"
+  local bucket="${3}"
+  local customer="${4}"
+  local mirrorbucket="${bucket}-mirror"
+  local mountpoint="/opt/backupmounts/${bucket}"
+  ps aux | grep duplicity | grep "[^a-zA-Z]${bucket}" > /dev/null
+  local oktorun=$? # 1 == this bucket has no other bakup process in progress
+  if [[ ${oktorun} -ne 0 ]]; then
+    mkdir -p ${mountpoint}
+    rclone mount ${project}:${bucket} ${mountpoint}/ --daemon --allow-other --dir-cache-time 24h
+    rclone mkdir ${mirror}:${mirrorbucket}
+    duplicity --full-if-older-than 1M --asynchronous-upload --tempdir /mnt --archive-dir /mnt --no-encryption ${mountpoint} rclone://${mirror}:/${mirrorbucket}
+    if [[ "${extra_backup_jobs[${customer}]:+none}" != "none" ]] && [[ -f "${extra_backup_jobs[${customer}]}" ]]; then
+      ${extra_backup_jobs[${customer}]}
+    fi
+    umount ${mountpoint}
+    rmdir ${mountpoint}
+    # Clean up
+    duplicity remove-all-but-n-full ${number_of_full_to_keep} --tempdir /mnt --archive-dir /mnt --force rclone://${mirror}:/${mirrorbucket}
+  fi
 }
 
```
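The rewrite replaces the old `&&` chains with a single `if [[ ${oktorun} -ne 0 ]]` block. In the chained form every step had to repeat the guard, and one failing command silently short-circuited the rest of the chain, which is also how the malformed `[[ -f ${extra_backup_jobs[${customer} ]]` test could slip through unnoticed. A minimal sketch of the difference, using placeholder functions (`step_one`, `step_two`) instead of the real rclone and duplicity commands:

```bash
#!/usr/bin/env bash
# Illustrative only: step_one/step_two stand in for the real mount/backup commands.
step_one() { echo "step one"; return 1; }   # pretend this step fails
step_two() { echo "step two"; }

oktorun=1   # non-zero == no other backup for this bucket is running

# Chained form: step_two is skipped as soon as step_one fails,
# and the guard has to be repeated in front of every command.
[[ ${oktorun} -ne 0 ]] && step_one && step_two

# Block form: the guard is written once, and each step runs
# regardless of whether the previous one failed.
if [[ ${oktorun} -ne 0 ]]; then
    step_one
    step_two
fi
```

The remainder of the hunk, the loop that feeds buckets to `do_backup`, follows below.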
```diff
 for entry in "${projects[@]}"; do
-  project=$(echo ${entry} | awk '{print $1}')
-  mirror=$(echo ${entry} | awk '{print $2}')
-  customer=$(echo ${entry} | awk '{print $3}')
-  for bucket in $(rclone lsd ${project}:/ | awk '{print $5}'); do
-    maybesize=$(timeout 30s rclone size --json ${project}:${bucket})
+  project=$(echo ${entry} | awk '{print $1}')
+  mirror=$(echo ${entry} | awk '{print $2}')
+  customer=$(echo ${entry} | awk '{print $3}')
+  for bucket in $(rclone lsd ${project}:/ | awk '{print $5}'); do
+    maybesize=$(timeout 30s rclone size --json ${project}:${bucket})
     if [[ ${?} -eq 124 ]]; then
       size=$((${fork_limit} * 1000000001))
     else
       size=$(echo ${maybesize} | jq -r '.bytes' )
     fi
-    # If bucket is above 50 GB we fork
-    if [[ ${size} -gt $((${fork_limit} * 1000000000)) ]]; then
-      do_backup ${project} ${mirror} ${bucket} ${customer} &
-    else
-      do_backup ${project} ${mirror} ${bucket} ${customer}
-    fi
-  done
+    # If bucket is above 50 GB we fork
+    if [[ ${size} -gt $((${fork_limit} * 1000000000)) ]]; then
+      do_backup ${project} ${mirror} ${bucket} ${customer} &
+    else
+      do_backup ${project} ${mirror} ${bucket} ${customer}
+    fi
+  done
 done
```
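The dispatch loop decides whether to run `do_backup` in the background based on bucket size: `timeout 30s rclone size --json` either returns a JSON size or exits with status 124 on timeout, in which case the script assumes the bucket is over the limit. A self-contained sketch of that decision, with a stubbed `probe_size` function instead of the real rclone call and an assumed `fork_limit` of 50 (matching the "above 50 GB we fork" comment):

```bash
#!/usr/bin/env bash
fork_limit=50   # GB; illustrative value taken from the comment in the script

# Stand-in for: timeout 30s rclone size --json ${project}:${bucket}
probe_size() { echo '{"count": 3, "bytes": 73400320}'; }

maybesize=$(probe_size)
status=$?

if [[ ${status} -eq 124 ]]; then
    # Probe timed out: assume the bucket is just over the limit so it gets forked.
    size=$(( fork_limit * 1000000001 ))
else
    size=$(echo "${maybesize}" | jq -r '.bytes')
fi

if [[ ${size} -gt $(( fork_limit * 1000000000 )) ]]; then
    echo "large bucket (${size} bytes): would run do_backup in the background (&)"
else
    echo "small bucket (${size} bytes): would run do_backup in the foreground"
fi
```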
```diff
@@ -7,16 +7,16 @@ include_userbuckets="<%= @include_userbuckets %>"
 container="nextcloud_app_1"
 yq="/usr/local/bin/yq"
 if ! [[ -x ${yq} ]]; then
-  pip install yq
+  pip install yq
 fi
 
 declare -a directories
 if [[ -n ${1} ]]; then
-  directories=("${@}")
+  directories=("${@}")
 else
-  directories+=("Arbetsmaterial")
-  directories+=("Bevarande")
-  directories+=("Gallringsbart")
+  directories+=("Arbetsmaterial")
+  directories+=("Bevarande")
+  directories+=("Gallringsbart")
 fi
 
 olddir="${PWD}"
```
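This script takes its directory names either from the command line (`directories=("${@}")`) or falls back to the three defaults. A hypothetical invocation, assuming the script is called `create_folders.sh` (the real file name is not shown in this diff):

```bash
# Default folders (Arbetsmaterial, Bevarande, Gallringsbart):
./create_folders.sh

# Custom folders instead of the defaults:
./create_folders.sh "Projekt" "Arkiv"
```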
```diff
@@ -25,31 +25,31 @@ dirty=0
 primary=''
 declare -a users=( 'admin' )
 cd "${tempdir}" || echo "Could not cd to tempdir"
-declare -a projects=( "${yq}" -r '.project_mapping.'"${customer}"'.'"${environment}"'.assigned | "\(.[].project)"' /etc/hiera/data/common.yaml )
+declare -a projects=( $("${yq}" -r '.project_mapping.'"${customer}"'.'"${environment}"'.assigned | "\(.[].project)"' /etc/hiera/data/common.yaml) )
 if [[ "${include_userbuckets}" == "true" ]]; then
-  primary=$("${yq}" -r '.project_mapping.'"${customer}"'.'"${environment}"'.primary_project' /etc/hiera/data/common.yaml)
-  projects+=( "${primary}" )
+  primary=$("${yq}" -r '.project_mapping.'"${customer}"'.'"${environment}"'.primary_project' /etc/hiera/data/common.yaml)
+  projects+=( "${primary}" )
 fi
 for project in "${projects[@]}"; do
-  for bucket in $(rclone lsd "${project}:" | awk '{print $NF}' | grep -E -v '^primary'); do
-    count=$(rclone size --json "${project}:${bucket}" | jq -r .count)
-    if [[ ${count} -gt 0 ]]; then
-      echo "Skipping ${project}:${bucket} because it has stuff in it already"
-      continue
-    fi
-    for directory in "${directories[@]}"; do
-      dirty=1
+  for bucket in $(rclone lsd "${project}:" | awk '{print $NF}' | grep -E -v '^primary'); do
+    count=$(rclone size --json "${project}:${bucket}" | jq -r .count)
+    if [[ ${count} -gt 0 ]]; then
+      echo "Skipping ${project}:${bucket} because it has stuff in it already"
+      continue
+    fi
+    for directory in "${directories[@]}"; do
+      dirty=1
       if [[ -n ${primary} ]] && [[ ${project} == "${primary}" ]] ; then
         user=$(echo "${bucket}" | awk -F '-' '{print $1}')
         users+=( "${user}@${eppn_suffix}" )
       fi
-      echo "Creating ${project}:${bucket}/${directory} because it looks nice and empty"
-      temp="README.md"
-      echo "**${directory}**" >"${temp}"
-      echo "Var god lämna kvar denna fil/Please leave this file" >>"${temp}"
-      rclone --no-traverse move "${temp}" "${project}:${bucket}/${directory}"
-    done
+      echo "Creating ${project}:${bucket}/${directory} because it looks nice and empty"
+      temp="README.md"
+      echo "**${directory}**" >"${temp}"
+      echo "Var god lämna kvar denna fil/Please leave this file" >>"${temp}"
+      rclone --no-traverse move "${temp}" "${project}:${bucket}/${directory}"
+    done
   done
 done
 cd "${olddir}" || echo "could not cd to home dir"
 rmdir "${tempdir}"
```
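The key fix in this last hunk is the `projects` assignment: the old line stored the literal words `"${yq}"`, `-r`, the query string and the file path in the array without ever running `yq`; wrapping the call in `$( )` actually executes it and turns its output into the array elements. A minimal sketch of the difference, with a stand-in function instead of the real `yq` query:

```bash
#!/usr/bin/env bash
# Stand-in for the yq call that lists project names, one per line.
list_projects() { printf '%s\n' proj-one proj-two; }

# Broken: the array holds the words of the command line itself.
declare -a projects=( list_projects )
echo "without command substitution: ${projects[*]}"   # -> list_projects

# Fixed: the command runs and its output becomes the array elements.
declare -a projects=( $(list_projects) )
echo "with command substitution:    ${projects[*]}"   # -> proj-one proj-two
```

`mapfile -t projects < <(list_projects)` is a common alternative that also keeps elements intact if they contain spaces.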