Compare commits
574 commits
mandersson...main
66 changed files with 901 additions and 490 deletions
@@ -1,5 +1,9 @@
#!/bin/bash

+if ! [[ $(hostname) =~ monitor ]]; then
+    exit 0
+fi
+
repo="/var/cache/cosmos/repo"
common="${repo}/global/overlay/etc/hiera/data/common.yaml"

@@ -21,7 +25,7 @@ for environment in test prod; do
        version=$(yq -r ".${key}" "${group}")
        print_fact "${customer}" "${environment}" "${version}"
    done
-    for customer in $(yq -r '.fullnodes[]' "${common}") gss; do
+    for customer in $(yq -r '.fullnodes[]' "${common}"); do
        group="${repo}/${customer}-common/overlay/etc/hiera/data/group.yaml"
        version=$(yq -r ".${key}" "${group}")
        print_fact "${customer}" "${environment}" "${version}"

functions/get_customer.pp

@@ -1,14 +1,8 @@
# Let's determine who the customer is by looking at the hostname
function sunetdrive::get_customer() >> String {
-  $hostnameparts = split($facts['fqdn'],'\.')
+  $hostnameparts = split($facts['networking']['fqdn'],'\.')
  if $hostnameparts[1] == 'drive' {
-    if $hostnameparts[0] =~ /^gss/ {
-      return 'gss'
-    } elsif $hostnameparts[0] =~ /^lookup/ {
-      return 'lookup'
-    } else {
-      return 'common'
-    }
+    return 'common'
  } elsif $hostnameparts[0] =~ /idp-proxy/ {
    return 'common'
  }

functions/get_environment.pp

@@ -1,6 +1,6 @@
# Let's determine where we are by looking at the hostname
function sunetdrive::get_environment() >> String {
-  $hostname = $facts['fqdn']
+  $hostname = $facts['networking']['fqdn']
  if $hostname =~ /^.*\.drive\.sunet\.se$/ {
    if $hostname =~ /^.*\.pilot\.drive\.sunet\.se$/ {
      return 'pilot'

functions/get_node_number.pp

@@ -1,4 +1,4 @@
# Let's determine where we are by looking at the hostname
function sunetdrive::get_node_number() >> Integer {
-  Integer(regsubst($::fqdn, /^[a-zA-Z\-]+(\d).*$/, '\\1'))
+  Integer(regsubst($facts['networking']['fqdn'], /^[a-zA-Z\-]+(\d).*$/, '\\1'))
}

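The three function diffs above share one change: legacy top-scope facts ($::fqdn, $facts['fqdn']) are replaced by the structured $facts['networking']['fqdn']. A minimal sketch of the node-number extraction, runnable with puppet apply; the hostname is a made-up example:

    # Structured fact instead of legacy $::fqdn. For 'node3.drive.sunet.se'
    # the capture group picks up the digit '3'.
    $fqdn = $facts['networking']['fqdn']
    $node_number = Integer(regsubst($fqdn, /^[a-zA-Z\-]+(\d).*$/, '\\1'))
    notice("node number ${node_number} on ${fqdn}")
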
manifests/app_type.pp

@@ -5,6 +5,7 @@ define sunetdrive::app_type (
  $override_config = undef,
  $override_compose = undef
) {
+  include sunet::packages::netcat_openbsd
  # Config from group.yaml and customer specific conf
  $environment = sunetdrive::get_environment()
  $customer = sunetdrive::get_customer()
@@ -14,7 +15,6 @@ define sunetdrive::app_type (
    # The config used
    $config = $override_config
    # Other settings
    $admin_password = $config[ 'admin_password' ]
    $dbhost = $config[ 'dbhost' ]
    $dbname = $config[ 'dbname' ]
    $dbuser = $config[ 'dbuser' ]
@@ -30,10 +30,10 @@ define sunetdrive::app_type (
    $config = hiera_hash($environment)
    $skeletondirectory = $config['skeletondirectory']
    # Other settings
    $admin_password = safe_hiera('admin_password')
    $dbhost = 'proxysql_proxysql_1'
    $dbname = 'nextcloud'
    $dbuser = 'nextcloud'
    $full_backup_retention = hiera('full_backup_retention')
    $instanceid = safe_hiera('instanceid')
    $mysql_user_password = safe_hiera('mysql_user_password')
    $passwordsalt = safe_hiera('passwordsalt')
@@ -43,8 +43,8 @@ define sunetdrive::app_type (
    $s3_secret = safe_hiera('s3_secret')
    $secret = safe_hiera('secret')
  }
-  $twofactor_enforced_groups = hiera_array('twofactor_enforced_groups')
-  $twofactor_enforced_excluded_groups = hiera_array('twofactor_enforced_excluded_groups')
+  $twofactor_enforced_groups = $config['twofactor_enforced_groups']
+  $twofactor_enforced_excluded_groups = $config['twofactor_enforced_excluded_groups']
  $nextcloud_version = hiera("nextcloud_version_${environment}")
  $nextcloud_version_string = split($nextcloud_version, '[-]')[0]
  # Common settings for multinode and full nodes
@@ -61,20 +61,16 @@ define sunetdrive::app_type (
    $php_memory_limit_mb = 512
  }
  if $::facts['dockerhost2'] == 'yes' {
-    $hostnet = 'yes'
+    $hostnet = true
  }

  # These are encrypted values from local.eyaml

-  $gss_jwt_key = safe_hiera('gss_jwt_key')
  $smtppassword = safe_hiera('smtp_password')

  #These are global values from common.yaml
  $gs_enabled = hiera('gs_enabled')
  $gs_federation = hiera('gs_federation')
-  $gss_master_admin = hiera_array('gss_master_admin')
-  $gss_master_url = hiera("gss_master_url_${environment}")
-  $lookup_server = hiera("lookup_server_${environment}")
  $mail_domain = hiera("mail_domain_${environment}")
  $mail_smtphost = hiera("mail_smtphost_${environment}")
  $mail_from_address = hiera("mail_from_address_${environment}")
@@ -82,9 +78,6 @@ define sunetdrive::app_type (
  $smtpuser = hiera("smtp_user_${environment}")
  $tug_office = hiera_array('tug_office')

-  # This is a global value from common.yaml but overridden in the gss-servers local.yaml
-  $gss_mode = hiera('gss_mode')
-
  # These are global values from common.yaml but can be overridden in group.yaml
  $drive_email_template_text_left = $config['drive_email_template_text_left']
  $drive_email_template_plain_text_left = $config['drive_email_template_plain_text_left']
@@ -92,9 +85,39 @@ define sunetdrive::app_type (
  $lb_servers = hiera_hash($environment)['lb_servers']
  $document_servers = hiera_hash($environment)['document_servers']

+  # Calculate some values
+  $expiration_months = max(12, $full_backup_retention)
+  $expiration_days_min = $expiration_months * 31
+  $expiration_days_max = $expiration_months * 31 + 93
+
  unless $is_multinode{
    user { 'www-data': ensure => present, system => true }

+    file { '/usr/local/bin/get_containers':
+      ensure  => present,
+      force   => true,
+      owner   => 'root',
+      group   => 'root',
+      content => template('sunetdrive/application/get_containers'),
+      mode    => '0744',
+    }
+    if ($nodenumber == 3) {
+      file { '/usr/local/bin/scan_external_mounts':
+        ensure  => present,
+        force   => true,
+        owner   => 'root',
+        group   => 'root',
+        content => template('sunetdrive/application/scan_external_mounts.sh'),
+        mode    => '0744',
+      }
+      sunet::scriptherder::cronjob { 'scriptherder_scan_external_mounts':
+        cmd           => '/usr/local/bin/scan_external_mounts',
+        hour          => '1',
+        minute        => '20',
+        ok_criteria   => ['exit_status=0','max_age=2d'],
+        warn_criteria => ['exit_status=1','max_age=3d'],
+      }
+    }
    file { '/opt/nextcloud/cron.sh':
      ensure => file,
      owner  => 'root',
@@ -108,16 +131,22 @@ define sunetdrive::app_type (
      minute  => '*/5',
    }
    file { '/opt/nextcloud/user-sync.sh':
-      ensure  => file,
+      ensure => absent,
+    }
+    file { '/usr/local/bin/nocc':
+      ensure  => present,
+      force   => true,
      owner   => 'root',
      group   => 'root',
-      mode    => '0700',
-      content => template('sunetdrive/application/user-sync.erb.sh'),
+      content => template('sunetdrive/application/nocc.erb'),
+      mode    => '0740',
    }
-    -> cron { 'gss_user_sync':
-      command => '/opt/nextcloud/user-sync.sh',
-      user    => 'root',
-      minute  => '*/5',
+    file { '/etc/sudoers.d/99-nocc':
+      ensure  => file,
+      content => "script ALL=(root) NOPASSWD: /usr/local/bin/nocc\n",
+      mode    => '0440',
+      owner   => 'root',
+      group   => 'root',
    }
    file { '/usr/local/bin/occ':
      ensure  => present,
@@ -142,12 +171,21 @@ define sunetdrive::app_type (
      content => template('sunetdrive/application/upgrade23-25.erb.sh'),
      mode    => '0744',
    }
+    file { '/usr/local/bin/remount_user_bucket_as_project.sh':
+      ensure  => present,
+      force   => true,
+      owner   => 'root',
+      group   => 'root',
+      content => template('sunetdrive/application/remount_user_bucket_as_project.sh'),
+      mode    => '0744',
+    }
    file { '/opt/rotate/conf.d/nextcloud.conf':
      ensure  => file,
      force   => true,
      owner   => 'root',
      group   => 'root',
-      content => "#This file is managed by puppet\n#filename:retention days:maxsize mb\n/opt/nextcloud/nextcloud.log:180:256\n",
+      content => "#This file is managed by puppet
+#filename:retention days:maxsize mb\n/opt/nextcloud/nextcloud.log:180:256\n/opt/nextcloud/audit.log:180:256\n",
      mode    => '0644',
    }
    file { '/opt/rotate/conf.d/redis.conf':
@@ -196,7 +234,14 @@ define sunetdrive::app_type (
      force  => true,
      owner  => 'www-data',
      group  => 'root',
-      mode   => '0644',
+      mode   => '0640',
+    }
+    file { '/opt/nextcloud/audit.log':
+      ensure => file,
+      force  => true,
+      owner  => 'www-data',
+      group  => 'root',
+      mode   => '0640',
    }
    file { '/opt/nextcloud/rclone.conf':
      ensure => file,
@@ -293,23 +338,7 @@ define sunetdrive::app_type (
      mode    => '0744',
    }
  }
-  if $location =~ /^gss-test/ {
-    file { '/opt/nextcloud/mappingfile.json':
-      ensure  => present,
-      owner   => 'www-data',
-      group   => 'root',
-      content => template('sunetdrive/application/mappingfile-test.json.erb'),
-      mode    => '0644',
-    }
-  } elsif $location =~ /^gss/ {
-    file { '/opt/nextcloud/mappingfile.json':
-      ensure  => present,
-      owner   => 'www-data',
-      group   => 'root',
-      content => template('sunetdrive/application/mappingfile-prod.json.erb'),
-      mode    => '0644',
-    }
-  } elsif $location =~ /^kau/ {
+  if $location =~ /^kau/ {
    file { '/mnt':
      ensure => directory,
      owner  => 'www-data',

manifests/cloudimage.pp

@@ -38,8 +38,8 @@ define sunetdrive::cloudimage(
    resolver   => ['130.242.80.14', '130.242.80.99'],
    search     => $search,
    #
-    repo       => $::cosmos_repo_origin_url,
-    tagpattern => $::cosmos_tag_pattern,
+    repo       => $facts['cosmos_repo_origin_url'],
+    tagpattern => $facts['cosmos_tag_pattern'],
    #
    cpus       => $cpus,
    memory     => $memory,

manifests/common.pp (new file, 53 lines)

@@ -0,0 +1,53 @@
+
+# Common class
+class sunetdrive::common {
+  include sunet::tools
+  include sunet::motd
+  include apt
+  include apparmor
+  include sunet::packages::jq
+  if $::facts['sunet_nftables_enabled'] != 'yes' {
+    warning('Enabling UFW')
+    include ufw
+  } else {
+    if $facts['networking']['hostname'] =~ /^kube[wc]/ {
+      warning('Setting nftables to installed but disabled')
+      ensure_resource ('class','sunet::nftables::init', { enabled => false })
+    } else {
+      warning('Enabling nftables')
+      ensure_resource ('class','sunet::nftables::init', { })
+    }
+  }
+  package {'sysstat': ensure => 'latest'}
+  package {'needrestart': ensure => installed}
+  service {'sysstat': provider => 'systemd'}
+  file_line { 'enable_sa':
+    ensure  => 'present',
+    line    => 'ENABLED="true"',
+    path    => '/etc/default/sysstat',
+    match   => 'ENABLED="false"',
+    require => Package['sysstat'],
+  }
+  file_line { 'sa_cron_comment':
+    ensure  => 'present',
+    line    => '# Activity reports every 2 minutes everyday',
+    path    => '/etc/cron.d/sysstat',
+    match   => '^#\ Activity\ reports\ every\ 10\ minutes\ everyday',
+    require => Package['sysstat'],
+    notify  => Service['sysstat'],
+  }
+  file_line { 'sa_cron':
+    ensure  => 'present',
+    line    => '*/2 * * * * root command -v debian-sa1 > /dev/null && debian-sa1 1 1',
+    path    => '/etc/cron.d/sysstat',
+    match   => '^5-55/10',
+    require => Package['sysstat'],
+    notify  => Service['sysstat'],
+  }
+  if $::facts['os']['distro']['id'] == 'Debian' {
+    exec { 'sysstat_systemd_timer':
+      command => 'systemctl enable --now sysstat-collect.timer',
+      unless  => 'systemctl is-enabled --quiet sysstat-collect.timer',
+    }
+  }
+}

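The new common class drives sysstat through stdlib's file_line resource. Its semantics are worth spelling out: when a line in path matches the match pattern, that line is replaced with line; when nothing matches, line is appended. A minimal sketch with an assumed path:

    # Rewrites an existing 'ENABLED=...' line to 'ENABLED="true"', or
    # appends it if the file has no such line.
    file_line { 'example_toggle':
      ensure => present,
      path   => '/tmp/example.conf',
      line   => 'ENABLED="true"',
      match  => '^ENABLED=',
    }
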
manifests/db_type.pp

@@ -18,7 +18,7 @@ define sunetdrive::db_type(
  $mariadb_dir = '/etc/mariadb'
  $mycnf_path = 'sunetdrive/mariadb/my.cnf.erb'
  $server_id = 1000 + Integer($facts['networking']['hostname'][-1])
-  ensure_resource('file',$mariadb_dir, { ensure => directory, recurse => true } )
+  ensure_resource('file',$mariadb_dir, { ensure => directory } )
  $dirs = ['datadir', 'init', 'conf', 'backups', 'scripts' ]
  $dirs.each |$dir| {
    ensure_resource('file',"${mariadb_dir}/${dir}", { ensure => directory, recurse => true } )
@@ -33,7 +33,7 @@ define sunetdrive::db_type(
  $ports = [3306, 4444, 4567, 4568]
  if $location =~ /^multinode/ {
    $from = $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6 + $config['kube'] + $config['kube_v6']
-  } elsif $location == 'sunet-test' {
+  } elsif $location == 'sunet-test' or $location == 'sunet-prod' {
    $from = $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6 + $config['imap'] + $config['imap_v6'] + $config['smtp'] + $config['smtp_v6']
  } else {
    $from = $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6
@@ -46,11 +46,7 @@ define sunetdrive::db_type(
  sunet::system_user {'mysql': username => 'mysql', group => 'mysql' }


-  if $location =~ /^lookup/ {
-    $sql_files = ['02-backup_user.sql', '03-proxysql.sql', '05-lookup.sql']
-  } else {
-    $sql_files = ['02-backup_user.sql', '03-proxysql.sql', '04-nextcloud.sql']
-  }
+  $sql_files = ['02-backup_user.sql', '03-proxysql.sql', '04-nextcloud.sql']
  $sql_files.each |$sql_file|{
    file { "${mariadb_dir}/init/${sql_file}":
      ensure => present,
@@ -90,6 +86,11 @@ define sunetdrive::db_type(
    ok_criteria   => ['exit_status=0','max_age=2d'],
    warn_criteria => ['exit_status=1','max_age=3d'],
  }
+  file { '/usr/local/bin/mysql':
+    ensure  => present,
+    content => template('sunetdrive/mariadb/mysql.erb.sh'),
+    mode    => '0744',
+  }
  file { '/usr/local/bin/size-test':
    ensure  => present,
    content => template('sunetdrive/mariadb/size-test.erb'),

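The $server_id expression in the first db_type hunk derives a unique MariaDB server id from the trailing digit of the hostname. A worked example with a made-up hostname:

    # For hostname 'intern-db3', [-1] takes the final character '3',
    # so $server_id evaluates to 1000 + 3 = 1003.
    $hostname  = 'intern-db3'  # stands in for $facts['networking']['hostname']
    $server_id = 1000 + Integer($hostname[-1])
    notice($server_id)
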
manifests/dockerhost.pp

@@ -1,4 +1,3 @@
-include apt
# Wrapper for sunet::dockerhost to do these specific things
class sunetdrive::dockerhost(
  String $version = safe_hiera('docker_version'),

manifests/infra_script.pp

@@ -4,10 +4,8 @@ class sunetdrive::infra_script (
  $location = undef
) {
  $environment = sunetdrive::get_environment()
-  $customer = "common"
+  $customer = 'common'
  $config = hiera_hash($environment)
-  $gss_backup_server = $config['gss_backup_server']
-  $lookup_backup_server = $config['lookup_backup_server']
  $ssh_config = "Host *.sunet.se
  User script
  IdentityFile /root/.ssh/id_script"
@@ -59,14 +57,16 @@ class sunetdrive::infra_script (
    mode    => '0700',
  }
  sunet::scriptherder::cronjob { 'backupgssdb':
-    cmd           => "/root/tasks/backupdb.sh ${gss_backup_server}",
+    ensure        => 'absent',
+    cmd           => 'true',
    hour          => '2',
    minute        => '0',
    ok_criteria   => ['exit_status=0','max_age=2d'],
    warn_criteria => ['exit_status=1','max_age=3d'],
  }
  sunet::scriptherder::cronjob { 'backuplookupdb':
-    cmd           => "/root/tasks/backupdb.sh ${lookup_backup_server}",
+    ensure        => 'absent',
+    cmd           => 'true',
    hour          => '2',
    minute        => '0',
    ok_criteria   => ['exit_status=0','max_age=2d'],

manifests/lookup.pp (deleted file, 42 lines)

@@ -1,42 +0,0 @@
-#Class for SUNET-Drive-Lookup-Server
-class sunetdrive::lookup (
-  $bootstrap = undef,
-  $location = undef
-) {
-
-  $environment = sunetdrive::get_environment()
-
-
-  # Firewall settings
-  $nextcloud_ip = hiera_array("${location}_app", [])
-  $tug_office = hiera_array('tug_office')
-
-  $dbhost = 'proxysql_proxysql_1'
-  $gss_jwt_key = safe_hiera('gss_jwt_key')
-  $mysql_user_password = safe_hiera('mysql_user_password')
-  $lookup_version = hiera("lookup_version_${environment}")
-
-  #Create users
-  user { 'www-data': ensure => present, system => true }
-
-  file { '/opt/lookup/config.php':
-    ensure  => present,
-    owner   => 'www-data',
-    group   => 'root',
-    content => template('sunetdrive/lookup/config.php.erb'),
-    mode    => '0644',
-  }
-
-  sunet::docker_compose { 'drive_lookup_docker_compose':
-    content          => template('sunetdrive/lookup/docker-compose_lookup.yml.erb'),
-    service_name     => 'lookup',
-    compose_dir      => '/opt/',
-    compose_filename => 'docker-compose.yml',
-    description      => 'Lookup server',
-  }
-
-  sunet::misc::ufw_allow { 'https':
-    from => '0.0.0.0/0',
-    port => 443,
-  }
-}

manifests/mariadb.pp

@@ -10,9 +10,9 @@ class sunetdrive::mariadb (
  $quorum_id = $facts['networking']['fqdn']
  $quorum_password = safe_hiera('quorum_password')
  $db = sunetdrive::db_type { 'base_db':
-    bootstrap => $bootstrap,
-    tag_mariadb => $tag_mariadb,
-    location => $location,
+    bootstrap   => $bootstrap,
+    tag_mariadb => $tag_mariadb,
+    location    => $location,
  }
  file { '/etc/quorum.conf':
    ensure => file,

manifests/mariadb_backup.pp

@@ -1,11 +1,13 @@
# This is an asynchronous replica of the MariaDB Cluster for SUNET Drive
class sunetdrive::mariadb_backup($tag_mariadb=undef, $location=undef) {
+  include sunet::packages::netcat_openbsd
  $dirs = [ 'datadir', 'init', 'conf', 'backups' ]
  $dirs.each | $dir | {
    ensure_resource('file',"/opt/mariadb_backup/${dir}", { ensure => directory, recurse => true } )
  }
  # Config from group.yaml
  $environment = sunetdrive::get_environment()
+  $mariadb_version = hiera("mariadb_version_${environment}")
  $config = hiera_hash($environment)
  $first_db = $config['first_db']

manifests/multinode.pp

@@ -5,7 +5,7 @@ class sunetdrive::multinode (
)
{
  include sunet::packages::yq
-  $myname = $facts['hostname']
+  $myname = $facts['networking']['hostname']
  $is_multinode = true;
  $environment = sunetdrive::get_environment()
  $lb_servers = hiera_hash($environment)['lb_servers']
@@ -14,8 +14,6 @@ class sunetdrive::multinode (
  $db_ip = hiera_hash($environment)['db']
  $admin_password = hiera('admin_password')
  $cluster_admin_password = hiera('cluster_admin_password')
-  # This is a global value from common.yaml but overridden in the gss-servers local.yaml
-  $gss_mode = hiera('gss_mode')

  $twofactor_enforced_groups = []
  $twofactor_enforced_excluded_groups = []
@@ -40,6 +38,44 @@ class sunetdrive::multinode (
  user { 'www-data': ensure => present, system => true }
  sunet::system_user {'mysql': username => 'mysql', group => 'mysql' }
  ensure_resource('file', '/opt/nextcloud' , { ensure => directory, recurse => true } )
+  file { '/usr/local/bin/get_containers':
+    ensure  => present,
+    force   => true,
+    owner   => 'root',
+    group   => 'root',
+    content => template('sunetdrive/application/get_containers'),
+    mode    => '0744',
+  }
+  file { '/usr/local/bin/scan_external_mounts':
+    ensure  => present,
+    force   => true,
+    owner   => 'root',
+    group   => 'root',
+    content => template('sunetdrive/application/scan_external_mounts.sh'),
+    mode    => '0744',
+  }
+  sunet::scriptherder::cronjob { 'scriptherder_scan_external_mounts':
+    cmd           => '/usr/local/bin/scan_external_mounts',
+    hour          => '1',
+    minute        => '20',
+    ok_criteria   => ['exit_status=0','max_age=2d'],
+    warn_criteria => ['exit_status=1','max_age=3d'],
+  }
+  file { '/usr/local/bin/nocc':
+    ensure  => present,
+    force   => true,
+    owner   => 'root',
+    group   => 'root',
+    content => template('sunetdrive/application/nocc.erb'),
+    mode    => '0740',
+  }
+  file { '/etc/sudoers.d/99-nocc':
+    ensure  => file,
+    content => "script ALL=(root) NOPASSWD: /usr/local/bin/nocc\n",
+    mode    => '0440',
+    owner   => 'root',
+    group   => 'root',
+  }
  file { '/usr/local/bin/occ':
    ensure => present,
    force  => true,
@@ -74,14 +110,6 @@ class sunetdrive::multinode (
    content => template('sunetdrive/multinode/get_non_paying_customers.erb.sh'),
    mode    => '0744',
  }
-  file { '/usr/local/bin/get_containers':
-    ensure  => present,
-    force   => true,
-    owner   => 'root',
-    group   => 'root',
-    content => template('sunetdrive/multinode/get_containers'),
-    mode    => '0744',
-  }
  file { '/usr/local/bin/restart_and_prune':
    ensure => present,
    force  => true,
@@ -211,26 +239,36 @@ MACAddressPolicy=none'
    hour    => '0',
    weekday => '0',
  }
-  if $nodenumber == '2' {
-    cron { 'add_back_bucket_for_karin_nordgren':
-      command => '(/usr/local/bin/occ nextcloud-kmh_app_1 files_external:list karin_nordgren@kmh.se && /home/script/bin/create_bucket.sh nextcloud-kmh_app_1 karin_nordgren@kmh.se karin-nordgren-drive-sunet-se) || /bin/true',
-      user    => 'root',
-      minute  => '*/10',
-    }
-  }
+  # if $nodenumber == '2' {
+  #   cron { 'add_back_bucket_for_karin_nordgren':
+  #     command => '(/usr/local/bin/occ nextcloud-kmh-app-1 files_external:list karin_nordgren@kmh.se && /home/script/bin/create_bucket.sh nextcloud-kmh-app-1 karin_nordgren@kmh.se karin-nordgren-drive-sunet-se) || /bin/true',
+  #     user    => 'root',
+  #     minute  => '*/10',
+  #   }
+  # }
  $customers.each | $index, $customer | {
+    $customer_config_full = hiera_hash($customer)
+    $customer_config = $customer_config_full[$environment]
    cron { "multinode_cron_${customer}":
-      command => "/opt/nextcloud/cron.sh nextcloud-${customer}_app_1",
+      command => "/opt/nextcloud/cron.sh nextcloud-${customer}-app-1",
      require => File['/opt/nextcloud/cron.sh'],
      user    => 'root',
      minute  => '*/10',
    }
    if $environment == 'prod' {
-      $s3_bucket = "primary-${customer}-drive.sunet.se"
+      if 'primary_bucket' in $customer_config.keys() {
+        $s3_bucket = $customer_config['primary_bucket']
+      } else {
+        $s3_bucket = "primary-${customer}-drive.sunet.se"
+      }
      $site_name = "${customer}.drive.sunet.se"
      $trusted_proxies = ['lb1.drive.sunet.se','lb2.drive.sunet.se', 'lb3.drive.sunet.se', 'lb4.drive.sunet.se']
    } else {
-      $s3_bucket = "primary-${customer}-${environment}.sunet.se"
+      if 'primary_bucket' in $customer_config.keys() {
+        $s3_bucket = $customer_config['primary_bucket']
+      } else {
+        $s3_bucket = "primary-${customer}-${environment}.sunet.se"
+      }
      $site_name = "${customer}.drive.${environment}.sunet.se"
      $trusted_proxies = ["lb1.drive.${environment}.sunet.se","lb2.drive.${environment}.sunet.se",
        "lb3.drive.${environment}.sunet.se","lb4.drive.${environment}.sunet.se"]
@@ -239,8 +277,6 @@ MACAddressPolicy=none'
    $apache_error_path = "/opt/multinode/${customer}/404.html"
    $config_php_path = "/opt/multinode/${customer}/config.php"
    $cron_log_path ="/opt/multinode/${customer}/cron.log"
-    $customer_config_full = hiera_hash($customer)
-    $customer_config = $customer_config_full[$environment]

    $dbhost = 'proxysql_proxysql_1'
    $dbname = "nextcloud_${customer}"
@@ -248,14 +284,12 @@ MACAddressPolicy=none'

    $gs_enabled = hiera('gs_enabled')
    $gs_federation = hiera('gs_federation')
-    $gss_master_admin = hiera_array('gss_master_admin')
-    $gss_master_url = hiera("gss_master_url_${environment}")
    $https_port = hiera_hash('multinode_mapping')[$customer]['port']
-    $lookup_server = hiera("lookup_server_${environment}")
    $mail_domain = hiera("mail_domain_${environment}")
    $mail_from_address = hiera("mail_from_address_${environment}")
    $mail_smtphost = hiera("mail_smtphost_${environment}")
    $nextcloud_log_path ="/opt/multinode/${customer}/nextcloud.log"
+    $audit_log_path ="/opt/multinode/${customer}/audit.log"
    if $customer_config['nextcloud_version'] {
      $nextcloud_version = $customer_config['nextcloud_version']
    } else {
@@ -265,11 +299,16 @@ MACAddressPolicy=none'
    $rclone_conf_path = "/opt/multinode/${customer}/rclone.conf"
    $redis_conf_dir = "/opt/multinode/${customer}/server"
    $redis_conf_path = "${redis_conf_dir}/redis.conf"
-    $redis_host= "redis-${customer}_redis-server_1"
+    if $::facts['sunet_nftables_enabled'] == 'yes' {
+      $redis_host= "redis-${customer}-redis-server-1"
+    } else {
+      $redis_host= "redis-${customer}_redis-server_1"
+    }
+
    $s3_host = $customer_config['s3_host']
    $s3_usepath = hiera('s3_usepath')
    $smtpuser = hiera("smtp_user_${environment}")
-    $trusted_domains = [$site_name, $facts['fqdn'], 'localhost']
+    $trusted_domains = [$site_name, $facts['networking']['fqdn'], 'localhost']
    $tug_office = hiera_array('tug_office')
    if $customer_config['twofactor_enforced_groups'] {
      $twofactor_enforced_groups = $customer_config['twofactor_enforced_groups']
@@ -277,6 +316,15 @@ MACAddressPolicy=none'
    if $customer_config['twofactor_enforced_excluded_groups'] {
      $twofactor_enforced_excluded_groups = $customer_config['twofactor_enforced_excluded_groups']
    }
+    if $customer_config['full_backup_retention'] {
+      $full_backup_retention = $customer_config['full_backup_retention']
+    } else {
+      $full_backup_retention = hiera('full_backup_retention')
+    }
+    # Calculate some values
+    $expiration_months = max(12, $full_backup_retention)
+    $expiration_days_min = $expiration_months * 31
+    $expiration_days_max = $expiration_months * 31 + 93

    # Secrets from local.eyaml
    $admin_password = safe_hiera("${customer}_admin_password")
@@ -289,7 +337,6 @@ MACAddressPolicy=none'
    $secret = safe_hiera("${customer}_secret")
    $passwordsalt= safe_hiera("${customer}_passwordsalt")
    $redis_host_password = safe_hiera("${customer}_redis_host_password")
-    $gss_jwt_key = safe_hiera('gss_jwt_key')
    $smtppassword = safe_hiera('smtp_password')

    $extra_config = {
@@ -376,6 +423,13 @@ MACAddressPolicy=none'
      group   => 'root',
      mode    => '0644',
    }
+    file { $audit_log_path:
+      ensure => file,
+      force  => true,
+      owner  => 'www-data',
+      group  => 'root',
+      mode   => '0644',
+    }
    file { $rclone_conf_path:
      ensure => present,
      owner  => 'www-data',
@@ -391,10 +445,18 @@ MACAddressPolicy=none'
      content => template('sunetdrive/multinode/complete_reinstall.erb.sh'),
      mode    => '0744',
    }
-    # Open ports
-    sunet::misc::ufw_allow { "https_port_${customer}":
-      from => '0.0.0.0',
-      port => $https_port,
+    if $::facts['sunet_nftables_enabled'] == 'yes' {
+      $name = "https_port_${customer}"
+      ensure_resource('sunet::nftables::ufw_allow_compat', $name, {
+        from => ['0.0.0.0/0', '::/0'],
+        port => $https_port,
+      })
+    } else {
+      # Open ports
+      sunet::misc::ufw_allow { "https_port_${customer}":
+        from => '0.0.0.0',
+        port => $https_port,
+      }
    }
  }
}

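A rename that recurs through this class is nextcloud-${customer}_app_1 to nextcloud-${customer}-app-1: Docker Compose v2 joins container-name parts with hyphens where v1 used underscores. The diff gates the matching redis name on the sunet_nftables_enabled fact; a sketch of the same pattern for the app container ($app_container is an assumed variable name, not from the diff):

    # Same conditional as the $redis_host change above, applied to the
    # app container name.
    if $::facts['sunet_nftables_enabled'] == 'yes' {
      $app_container = "nextcloud-${customer}-app-1"  # Compose v2 naming
    } else {
      $app_container = "nextcloud-${customer}_app_1"  # Compose v1 naming
    }
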
manifests/multinode_db.pp

@@ -14,4 +14,61 @@ class sunetdrive::multinode_db(){
      mode    => '0744',
    }
  }
+  if $facts["networking"]["fqdn"] =~ /^multinode-db1\.drive\.(test\.){1}sunet\.se$/ {
+    $statistics_secret = safe_hiera('statistics_secret')
+    notify { 'hostmessage':
+      message => 'We are on multinode-db1. Set up statistics environment.',
+    }
+    $custdata=$customers.reduce({}) |$memo, $value| {
+      $memo + {$value => lookup($value)}
+    }
+    $rclone_url = 'https://downloads.rclone.org/rclone-current-linux-amd64.deb'
+    $local_path = '/tmp/rclone-current-linux-amd64.deb'
+    exec { 'rclone_deb':
+      command => "/usr/bin/wget -q ${rclone_url} -O ${local_path}",
+      creates => $local_path,
+    }
+    package { 'rclone':
+      ensure   => installed,
+      provider => dpkg,
+      source   => $local_path,
+      require  => Exec['rclone_deb'],
+    }
+    file { '/root/.rclone.conf':
+      ensure  => file,
+      content => template('sunetdrive/mariadb_backup/rclone.conf.erb'),
+      owner   => 'root',
+      group   => 'root',
+      mode    => '0600',
+    }
+    file { '/root/tasks/listusersbydep.sh':
+      ensure  => file,
+      content => template('sunetdrive/mariadb/listusersdep.sh.erb'),
+      owner   => 'root',
+      group   => 'root',
+      mode    => '0700',
+    }
+    file { '/root/tasks/genusersondepartmentlists.sh':
+      ensure  => file,
+      content => template('sunetdrive/mariadb/genuserdeplists.sh.erb'),
+      owner   => 'root',
+      group   => 'root',
+      mode    => '0700',
+    }
+    file {'/opt/mariadb/statistics/custdata.json':
+      ensure  => file,
+      content => template('sunetdrive/mariadb/custconfig.json.erb'),
+      owner   => 'root',
+      group   => 'root',
+      mode    => '0600',
+    }
+    sunet::scriptherder::cronjob { 'genuserdeplists':
+      cmd           => '/root/tasks/genusersondepartmentlists.sh',
+      hour          => '2',
+      minute        => '5',
+      ok_criteria   => ['exit_status=0','max_age=30h'],
+      warn_criteria => ['exit_status=1', 'max_age=60h'],
+    }
+  }
}
+

manifests/nrpe.pp (new file, 79 lines)

@@ -0,0 +1,79 @@
+
+# NRPE class
+class sunetdrive::nrpe(
+  $loadw = '15,10,5',
+  $loadc = '30,25,20',
+  $procsw = 150,
+  $procsc = 200,
+) {
+
+  require apt
+  class { 'sunet::nagios':
+    command_timeout => 600,
+    loadw           => $loadw,
+    loadc           => $loadc,
+    procsw          => $procsw,
+    procsc          => $procsc,
+  }
+  package {'nagios-plugins-contrib': ensure => latest}
+  if ($facts['os']['name'] == 'Ubuntu' and versioncmp($facts['os']['release']['full'], '22.04') >= 0 ){
+    $mem_w = '90'
+    $mem_c = '95'
+  } else {
+    $mem_w = '10'
+    $mem_c = '5'
+  }
+  $checks = ['nrpe_check_memory']
+  $checks.each |$check| {
+    ensure_resource("sunet::nagios::${check}", "nagios-nrpe-${check}")
+  }
+  sunet::nagios::nrpe_command {'check_entropy':
+    command_line => '/usr/lib/nagios/plugins/check_entropy -w 256'
+  }
+  sunet::nagios::nrpe_command {'check_ntp_time':
+    command_line => '/usr/lib/nagios/plugins/check_ntp_time -H ntp.se'
+  }
+  sunet::nagios::nrpe_command {'check_scriptherder':
+    command_line => '/usr/local/bin/scriptherder --mode check'
+  }
+  sunet::sudoer {'nagios_run_needrestart_command':
+    user_name    => 'nagios',
+    collection   => 'nrpe_needrestart_check',
+    command_line => '/usr/sbin/needrestart -p -l'
+  }
+  sunet::sudoer {'nagios_run_galera_command':
+    user_name    => 'nagios',
+    collection   => 'nrpe_galera_check',
+    command_line => '/usr/lib/nagios/plugins/check_galera_cluster -w 2 -c 1 -0'
+  }
+  sunet::nagios::nrpe_command {'check_galera_cluster':
+    command_line => '/usr/bin/sudo /usr/lib/nagios/plugins/check_galera_cluster -w 2 -c 1 -0'
+  }
+  sunet::sudoer {'nagios_run_proxysql_command':
+    user_name    => 'nagios',
+    collection   => 'nrpe_proxysql_check',
+    command_line => '/usr/lib/nagios/plugins/check_proxysql_server, /usr/lib/nagios/plugins/check_mysql_server_status'
+  }
+  sunet::nagios::nrpe_command {'check_proxysql_server':
+    command_line => '/usr/bin/sudo /usr/lib/nagios/plugins/check_proxysql_server'
+  }
+  sunet::sudoer {'nagios_run_replication_command':
+    user_name    => 'nagios',
+    collection   => 'nrpe_replication_check',
+    command_line => '/usr/local/bin/check_replication'
+  }
+  sunet::nagios::nrpe_command {'check_async_replication':
+    command_line => '/usr/bin/sudo /usr/local/bin/check_replication'
+  }
+  sunet::sudoer {'nagios_run_backup_command':
+    user_name    => 'nagios',
+    collection   => 'nrpe_backup_check',
+    command_line => '/usr/local/bin/check_backups'
+  }
+  sunet::nagios::nrpe_command {'check_backups':
+    command_line => '/usr/bin/sudo /usr/local/bin/check_backups'
+  }
+  sunet::nagios::nrpe_command {'check_mysql_server_status':
+    command_line => '/usr/bin/sudo /usr/lib/nagios/plugins/check_mysql_server_status'
+  }
+}

manifests/onlyoffice.pp

@@ -16,14 +16,14 @@ class sunetdrive::onlyoffice () {
    command => "docker login registry.gitlab.collabora.com -u ${token_name} -p ${access_token}",
  }
  sunet::collabora::docs { 'sunet-onlyoffice':
-    dns => [ '89.32.32.32' ],
-    extra_hosts => $extra_hosts,
+    dns           => [ '89.32.32.32' ],
+    extra_hosts   => $extra_hosts,
    extra_volumes => ['/opt/collabora/coolwsd.xml:/etc/coolwsd/coolwsd.xml'],
-    docker_tag => $docker_tag,
-    docker_image => 'registry.gitlab.collabora.com/productivity/collabora-online-nc',
+    docker_tag    => $docker_tag,
+    docker_image  => 'registry.gitlab.collabora.com/productivity/collabora-online-nc',
  }
  file {'/opt/collabora/coolwsd.xml':
-    ensure => present,
+    ensure  => present,
    content => template('sunetdrive/document/coolwsd.xml.erb'),
  }
}

manifests/proxysql.pp

@@ -4,6 +4,7 @@ class sunetdrive::proxysql (
  $location = undef,
  $proxysql_container_name = 'proxysql_proxysql_1',
  $manage_config = true,
+  $manage_network = true,
) {

  # Config from group.yaml
@@ -11,6 +12,8 @@ class sunetdrive::proxysql (
  $config = hiera_hash($environment)
  $db_ip = $config['db']
  $nextcloud_ip = $config['app']
+  $nextcloud_ipv6 = $config['app_v6']
+  $nextcloud_ip_all = $nextcloud_ip + $nextcloud_ipv6
  $proxysql_ok_num = length($nextcloud_ip)
  $proxysql_warn_num = $proxysql_ok_num - 1

@@ -26,8 +29,8 @@ class sunetdrive::proxysql (
  $mysql_user = safe_hiera('mysql_user')

  $transaction_persistent = 1
-  if $::facts['dockerhost2'] == 'yes' {
-    $hostnet = 'yes'
+  if $::facts['dockerhost2'] == 'yes' and $manage_network {
+    $hostnet = true
  }

  file { '/usr/local/bin/proxysql':
@@ -78,7 +81,7 @@ class sunetdrive::proxysql (
    iif => 'ens3',
  }
  sunet::nftables::docker_expose { 'proxysql':
-    allow_clients => ['any'],
+    allow_clients => $nextcloud_ip_all,
    port          => 6032,
    iif           => 'ens3',
  }

manifests/redis_cluster.pp

@@ -17,4 +17,10 @@ class sunetdrive::redis_cluster (
    content => template('sunetdrive/redis_cluster/reset_cluster.erb.sh'),
    mode    => '0700',
  }
+  exec { 'set_permissions_directory':
+    command => 'chown -R 999:root /opt/redis/node-*'
+  }
+  exec { 'set_permissions_files':
+    command => 'chown -R 999:999 /opt/redis/node-*/*'
+  }
}
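Two caveats on the exec resources just added: without an unless/onlyif/refreshonly guard they run on every agent run, and a bare exec command is not passed through a shell, so the /opt/redis/node-* glob only expands with the shell provider. A hedged sketch of an idempotent variant (the find-based guard is an assumption, not part of the diff):

    exec { 'set_permissions_directory':
      command  => 'chown -R 999:root /opt/redis/node-*',
      onlyif   => 'find /opt/redis -maxdepth 1 -name "node-*" ! -user 999 | grep -q .',
      provider => shell,
      path     => ['/usr/bin', '/bin'],
    }
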
manifests/satosa.pp

@@ -29,17 +29,23 @@ class sunetdrive::satosa($dehydrated_name=undef,$image='docker.sunet.se/satosa',
      }
    }
  }
-  sunet::docker_run {'satosa':
-    image    => $image,
-    imagetag => $tag,
-    dns      => ['89.32.32.32'],
-    volumes  => ['/etc/satosa:/etc/satosa','/etc/dehydrated:/etc/dehydrated'],
-    ports    => ['443:8000'],
-    env      => ['METADATA_DIR=/etc/satosa/metadata', 'WORKER_TIMEOUT=120']
+  $dehydrated_status = $dehydrated_name ? {
+    undef   => 'absent',
+    default => 'present'
  }
+  file {'/opt/docker_run':
+    ensure => 'absent',
+  }
+  sunet::docker_compose { 'satosa':
+    content          => template('sunetdrive/satosa/docker-compose.yml.erb'),
+    service_name     => 'satosa',
+    compose_dir      => '/opt/',
+    compose_filename => 'docker-compose.yml',
+    description      => 'Satosa',
+  }
  file {'/etc/satosa/proxy_conf.yaml':
    content => inline_template("<%= @merged_conf.to_yaml %>\n"),
-    notify  => Sunet::Docker_run['satosa']
+    notify  => Sunet::Docker_compose['satosa']
  }
  $plugins = hiera('satosa_config')
  sort(keys($plugins)).each |$n| {
@@ -47,23 +53,13 @@ class sunetdrive::satosa($dehydrated_name=undef,$image='docker.sunet.se/satosa',
    $fn = $plugins[$n]
    file { $fn:
      content => inline_template("<%= @conf.to_yaml %>\n"),
-      notify  => Sunet::Docker_run['satosa']
+      notify  => Sunet::Docker_compose['satosa']
    }
  }
  sunet::misc::ufw_allow { 'satosa-allow-https':
    from => 'any',
    port => '443'
  }
-  $dehydrated_status = $dehydrated_name ? {
-    undef   => 'absent',
-    default => 'present'
-  }
-  sunet::docker_run {'alwayshttps':
-    ensure => $dehydrated_status,
-    image  => 'docker.sunet.se/always-https',
-    ports  => ['80:80'],
-    env    => ['ACME_URL=http://acme-c.sunet.se']
-  }
  sunet::misc::ufw_allow { 'satosa-allow-http':
    ensure => $dehydrated_status,
    from   => 'any',
@@ -78,12 +74,6 @@ class sunetdrive::satosa($dehydrated_name=undef,$image='docker.sunet.se/satosa',
    cert_file => '/etc/satosa/https.crt'
  }
  }
-  file { '/opt/satosa':
-    ensure => directory,
-    owner  => 'root',
-    group  => 'root',
-    mode   => '0755',
-  }
  -> file { '/opt/satosa/restart.sh':
    ensure => file,
    owner  => 'root',

|
|||
$location = undef
|
||||
) {
|
||||
include sunet::packages::python3_pip
|
||||
include sunet::packages::kopia
|
||||
$environment = sunetdrive::get_environment()
|
||||
$customer = sunetdrive::get_customer()
|
||||
$apikey_test = safe_hiera('monitor_apikey_test')
|
||||
|
@ -20,6 +21,7 @@ class sunetdrive::script (
|
|||
$local_path = '/tmp/rclone-current-linux-amd64.deb'
|
||||
$singlenodes = lookup('singlenodes')
|
||||
$multinodes = keys(lookup('multinode_mapping'))
|
||||
$extra_backup_jobs = pick($config['extra_backup_jobs'], {})
|
||||
|
||||
if $customer == 'mdu' {
|
||||
$eppn_suffix = 'mdh.se'
|
||||
|
@ -55,6 +57,17 @@ class sunetdrive::script (
|
|||
}
|
||||
$site_name = $config['site_name']
|
||||
$user_bucket_name = $config['user_bucket_name']
|
||||
if $config['user_scans'] {
|
||||
$config['user_scans'].each |$job| {
|
||||
sunet::scriptherder::cronjob { $job['name']:
|
||||
cmd => "ssh -t -l script ${job['server']} /usr/bin/sudo /usr/local/bin/occ ${job['container']} files:scan ${job['user']}",
|
||||
hour => $job['hour'],
|
||||
minute => $job['minute'],
|
||||
ok_criteria => ['exit_status=0','max_age=1d'],
|
||||
warn_criteria => ['exit_status=1','max_age=2d'],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# It is a start that will get us user buckets and primary buckets
|
||||
$backup_projects = $location
|
||||
|
@ -96,6 +109,14 @@ class sunetdrive::script (
|
|||
unless => "python3 -m pip list | grep drive-utils | grep ${drive_version}",
|
||||
require => Package['python3'],
|
||||
}
|
||||
file { '/opt/backups':
|
||||
ensure => directory,
|
||||
mode => '0700'
|
||||
}
|
||||
file { '/opt/backups/scripts':
|
||||
ensure => directory,
|
||||
mode => '0700'
|
||||
}
|
||||
file { '/root/.ssh/':
|
||||
ensure => directory,
|
||||
mode => '0700',
|
||||
|
@ -233,8 +254,11 @@ class sunetdrive::script (
|
|||
group => 'root',
|
||||
mode => '0700',
|
||||
}
|
||||
file { '/root/tasks/backupsinglenodedb.sh':
|
||||
ensure => absent,
|
||||
file_line { 'FIXME_remove_when_s3_migration_done_in_sto3':
|
||||
ensure => 'present',
|
||||
line => '37.156.195.53 s3.sto3.safedc.net',
|
||||
path => '/etc/hosts',
|
||||
match => '^37.156.195.53',
|
||||
}
|
||||
if $environment == 'test' {
|
||||
sunet::scriptherder::cronjob { 'reboot-customer':
|
||||
|
@ -249,7 +273,7 @@ class sunetdrive::script (
|
|||
if $customer in ['extern', 'gih', 'hkr', 'suni', 'common', 'su', 'lnu'] {
|
||||
sunet::scriptherder::cronjob { 'makebuckets':
|
||||
ensure => absent,
|
||||
cmd => 'bin/true',
|
||||
cmd => 'bin/true',
|
||||
}
|
||||
} else {
|
||||
sunet::scriptherder::cronjob { 'makebuckets':
|
||||
|
@ -334,6 +358,13 @@ class sunetdrive::script (
|
|||
group => 'root',
|
||||
mode => '0700',
|
||||
}
|
||||
file { '/opt/backups/scripts/hb.sh':
|
||||
ensure => file,
|
||||
content => template('sunetdrive/script/backup-hb.erb.sh'),
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0700',
|
||||
}
|
||||
sunet::scriptherder::cronjob { 'backupmultinodedb':
|
||||
cmd => '/root/tasks/backupmultinodedb.sh',
|
||||
hour => '2',
|
||||
|
@ -360,7 +391,7 @@ class sunetdrive::script (
|
|||
$singlenodes.each | $singlenode| {
|
||||
$multinode = hiera_hash('multinode_mapping')[$singlenode]['server']
|
||||
$multinodeserver = "${multinode}.${site_name}"
|
||||
$nccontainer = "nextcloud-${singlenode}_app_1"
|
||||
$nccontainer = "nextcloud-${singlenode}-app-1"
|
||||
|
||||
sunet::scriptherder::cronjob { "backup${singlenode}db":
|
||||
ensure => absent,
|
||||
|
@ -390,17 +421,17 @@ class sunetdrive::script (
|
|||
}
|
||||
}
|
||||
}
|
||||
$gss_backup_server = $config['gss_backup_server']
|
||||
$lookup_backup_server = $config['lookup_backup_server']
|
||||
sunet::scriptherder::cronjob { 'backupgssdb':
|
||||
cmd => "/root/tasks/backupdb.sh ${gss_backup_server}",
|
||||
ensure => 'absent',
|
||||
cmd => 'true',
|
||||
hour => '2',
|
||||
minute => '0',
|
||||
ok_criteria => ['exit_status=0','max_age=2d'],
|
||||
warn_criteria => ['exit_status=1','max_age=3d'],
|
||||
}
|
||||
sunet::scriptherder::cronjob { 'backuplookupdb':
|
||||
cmd => "/root/tasks/backupdb.sh ${lookup_backup_server}",
|
||||
ensure => 'absent',
|
||||
cmd => 'true',
|
||||
hour => '2',
|
||||
minute => '0',
|
||||
ok_criteria => ['exit_status=0','max_age=2d'],
|
||||
|
|
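The user_scans loop added to this class reads a list of hashes from group config and turns each entry into a scriptherder cron job. A sketch of the shape it expects, written as a Puppet literal with made-up values (the real data would live in hiera):

    $user_scans = [{
      'name'      => 'scan-example-user',
      'server'    => 'node1.drive.test.sunet.se',
      'container' => 'nextcloud-example-app-1',
      'user'      => 'user@example.se',
      'hour'      => '3',
      'minute'    => '15',
    }]
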
manifests/scriptreceiver.pp

@@ -64,12 +64,12 @@ class sunetdrive::scriptreceiver()
  }
  file { '/root/.bashrc':
    ensure  => file,
-    content => template('sunetdrive/scriptreceiver/baschrc.erb.sh'),
+    content => template('sunetdrive/scriptreceiver/bashrc.erb.sh'),
    mode    => '0644',
    owner   => 'root',
    group   => 'root',
  }
-  file { "/etc/sudoers.d/99-safer_reboot":
+  file { '/etc/sudoers.d/99-safer_reboot':
    ensure  => file,
    content => "script ALL=(root) NOPASSWD: /usr/local/bin/safer_reboot\n",
    mode    => '0440',

manifests/sitemonitornaemon.pp

@@ -7,7 +7,7 @@ class sunetdrive::sitemonitornaemon() {
  $tls_servers_with_port = hiera_array('tls_servers_with_port')
  $nextcloud_version_prod = split(hiera('nextcloud_version_prod'),'[-]')[0]
  $nextcloud_version_test = split(hiera('nextcloud_version_test'),'[-]')[0]
-  $monitorhost = $::fqdn
+  $monitorhost = $facts['networking']['fqdn']
  $environment = sunetdrive::get_environment()
  $influx_passwd = safe_hiera('influx_passwd')
  $slack_url = safe_hiera('slack_url')
@@ -42,11 +42,15 @@ class sunetdrive::sitemonitornaemon() {
  }
  file { '/etc/naemon/conf.d/sunetdrive_thruk_templates.conf':
    ensure  => present,
    owner   => 'naemon',
    group   => 'naemon',
    content => template('sunetdrive/monitor/sunetdrive_thruk_templates.conf.erb'),
    mode    => '0644',
  }
+  nagioscfg::service {'check_scriptherder':
+    hostgroup_name => ['sunetdrive::nrpe'],
+    check_command  => 'check_nrpe_1arg_to300!check_scriptherder',
+    description    => 'Scriptherder Status',
+    contact_groups => ['naemon-admins'],
+  }
  nagioscfg::service {'check_galera_cluster':
    hostgroup_name => ['galera_monitor'],
    check_command  => 'check_nrpe_1arg!check_galera_cluster',

@@ -3,7 +3,6 @@
config_php='/var/www/html/config/config.php'
dbhost="<%= @dbhost %>"
mysql_user_password="<%= @mysql_user_password %>"
-admin_password="<%= @admin_password %>"
location="<%= @location %>"
bucket="<%= @s3_bucket %>"

@@ -14,6 +13,9 @@ if [[ "${user_input}" == "IKnowWhatIAmDoing" ]]; then
    echo "WARNING: This will delete everything in the database and reinstall Nextcloud."
    echo "You have 10 seconds to abort by hitting CTRL/C"
    sleep 10s
+    echo "Setting temp admin password"
+    apt update && apt install -y apg
+    admin_password="$(apg -m 40 | head -1)"
    echo "Ok, proceeding."
    echo "Dropping database in 3 seconds"
    sleep 3s
@@ -48,6 +50,11 @@ EOF
instanceid=$(grep -E "^  'instanceid'" ${config_php} | awk -F "'" '{print $4}')
secret=$(grep -E "^  'secret'" ${config_php} | awk -F "'" '{print $4}')
passwordsalt=$(grep -E "^  'passwordsalt'" ${config_php} | awk -F "'" '{print $4}')
+echo "Now delete the admin user:"
+echo "  occ user:delete admin"
+echo "and then create a new admin user:"
+echo "  /usr/local/bin/add_admin_user <username> <email address>"
+echo ""
echo "Please use edit-secrets to add these variables to all Nextcloud servers:"
echo "instanceid: DEC::PKCS7[${instanceid}]!"
echo "secret: DEC::PKCS7[${secret}]!"

@ -1,10 +1,5 @@
|
|||
<?php
|
||||
$CONFIG = array (
|
||||
'app_install_overwrite' =>
|
||||
array (
|
||||
0 => 'globalsiteselector',
|
||||
),
|
||||
|
||||
'apps_paths' =>
|
||||
array (
|
||||
0 =>
|
||||
|
@ -48,28 +43,12 @@ $CONFIG = array (
'gs.enabled' => '<%= @gs_enabled %>',
'gs.federation' => '<%= @gs_federation %>',
'gs.trustedHosts' => ['*.sunet.se'],
'gss.discovery.manual.mapping.file' => '/var/www/html/mappingfile.json',
'gss.discovery.manual.mapping.parameter' => 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6',
'gss.discovery.manual.mapping.regex' => true,
'gss.jwt.key' => '<%= @gss_jwt_key %>',
'gss.master.admin' =>
array (
<%- index = 0 -%>
<%- @gss_master_admin.each do |item| -%>
<%= index %> => '<%= item %>',
<%- index += 1 -%>
<%- end -%>
),
'gss.master.url' => '<%= @gss_master_url %>',
'gss.mode' => '<%= @gss_mode %>',
'gss.user.discovery.module' => '\\OCA\\GlobalSiteSelector\\UserDiscoveryModules\\ManualUserMapping',
'gss.username_format' => 'sanitize',
'htaccess.RewriteBase' => '/',
'installed' => true,
'instanceid' => '<%= @instanceid %>',
'integrity.check.disabled' => true,
'log_type' => 'file',
'loglevel' => 0,
'lookup_server' => '<%= @lookup_server %>',
'loglevel' => 1,
'mail_domain' => '<%= @mail_domain %>',
'mail_from_address' => '<%= @mail_from_address %>',
'mail_sendmailmode' => 'smtp',
@ -97,6 +76,7 @@ $CONFIG = array (
'region' => 'us-east-1',
'hostname' => '<%= @s3_host %>',
'port' => '',
'useMultipartCopy' => false,
'objectPrefix' => 'urn:oid:',
'autocreate' => false,
'use_ssl' => true,
@ -104,29 +84,11 @@ $CONFIG = array (
'legacy_auth' => false,
),
),
'overwrite.cli.url' => 'https://<%= @site_name %>',
'overwrite.cli.url' => 'https://<%= @site_name %>/',
'overwritehost' => '<%= @site_name %>',
'overwriteprotocol' => 'https',
'passwordsalt' => '<%= @passwordsalt %>',
<% if @location == 'gss-test' -%>
'redis.cluster' => [
'failover_mode' => \RedisCluster::FAILOVER_ERROR,
'password' => '<%= @redis_cluster_password %>',
'read_timeout' => 0.0,
'seeds' => [
'redis1.drive.test.sunet.se:6379',
'redis2.drive.test.sunet.se:6379',
'redis3.drive.test.sunet.se:6379',
'redis1.drive.test.sunet.se:6380',
'redis2.drive.test.sunet.se:6380',
'redis3.drive.test.sunet.se:6380',
'redis1.drive.test.sunet.se:6381',
'redis2.drive.test.sunet.se:6381',
'redis3.drive.test.sunet.se:6381'
],
'timeout' => 1.1
],
<% elsif @environment == 'test' && ! @is_multinode -%>
<% if @environment == 'test' && ! @is_multinode -%>
'redis.cluster' => [
'failover_mode' => \RedisCluster::FAILOVER_ERROR,
'password' => '<%= @redis_cluster_password %>',
@ -144,7 +106,7 @@ $CONFIG = array (
],
'timeout' => 1.1
],
<% elsif @environment == 'prod' && ! @is_multinode && @location != 'gss-prod' -%>
<% elsif @environment == 'prod' && ! @is_multinode -%>
'redis.cluster' => [
'failover_mode' => \RedisCluster::FAILOVER_ERROR,
'password' => '<%= @redis_cluster_password %>',
@ -173,6 +135,9 @@ $CONFIG = array (
'secret' => '<%= @secret %>',
'skeletondirectory' => '<%= @skeletondirectory %>',
'templatedirectory' => '',
<% if @environment == 'test' -%>
'trashbin_retention_obligation' => 'auto, 30',
<% end -%>
'trusted_domains' =>
array (
<%- index = 0 -%>
@ -195,20 +160,27 @@ $CONFIG = array (
array (
0 => 'admin',
1 => 'forcemfa',
<%- index = 2 -%>
<%- @twofactor_enforced_groups.each do |item| -%>
<%- if @twofactor_enforced_groups -%>
<%- index = 2 -%>
<%- @twofactor_enforced_groups.each do |item| -%>
<%= index %> => '<%= item %>',
<%- index += 1 -%>
<%- index += 1 -%>
<%- end -%>
<%- end -%>
),
'twofactor_enforced_excluded_groups' =>
array (
<%- index = 0 -%>
<%- @twofactor_enforced_excluded_groups.each do |item| -%>
<%- if @twofactor_enforced_excluded_groups -%>
<%- index = 0 -%>
<%- @twofactor_enforced_excluded_groups.each do |item| -%>
<%= index %> => '<%= item %>',
<%- index += 1 -%>
<%- index += 1 -%>
<%- end -%>
<%- end -%>
),
'updatechecker' => false,
'version' => '<%= @nextcloud_version_string %>',
<% if @environment == 'test' -%>
'versions_retention_obligation' => '<%= @expiration_days_min %>, <%= @expiration_days_max %>',
<% end -%>
);
@ -9,8 +9,6 @@ services:
<%- if @hostnet -%>
network_mode: host
<%- end -%>
environment:
- NC_PASS=<%= @admin_password%>
volumes:
- /opt/nextcloud/000-default.conf:/etc/apache2/sites-enabled/000-default.conf
- /opt/nextcloud/mpm_prefork.conf:/etc/apache2/mods-available/mpm_prefork.conf
@ -21,13 +19,11 @@ services:
- /opt/nextcloud/complete_reinstall.sh:/complete_reinstall.sh
- /opt/nextcloud/config.php:/var/www/html/config/config.php
- /opt/nextcloud/nextcloud.log:/var/www/html/data/nextcloud.log
- /opt/nextcloud/audit.log:/var/www/html/data/audit.log
- /opt/nextcloud/rclone.conf:/rclone.conf
<%- if @skeletondirectory -%>
- /opt/nextcloud/skeleton:<%= @skeletondirectory %>
<%- end -%>
<%- if @location =~ /^gss/ -%>
- /opt/nextcloud/mappingfile.json:/var/www/html/mappingfile.json
<%- end -%>
<% if @location =~ /^kau/ -%>
- /mnt:/opt/tmp/
<%- end -%>
@ -37,14 +33,12 @@ services:
- proxysql_proxysql
<%- end -%>
dns:
- 89.46.20.75
- 89.46.21.29
- 89.32.32.32
<%- if !@hostnet -%>
ports:
- 443:443
<%- end -%>
command: sh -c 'tail -f /var/www/html/data/nextcloud.log | tee -a /proc/1/fd/2 & apachectl -D FOREGROUND'
command: sh -c 'tail -F /var/www/html/data/nextcloud.log /var/www/html/data/audit.log| tee -a /proc/1/fd/2 & apachectl -D FOREGROUND'
tty: true

<%- if !@hostnet -%>
templates/application/nocc.erb (new executable file, 21 lines)
@ -0,0 +1,21 @@
#!/bin/bash

if [[ "${1}" =~ ^nextcloud ]]; then
container=${1}
shift
else
container="nextcloud_app_1"
fi


oc_list=$(env| grep 'OC_')
if [[ "x${oc_list}" != "x" ]]; then
for row in $(echo "${oc_list}"); do
MY_VARS="${MY_VARS} -e ${row}"
done
fi

docker exec -ti ${MY_VARS} -u www-data ${container} php --define apc.enable_cli=1 /var/www/html/occ "$@"
exit 0
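A quick usage sketch for the nocc wrapper above: the first argument is treated as a container name only if it matches ^nextcloud, otherwise nextcloud_app_1 is assumed, and any OC_* variables in the calling environment are forwarded into the container. The multinode container name below is illustrative:

# Run occ against the default container:
nocc status
# Run occ in a specific container, forwarding OC_PASS to occ:
OC_PASS='s3cret' nocc nextcloud-extern-app-1 user:resetpassword --password-from-env someuser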
templates/application/remount_user_bucket_as_project.sh (new executable file, 79 lines)
@ -0,0 +1,79 @@
#!/bin/bash

mountid="${1}"
user="${2}"
container="${3}"
if [[ -z ${mountid} ]] || [[ -z ${user} ]]; then
echo "We need a valid mount id and user to proceed"
echo "Usage: ${0} <mountid> <user> [<container>]"
exit
fi

if [[ -z ${container} ]]; then
container="nextcloud_app_1"
fi

occ="/usr/local/bin/occ ${container}"
function get_config {
${occ} files_external:config ${mountid} ${1} | tr -d '\n\t\r'
}

echo "Gathering information, hang tight."

echo -n "."
bucket="$(get_config bucket)"
echo -n "."
hostname="$(get_config hostname)"
echo -n "."
key="$(get_config key)"
echo -n "."
region="$(get_config region)"
echo -n "."
secret="$(get_config secret)"
jsonfile="/tmp/${user}-user-bucket.json"
mount_point="${user/@/-}"
mount_point="${mount_point/./-}-user-bucket"

echo "This will remount the user bucket with mountid ${mountid} for ${user} as project bucket with mountpoint ${mount_point}."
read -r -p "Press enter to continue"

echo '
[
{
"mount_point": "\/'${mount_point}'",
"storage": "\\OCA\\Files_External\\Lib\\Storage\\AmazonS3",
"authentication_type": "amazons3::accesskey",
"configuration": {
"bucket": "'${bucket}'",
"hostname": "'${hostname}'",
"key": "'${key}'",
"legacy_auth": false,
"port": "443",
"region": "'${region}'",
"secret": "'${secret}'",
"storageClass": "",
"useMultipartCopy": false,
"use_path_style": true,
"use_ssl": true
},
"options": {
"encrypt": true,
"previews": true,
"enable_sharing": true,
"filesystem_check_changes": 0,
"encoding_compatibility": false,
"readonly": false
},
"applicable_users": [
],
"applicable_groups": ["admin"]
}
]
' > "${jsonfile}"


docker cp ${jsonfile} ${container}:/${jsonfile}
${occ} files_external:import /${jsonfile}
docker exec ${container} rm /${jsonfile}
rm ${jsonfile}
${occ} files_external:delete ${mountid}
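A usage sketch for the script above (the mount id and user are illustrative); the id of the user bucket can be looked up with files_external:list first:

# Find the mount id of the user bucket:
nocc nextcloud_app_1 files_external:list --all
# Remount mount id 42 for the user as a project bucket:
./remount_user_bucket_as_project.sh 42 someuser@example.se nextcloud_app_1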
templates/application/scan_external_mounts.sh (new file, 34 lines)
@ -0,0 +1,34 @@
#!/bin/bash

error_ids=""
# Only run if this is the only instance of this script running
# note: since this script forks to run pgrep, we need -eq 2 here
# shellcheck disable=SC2126
if [[ $(pgrep -a -f "${0}" | grep -v scriptherder | wc -l) -eq 2 ]]; then
# We sleep a deterministic amount of time, which will be between 0 and 128 minutes and always the same within
# a specific host, but will differ between hosts
sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk '{print $1}') / 2))m
errors=''
for container in $(get_containers); do
error_ids="${error_ids} ${container}: "
for id in $(nocc "${container}" files_external:list --all --output json | jq '.[].mount_id' | jq .); do
nocc "${container}" files_external:scan "${id}" | grep Error
# shellcheck disable=SC2181
if [[ ${?} -eq 0 ]]; then
errors="${errors} ${id}"
error_ids="${error_ids} ${id}"
fi
done
done
else
echo "Another instance of this script is already running, exiting"
pgrep -a -f "${0}" | grep -v scriptherder
exit 0
fi

if [[ -n "${errors}" ]]; then
echo "Errors found in the following mounts: ${error_ids}"
exit 1
fi
echo "No errors found"
exit 0
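The sleep above staggers the scan across hosts: the last octet of the first interface's MAC address is read as hex (0-255) and halved, giving a host-specific delay of 0-127 minutes. A worked example of the arithmetic (the MAC octet is illustrative):

# If the first interface's MAC ends in ...:7e, then 16#7e == 126, so the host sleeps 63 minutes:
echo $((16#7e / 2))   # prints 63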
@ -1,16 +0,0 @@
<?php


$CONFIG = [
'DB' => [
'host' => "<%= @dbhost %>",
'db' => "lookup" ,
'user' => "lookup",
'pass' => "<%= @mysql_user_password %>",
],

'GLOBAL_SCALE' => true,

'AUTH_KEY' => "<%= @gss_jwt_key %>",
];
@ -1,24 +0,0 @@
version: '3.2'

services:

app:
image: docker.sunet.se/drive/nextcloud-lookup:<%= @lookup_version %>
restart: always
volumes:
- /opt/lookup/config.php:/var/www/html/config/config.php
networks:
- default
- proxysql_proxysql
dns:
- 89.46.20.75
- 89.46.21.29
- 89.32.32.32
ports:
- 443:443
command: apachectl -D FOREGROUND
tty: true

networks:
proxysql_proxysql:
external: true
@ -1,62 +0,0 @@
SET SQL_MODE = "NO_AUTO_VALUE_ON_ZERO";
SET time_zone = "+00:00";

/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
/*!40101 SET NAMES utf8mb4 */;

CREATE DATABASE IF NOT EXISTS `lookup` DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
USE `lookup`;

CREATE USER 'lookup'@'%' IDENTIFIED BY '<%= @mysql_user_password %>';
GRANT ALL PRIVILEGES ON lookup.* TO 'lookup'@'%' IDENTIFIED BY '<%= @mysql_user_password %>';


DROP TABLE IF EXISTS `emailValidation`;
CREATE TABLE IF NOT EXISTS `emailValidation` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`storeId` int(11) NOT NULL,
`token` varchar(16) COLLATE utf8mb4_unicode_ci NOT NULL,
PRIMARY KEY (`id`),
KEY `token` (`token`),
KEY `storeId` (`storeId`)
) ENGINE=InnoDB AUTO_INCREMENT=16 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;

DROP TABLE IF EXISTS `store`;
CREATE TABLE IF NOT EXISTS `store` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`userId` int(11) NOT NULL,
`k` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
`v` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
`valid` tinyint(1) NOT NULL DEFAULT '0',
PRIMARY KEY (`id`),
KEY `key` (`k`(191)),
KEY `value` (`v`(191)),
KEY `userId` (`userId`)
) ENGINE=InnoDB AUTO_INCREMENT=51 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;

DROP TABLE IF EXISTS `users`;
CREATE TABLE IF NOT EXISTS `users` (
`id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT,
`federationId` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
`timestamp` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (`id`),
KEY `federationId` (`federationId`(191))
) ENGINE=InnoDB AUTO_INCREMENT=15 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;

DROP TABLE IF EXISTS `toVerify`;
CREATE TABLE IF NOT EXISTS `toVerify` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`userId` int(11) NOT NULL,
`storeId` int(11) NOT NULL,
`property` varchar(512) COLLATE utf8mb4_unicode_ci NOT NULL,
`location` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
`tries` int(11) NOT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=16 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;


/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
templates/mariadb/custconfig.json.erb (new file, 1 line)
@ -0,0 +1 @@
<%= @custdata.to_json %>
@ -21,6 +21,6 @@ services:
- MYSQL_ROOT_PASSWORD=<%= @mysql_root_password %>
- BOOTSTRAP=<%= @bootstrap %>
- FORCE_BOOTSTRAP=0
command: "--wsrep_cluster_address=gcomm://<%= @db_ip[0] %>,<%= @db_ip[1] %>,<%= @db_ip[2] %>"
command: "--wsrep_cluster_address=gcomm://<%= @db_ip.join(',') %>"
tty: true
templates/mariadb/genuserdeplists.sh.erb (new file, 32 lines)
@ -0,0 +1,32 @@
#!/bin/bash

<% basedir="statistics:drive-server-coms" -%>
<% cupath="/opt/mariadb/statistics/users/" -%>
<% custdata="/opt/mariadb/statistics/custdata.json" -%>
status=0

<% @custdata.each do |cust,data| -%>
#Customer <%= cust %> has no billing departments.
<% if defined?(data[@environment]["billdomains"]) && data[@environment]["billdomains"] -%>
mkdir -p /opt/mariadb/statistics/users/<%= cust %>
chmod '0700' /opt/mariadb/statistics/users/<%= cust %>
<% data[@environment]["billdomains"].each do |dom| -%>
/root/tasks/listusersbydep.sh <%= cust %> <%= dom %> > /opt/mariadb/statistics/users/<%= cust %>/users-<%= dom.gsub(/[.]/, '-') %>.json
if jq . <%= cupath + cust %>/users-<%= dom.gsub(/[.]/, '-') %>.json &>/dev/null
then
timeout 30s rclone copy -c --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies <%= cupath + cust %>/users-<%= dom.gsub(/[.]/, '-') %>.json <%= basedir%>/<%= cust %>-<%= @environment%>/
[[ $? -eq 0 ]] || { status=1 ; echo "Error: Upload of user data failed." ; }
else
echo "Error in json data"
status=1
fi
<% end -%>
<% end -%>
<% end -%>

if [[ -f <%= custdata %> ]]
then
timeout 30s rclone copy -c --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies <%= custdata %> <%= basedir%>/
fi

exit ${status}
templates/mariadb/listusersdep.sh.erb (new file, 24 lines)
@ -0,0 +1,24 @@
#!/bin/bash

function usage () {
printf "Usage:\t%s <customer> <department email domain>\n" "${0##*/}"
printf "Example:\t%s uu int.uu.se\n" "${0##*/}"
}

[[ -z "${1}" || -z "${2}" || ! "${1}" =~ ^[a-zA-Z0-9.]{1,200}$ || ! "${2}" =~ ^[a-zA-Z0-9.]{1,200}$ ]] && usage && exit 1

depdom="${2}"
customer="${1}"

docker exec mariadb_db_1 /bin/bash -c 'mysql -p${MYSQL_ROOT_PASSWORD} -NB -e '\
$'\'select JSON_PRETTY(JSON_OBJECTAGG(uid,displayname)) from ('\
$' select nextcloud_'"${customer}"$'.oc_accounts.uid,nextcloud_'"${customer}"$'.oc_global_scale_users.displayname'\
$' from nextcloud_'"${customer}"$'.oc_accounts,nextcloud_'"${customer}"$'.oc_global_scale_users'\
$' where nextcloud_'"${customer}"$'.oc_accounts.uid = nextcloud_'"${customer}"$'.oc_global_scale_users.uid'\
$' AND JSON_EXTRACT(nextcloud_'"${customer}"$'.oc_accounts.data, "$.email.value") like "%@'"${depdom}"$'\\\""'\
$' UNION'\
$' select nextcloud_'"${customer}"$'.oc_accounts.uid,nextcloud_'"${customer}"$'.oc_user_saml_users.displayname'\
$' from nextcloud_'"${customer}"$'.oc_accounts,nextcloud_'"${customer}"$'.oc_user_saml_users'\
$' where nextcloud_'"${customer}"$'.oc_accounts.uid = nextcloud_'"${customer}"$'.oc_user_saml_users.uid'\
$' AND JSON_EXTRACT(nextcloud_'"${customer}"$'.oc_accounts.data, "$.email.value") like "%@'"${depdom}"$'\\\"") as users\'' \
| sed 's/\\n/\n/g'
templates/mariadb/mysql.erb.sh (new file, 4 lines)
@ -0,0 +1,4 @@
#!/bin/bash
pw=$(yq -r '.services.db.environment[0]' /opt/mariadb/docker-compose.yml | awk -F '=' '{print $2}')

docker exec -ti mariadb_db_1 mysql -u root -p"${pw}" "${@}"
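The wrapper above reads the root password out of the compose file with yq and passes any arguments straight through to the mysql client inside the container. A usage sketch, assuming the script is installed as mysql on the db host's PATH (the query is just an example):

# Check the Galera cluster size from the db host:
mysql -e 'show status like "wsrep_cluster_size"'
# Or start an interactive session:
mysql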
@ -1,6 +1,6 @@
#!/bin/bash

result="$(docker exec mariadb_backup_mariadb_backup_1 mysql -p<%= @mysql_root_password %> -BN -e 'show status like "slave_running"')"
result="$(docker exec -u root mariadb_backup_mariadb_backup_1 mysql -p<%= @mysql_root_password %> -BN -e 'show status like "slave_running"')"
if [[ "${result}" == "Slave_running ON" ]]; then
echo "OK: Replica running"
exit 0
@ -8,10 +8,12 @@ mkdir -p "${backup_dir}"
if [[ -z ${customer} ]]; then
buopts="--slave-info --safe-slave-backup"
dumpopts="--dump-slave"
mysql -p${MYSQL_ROOT_PASSWORD} -e "stop slave"
mysql -p"${MYSQL_ROOT_PASSWORD}" -u root -e "stop slave"
fi
mariadb-backup --backup ${buopts} -u root -p${MYSQL_ROOT_PASSWORD} --stream=xbstream | gzip >"${backup_dir}/${stream_name}"
mysqldump --all-databases --single-transaction ${dumpopts} -u root -p${MYSQL_ROOT_PASSWORD} | gzip >"${backup_dir}/${dump_name}"
# shellcheck disable=SC2086
mariadb-backup --backup ${buopts} -u root -p"${MYSQL_ROOT_PASSWORD}" --stream=xbstream | gzip >"${backup_dir}/${stream_name}"
# shellcheck disable=SC2086
mysqldump --all-databases --single-transaction ${dumpopts} -u root -p"${MYSQL_ROOT_PASSWORD}" | gzip >"${backup_dir}/${dump_name}"
if [[ -z ${customer} ]]; then
mysql -p${MYSQL_ROOT_PASSWORD} -e "start slave"
mysql -p"${MYSQL_ROOT_PASSWORD}" -u root -e "start slave"
fi
@ -3,7 +3,7 @@ version: '3.2'
services:

mariadb_backup:
image: docker.sunet.se/drive/mariadb
image: docker.sunet.se/drive/mariadb:<%= @mariadb_version %>
container_name: mariadb_backup_mariadb_backup_1
dns:
- 89.46.20.75
@ -13,11 +13,18 @@ dexec="docker exec ${container}"

password=$(${dexec} env | grep MYSQL_ROOT_PASSWORD | awk -F '=' '{print $2}')

mysql="${dexec} mysql -p${password}"
mysql="${dexec} mysql -p${password} -u root"

users="$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_global_scale_users')"
users="${users}
$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_users')"
if [[ "$(${mysql} -NB -e 'select exists(select * from information_schema.TABLES where TABLE_SCHEMA = "nextcloud" and TABLE_NAME = "oc_global_scale_users")')" == "1" ]]
then
users="$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_global_scale_users')"
fi
users="${users}"$'\n'"$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_users')"
if [[ "$(${mysql} -NB -e 'select exists(select * from information_schema.TABLES where TABLE_SCHEMA = "nextcloud" and TABLE_NAME = "oc_user_saml_users")')" == "1" ]]
then
users="${users}"$'\n'"$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_user_saml_users')"
fi
users="$(echo "${users}" | sort | uniq)"

project="statistics"
bucket="drive-server-coms"
@ -1,6 +1,6 @@
[statistics]
type = webdav
url = https://89.46.20.42/remote.php/dav/files/_script/
url = https://89.45.236.246/remote.php/dav/files/_script/
vendor = nextcloud
user = _script
pass = <%= @statistics_secret %>
@ -131,32 +131,6 @@ define host {
<% end -%>
use monitor-site
}
<% if site.match('lookup') %>
define service {
notes_url https://<%= site %>
action_url /grafana/dashboard/script/histou.js?host=$HOSTNAME$&service=$SERVICEDISPLAYNAME$&theme=light&annotations=true
check_command check_https
check_interval 5
check_period 24x7
<% if @environment == 'prod' %>
contacts slack
<% else -%>
contact_groups naemon-admins
<% end -%>
host_name <%= site %>
max_check_attempts 3
notification_interval 60
notification_period 24x7
retry_interval 1
service_description HTTPS
<% if site.match('test') -%>
servicegroups test-sites
<% else -%>
servicegroups prod-sites
<% end -%>
}
<% end -%>
<% unless site.match('lookup') %>
define service {
notes_url https://<%= site %>/status.php
action_url /grafana/dashboard/script/histou.js?host=$HOSTNAME$&service=$SERVICEDISPLAYNAME$&theme=light&annotations=true
@ -164,7 +138,6 @@ define service {
check_interval 5
check_period 24x7
<% cur_cust = site.gsub(/\.drive.*/,'') %>
<% cur_cust = cur_cust.gsub(/drive.*/,'gss') %>
# 'check_nextcloud' command definition
<% if @environment == 'prod' and not site.match('test') and @fullnodes.include?(cur_cust) %>
contacts slack
@ -209,4 +182,3 @@ define service {
<% end -%>
}
<% end -%>
<% end -%>
@ -3,10 +3,12 @@
config_php='/var/www/html/config/config.php'
dbhost="<%= @dbhost %>"
mysql_user_password="<%= @mysql_user_password %>"
admin_password="<%= @admin_password %>"
location="<%= @location %>"
bucket="<%= @s3_bucket %>"
customer="<%= @customer %>"
echo "Setting temp admin password"
apt update && apt install -y apg
admin_password="$(apg -m 40 | head -1)"

/usr/bin/mysql -e "drop database nextcloud" -u nextcloud -p"${mysql_user_password}" -h "${dbhost}" >/dev/null 2>&1
/usr/bin/mysql -e "create database nextcloud" -u nextcloud -p"${mysql_user_password}" -h "${dbhost}" >/dev/null 2>&1
@ -35,6 +37,11 @@ EOF
instanceid=$(grep -E "^ 'instanceid'" ${config_php} | awk -F "'" '{print $4}')
secret=$(grep -E "^ 'secret'" ${config_php} | awk -F "'" '{print $4}')
passwordsalt=$(grep -E "^ 'passwordsalt'" ${config_php} | awk -F "'" '{print $4}')
echo "Now delete the admin user:"
echo " occ <container> user:delete admin"
echo "and then create a new admin user:"
echo " /usr/local/bin/add_admin_user <username> <email address> <container>"
echo ""
echo "${customer}_instanceid: DEC::PKCS7[${instanceid}]!"
echo "${customer}_secret: DEC::PKCS7[${secret}]!"
echo "${customer}_passwordsalt: DEC::PKCS7[${passwordsalt}]!"
@ -5,7 +5,7 @@ no_files=30 # Keep this many files as an archive, script is run once a week
# a specific host, but will differ between hosts
sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk '{print $1}') / 2))m

for logfile in $(ls /opt/multinode/*/{nextcloud.log,server/server.log}); do
for logfile in $(ls /opt/multinode/*/{nextcloud.log,audit.log,server/server.log}); do
if [[ -f ${logfile}.gz.${no_files} ]]; then
rm ${logfile}.gz.${no_files}
fi
@ -11,11 +11,10 @@ services:
volumes:
- <%= @redis_conf_dir %>:/data
command: redis-server /data/redis.conf --loglevel verbose
restart: always
networks:
- proxysql_proxysql
restart: always

networks:
proxysql_proxysql:
external: true
@ -13,9 +13,9 @@ services:
- /opt/nextcloud/cli.php.ini:/etc/php/8.0/cli/php.ini
- <%= @config_php_path %>:/var/www/html/config/config.php
- <%= @nextcloud_log_path %>:/var/www/html/data/nextcloud.log
- <%= @audit_log_path %>:/var/www/html/data/audit.log
- <%= @rclone_conf_path %>:/rclone.conf
environment:
- NC_PASS=<%= @admin_password%>

networks:
- default
- proxysql_proxysql
@ -25,7 +25,7 @@ services:
- 89.32.32.32
ports:
- <%= @https_port %>:443
command: apachectl -D FOREGROUND
command: sh -c 'tail -F /var/www/html/data/nextcloud.log /var/www/html/data/audit.log| tee -a /proc/1/fd/2 & apachectl -D FOREGROUND'
tty: true

networks:
@ -1,6 +1,6 @@
[statistics]
type = webdav
url = https://89.46.20.42/remote.php/dav/files/_script/
url = https://89.45.236.246/remote.php/dav/files/_script/
vendor = nextcloud
user = _script
pass = <%= @statistics_secret %>
templates/satosa/docker-compose.yml.erb (new file, 23 lines)
@ -0,0 +1,23 @@
services:
satosa:
environment:
- "METADATA_DIR=/etc/satosa/metadata"
- "WORKER_TIMEOUT=120"
dns:
- "89.32.32.32"
image: "<%= @image %><% if @tag %>:<%= @tag %><% end %>"
pull_policy: "always"
ports:
- "443:8000"
volumes:
- "/etc/satosa:/etc/satosa"
- "/etc/dehydrated:/etc/dehydrated"
alwayshttps:
environment:
- "ACME_URL=http://acme-c.sunet.se"
dns:
- "89.32.32.32"
image: "docker.sunet.se/always-https"
pull_policy: "always"
ports:
- "80:80"
@ -6,65 +6,78 @@ sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk
number_of_full_to_keep='<%= @full_backup_retention %>'
fork_limit=30 # in GB, if bigger than this number, we fork the backup to its own process
customer="<%= @customer %>"
environment="<%= @environment %>"
declare -A extra_backup_jobs

#<% if @extra_backup_jobs.any? %>
#<% @extra_backup_jobs.each do |client, job| %>
extra_backup_jobs["<%= client %>"]="<%= job %>"
#<% end %>
#<% end %>
#<% if @location.start_with?('common') %>
declare -a sixmonths=('mau')
if [[ " ${sixmonths[*]} " =~ " ${customer} " ]]; then
number_of_full_to_keep=6
number_of_full_to_keep=6
fi
declare -a projects
#<% @singlenodes.each do |singlenode| %>
projects+=("<%= @full_project_mapping[singlenode][@environment]['primary_project'] %> <%= @full_project_mapping[singlenode][@environment]['mirror_project'] %>")
projects+=("<%= @full_project_mapping[singlenode][@environment]['primary_project'] %> <%= @full_project_mapping[singlenode][@environment]['mirror_project'] %> <%= singlenode %>")
#<% @full_project_mapping[singlenode][@environment]['assigned'].each do |project| %>
projects+=("<%= project['project'] %> <%= project['mirror_project'] %>")
projects+=("<%= project['project'] %> <%= project['mirror_project'] %> <%= singlenode %>")
#<% end %>
#<% end %>
#<% else %>
declare -a projects=("<%= @primary_project %> <%= @mirror_project %>")
declare -a projects=("<%= @primary_project %> <%= @mirror_project %> ${customer}")
#<% @assigned_projects.each do |project| %>
projects+=("<%= project['project'] %> <%= project['mirror_project'] %>")
projects+=("<%= project['project'] %> <%= project['mirror_project'] %> ${customer}")
#<% end %>
#<% end %>

if [[ ${customer} == 'common' ]]; then
projects+=("<%= @location %> <%= @location %>-mirror")
projects+=("<%= @location %> <%= @location %>-mirror ${customer}")
fi


function do_backup {
local project="${1}"
local mirror="${2}"
local bucket="${3}"
local mirrorbucket="${bucket}-mirror"
local mountpoint="/opt/backupmounts/${bucket}"
ps aux | grep duplicity | grep "[^a-zA-Z]${bucket}" > /dev/null
local oktorun=$? # 1 == this bucket has no other backup process in progress
mkdir -p ${mountpoint}
[ ${oktorun} -ne 0 ] && rclone mount ${project}:${bucket} ${mountpoint}/ --daemon --allow-other --dir-cache-time 24h
rclone mkdir ${mirror}:${mirrorbucket}
[ ${oktorun} -ne 0 ] && duplicity --full-if-older-than 1M --asynchronous-upload --tempdir /mnt --archive-dir /mnt \
--no-encryption ${mountpoint} rclone://${mirror}:/${mirrorbucket}
umount ${mountpoint}
rmdir ${mountpoint}
# Clean up
[ ${oktorun} -ne 0 ] && duplicity remove-all-but-n-full ${number_of_full_to_keep} --tempdir /mnt --archive-dir /mnt \
--force rclone://${mirror}:/${mirrorbucket}
local project="${1}"
local mirror="${2}"
local bucket="${3}"
local customer="${4}"
local mirrorbucket="${bucket}-mirror"
local mountpoint="/opt/backupmounts/${bucket}"
ps aux | grep duplicity | grep "[^a-zA-Z]${bucket}" > /dev/null
local oktorun=$? # 1 == this bucket has no other backup process in progress
if [[ ${oktorun} -ne 0 ]]; then
mkdir -p ${mountpoint}
rclone mount ${project}:${bucket} ${mountpoint}/ --daemon --allow-other --dir-cache-time 24h
rclone mkdir ${mirror}:${mirrorbucket}
duplicity --full-if-older-than 1M --asynchronous-upload --tempdir /mnt --archive-dir /mnt --no-encryption ${mountpoint} rclone://${mirror}:/${mirrorbucket}
if [[ "${extra_backup_jobs[${customer}]:+found}" == "found" ]] && [[ -f "${extra_backup_jobs[${customer}]}" ]]; then
${extra_backup_jobs[${customer}]} ${project} ${mirror} ${bucket} ${customer} ${environment}
fi
umount ${mountpoint}
rmdir ${mountpoint}
# Clean up
duplicity remove-all-but-n-full ${number_of_full_to_keep} --tempdir /mnt --archive-dir /mnt --force rclone://${mirror}:/${mirrorbucket}
fi
}

for entry in "${projects[@]}"; do
project=$(echo ${entry} | awk '{print $1}')
mirror=$(echo ${entry} | awk '{print $2}')
for bucket in $(rclone lsd ${project}:/ | awk '{print $5}'); do
maybesize=$(timeout 30s rclone size --json ${project}:${bucket})
project=$(echo ${entry} | awk '{print $1}')
mirror=$(echo ${entry} | awk '{print $2}')
customer=$(echo ${entry} | awk '{print $3}')
for bucket in $(rclone lsd ${project}:/ | awk '{print $5}'); do
maybesize=$(timeout 30s rclone size --json ${project}:${bucket})
if [[ ${?} -eq 124 ]]; then
size=$((${fork_limit} * 1000000001))
else
size=$(echo ${maybesize} | jq -r '.bytes' )
fi
# If bucket is above ${fork_limit} GB we fork
if [[ ${size} -gt $((${fork_limit} * 1000000000)) ]]; then
do_backup ${project} ${mirror} ${bucket} &
else
do_backup ${project} ${mirror} ${bucket}
fi
done
# If bucket is above ${fork_limit} GB we fork
if [[ ${size} -gt $((${fork_limit} * 1000000000)) ]]; then
do_backup ${project} ${mirror} ${bucket} ${customer} &
else
do_backup ${project} ${mirror} ${bucket} ${customer}
fi
done
done
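One detail worth noting in the loop above: when rclone size times out (exit code 124), size is set to fork_limit * 1000000001, which lands just over the fork threshold, so a bucket whose size cannot be determined within 30 seconds is always treated as large and backed up in a background process. A minimal sketch of that decision:

fork_limit=30
size=$((fork_limit * 1000000001))    # the timeout fallback value
if [[ ${size} -gt $((fork_limit * 1000000000)) ]]; then
    echo "would fork"                # always true for the fallback
fi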
templates/script/backup-hb.erb.sh (new executable file, 8 lines)
@ -0,0 +1,8 @@
#!/bin/bash
project="${1}"
mirror="${2}"
bucket="${3}"
customer="${4}"
environment="${5}"

rsync -e "ssh -i ${HOME}/.ssh/id_script" -avz "/opt/backupmounts/${bucket}" "sd-${environment}@sd-${environment}-backup.hb.se:~/sd-${environment}/${bucket}"
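This is the extra backup job wired up through the extra_backup_jobs map in the main backup script above: do_backup calls it with project, mirror, bucket, customer and environment while the bucket is still mounted, and it rsyncs the mount to the remote backup host. An illustrative invocation (installed path and argument values are assumptions):

/usr/local/bin/backup-hb.sh hb-prod-project hb-prod-mirror somebucket hb prod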
@ -20,7 +20,7 @@ for project in $(ls ${data_dir}); do
if [[ "${issixmonths}" == "true" ]]; then
number_of_full_to_keep=6
fi
max_num_inc=$((32 * number_of_full_to_keep))
max_num_inc=$((50 * number_of_full_to_keep))
max_num_full=$((2 * number_of_full_to_keep))

tabular_data=$(cat "${data_dir}/${project}/${bucket}.dat")
@ -7,16 +7,16 @@ include_userbuckets="<%= @include_userbuckets %>"
container="nextcloud_app_1"
yq="/usr/local/bin/yq"
if ! [[ -x ${yq} ]]; then
pip install yq
pip install yq
fi

declare -a directories
if [[ -n ${1} ]]; then
directories=("${@}")
directories=("${@}")
else
directories+=("Arbetsmaterial")
directories+=("Bevarande")
directories+=("Gallringsbart")
directories+=("Arbetsmaterial")
directories+=("Bevarande")
directories+=("Gallringsbart")
fi

olddir="${PWD}"
@ -25,31 +25,31 @@ dirty=0
primary=''
declare -a users=( 'admin' )
cd "${tempdir}" || echo "Could not cd to tempdir"
declare -a projects=( "${yq}" -r '.project_mapping.'"${customer}"'.'"${environment}"'.assigned | "\(.[].project)"' /etc/hiera/data/common.yaml )
declare -a projects=( $("${yq}" -r '.project_mapping.'"${customer}"'.'"${environment}"'.assigned | "\(.[].project)"' /etc/hiera/data/common.yaml) )
if [[ "${include_userbuckets}" == "true" ]]; then
primary=$("${yq}" -r '.project_mapping.'"${customer}"'.'"${environment}"'.primary_project' /etc/hiera/data/common.yaml)
projects+=( "${primary}" )
primary=$("${yq}" -r '.project_mapping.'"${customer}"'.'"${environment}"'.primary_project' /etc/hiera/data/common.yaml)
projects+=( "${primary}" )
fi
for project in "${projects[@]}"; do
for bucket in $(rclone lsd "${project}:" | awk '{print $NF}' | grep -E -v '^primary'); do
count=$(rclone size --json "${project}:${bucket}" | jq -r .count)
if [[ ${count} -gt 0 ]]; then
echo "Skipping ${project}:${bucket} because it has stuff in it already"
continue
fi
for directory in "${directories[@]}"; do
dirty=1
for bucket in $(rclone lsd "${project}:" | awk '{print $NF}' | grep -E -v '^primary'); do
count=$(rclone size --json "${project}:${bucket}" | jq -r .count)
if [[ ${count} -gt 0 ]]; then
echo "Skipping ${project}:${bucket} because it has stuff in it already"
continue
fi
for directory in "${directories[@]}"; do
dirty=1
if [[ -n ${primary} ]] && [[ ${project} == "${primary}" ]] ; then
user=$(echo "${bucket}" | awk -F '-' '{print $1}')
users+=( "${user}@${eppn_suffix}" )
fi
echo "Creating ${project}:${bucket}/${directory} because it looks nice and empty"
temp="README.md"
echo "**${directory}**" >"${temp}"
echo "Var god lämna kvar denna fil/Please leave this file" >>"${temp}"
rclone --no-traverse move "${temp}" "${project}:${bucket}/${directory}"
done
done
echo "Creating ${project}:${bucket}/${directory} because it looks nice and empty"
temp="README.md"
echo "**${directory}**" >"${temp}"
echo "Var god lämna kvar denna fil/Please leave this file" >>"${temp}"
rclone --no-traverse move "${temp}" "${project}:${bucket}/${directory}"
done
done
done
cd "${olddir}" || echo "could not cd to home dir"
rmdir "${tempdir}"
@ -5,7 +5,7 @@ shift
include_userbuckets="${1}"
shift
environment="<%= @environment %>"
container="nextcloud-${customer}_app_1"
container="nextcloud-${customer}-app-1"

yq="/usr/local/bin/yq"
if ! [[ -x ${yq} ]]; then
@ -4,14 +4,14 @@ customer="${1}"
multinode="${2}"
environment="<%= @environment %>"
location="${customer}-${environment}"
userjson=$(ssh "script@${multinode}" "sudo /home/script/bin/list_users.sh nextcloud-${customer}_app_1")
userjson=$(ssh "script@${multinode}" "sudo /home/script/bin/list_users.sh nextcloud-${customer}-app-1")
project="statistics"
bucket="drive-server-coms"
base_dir="${project}:${bucket}"
stat_dir="/opt/statistics"
customer_dir="${stat_dir}/${location}"
mkdir -p "${customer_dir}"
rclone mkdir "${base_dir}/${location}"
rclone mkdir --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies "${base_dir}/${location}"
echo "${userjson}" | jq . >"${customer_dir}/users.json"
status=${?}
if [[ ${status} -eq 0 ]]; then
@ -17,7 +17,7 @@ if [[ "${ENVIRONMENT}" == "prod" ]]; then
fi
if [[ "${CUSTOMER}" == "common" ]]; then
customer=""
types="multinode gss"
types="multinode"
fi

domain="${customer}drive.${env}sunet.se"
@ -14,7 +14,7 @@ endpoint = <%= @s3_host_mirror %>
acl = private
[statistics]
type = webdav
url = https://89.46.20.42/remote.php/dav/files/_script/
url = https://89.45.236.246/remote.php/dav/files/_script/
vendor = nextcloud
user = _script
pass = <%= @statistics_secret %>
@ -30,7 +30,10 @@ def add_downtime(fqdn: str,
post_url = 'https://{}/thruk/r/hosts/{}/cmd/{}'.format(
monitor_host, fqdn, action)
headers = {'X-Thruk-Auth-Key': apikey}
requests.post(post_url, data=data, headers=headers)
try:
requests.post(post_url, data=data, headers=headers)
except Exception:
print("Failed to add downtime for {}".format(fqdn))


def remove_downtime(fqdn: str,
@ -42,12 +45,15 @@ def remove_downtime(fqdn: str,
get_url = 'https://{}/thruk/r/hosts?name={}&columns=services'.format(
monitor_host, fqdn)
headers = {'X-Thruk-Auth-Key': apikey}
req = requests.get(get_url, headers=headers)
action = 'del_active_service_downtimes'
for service in req.json()[0]['services']:
post_url = 'https://{}/thruk/r/services/{}/{}/cmd/{}'.format(
monitor_host, fqdn, urllib.parse.quote(service), action)
requests.post(post_url, headers=headers)
try:
req = requests.get(get_url, headers=headers)
action = 'del_active_service_downtimes'
for service in req.json()[0]['services']:
post_url = 'https://{}/thruk/r/services/{}/{}/cmd/{}'.format(
monitor_host, fqdn, urllib.parse.quote(service), action)
requests.post(post_url, headers=headers)
except Exception:
print("Failed to remove downtime for {}".format(fqdn))


def main() -> int:
@ -61,14 +67,10 @@ def main() -> int:
reboot_command = ['sudo /usr/local/bin/safer_reboot']

if customers[0] == "common":
customers = ["gss", "lookup", "multinode"]
customers = ["multinode"]
for customer in customers:
backup_type = "backup"
if customer == "gss":
backup_type = "gssbackup"
elif customer == "lookup":
backup_type = "lookupbackup"
elif customer == "multinode":
if customer == "multinode":
backup_command = ['sudo /home/script/bin/backup_multinode_db.sh']
backup_type = "multinode-db"
@ -30,7 +30,10 @@ def add_downtime(fqdn: str,
post_url = 'https://{}/thruk/r/hosts/{}/cmd/{}'.format(
monitor_host, fqdn, action)
headers = {'X-Thruk-Auth-Key': apikey}
requests.post(post_url, data=data, headers=headers)
try:
requests.post(post_url, data=data, headers=headers)
except Exception:
print("Failed to add downtime for: {}".format(fqdn))


def remove_downtime(fqdn: str,
@ -42,12 +45,15 @@ def remove_downtime(fqdn: str,
get_url = 'https://{}/thruk/r/hosts?name={}&columns=services'.format(
monitor_host, fqdn)
headers = {'X-Thruk-Auth-Key': apikey}
req = requests.get(get_url, headers=headers)
action = 'del_active_service_downtimes'
for service in req.json()[0]['services']:
post_url = 'https://{}/thruk/r/services/{}/{}/cmd/{}'.format(
monitor_host, fqdn, urllib.parse.quote(service), action)
requests.post(post_url, headers=headers)
try:
req = requests.get(get_url, headers=headers)
action = 'del_active_service_downtimes'
for service in req.json()[0]['services']:
post_url = 'https://{}/thruk/r/services/{}/{}/cmd/{}'.format(
monitor_host, fqdn, urllib.parse.quote(service), action)
requests.post(post_url, headers=headers)
except Exception:
print("Failed to remove downtime for: {}".format(fqdn))


def run_command(command: list) -> tuple:
@ -83,9 +89,8 @@ def main() -> int:
server_type = "node"
backup_type = "backup"
if customer == "common":
customer = "gss"
server_type = "gss"
backup_type = "gssbackup"
print("GSS no longer exists, bailing out.")
sys.exit(0)

backup = build_fqdn(customer, environment, 1, backup_type)
print("\tRunning backup command at {}".format(backup))
@ -83,7 +83,11 @@ alias la='ls -A'
alias l='ls -CF'

if [[ $(hostname) =~ redis ]]; then
alias redis-cli="redis-cli -a $(grep requirepass /opt/redis/node-0/server.conf | awk '{print $2}' | sed 's/"//g') --no-auth-warning"
if [ -f /opt/redis/node-0/server.conf ]; then
alias redis-cli="redis-cli -a $(grep requirepass /opt/redis/node-0/server.conf | awk '{print $2}' | sed 's/"//g') --no-auth-warning"
else
alias redis-cli="redis-cli -a $(grep requirepass /opt/redict/node-0/server.conf | awk '{print $2}' | sed 's/"//g') --no-auth-warning"
fi
fi

# Alias definitions.
templates/scriptreceiver/check_max_uptime (new executable file, 31 lines)
@ -0,0 +1,31 @@
#!/usr/bin/env python3
import argparse
import sys

parser = argparse.ArgumentParser()
parser.add_argument('-w', '--warning',
help='Warning threshold',
required=True)
parser.add_argument('-c', '--critical',
help='Critical threshold',
required=True)

args = parser.parse_args()
warning = int(args.warning)
critical = int(args.critical)

with open('/proc/uptime', 'r') as f:
uptime_seconds = float(f.readline().split()[0])
days = int(uptime_seconds / 86400)

status = "OK"
exit = 0
if days > warning:
status = "WARNING"
exit = 1
if days > critical:
status = "CRITICAL"
exit = 2

print(f"{status}: uptime {days} days | uptime={days};{warning};{critical};")
sys.exit(exit)
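A usage sketch for the check above; it follows the Nagios plugin convention of "STATUS: text | perfdata" with exit codes 0/1/2, and the thresholds are in days (values illustrative):

./check_max_uptime -w 90 -c 180
# OK: uptime 42 days | uptime=42;90;180;   (exit code 0)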
@ -11,7 +11,7 @@ function usage {
exit 1
}

if ! [[ ${container} == 'nextcloud_app_1' ]] && ! [[ ${container} =~ ^nextcloud-[a-z]*_app_1$ ]]; then
if ! [[ ${container} == 'nextcloud_app_1' ]] && ! [[ ${container} =~ ^nextcloud-[a-z]*-app-1$ ]]; then
usage
fi
if ! [[ ${bucket} =~ ^[a-zA-Z0-9]+ ]]; then
@ -26,7 +26,7 @@ echo "$(date): Start executing create_bucket.sh ${1} ${2} ${3}"

rclone_config="/opt/nextcloud/rclone.conf"
if [[ "${container}" != "nextcloud_app_1" ]]; then
customer=$(echo "${container}" | sed -e 's/^nextcloud-//' -e 's/_app_1$//')
customer=$(echo "${container}" | sed -e 's/^nextcloud-//' -e 's/-app-1$//')
rclone_config="/opt/multinode/${customer}/rclone.conf"
fi
@ -38,7 +38,7 @@ preexisting="$(docker exec -u www-data -i "${container}" php --define apc.enable
if [[ -z ${preexisting} ]]; then
docker exec -u www-data -i "${container}" php --define apc.enable_cli=1 /var/www/html/occ files_external:create "${user_bucket_name}" \
amazons3 -c bucket="${bucket}" -c key="${key}" -c secret="${secret}" -c hostname="${endpoint}" -c use_ssl=true -c use_path_style=true -c region=us-east-1 \
amazons3::accesskey --user ${user}
-c useMultipartCopy=false amazons3::accesskey --user ${user}
for shareid in $(docker exec -u www-data -i ${container} php --define apc.enable_cli=1 /var/www/html/occ files_external:export ${user} | jq -r '.[].mount_id'); do
docker exec -u www-data -i ${container} php --define apc.enable_cli=1 /var/www/html/occ files_external:option ${shareid} enable_sharing true
done
@ -7,7 +7,7 @@ bucket=${4}
user=${5}
/usr/local/bin/occ files_external:create "${bucket}" \
amazons3 -c bucket="${bucket}" -c key="${key}" -c secret="${secret}" -c hostname="${endpoint}" -c use_ssl=true -c use_path_style=true -c region=us-east-1 \
amazons3::accesskey --user "${user}"
-c useMultipartCopy=false amazons3::accesskey --user "${user}"
for shareid in $(/usr/local/bin/occ files_external:export "${user}" | jq -r '.[].mount_id'); do
/usr/local/bin/occ files_external:option "${shareid}" enable_sharing true
done
@ -9,7 +9,7 @@ function usage {
exit 1
}

if ! [[ ${container} == 'nextcloud_app_1' ]] && ! [[ ${container} =~ ^nextcloud-[a-z]*_app_1$ ]]; then
if ! [[ ${container} == 'nextcloud_app_1' ]] && ! [[ ${container} =~ ^nextcloud-[a-z]*-app-1$ ]]; then
usage
fi
@ -2,7 +2,7 @@

container=${1}

if ! [[ ${container} == 'nextcloud_app_1' ]] && ! [[ ${container} =~ ^nextcloud-[a-z]*_app_1$ ]]; then
if ! [[ ${container} == 'nextcloud_app_1' ]] && ! [[ ${container} =~ ^nextcloud-[a-z]*-app-1$ ]]; then
echo "Usage: ${0} <nextcloud container name>"
echo "Example : ${0} nextcloud_app_1"
exit 1