Compare commits
383 commits
Author | SHA1 | Date | |
---|---|---|---|
Micke Nordin | 30952f7448 | ||
Micke Nordin | b769f1637a | ||
Micke Nordin | ce745ff6b0 | ||
Micke Nordin | 1aae63ba07 | ||
Micke Nordin | 5913537c34 | ||
Micke Nordin | 932d91364e | ||
Magnus Andersson | 812a56cf99 | ||
Micke Nordin | d5e88dd431 | ||
Micke Nordin | f27a9c353c | ||
Micke Nordin | bff0aff823 | ||
Micke Nordin | a8060b7774 | ||
Micke Nordin | dac082f4f5 | ||
Micke Nordin | 48f8f6d690 | ||
Micke Nordin | deabba2e22 | ||
Micke Nordin | 22bfeaf244 | ||
Micke Nordin | d6b4098932 | ||
Micke Nordin | 42363a9aaf | ||
Micke Nordin | 4dd95bdb39 | ||
Micke Nordin | 714e2d98f6 | ||
Micke Nordin | 64e3746628 | ||
Micke Nordin | a563e16309 | ||
Micke Nordin | 124203cd92 | ||
Micke Nordin | 6365549250 | ||
Micke Nordin | 9a367b0955 | ||
Micke Nordin | d3d0807d43 | ||
Micke Nordin | cb13190197 | ||
Micke Nordin | 671b5f95df | ||
Micke Nordin | dc8ff6a712 | ||
Micke Nordin | 124c2f6c5d | ||
Micke Nordin | 142fc78f6b | ||
Micke Nordin | 4b4d0626cf | ||
Micke Nordin | e3a729e1a8 | ||
Micke Nordin | 8cae1a668a | ||
Micke Nordin | 7177dfa12e | ||
Micke Nordin | 76640002c0 | ||
Micke Nordin | b8a16bd700 | ||
Micke Nordin | a64b2533bb | ||
Micke Nordin | 6865d6833f | ||
Micke Nordin | 958ff7daa9 | ||
Micke Nordin | c649a7a6b4 | ||
Micke Nordin | ac9e29d39e | ||
Micke Nordin | 56f8a5d149 | ||
Micke Nordin | cb35c3bbf7 | ||
Micke Nordin | b280257882 | ||
Micke Nordin | d3cb9b9e14 | ||
Micke Nordin | 39384f7e40 | ||
Micke Nordin | e0f6d3bafa | ||
Micke Nordin | f531a60514 | ||
Micke Nordin | 8646f1eda7 | ||
Micke Nordin | f5f97e1c3a | ||
Micke Nordin | 81341be6e7 | ||
Micke Nordin | 51ddfa985c | ||
Micke Nordin | 158f982aed | ||
Micke Nordin | a7fe6cecc7 | ||
Micke Nordin | 45ac19bec8 | ||
Micke Nordin | a74899332e | ||
Micke Nordin | d63c586998 | ||
Micke Nordin | beb69036a1 | ||
Micke Nordin | 74af996a76 | ||
Micke Nordin | e76c22f6d5 | ||
Micke Nordin | b065ac7ae6 | ||
Micke Nordin | 019a3d9792 | ||
Micke Nordin | 6224fe3e96 | ||
Micke Nordin | cc2a9e9ebf | ||
Micke Nordin | daa647beb9 | ||
Micke Nordin | 1ecf6c8128 | ||
Micke Nordin | 4111ac7500 | ||
Micke Nordin | 8b0d90e4eb | ||
Micke Nordin | e3a66f95df | ||
Micke Nordin | b096522c36 | ||
Micke Nordin | d7e7e260c2 | ||
Micke Nordin | 52869ca089 | ||
Micke Nordin | 1a39e0dbec | ||
Micke Nordin | e0c06d4571 | ||
Micke Nordin | f29e074ded | ||
Micke Nordin | c761c56052 | ||
Micke Nordin | 026c5c5d03 | ||
Micke Nordin | 7460019ec5 | ||
Micke Nordin | 5c0e05394d | ||
Micke Nordin | f2b8406826 | ||
Micke Nordin | 6d78fdde7c | ||
Micke Nordin | 15beb333ec | ||
Micke Nordin | 8ca506f608 | ||
Micke Nordin | 00d4c96d28 | ||
Micke Nordin | d3e0f41ff1 | ||
Micke Nordin | 8b74bd9c75 | ||
Micke Nordin | 5c16a783e9 | ||
Micke Nordin | 2b590b3f01 | ||
Micke Nordin | e4093f5e40 | ||
Micke Nordin | b573820906 | ||
Micke Nordin | 141f9334f2 | ||
Micke Nordin | 07ddb2220a | ||
Micke Nordin | 722a8bdb68 | ||
Micke Nordin | d832f2a274 | ||
Micke Nordin | ccdb53310d | ||
Micke Nordin | 07d54eba92 | ||
Micke Nordin | 260fd526fe | ||
Micke Nordin | ac917b9811 | ||
Magnus Andersson | e3697719d2 | ||
Magnus Andersson | 766c337cb2 | ||
Magnus Andersson | 24c2fcbdfc | ||
Magnus Andersson | b7e94c2259 | ||
Micke Nordin | cf0a778781 | ||
Micke Nordin | 15a752a65d | ||
Micke Nordin | 911edbed58 | ||
Micke Nordin | b4b099b237 | ||
Micke Nordin | f8dce117a8 | ||
Micke Nordin | 1a63b4884f | ||
Micke Nordin | dbac473c26 | ||
Micke Nordin | 7673aca60e | ||
Micke Nordin | 1822ecd683 | ||
Micke Nordin | 74768985cc | ||
Micke Nordin | e20e41edf8 | ||
Micke Nordin | 663021dc61 | ||
Micke Nordin | 59e137e918 | ||
Micke Nordin | d7c724570f | ||
Micke Nordin | e4ccdde169 | ||
Micke Nordin | 589f62fc0a | ||
Magnus Andersson | 6827c0af7c | ||
Micke Nordin | bff8d8ab22 | ||
Micke Nordin | 2152b5b4a9 | ||
Magnus Andersson | 75c3a77c6d | ||
Magnus Andersson | eac5b172f2 | ||
Magnus Andersson | 05bde6adb6 | ||
Magnus Andersson | d8eaa122e3 | ||
Magnus Andersson | 7402202b9a | ||
Magnus Andersson | 60cb53e631 | ||
Magnus Andersson | c134f0771f | ||
Magnus Andersson | 8acd0c40b8 | ||
Magnus Andersson | 04a3c96250 | ||
Magnus Andersson | 3651afd778 | ||
Micke Nordin | c8051b065a | ||
Micke Nordin | 41dd736b20 | ||
Micke Nordin | 08be347419 | ||
Micke Nordin | 9285c155db | ||
Micke Nordin | 12ca587858 | ||
Micke Nordin | 39b571f3d1 | ||
Micke Nordin | d353069660 | ||
Micke Nordin | 4cf8cf3adb | ||
Micke Nordin | 7f7d38c87e | ||
Micke Nordin | bac25d27c8 | ||
Micke Nordin | 42831948e1 | ||
Micke Nordin | db92a5d255 | ||
Micke Nordin | 1dea001ed9 | ||
Micke Nordin | 4d6e6f67af | ||
Micke Nordin | b20c4c7397 | ||
Micke Nordin | a836417db2 | ||
Micke Nordin | a76397aafd | ||
Micke Nordin | ea51f0b56b | ||
Micke Nordin | 6c8a4f430b | ||
Micke Nordin | 097fce5f87 | ||
Micke Nordin | 5c46f5bc38 | ||
Magnus Andersson | bd1519430f | ||
Magnus Andersson | 5f47ce7fca | ||
Micke Nordin | 45321052de | ||
Micke Nordin | 717350780f | ||
Micke Nordin | 35362991d0 | ||
Micke Nordin | a4d0afe9fd | ||
Micke Nordin | 3ad7d95161 | ||
Micke Nordin | 450f31e2d5 | ||
Micke Nordin | 00a2f1f425 | ||
Micke Nordin | b3d9873176 | ||
Magnus Andersson | bf5abc073e | ||
Magnus Andersson | aa2a869203 | ||
Micke Nordin | 5b7c702573 | ||
Micke Nordin | cbf4086993 | ||
Micke Nordin | 5f74eaaab0 | ||
Micke Nordin | 05814cfd24 | ||
Micke Nordin | 4376aad6e2 | ||
Micke Nordin | c485935f30 | ||
Micke Nordin | 718bc0b496 | ||
Micke Nordin | 472977fde4 | ||
Magnus Andersson | eba9546cdc | ||
Magnus Andersson | a16c9d68e8 | ||
Micke Nordin | 2f548f2431 | ||
Micke Nordin | 47bf118116 | ||
Micke Nordin | 72cf4e570c | ||
Micke Nordin | 51a695ed9e | ||
Micke Nordin | 02454ebd39 | ||
Micke Nordin | cb16f03710 | ||
Micke Nordin | 5ba11977b0 | ||
Micke Nordin | 27d0bc15cc | ||
Micke Nordin | 01fc5a9e86 | ||
Micke Nordin | bd074c73d5 | ||
Micke Nordin | d33f80273d | ||
Micke Nordin | 4a8a4d642c | ||
Micke Nordin | df4fe9985b | ||
Micke Nordin | d13b22e3a2 | ||
Magnus Andersson | c774d67745 | ||
Magnus Andersson | d5c6b9a5fd | ||
Magnus Andersson | 8d2c481569 | ||
Micke Nordin | 36f163414f | ||
Micke Nordin | b5b3885c30 | ||
Micke Nordin | 77ad3ac26d | ||
Micke Nordin | e72c42924a | ||
Magnus Andersson | 069eb7ce93 | ||
Magnus Andersson | d07ed76d36 | ||
Micke Nordin | f626156c4c | ||
Micke Nordin | b4ce0046de | ||
Magnus Andersson | 314149c7f3 | ||
Magnus Andersson | 65abea7daa | ||
Magnus Andersson | 181f2e52fe | ||
Magnus Andersson | f1cac6ce35 | ||
Magnus Andersson | 49386124c3 | ||
Magnus Andersson | cc6f2c8d7b | ||
Magnus Andersson | 3fce5c6fab | ||
Magnus Andersson | 4f6786363b | ||
Micke Nordin | 0837f82d87 | ||
Micke Nordin | 10b0f30155 | ||
Micke Nordin | 922de25fd8 | ||
Micke Nordin | 1a70154efc | ||
Micke Nordin | cbb6ce3aa3 | ||
Micke Nordin | 7f19e0de62 | ||
Micke Nordin | 49243acaa9 | ||
Micke Nordin | 393f2c7034 | ||
Micke Nordin | f10864ff91 | ||
 | f511383a7a | ||
Micke Nordin | 668c1389d5 | ||
Micke Nordin | 585319b177 | ||
Magnus Andersson | 0d505d2482 | ||
Magnus Andersson | db6bfcc517 | ||
Magnus Andersson | 8aa9c14d35 | ||
Magnus Andersson | 2cdacfe0f5 | ||
 | c425f90b0f | ||
 | 7dc9667bec | ||
 | 3b82d5b0e3 | ||
 | f581835f7c | ||
 | 31b1bcb92a | ||
 | 6265472396 | ||
 | a150d23f1a | ||
 | b40beeb420 | ||
 | 300e877b3a | ||
Fredrik Kjellman | 8cf9f0eafa | ||
Fredrik Kjellman | 436ea4f1a5 | ||
Micke Nordin | c83e8b2982 | ||
Micke Nordin | a08f473954 | ||
Micke Nordin | 2f629c289c | ||
Micke Nordin | f4eca0b747 | ||
Micke Nordin | 9d5e53c0cc | ||
Micke Nordin | ec679013e3 | ||
Micke Nordin | 7aa4a25dbd | ||
Micke Nordin | 2ba33ad5d6 | ||
Micke Nordin | ed65bb0d94 | ||
Micke Nordin | 689c625b90 | ||
Micke Nordin | df731fdb44 | ||
Micke Nordin | 440bcec0d2 | ||
Micke Nordin | 9e4879d9c8 | ||
Fredrik Kjellman | 3f1ea1b694 | ||
Fredrik Kjellman | 3491a1a5f7 | ||
Richard Freitag | 76bc7edaed | ||
Richard Freitag | 7d25ae0859 | ||
Richard Freitag | 3899dab9b3 | ||
Magnus Andersson | 3cdf7d4084 | ||
Magnus Andersson | 14b6535efd | ||
Micke Nordin | 0148a727ca | ||
Micke Nordin | e06a29a8a1 | ||
Micke Nordin | af873e7133 | ||
Micke Nordin | 65a6b1787d | ||
Micke Nordin | 4e496bcb13 | ||
Micke Nordin | 5c5ed31f13 | ||
Micke Nordin | 21d9a5e3d0 | ||
Micke Nordin | a5f5ddb293 | ||
Micke Nordin | eb0bb65f4a | ||
Micke Nordin | 333d712548 | ||
Micke Nordin | bdd858f76c | ||
Micke Nordin | 03fb0a23ac | ||
Magnus Andersson | 81bb47819c | ||
Magnus Andersson | 9e6321e743 | ||
Magnus Andersson | b1ea687f2d | ||
Richard Freitag | 33d49b7ec0 | ||
Richard Freitag | f988081b39 | ||
Micke Nordin | 7d755324f8 | ||
Micke Nordin | b72315b1a2 | ||
Micke Nordin | 6220d691a3 | ||
Micke Nordin | ec9115df06 | ||
Magnus Andersson | aa1d8710e1 | ||
Magnus Andersson | 6211f3aaea | ||
Micke Nordin | 176338f537 | ||
Micke Nordin | 1e9164dcd4 | ||
Richard Freitag | e39b6cbaed | ||
Richard Freitag | 11607f208c | ||
Richard Freitag | 823e172278 | ||
Magnus Andersson | 6afbcf5beb | ||
Magnus Andersson | ec6ba465f9 | ||
Magnus Andersson | 63631b2b51 | ||
Magnus Andersson | 059bd041e6 | ||
Magnus Andersson | 5df8dba25c | ||
Magnus Andersson | 1e7108297b | ||
Micke Nordin | cdf16db309 | ||
Richard Freitag | 5472b767c8 | ||
Richard Freitag | e990b05401 | ||
Magnus Andersson | ca27f46ffe | ||
Micke Nordin | e57de0ebd0 | ||
Micke Nordin | 28a1b95114 | ||
Micke Nordin | 88ee8c2b67 | ||
Richard Freitag | ade4bd42b3 | ||
Richard Freitag | 4c597442e1 | ||
Richard Freitag | 829f3f01c7 | ||
Micke Nordin | 7198109d56 | ||
Micke Nordin | 8752500f4d | ||
Micke Nordin | 0a09f73f12 | ||
Micke Nordin | f6cca5bcdc | ||
Micke Nordin | 37863c7a17 | ||
Micke Nordin | 63a2b363a7 | ||
Micke Nordin | a21ea6fdd5 | ||
Micke Nordin | 0dea8dbc91 | ||
Micke Nordin | 0e5653aa0c | ||
Micke Nordin | 56291a4165 | ||
Micke Nordin | 3bac4ac9f3 | ||
Micke Nordin | c6a1b2cd66 | ||
Micke Nordin | 39137a4dfc | ||
Micke Nordin | 216c706457 | ||
Micke Nordin | c013b861e9 | ||
Micke Nordin | d5b9bb88fa | ||
Micke Nordin | f606d6c1b6 | ||
Micke Nordin | 4a7af7a049 | ||
Micke Nordin | 3d4025e738 | ||
Micke Nordin | b4b90799e6 | ||
Micke Nordin | a44cdc1bb6 | ||
Micke Nordin | d2ced8d1cd | ||
Magnus Andersson | b9e35dcf92 | ||
Micke Nordin | b5f8abf934 | ||
Micke Nordin | 6d33b53a92 | ||
Micke Nordin | 631f3616c6 | ||
Micke Nordin | 174c72eb70 | ||
Micke Nordin | b989bd3d54 | ||
Micke Nordin | 234d19b652 | ||
Micke Nordin | 22f5dfac62 | ||
Micke Nordin | 1b60d41dab | ||
Micke Nordin | 7207ef2bbf | ||
Micke Nordin | c7bd0fa965 | ||
Micke Nordin | d540c90afa | ||
Micke Nordin | b927716aad | ||
Micke Nordin | ac6430df51 | ||
Micke Nordin | 88b3c1b4fb | ||
Micke Nordin | 5bc0ddfa00 | ||
Micke Nordin | 99c88c0c3b | ||
Micke Nordin | 8bfa2d58a9 | ||
Micke Nordin | 3b58bcd2ae | ||
Micke Nordin | 364a8685a7 | ||
Micke Nordin | 3bd7a44d7a | ||
Micke Nordin | f56a5c6eae | ||
Micke Nordin | feb02cc2cc | ||
Micke Nordin | 58fd21e0aa | ||
Micke Nordin | 1e0879769c | ||
Micke Nordin | 3986e69349 | ||
Micke Nordin | d0ca012129 | ||
Micke Nordin | 753512f90f | ||
Micke Nordin | 598be8c2f1 | ||
Micke Nordin | 1cea9c9173 | ||
Micke Nordin | 6df092764f | ||
Micke Nordin | 735eaf7067 | ||
Micke Nordin | 54de0adb0f | ||
Micke Nordin | 42b57f3655 | ||
Micke Nordin | ed9353198d | ||
Micke Nordin | 1152fd972f | ||
Micke Nordin | 604d485d29 | ||
Micke Nordin | c7072d6dae | ||
Micke Nordin | 3281824eac | ||
Micke Nordin | df1277e04e | ||
Micke Nordin | b15bc5ad03 | ||
Micke Nordin | befc3207d9 | ||
Micke Nordin | b166a683a6 | ||
Micke Nordin | bffb06333d | ||
Micke Nordin | bc329be450 | ||
Micke Nordin | 1e23d134f1 | ||
Micke Nordin | 50dcefdb3d | ||
Micke Nordin | ff3a5c143c | ||
Micke Nordin | e0139b0765 | ||
Micke Nordin | fa93c01846 | ||
Micke Nordin | a9fdb43bf6 | ||
Micke Nordin | 95030efba2 | ||
Micke Nordin | 2119f71008 | ||
Micke Nordin | 08243f555d | ||
Micke Nordin | 61c56fd2f2 | ||
Micke Nordin | b7a26db915 | ||
Micke Nordin | b225e8d364 | ||
Micke Nordin | 0f8f27773b | ||
Micke Nordin | f4ff3fea76 | ||
Micke Nordin | e7ffd6fac3 | ||
Micke Nordin | 8f3becd914 | ||
Micke Nordin | 3531be828e | ||
Micke Nordin | 24b0b1eedc |
@@ -1,6 +1,6 @@
# Let's determine who the customer is by looking at the hostname
function sunetdrive::get_customer() >> String {
  $hostnameparts = split($facts['fqdn'],'\.')
  $hostnameparts = split($facts['networking']['fqdn'],'\.')
  if $hostnameparts[1] == 'drive' {
    if $hostnameparts[0] =~ /^gss/ {
      return 'gss'
@@ -1,6 +1,6 @@
# Let's determine where we are by looking at the hostname
function sunetdrive::get_environment() >> String {
  $hostname = $facts['fqdn']
  $hostname = $facts['networking']['fqdn']
  if $hostname =~ /^.*\.drive\.sunet\.se$/ {
    if $hostname =~ /^.*\.pilot\.drive\.sunet\.se$/ {
      return 'pilot'
@@ -1,4 +1,4 @@
# Let's determine where we are by looking at the hostname
function sunetdrive::get_node_number() >> Integer {
  Integer(regsubst($::fqdn, /^[a-zA-Z\-]+(\d).*$/, '\\1'))
  Integer(regsubst($facts['networking']['fqdn'], /^[a-zA-Z\-]+(\d).*$/, '\\1'))
}
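All three function hunks make the same change: the legacy top-scope facts (`$::fqdn`, `$facts['fqdn']`) are replaced by the structured `$facts['networking']` hash. A minimal sketch of the pattern, using a made-up hostname (not taken from the repo):

```puppet
# Sketch only; the fqdn is a hypothetical example value.
$my_fqdn = $facts['networking']['fqdn']   # e.g. 'node3.drive.test.sunet.se'
# The same extraction get_node_number() performs: skip the alphabetic
# prefix and keep the first digit.
$node_number = Integer(regsubst($my_fqdn, /^[a-zA-Z\-]+(\d).*$/, '\\1'))
notice("node number: ${node_number}")     # => 3 for the example above
```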
@@ -5,30 +5,16 @@ define sunetdrive::app_type (
  $override_config = undef,
  $override_compose = undef
) {
  include sunet::packages::netcat_openbsd
  # Config from group.yaml and customer specific conf
  $environment = sunetdrive::get_environment()
  $customer = sunetdrive::get_customer()
  $nodenumber = sunetdrive::get_node_number()

  # Common settings for multinode and full nodes
  $nextcloud_ip = $config['app']
  $s3_bucket = $config['s3_bucket']
  $s3_host = $config['s3_host']
  $site_name = $config['site_name']
  $trusted_domains = $config['trusted_domains']
  $trusted_proxies = $config['trusted_proxies']

  # These are encrypted values from local.eyaml
  $gss_jwt_key = safe_hiera('gss_jwt_key')
  $smtppassword = safe_hiera('smtp_password')

  $is_multinode = (($override_config != undef) and ($override_compose != undef))
  if $is_multinode {
    # The config used
    $config = $override_config
    # Other settings
    $redis_host = $config['redis_host']
    $admin_password = $config[ 'admin_password' ]
    $dbhost = $config[ 'dbhost' ]
    $dbname = $config[ 'dbname' ]
    $dbuser = $config[ 'dbuser' ]
@@ -39,25 +25,11 @@ define sunetdrive::app_type (
    $s3_key = $config[ 's3_key' ]
    $s3_secret = $config[ 's3_secret' ]
    $secret = $config[ 'secret' ]
    $session_save_handler = 'redis'
    $session_save_path = "tcp://${redis_host}:6379?auth=${redis_host_password}"
  } else {
    # The config used
    $config = hiera_hash($environment)
    $skeletondirectory = $config['skeletondirectory']
    # Other settings
    $redis_seeds = [
      {'host' => "redis1.${site_name}", 'port' => 6379},
      {'host' => "redis2.${site_name}", 'port' => 6379},
      {'host' => "redis3.${site_name}", 'port' => 6379},
      {'host' => "redis1.${site_name}", 'port' => 6380},
      {'host' => "redis2.${site_name}", 'port' => 6380},
      {'host' => "redis3.${site_name}", 'port' => 6380},
      {'host' => "redis1.${site_name}", 'port' => 6381},
      {'host' => "redis2.${site_name}", 'port' => 6381},
      {'host' => "redis3.${site_name}", 'port' => 6381},
    ]
    $admin_password = safe_hiera('admin_password')
    $dbhost = 'proxysql_proxysql_1'
    $dbname = 'nextcloud'
    $dbuser = 'nextcloud'
@@ -69,13 +41,33 @@ define sunetdrive::app_type (
    $s3_key = safe_hiera('s3_key')
    $s3_secret = safe_hiera('s3_secret')
    $secret = safe_hiera('secret')
    $session_save_handler = 'rediscluster'
    $session_save_path = "seed[]=${redis_seeds[0]['host']}:${redis_seeds[0]['port']}&seed[]=${redis_seeds[1]['host']}:${redis_seeds[1]['port']}&seed[]=${redis_seeds[2]['host']}:${redis_seeds[2]['port']}&seed[]=${redis_seeds[3]['host']}:${redis_seeds[3]['port']}&seed[]=${redis_seeds[4]['host']}:${redis_seeds[4]['port']}&seed[]=${redis_seeds[5]['host']}:${redis_seeds[5]['port']}&seed[]=${redis_seeds[6]['host']}:${redis_seeds[6]['port']}&seed[]=${redis_seeds[7]['host']}:${redis_seeds[7]['port']}&seed[]=${redis_seeds[8]['host']}:${redis_seeds[8]['port']}&timeout=2&read_timeout=2&failover=error&persistent=1&auth=${redis_cluster_password}&stream[verify_peer]=0"
  }
  $twofactor_enforced_groups = hiera_array('twofactor_enforced_groups')
  $twofactor_enforced_excluded_groups = hiera_array('twofactor_enforced_excluded_groups')
  $twofactor_enforced_groups = $config['twofactor_enforced_groups']
  $twofactor_enforced_excluded_groups = $config['twofactor_enforced_excluded_groups']
  $nextcloud_version = hiera("nextcloud_version_${environment}")
  $nextcloud_version_string = split($nextcloud_version, '[-]')[0]
  # Common settings for multinode and full nodes
  $nextcloud_ip = $config['app']
  $redis_host = $config['redis_host']
  $s3_bucket = $config['s3_bucket']
  $s3_host = $config['s3_host']
  $site_name = $config['site_name']
  $trusted_domains = $config['trusted_domains']
  $trusted_proxies = $config['trusted_proxies']
  if $location == 'kau-prod' {
    $php_memory_limit_mb = 2048
  } else {
    $php_memory_limit_mb = 512
  }
  if $::facts['dockerhost2'] == 'yes' {
    $hostnet = true
  }

  # These are encrypted values from local.eyaml
  $gss_jwt_key = safe_hiera('gss_jwt_key')
  $smtppassword = safe_hiera('smtp_password')

  #These are global values from common.yaml
  $gs_enabled = hiera('gs_enabled')
  $gs_federation = hiera('gs_federation')
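Nine hand-written `seed[]=` entries are easy to mistype; a map/join construction, sketched here as a possible refactor (not what the repo does), would derive them from `$redis_seeds` instead:

```puppet
# Possible refactor (not in the repo): build the seed list from the
# $redis_seeds array declared earlier in this define.
$seed_params = $redis_seeds.map |$seed| {
  "seed[]=${seed['host']}:${seed['port']}"
}.join('&')
$session_save_path = "${seed_params}&timeout=2&read_timeout=2&failover=error&persistent=1&auth=${redis_cluster_password}&stream[verify_peer]=0"
```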
@@ -99,14 +91,6 @@ define sunetdrive::app_type (
  $lb_servers = hiera_hash($environment)['lb_servers']
  $document_servers = hiera_hash($environment)['document_servers']

  file { '/opt/nextcloud/nce.ini':
    ensure => file,
    force => true,
    owner => 'www-data',
    group => 'root',
    content => template('sunetdrive/application/nce.ini.erb'),
    mode => '0644',
  }
  unless $is_multinode{
    user { 'www-data': ensure => present, system => true }
@@ -150,14 +134,28 @@ define sunetdrive::app_type (
    group => 'root',
  }
  file { '/usr/local/bin/upgrade23-25.sh':
    ensure => absent,
    ensure => present,
    force => true,
    owner => 'root',
    group => 'root',
    content => template('sunetdrive/application/upgrade23-25.erb.sh'),
    mode => '0744',
  }
  file { '/usr/local/bin/remount_user_bucket_as_project.sh':
    ensure => present,
    force => true,
    owner => 'root',
    group => 'root',
    content => template('sunetdrive/application/remount_user_bucket_as_project.sh'),
    mode => '0744',
  }
  file { '/opt/rotate/conf.d/nextcloud.conf':
    ensure => file,
    force => true,
    owner => 'root',
    group => 'root',
    content => "#This file is managed by puppet\n#filename:retention days:maxsize mb\n/opt/nextcloud/nextcloud.log:180:256\n",
    content => "#This file is managed by puppet\n#filename:retention days:maxsize mb\n/opt/nextcloud/nextcloud.log:180:256\n/opt/nextcloud/audit.log:180:256\n",
    mode => '0644',
  }
  file { '/opt/rotate/conf.d/redis.conf':
@@ -206,7 +204,14 @@ define sunetdrive::app_type (
    force => true,
    owner => 'www-data',
    group => 'root',
    mode => '0644',
    mode => '0640',
  }
  file { '/opt/nextcloud/audit.log':
    ensure => file,
    force => true,
    owner => 'www-data',
    group => 'root',
    mode => '0640',
  }
  file { '/opt/nextcloud/rclone.conf':
    ensure => file,
@@ -215,6 +220,32 @@ define sunetdrive::app_type (
    content => template('sunetdrive/application/rclone.conf.erb'),
    mode => '0644',
  }
  file { '/opt/nextcloud/apache.php.ini':
    ensure => file,
    force => true,
    owner => 'www-data',
    group => 'root',
    content => template('sunetdrive/application/apache.php.ini.erb'),
    mode => '0644',
  }

  file { '/opt/nextcloud/apcu.ini':
    ensure => file,
    force => true,
    owner => 'www-data',
    group => 'root',
    content => template('sunetdrive/application/apcu.ini.erb'),
    mode => '0644',
  }

  file { '/opt/nextcloud/cli.php.ini':
    ensure => file,
    force => true,
    owner => 'www-data',
    group => 'root',
    content => template('sunetdrive/application/cli.php.ini.erb'),
    mode => '0644',
  }
  file { '/usr/local/bin/migrate_external_mounts':
    ensure => file,
    force => true,
@@ -38,8 +38,8 @@ define sunetdrive::cloudimage(
    resolver => ['130.242.80.14', '130.242.80.99'],
    search => $search,
    #
    repo => $::cosmos_repo_origin_url,
    tagpattern => $::cosmos_tag_pattern,
    repo => $facts['cosmos_repo_origin_url'],
    tagpattern => $facts['cosmos_tag_pattern'],
    #
    cpus => $cpus,
    memory => $memory,
manifests/common.pp (new file, 53 lines)
@@ -0,0 +1,53 @@
# Common class
class sunetdrive::common {
  include sunet::tools
  include sunet::motd
  include apt
  include apparmor
  include sunet::packages::jq
  if $::facts['sunet_nftables_enabled'] != 'yes' {
    warning('Enabling UFW')
    include ufw
  } else {
    if $facts['networking']['hostname'] =~ /^kube[wc]/ {
      warning('Setting nftables to installed but disabled')
      ensure_resource ('class','sunet::nftables::init', { enabled => false })
    } else {
      warning('Enabling nftables')
      ensure_resource ('class','sunet::nftables::init', { })
    }
  }
  package {'sysstat': ensure => 'latest'}
  package {'needrestart': ensure => installed}
  service {'sysstat': provider => 'systemd'}
  file_line { 'enable_sa':
    ensure => 'present',
    line => 'ENABLED="true"',
    path => '/etc/default/sysstat',
    match => 'ENABLED="false"',
    require => Package['sysstat'],
  }
  file_line { 'sa_cron_comment':
    ensure => 'present',
    line => '# Activity reports every 2 minutes everyday',
    path => '/etc/cron.d/sysstat',
    match => '^#\ Activity\ reports\ every\ 10\ minutes\ everyday',
    require => Package['sysstat'],
    notify => Service['sysstat'],
  }
  file_line { 'sa_cron':
    ensure => 'present',
    line => '*/2 * * * * root command -v debian-sa1 > /dev/null && debian-sa1 1 1',
    path => '/etc/cron.d/sysstat',
    match => '^5-55/10',
    require => Package['sysstat'],
    notify => Service['sysstat'],
  }
  if $::facts['os']['distro']['id'] == 'Debian' {
    exec { 'sysstat_systemd_timer':
      command => 'systemctl enable --now sysstat-collect.timer',
      unless => 'systemctl is-enabled --quiet sysstat-collect.timer',
    }
  }
}
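The new class is a baseline profile; how a host pulls it in is outside this diff, but a node definition along these lines is the usual shape (hostname hypothetical):

```puppet
# Illustration only; the real wiring lives outside this repository.
node 'script1.drive.test.sunet.se' {
  include sunetdrive::common
  include sunetdrive::dockerhost
}
```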
@@ -4,53 +4,47 @@ define sunetdrive::db_type(
  $bootstrap=undef,
  $location=undef,
  $override_config = undef,
  $override_compose = undef)
  $override_compose = undef,
)
{

  # Config from group.yaml
  $environment = sunetdrive::get_environment()
  $mariadb_version = hiera("mariadb_version_${environment}")
  $is_multinode = (($override_config != undef) and ($override_compose != undef))
  if $is_multinode {
    $config = $override_config
    $mysql_root_password = $config['mysql_root_password']
    $mysql_user_password = $config['mysql_user_password']
    $backup_password = $config['backup_password']
    $mariadb_dir = $config['mariadb_dir']
    $mycnf_path = $config['mycnf_path']
    $server_id = '1000'
  } else {
    $config = hiera_hash($environment)
    $mysql_root_password = safe_hiera('mysql_root_password')
    $backup_password = safe_hiera('backup_password')
    $proxysql_password = safe_hiera('proxysql_password')
    $mysql_user_password = safe_hiera('mysql_user_password')
    $mariadb_dir = '/etc/mariadb'
    $mycnf_path = 'sunetdrive/mariadb/my.cnf.erb'
    $server_id = 1000 + Integer($facts['hostname'][-1])
    ensure_resource('file',"${mariadb_dir}", { ensure => directory, recurse => true } )
    $dirs = ['datadir', 'init', 'conf', 'backups', 'scripts' ]
    $dirs.each |$dir| {
      ensure_resource('file',"${mariadb_dir}/${dir}", { ensure => directory, recurse => true } )
    }
    $config = hiera_hash($environment)
    $mysql_root_password = safe_hiera('mysql_root_password')
    $backup_password = safe_hiera('backup_password')
    $proxysql_password = safe_hiera('proxysql_password')
    $mysql_user_password = safe_hiera('mysql_user_password')
    $mariadb_dir = '/etc/mariadb'
    $mycnf_path = 'sunetdrive/mariadb/my.cnf.erb'
    $server_id = 1000 + Integer($facts['networking']['hostname'][-1])
    ensure_resource('file',$mariadb_dir, { ensure => directory, recurse => true } )
    $dirs = ['datadir', 'init', 'conf', 'backups', 'scripts' ]
    $dirs.each |$dir| {
      ensure_resource('file',"${mariadb_dir}/${dir}", { ensure => directory, recurse => true } )
    }

  $nextcloud_ip = $config['app']

  unless $is_multinode {
    $db_ip = $config['db']
    $db_ipv6 = $config['db_v6']
    $backup_ip = $config['backup']
    $backup_ipv6 = $config['backup_v6']
    $ports = [3306, 4444, 4567, 4568]

    sunet::misc::ufw_allow { 'mariadb_ports':
      from => $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6,
      port => $ports,
    }
    sunet::system_user {'mysql': username => 'mysql', group => 'mysql' }
    $db_ip = $config['db']
    $db_ipv6 = $config['db_v6']
    $backup_ip = $config['backup']
    $backup_ipv6 = $config['backup_v6']
    $ports = [3306, 4444, 4567, 4568]
    if $location =~ /^multinode/ {
      $from = $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6 + $config['kube'] + $config['kube_v6']
    } elsif $location == 'sunet-test' or $location == 'sunet-prod' {
      $from = $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6 + $config['imap'] + $config['imap_v6'] + $config['smtp'] + $config['smtp_v6']
    } else {
      $from = $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6
    }

    sunet::misc::ufw_allow { 'mariadb_ports':
      from => $from,
      port => $ports,
    }
    sunet::system_user {'mysql': username => 'mysql', group => 'mysql' }


  if $location =~ /^lookup/ {
    $sql_files = ['02-backup_user.sql', '03-proxysql.sql', '05-lookup.sql']
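A worked example of the server_id derivation above, with a hypothetical host:

```puppet
# Hypothetical host, illustration only: on a node whose short hostname
# ends in '3', the server_id becomes 1000 + 3 = 1003.
$example_hostname = 'intern-db3'
$example_server_id = 1000 + Integer($example_hostname[-1])  # => 1003
```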
@@ -90,45 +84,46 @@ define sunetdrive::db_type(
    mode => '0744',
  }
  sunet::scriptherder::cronjob { 'purge_binlogs':
    cmd => "/usr/local/bin/purge-binlogs",
    cmd => '/usr/local/bin/purge-binlogs',
    hour => '6',
    minute => '0',
    ok_criteria => ['exit_status=0','max_age=2d'],
    warn_criteria => ['exit_status=1','max_age=3d'],
  }
  if $is_multinode {
    $docker_compose = $override_compose
  } else {
    file { '/usr/local/bin/size-test':
      ensure => present,
      content => template('sunetdrive/mariadb/size-test.erb'),
      mode => '0744',
    }
    file { '/usr/local/bin/status-test':
      ensure => present,
      content => template('sunetdrive/mariadb/status-test.erb'),
      mode => '0744',
    }
    file { "/etc/sudoers.d/99-size-test":
      ensure => file,
      content => "script ALL=(root) NOPASSWD: /usr/local/bin/size-test\n",
      mode => '0440',
      owner => 'root',
      group => 'root',
    }
    file { "/etc/sudoers.d/99-status-test":
      ensure => file,
      content => "script ALL=(root) NOPASSWD: /usr/local/bin/status-test\n",
      mode => '0440',
      owner => 'root',
      group => 'root',
    }
    $docker_compose = sunet::docker_compose { 'drive_mariadb_docker_compose':
      content => template('sunetdrive/mariadb/docker-compose_mariadb.yml.erb'),
      service_name => 'mariadb',
      compose_dir => '/opt/',
      compose_filename => 'docker-compose.yml',
      description => 'Mariadb server',
    }
    file { '/usr/local/bin/mysql':
      ensure => present,
      content => template('sunetdrive/mariadb/mysql.erb.sh'),
      mode => '0744',
    }
    file { '/usr/local/bin/size-test':
      ensure => present,
      content => template('sunetdrive/mariadb/size-test.erb'),
      mode => '0744',
    }
    file { '/usr/local/bin/status-test':
      ensure => present,
      content => template('sunetdrive/mariadb/status-test.erb'),
      mode => '0744',
    }
    file { '/etc/sudoers.d/99-size-test':
      ensure => file,
      content => "script ALL=(root) NOPASSWD: /usr/local/bin/size-test\n",
      mode => '0440',
      owner => 'root',
      group => 'root',
    }
    file { '/etc/sudoers.d/99-status-test':
      ensure => file,
      content => "script ALL=(root) NOPASSWD: /usr/local/bin/status-test\n",
      mode => '0440',
      owner => 'root',
      group => 'root',
    }
    $docker_compose = sunet::docker_compose { 'drive_mariadb_docker_compose':
      content => template('sunetdrive/mariadb/docker-compose_mariadb.yml.erb'),
      service_name => 'mariadb',
      compose_dir => '/opt/',
      compose_filename => 'docker-compose.yml',
      description => 'Mariadb server',
    }
  }
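A multinode host flips `$is_multinode` by passing both overrides; sketched with hypothetical values:

```puppet
# Illustration only; $my_config and $my_compose are hypothetical.
sunetdrive::db_type { 'multinode_db':
  location         => 'multinode-test',
  override_config  => $my_config,   # pre-built config hash
  override_compose => $my_compose,  # pre-built compose resource
}
```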
@@ -1,4 +1,3 @@
include apt
# Wrapper for sunet::dockerhost to do these specific things
class sunetdrive::dockerhost(
  String $version = safe_hiera('docker_version'),

@@ -8,6 +7,8 @@ class sunetdrive::dockerhost(
  String $docker_args = '',
  Optional[String] $docker_dns = undef,
  String $storage_driver = 'aufs',
  Boolean $write_daemon_config = false,
  Boolean $enable_ipv6 = false,
) {
  if $version == 'NOT_SET_IN_HIERA' {
    fail('Docker version not set in Hiera')

@@ -26,6 +27,8 @@ class sunetdrive::dockerhost(
    storage_driver => $storage_driver,
    docker_network => true, # let docker choose a network for the 'docker' bridge
    compose_version => $compose_version,
    write_daemon_config => $write_daemon_config,
    enable_ipv6 => $enable_ipv6,
  }
  file { '/etc/apt/preferences.d/containerd.io.pref':
    ensure => absent,
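The two new parameters default to false, so existing hosts keep their behavior; opting in would look like this (values hypothetical, normally set through Hiera):

```puppet
class { 'sunetdrive::dockerhost':
  write_daemon_config => true,  # hypothetical opt-in
  enable_ipv6         => true,  # hypothetical opt-in
}
```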
@@ -4,7 +4,7 @@ class sunetdrive::infra_script (
  $location = undef
) {
  $environment = sunetdrive::get_environment()
  $customer = "common"
  $customer = 'common'
  $config = hiera_hash($environment)
  $gss_backup_server = $config['gss_backup_server']
  $lookup_backup_server = $config['lookup_backup_server']
manifests/jupyter_site.pp (new file, 38 lines)
|
|||
#Class for SUNET-Drive-portal-Server
|
||||
class sunetdrive::jupyter_site (
|
||||
String $site_version = '0.0.1-1'
|
||||
) {
|
||||
|
||||
$domain = 'jupyter.sunet.dev'
|
||||
sunet::docker_compose { 'portal_compose':
|
||||
content => template('sunetdrive/jupyter_site/docker-compose.erb.yaml'),
|
||||
service_name => 'jupyter_site',
|
||||
compose_dir => '/opt/',
|
||||
compose_filename => 'docker-compose.yml',
|
||||
description => 'Web server',
|
||||
}
|
||||
|
||||
exec { 'workaround_docker_compose_dir':
|
||||
command => 'mkdir -p /opt/jupyter_site/nginx',
|
||||
unless => 'test -d /opt/jupyter_site/nginx',
|
||||
}
|
||||
|
||||
$nginx_dirs = ['acme', 'certs', 'conf', 'dhparam', 'html', 'vhost']
|
||||
$nginx_dirs.each | $dir| {
|
||||
file { "/opt/jupyter_site/nginx/${dir}":
|
||||
ensure => directory,
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0751',
|
||||
}
|
||||
}
|
||||
sunet::misc::ufw_allow { 'https':
|
||||
from => '0.0.0.0/0',
|
||||
port => 443,
|
||||
}
|
||||
# For acme and redirect
|
||||
sunet::misc::ufw_allow { 'http':
|
||||
from => '0.0.0.0/0',
|
||||
port => 80,
|
||||
}
|
||||
}
|
@@ -5,22 +5,27 @@ class sunetdrive::lookup (
) {

  $environment = sunetdrive::get_environment()
  $config = lookup($environment, undef, undef, undef)

  $public_url = "https://${config['site_name']}"

  # Firewall settings
  $nextcloud_ip = hiera_array("${location}_app", [])
  $tug_office = hiera_array('tug_office')

  $dbhost = 'proxysql_proxysql_1'
  $dbhost = '127.0.0.1'
  $gss_jwt_key = safe_hiera('gss_jwt_key')
  $replication_auth = safe_hiera('replication_auth')
  $mysql_user_password = safe_hiera('mysql_user_password')
  $lookup_version = hiera("lookup_version_${environment}")
  $email_sender = $config['email_sender']

  #Create users
  user { 'www-data': ensure => present, system => true }

  file { '/opt/lookup/config.php':
    ensure => present,
    ensure => file,
    owner => 'www-data',
    group => 'root',
    content => template('sunetdrive/lookup/config.php.erb'),
@@ -4,10 +4,10 @@ class sunetdrive::mariadb (
  $location = undef,
  $tag_mariadb = undef,
  $override_config = undef,
  $override_compose = undef
  $override_compose = undef,
) {

  $quorum_id = $::fqdn
  $quorum_id = $facts['networking']['fqdn']
  $quorum_password = safe_hiera('quorum_password')
  $db = sunetdrive::db_type { 'base_db':
    bootstrap => $bootstrap,
@@ -1,5 +1,6 @@
# This is an asynchronous replica of the MariaDB Cluster for SUNET Drive
class sunetdrive::mariadb_backup($tag_mariadb=undef, $location=undef) {
  include sunet::packages::netcat_openbsd
  $dirs = [ 'datadir', 'init', 'conf', 'backups' ]
  $dirs.each | $dir | {
    ensure_resource('file',"/opt/mariadb_backup/${dir}", { ensure => directory, recurse => true } )
@@ -5,7 +5,7 @@ class sunetdrive::multinode (
)
{
  include sunet::packages::yq
  $myname = $facts['hostname']
  $myname = $facts['networking']['hostname']
  $is_multinode = true;
  $environment = sunetdrive::get_environment()
  $lb_servers = hiera_hash($environment)['lb_servers']
@@ -14,6 +14,8 @@ class sunetdrive::multinode (
  $db_ip = hiera_hash($environment)['db']
  $admin_password = hiera('admin_password')
  $cluster_admin_password = hiera('cluster_admin_password')
  # This is a global value from common.yaml but overridden in the gss-servers local.yaml
  $gss_mode = hiera('gss_mode')

  $twofactor_enforced_groups = []
  $twofactor_enforced_excluded_groups = []
@@ -209,14 +211,16 @@ MACAddressPolicy=none'
    hour => '0',
    weekday => '0',
  }
  if $nodenumber == '2' {
    cron { 'add_back_bucket_for_karin_nordgren':
      command => '(/usr/local/bin/occ nextcloud-kmh_app_1 files_external:list karin_nordgren@kmh.se && /home/script/bin/create_bucket.sh nextcloud-kmh_app_1 karin_nordgren@kmh.se karin-nordgren-drive-sunet-se) || /bin/true',
      user => 'root',
      minute => '*/10',
    }
  }
  # if $nodenumber == '2' {
  #   cron { 'add_back_bucket_for_karin_nordgren':
  #     command => '(/usr/local/bin/occ nextcloud-kmh_app_1 files_external:list karin_nordgren@kmh.se && /home/script/bin/create_bucket.sh nextcloud-kmh_app_1 karin_nordgren@kmh.se karin-nordgren-drive-sunet-se) || /bin/true',
  #     user => 'root',
  #     minute => '*/10',
  #   }
  # }
  $customers.each | $index, $customer | {
    $customer_config_full = hiera_hash($customer)
    $customer_config = $customer_config_full[$environment]
    cron { "multinode_cron_${customer}":
      command => "/opt/nextcloud/cron.sh nextcloud-${customer}_app_1",
      require => File['/opt/nextcloud/cron.sh'],
@@ -224,11 +228,19 @@ MACAddressPolicy=none'
      minute => '*/10',
    }
    if $environment == 'prod' {
      $s3_bucket = "primary-${customer}-drive.sunet.se"
      if 'primary_bucket' in $customer_config.keys() {
        $s3_bucket = $customer_config['primary_bucket']
      } else {
        $s3_bucket = "primary-${customer}-drive.sunet.se"
      }
      $site_name = "${customer}.drive.sunet.se"
      $trusted_proxies = ['lb1.drive.sunet.se','lb2.drive.sunet.se', 'lb3.drive.sunet.se', 'lb4.drive.sunet.se']
    } else {
      $s3_bucket = "primary-${customer}-${environment}.sunet.se"
      if 'primary_bucket' in $customer_config.keys() {
        $s3_bucket = $customer_config['primary_bucket']
      } else {
        $s3_bucket = "primary-${customer}-${environment}.sunet.se"
      }
      $site_name = "${customer}.drive.${environment}.sunet.se"
      $trusted_proxies = ["lb1.drive.${environment}.sunet.se","lb2.drive.${environment}.sunet.se",
        "lb3.drive.${environment}.sunet.se","lb4.drive.${environment}.sunet.se"]
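The repeated if/else around `primary_bucket` could be condensed with stdlib's `pick`; an equivalent form, sketched here for the prod branch (not what the repo does):

```puppet
# Equivalent condensation (not in the repo): use the per-customer
# override when present, else fall back to the derived default.
$s3_bucket = pick($customer_config['primary_bucket'], "primary-${customer}-drive.sunet.se")
```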
@@ -237,8 +249,6 @@ MACAddressPolicy=none'
    $apache_error_path = "/opt/multinode/${customer}/404.html"
    $config_php_path = "/opt/multinode/${customer}/config.php"
    $cron_log_path ="/opt/multinode/${customer}/cron.log"
    $customer_config_full = hiera_hash($customer)
    $customer_config = $customer_config_full[$environment]

    $dbhost = 'proxysql_proxysql_1'
    $dbname = "nextcloud_${customer}"
@@ -254,7 +264,12 @@ MACAddressPolicy=none'
    $mail_from_address = hiera("mail_from_address_${environment}")
    $mail_smtphost = hiera("mail_smtphost_${environment}")
    $nextcloud_log_path ="/opt/multinode/${customer}/nextcloud.log"
    $nextcloud_version = hiera("nextcloud_version_${environment}")
    $audit_log_path ="/opt/multinode/${customer}/audit.log"
    if $customer_config['nextcloud_version'] {
      $nextcloud_version = $customer_config['nextcloud_version']
    } else {
      $nextcloud_version = hiera("nextcloud_version_${environment}")
    }
    $nextcloud_version_string = split($nextcloud_version, '[-]')[0]
    $rclone_conf_path = "/opt/multinode/${customer}/rclone.conf"
    $redis_conf_dir = "/opt/multinode/${customer}/server"
@@ -263,8 +278,14 @@ MACAddressPolicy=none'
    $s3_host = $customer_config['s3_host']
    $s3_usepath = hiera('s3_usepath')
    $smtpuser = hiera("smtp_user_${environment}")
    $trusted_domains = [$site_name, $facts['fqdn'], 'localhost']
    $trusted_domains = [$site_name, $facts['networking']['fqdn'], 'localhost']
    $tug_office = hiera_array('tug_office')
    if $customer_config['twofactor_enforced_groups'] {
      $twofactor_enforced_groups = $customer_config['twofactor_enforced_groups']
    }
    if $customer_config['twofactor_enforced_excluded_groups'] {
      $twofactor_enforced_excluded_groups = $customer_config['twofactor_enforced_excluded_groups']
    }

    # Secrets from local.eyaml
    $admin_password = safe_hiera("${customer}_admin_password")
@@ -364,6 +385,13 @@ MACAddressPolicy=none'
      group => 'root',
      mode => '0644',
    }
    file { $audit_log_path:
      ensure => file,
      force => true,
      owner => 'www-data',
      group => 'root',
      mode => '0644',
    }
    file { $rclone_conf_path:
      ensure => present,
      owner => 'www-data',
@@ -14,4 +14,58 @@ class sunetdrive::multinode_db(){
    mode => '0744',
  }
  if $facts["networking"]["fqdn"] =~ /^multinode-db1\.drive\.(test\.){1}sunet\.se$/ {
    $statistics_secret = safe_hiera('statistics_secret')
    notify { 'hostmessage':
      message => 'We are on multinode-db1. Set up statistics environment.',
    }
    $rclone_url = 'https://downloads.rclone.org/rclone-current-linux-amd64.deb'
    $local_path = '/tmp/rclone-current-linux-amd64.deb'
    exec { 'rclone_deb':
      command => "/usr/bin/wget -q ${rclone_url} -O ${local_path}",
      creates => $local_path,
    }
    package { 'rclone':
      ensure => installed,
      provider => dpkg,
      source => $local_path,
      require => Exec['rclone_deb'],
    }
    file { '/root/.rclone.conf':
      ensure => file,
      content => template('sunetdrive/mariadb_backup/rclone.conf.erb'),
      owner => 'root',
      group => 'root',
      mode => '0600',
    }
    file { '/root/tasks/listusersbydep.sh':
      ensure => file,
      content => template('sunetdrive/mariadb/listusersdep.sh.erb'),
      owner => 'root',
      group => 'root',
      mode => '0700',
    }
    file { '/root/tasks/genusersondepartmentlists.sh':
      ensure => file,
      content => template('sunetdrive/mariadb/genuserdeplists.sh.erb'),
      owner => 'root',
      group => 'root',
      mode => '0700',
    }
    file {'/opt/mariadb/statistics/custdata.json':
      ensure => file,
      content => template('sunetdrive/mariadb/custconfig.json.erb'),
      owner => 'root',
      group => 'root',
      mode => '0600',
    }
    sunet::scriptherder::cronjob { 'genuserdeplists':
      cmd => '/root/tasks/genusersondepartmentlists.sh',
      hour => '2',
      minute => '5',
      ok_criteria => ['exit_status=0','max_age=30h'],
      warn_criteria => ['exit_status=1', 'max_age=60h'],
    }
  }
}
manifests/nrpe.pp (new file, 79 lines)
@@ -0,0 +1,79 @@
# NRPE class
class sunetdrive::nrpe(
  $loadw = '15,10,5',
  $loadc = '30,25,20',
  $procsw = 150,
  $procsc = 200,
) {

  require apt
  class { 'sunet::nagios':
    command_timeout => 600,
    loadw => $loadw,
    loadc => $loadc,
    procsw => $procsw,
    procsc => $procsc,
  }
  package {'nagios-plugins-contrib': ensure => latest}
  if ($facts['os']['name'] == 'Ubuntu' and versioncmp($facts['os']['release']['full'], '22.04') >= 0 ){
    $mem_w = '90'
    $mem_c = '95'
  } else {
    $mem_w = '10'
    $mem_c = '5'
  }
  $checks = ['nrpe_check_memory']
  $checks.each |$check| {
    ensure_resource("sunet::nagios::${check}", "nagios-nrpe-${check}")
  }
  sunet::nagios::nrpe_command {'check_entropy':
    command_line => '/usr/lib/nagios/plugins/check_entropy -w 256'
  }
  sunet::nagios::nrpe_command {'check_ntp_time':
    command_line => '/usr/lib/nagios/plugins/check_ntp_time -H ntp.se'
  }
  sunet::nagios::nrpe_command {'check_scriptherder':
    command_line => '/usr/local/bin/scriptherder --mode check'
  }
  sunet::sudoer {'nagios_run_needrestart_command':
    user_name => 'nagios',
    collection => 'nrpe_needrestart_check',
    command_line => '/usr/sbin/needrestart -p -l'
  }
  sunet::sudoer {'nagios_run_galera_command':
    user_name => 'nagios',
    collection => 'nrpe_galera_check',
    command_line => '/usr/lib/nagios/plugins/check_galera_cluster -w 2 -c 1 -0'
  }
  sunet::nagios::nrpe_command {'check_galera_cluster':
    command_line => '/usr/bin/sudo /usr/lib/nagios/plugins/check_galera_cluster -w 2 -c 1 -0'
  }
  sunet::sudoer {'nagios_run_proxysql_command':
    user_name => 'nagios',
    collection => 'nrpe_proxysql_check',
    command_line => '/usr/lib/nagios/plugins/check_proxysql_server, /usr/lib/nagios/plugins/check_mysql_server_status'
  }
  sunet::nagios::nrpe_command {'check_proxysql_server':
    command_line => '/usr/bin/sudo /usr/lib/nagios/plugins/check_proxysql_server'
  }
  sunet::sudoer {'nagios_run_replication_command':
    user_name => 'nagios',
    collection => 'nrpe_replication_check',
    command_line => '/usr/local/bin/check_replication'
  }
  sunet::nagios::nrpe_command {'check_async_replication':
    command_line => '/usr/bin/sudo /usr/local/bin/check_replication'
  }
  sunet::sudoer {'nagios_run_backup_command':
    user_name => 'nagios',
    collection => 'nrpe_backup_check',
    command_line => '/usr/local/bin/check_backups'
  }
  sunet::nagios::nrpe_command {'check_backups':
    command_line => '/usr/bin/sudo /usr/local/bin/check_backups'
  }
  sunet::nagios::nrpe_command {'check_mysql_server_status':
    command_line => '/usr/bin/sudo /usr/lib/nagios/plugins/check_mysql_server_status'
  }
}
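Every check in this new class follows one two-resource pattern: a sudoers grant scoped to a single plugin invocation, and an NRPE command that runs the plugin through sudo. The pattern once, with a hypothetical check name:

```puppet
# 'check_example' is a made-up name; only the pattern is real.
sunet::sudoer { 'nagios_run_example_command':
  user_name    => 'nagios',
  collection   => 'nrpe_example_check',
  command_line => '/usr/lib/nagios/plugins/check_example',
}
sunet::nagios::nrpe_command { 'check_example':
  command_line => '/usr/bin/sudo /usr/lib/nagios/plugins/check_example',
}
```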
@@ -3,6 +3,8 @@ class sunetdrive::onlyoffice () {
  $environment = sunetdrive::get_environment()
  $extra_hosts = hiera_hash($environment)['extra_hosts']
  $docker_tag = hiera_hash($environment)['collabora_tag']
  $token_name = 'Sunet'
  $access_token = safe_hiera('collabora_access_token')
  $customers = hiera('fullnodes')
  $multinode_customers = keys(hiera_hash('multinode_mapping'))
  if $environment == 'prod' {

@@ -10,14 +12,18 @@ class sunetdrive::onlyoffice () {
  } else {
    $domain = 'drive.test.sunet.se'
  }
  exec { 'collabora_docker_login':
    command => "docker login registry.gitlab.collabora.com -u ${token_name} -p ${access_token}",
  }
  sunet::collabora::docs { 'sunet-onlyoffice':
    dns => [ '89.32.32.32' ],
    extra_hosts => $extra_hosts,
    extra_volumes => ['/opt/collabora/coolwsd.xml:/etc/coolwsd/coolwsd.xml'],
    docker_tag => $docker_tag,
    docker_image => 'registry.gitlab.collabora.com/productivity/collabora-online-nc',
  }
  file {'/opt/collabora/coolwsd.xml':
    ensure => present,
    content => template('sunetdrive/document/coolwsd.xml.erb'),
  }
}
manifests/portal.pp (new file, 48 lines)
@@ -0,0 +1,48 @@
#Class for SUNET-Drive-portal-Server
class sunetdrive::portal (
  String $portal_version = '0.0.1-1'
) {

  $environment = sunetdrive::get_environment()
  if $environment == 'prod' {
    $domain = 'drive.sunet.se'
  } else {
    $domain = 'drive.test.sunet.se'
  }
  sunet::docker_compose { 'portal_compose':
    content => template('sunetdrive/portal/docker-compose.erb.yaml'),
    service_name => 'portal',
    compose_dir => '/opt/',
    compose_filename => 'docker-compose.yml',
    description => 'Portal server',
  }

  exec { 'workaround_docker_compose_dir':
    command => 'mkdir -p /opt/portal/nginx',
    unless => 'test -d /opt/portal/nginx',
  }

  $nginx_dirs = ['acme', 'certs', 'conf', 'dhparam', 'html', 'vhost']
  $nginx_dirs.each | $dir| {
    file { "/opt/portal/nginx/${dir}":
      ensure => directory,
      owner => 'root',
      group => 'root',
      mode => '0751',
    }
  }
  file { '/opt/portal/config.yaml':
    ensure => present,
    content => template('sunetdrive/portal/config.erb.yaml'),
    mode => '0644',
  }
  sunet::misc::ufw_allow { 'https':
    from => '0.0.0.0/0',
    port => 443,
  }
  # For acme and redirect
  sunet::misc::ufw_allow { 'http':
    from => '0.0.0.0/0',
    port => 80,
  }
}
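Since Puppet accepts an array of titles, the per-directory loop could be one resource; a possible condensation (not in the repo):

```puppet
$nginx_paths = $nginx_dirs.map |$dir| { "/opt/portal/nginx/${dir}" }
file { $nginx_paths:
  ensure => directory,
  owner  => 'root',
  group  => 'root',
  mode   => '0751',
}
```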
@@ -26,6 +26,9 @@ class sunetdrive::proxysql (
  $mysql_user = safe_hiera('mysql_user')

  $transaction_persistent = 1
  if $::facts['dockerhost2'] == 'yes' {
    $hostnet = true
  }

  file { '/usr/local/bin/proxysql':
    ensure => file,
@@ -17,10 +17,10 @@ class sunetdrive::redis_cluster (
    content => template('sunetdrive/redis_cluster/reset_cluster.erb.sh'),
    mode => '0700',
  }
  file { '/root/.bashrc':
    ensure => present,
    content => template('sunetdrive/redis_cluster/bashrc.erb'),
    mode => '0644',
  exec { 'set_permissions_directory':
    command => 'chown -R 999:root /opt/redis/node-*'
  }
  exec { 'set_permissions_files':
    command => 'chown -R 999:999 /opt/redis/node-*/*'
  }

}
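The two chown execs run on every Puppet run; a guarded variant, sketched here (not in the repo), would make them idempotent and make the shell dependency explicit:

```puppet
exec { 'set_permissions_directory':
  command  => 'chown -R 999:root /opt/redis/node-*',
  provider => shell,  # glob expansion needs a shell
  unless   => 'test -z "$(find /opt/redis -maxdepth 1 -name "node-*" ! -user 999)"',
}
```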
@@ -1,12 +1,16 @@
#Class for SUNET-Drive-Lookup-Server
class sunetdrive::reva (
  String $domain = '',
  String $reva_domain = ''
  String $domain = 'drive.test.sunet.se',
  String $customer = 'sunet',
  String $reva_domain = "${customer}-reva.${domain}",
  String $reva_version = 'v1.26.0',
) {

  $environment = sunetdrive::get_environment()
  $shared_secret = safe_hiera('shared_secret')
  $statistics_secret = safe_hiera('statistics_secret')
  $iopsecret = safe_hiera('iopsecret')
  $smtp_credentials = safe_hiera('smtp_credentials')

  # Firewall settings
  #Create users

@@ -19,18 +23,17 @@ class sunetdrive::reva (
    content => template('sunetdrive/reva/revad.toml.erb'),
    mode => '0644',
  }
  file { '/opt/reva/rclone.conf':
    ensure => present,
    owner => 'www-data',
    group => 'root',
    content => template('sunetdrive/reva/rclone.conf.erb'),
    mode => '0644',
  }
  file { '/opt/reva/data':
    ensure => directory,
    owner => 'www-data',
  }
  file { '/opt/reva/ocm-providers.json':
    ensure => present,
    owner => 'www-data',
    group => 'root',
    content => template('sunetdrive/reva/ocm-providers.json.erb'),
    mode => '0644',
  }

  sunet::docker_compose { 'drive_reva_docker_compose':
    content => template('sunetdrive/reva/docker-compose.yml.erb'),
    service_name => 'reva',

@@ -38,9 +41,12 @@ class sunetdrive::reva (
    compose_filename => 'docker-compose.yml',
    description => 'Sciencemesh reva server',
  }

  sunet::misc::ufw_allow { 'https_reva':
    from => '0.0.0.0/0',
    port => 443,
  $ports = [443,19000]
  $ports.each | $port|{
    sunet::misc::ufw_allow { "reva_${port}":
      from => '0.0.0.0/0',
      port => $port,
    }
  }

}
@@ -18,7 +18,9 @@ class sunetdrive::script (
  $backup_server = $config['backup_server']
  $rclone_url = 'https://downloads.rclone.org/rclone-current-linux-amd64.deb'
  $local_path = '/tmp/rclone-current-linux-amd64.deb'
  $singlenodes = hiera('singlenodes')
  $singlenodes = lookup('singlenodes')
  $multinodes = keys(lookup('multinode_mapping'))
  $extra_backup_jobs = pick($config['extra_backup_jobs'], {})

  if $customer == 'mdu' {
    $eppn_suffix = 'mdh.se'

@@ -54,6 +56,17 @@ class sunetdrive::script (
  }
  $site_name = $config['site_name']
  $user_bucket_name = $config['user_bucket_name']
  if $config['user_scans'] {
    $config['user_scans'].each |$job| {
      sunet::scriptherder::cronjob { $job['name']:
        cmd => "ssh -t -l script ${job['server']} /usr/bin/sudo /usr/local/bin/occ ${job['container']} files:scan ${job['user']}",
        hour => $job['hour'],
        minute => $job['minute'],
        ok_criteria => ['exit_status=0','max_age=1d'],
        warn_criteria => ['exit_status=1','max_age=2d'],
      }
    }
  }

  # It is a start that will get us user buckets and primary buckets
  $backup_projects = $location
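The `user_scans` loop expects each Hiera entry to carry the keys it dereferences; a hypothetical entry, written as a Puppet hash for illustration (the real data lives in Hiera):

```puppet
# All values are made up; only the key names come from the loop above.
$example_user_scans = [{
  'name'      => 'scan_example_user',
  'server'    => 'node1.example.sunet.se',
  'container' => 'nextcloud-example_app_1',
  'user'      => 'user@example.se',
  'hour'      => '3',
  'minute'    => '30',
}]
```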
@@ -67,7 +80,11 @@ class sunetdrive::script (
    source => $local_path,
    require => Exec['rclone_deb'],
  }
  package { 'python3.9':
  package { 'fuse3':
    ensure => installed,
    provider => apt,
  }
  package { 'python3':
    ensure => installed,
    provider => apt,
  }

@@ -75,11 +92,29 @@ class sunetdrive::script (
    ensure => installed,
    provider => apt,
  }
  package { 'xmlstarlet':
    ensure => installed,
    provider => apt,
  }
  $drive_version = '0.3.1'
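  # Debian 12 marks its system Python as externally managed (PEP 668),
  # so system-wide pip installs there need --break-system-packages.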
  if $facts['os']['distro']['id'] == 'Debian' {
    $pip_cmd = 'pip3 install --break-system-packages'
  } else {
    $pip_cmd = 'python3 -m pip install'
  }
  exec { 'drive-utils':
    command => "python3.9 -m pip install https://pypi.sunet.se/packages/drive-utils-${drive_version}.tar.gz",
    unless => "python3.9 -m pip list | grep drive-utils | grep ${drive_version}",
    require => Package['python3.9'],
    command => "${pip_cmd} https://pypi.sunet.se/packages/drive-utils-${drive_version}.tar.gz",
    unless => "python3 -m pip list | grep drive-utils | grep ${drive_version}",
    require => Package['python3'],
  }
  file { '/opt/backups':
    ensure => directory,
    mode => '0700'
  }
  file { '/opt/backups/scripts':
    ensure => directory,
    mode => '0700'
  }
  file { '/root/.ssh/':
    ensure => directory,

@@ -230,8 +265,13 @@ class sunetdrive::script (
      warn_criteria => ['exit_status=1','max_age=31d'],
    }
  }
  # Opt out of userbuckets
  unless $customer in ['extern', 'gih', 'suni', 'common'] {
  # Opt out of userbuckets, also customers that ended the contract
  if $customer in ['extern', 'gih', 'hkr', 'suni', 'common', 'su', 'lnu'] {
    sunet::scriptherder::cronjob { 'makebuckets':
      ensure => absent,
      cmd => 'bin/true',
    }
  } else {
    sunet::scriptherder::cronjob { 'makebuckets':
      cmd => '/root/tasks/makebuckets.sh',
      minute => '*/5',

@@ -297,6 +337,16 @@ class sunetdrive::script (
    }
  }
  if $customer == 'common' {
    $multinode_passwords = $multinodes.map | $index, $customer | {
      safe_hiera("${customer}_admin_app_password")
    }
    file { '/root/tasks/announce.sh':
      ensure => file,
      content => template('sunetdrive/script/multinodeannounce.erb.sh'),
      owner => 'root',
      group => 'root',
      mode => '0700',
    }
    file { '/root/tasks/backupmultinodedb.sh':
      ensure => file,
      content => template('sunetdrive/script/backupmultinodedb.erb.sh'),

@@ -304,6 +354,13 @@ class sunetdrive::script (
      group => 'root',
      mode => '0700',
    }
    file { '/opt/backups/scripts/hb.sh':
      ensure => file,
      content => template('sunetdrive/script/backup-hb.erb.sh'),
      owner => 'root',
      group => 'root',
      mode => '0700',
    }
    sunet::scriptherder::cronjob { 'backupmultinodedb':
      cmd => '/root/tasks/backupmultinodedb.sh',
      hour => '2',

@@ -326,7 +383,7 @@ class sunetdrive::script (
      ok_criteria => ['exit_status=0','max_age=2d'],
      warn_criteria => ['exit_status=1','max_age=3d'],
    }
  }
  $singlenodes.each | $singlenode| {
    $multinode = hiera_hash('multinode_mapping')[$singlenode]['server']
    $multinodeserver = "${multinode}.${site_name}"

@@ -377,6 +434,21 @@ class sunetdrive::script (
      warn_criteria => ['exit_status=1','max_age=3d'],
    }
  } else {
    $admin_app_password = safe_hiera('admin_app_password')
    file { '/root/tasks/announce.sh':
      ensure => file,
      content => template('sunetdrive/script/announce.erb.sh'),
      owner => 'root',
      group => 'root',
      mode => '0700',
    }
    file { '/root/tasks/delete_announcement_with_subject.sh':
      ensure => file,
      content => template('sunetdrive/script/delete_announcement_with_subject.erb.sh'),
      owner => 'root',
      group => 'root',
      mode => '0700',
    }
    file { '/root/tasks/backupmultinodedb.sh':
      ensure => absent,
    }
@ -23,7 +23,6 @@ class sunetdrive::scriptreceiver()
|
|||
owner => 'script',
|
||||
group => 'script',
|
||||
}
|
||||
|
||||
$kano_shell = ['89.46.21.246','2001:6b0:6c::1bc']
|
||||
sunet::misc::ufw_allow { 'script_port':
|
||||
from => $script_ipv4 + $script_ipv6 + $kano_shell,
|
||||
|
@ -49,14 +48,28 @@ class sunetdrive::scriptreceiver()
|
|||
owner => 'root',
|
||||
group => 'root',
|
||||
}
|
||||
file { '/usr/local/bin/safer_reboot':
|
||||
file { '/usr/local/bin/get_drive_customers':
|
||||
ensure => file,
|
||||
content => template('sunetdrive/scriptreceiver/safer_reboot.erb'),
|
||||
mode => '0740',
|
||||
content => template('sunetdrive/scriptreceiver/get_drive_customers.erb.sh'),
|
||||
mode => '0744',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
}
|
||||
file { "/etc/sudoers.d/99-safer_reboot":
|
||||
file { '/usr/local/bin/safer_reboot':
|
||||
ensure => file,
|
||||
content => template('sunetdrive/scriptreceiver/safer_reboot.erb'),
|
||||
mode => '0744',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
}
|
||||
file { '/root/.bashrc':
|
||||
ensure => file,
|
||||
content => template('sunetdrive/scriptreceiver/bashrc.erb.sh'),
|
||||
mode => '0644',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
}
|
||||
file { '/etc/sudoers.d/99-safer_reboot':
|
||||
ensure => file,
|
||||
content => "script ALL=(root) NOPASSWD: /usr/local/bin/safer_reboot\n",
|
||||
mode => '0440',
|
||||
|
|
|
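The sudoers entry above is what lets the unprivileged script user trigger reboots. A minimal sketch of the intended call, assuming the script user reaches the host over SSH (the remote command itself is not shown in this diff):

    # as the "script" user on a scriptreceiver host; no password prompt thanks to NOPASSWD
    sudo /usr/local/bin/safer_reboot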
@@ -7,7 +7,7 @@ class sunetdrive::sitemonitornaemon() {
  $tls_servers_with_port = hiera_array('tls_servers_with_port')
  $nextcloud_version_prod = split(hiera('nextcloud_version_prod'),'[-]')[0]
  $nextcloud_version_test = split(hiera('nextcloud_version_test'),'[-]')[0]
  $monitorhost = $::fqdn
  $monitorhost = $facts['networking']['fqdn']
  $environment = sunetdrive::get_environment()
  $influx_passwd = safe_hiera('influx_passwd')
  $slack_url = safe_hiera('slack_url')
@@ -42,11 +42,15 @@ class sunetdrive::sitemonitornaemon() {
  }
  file { '/etc/naemon/conf.d/sunetdrive_thruk_templates.conf':
    ensure  => present,
    owner   => 'naemon',
    group   => 'naemon',
    content => template('sunetdrive/monitor/sunetdrive_thruk_templates.conf.erb'),
    mode    => '0644',
  }
  nagioscfg::service {'check_scriptherder':
    hostgroup_name => ['sunetdrive::nrpe'],
    check_command  => 'check_nrpe_1arg_to30!check_scriptherder',
    description    => 'Scriptherder Status',
    contact_groups => ['naemon-admins'],
  }
  nagioscfg::service {'check_galera_cluster':
    hostgroup_name => ['galera_monitor'],
    check_command  => 'check_nrpe_1arg!check_galera_cluster',

10
manifests/ubuntu_2004.pp
Normal file
@@ -0,0 +1,10 @@
# Class for Ubuntu 20.04
class sunetdrive::ubuntu_2004() {
  if $facts['os']['name'] == 'Ubuntu' and $facts['os']['distro']['release']['full'] == '20.04' {
    # Hide deprecation warnings for Ubuntu 2004
    file_line {'env_rubyopt':
      path => '/etc/environment',
      line => 'RUBYOPT=\'-W0\'',
    }
  }
}

@@ -1,13 +1,14 @@
Hello and welcome to your personal space for research data storage,
Welcome to your personal space for research data storage!

It is important that research data is managed in a secure and careful manner, so that it is protected from damage and destruction. Mälardalen University offers its researchers and doctoral students a secure and stable IT service that makes it possible to store research data. Furthermore, it enables research collaboration internally and externally since researchers can easily share their data within the project or with other individual researchers at MDU in a secure way. This technical infrastructure entails a good collaboration space for researchers, a secure handling of data, the possibility for the researcher to be able to store data in such a way that data management according to the FAIR principles can be complied with at MDU.
Researchers and PhD-students at MDU have access to a free storage space of 200 GB.

To meet the general need for research data storage, MDU has purchased Sunet Drive's S3 research data storage service. Each researcher will be offered a space of 200GB per person. To simplify administration, all spaces will be appropriated in advance in the same way, regardless of the research domain or total amount of research data.
Your storage space is in the “Your storage space” folder that appears a few minutes after opening SUNET Drive for the first time.

200 GB will not correspond to all of MDU researchers' data storage needs; therefore, the researcher will have the possibility to purchase additional data space via a digital form. The additional purchase function is done by internal billing and the purchased space is then visible at the next login in the researcher's personal space.
The “Your storage space” folder has an ABG structure which means that there are folders for Arbetsmaterial (work material), Bevarande (retention) and Gallringsbart (disposable).

If there is a need for a specific project space for research data storage and/or sharing, this is also possible. Such a space is created by an administrator after ordering project space through the same form as when purchasing additional storage space. The project space will then be visible to project members in the personal storage space at the next login.
It is possible to create subfolders within these main folders themselves.

Can a MDU student be included in the research project? It is possible to assign a student a time-limited access/sharing opportunity to specific folders in either the project space or the researcher's personal space. Contact the Data Access Unit (DAU) to enable such a feature.
All research data, except for military material and security-classified information, can be stored on SUNET Drive.
If your data needs to undergo an export control, please refer to the check list on the internal portal.

If you have any questions about the storage space, please contact dau@mdu.se.
A user guide and a FAQ regarding SUNET Drive is available on the internal portal. If you have any questions, please contact dau@mdu.se
160
templates/application/apache.php.ini.erb
Normal file
@@ -0,0 +1,160 @@
[PHP]
allow_url_fopen = On
allow_url_include = Off
auto_append_file =
auto_globals_jit = On
auto_prepend_file =
default_charset = "UTF-8"
default_mimetype = "text/html"
default_socket_timeout = 60
disable_classes =
disable_functions = pcntl_alarm,pcntl_fork,pcntl_waitpid,pcntl_wait,pcntl_wifexited,pcntl_wifstopped,pcntl_wifsignaled,pcntl_wifcontinued,pcntl_wexitstatus,pcntl_wtermsig,pcntl_wstopsig,pcntl_signal,pcntl_signal_get_handler,pcntl_signal_dispatch,pcntl_get_last_error,pcntl_strerror,pcntl_sigprocmask,pcntl_sigwaitinfo,pcntl_sigtimedwait,pcntl_exec,pcntl_getpriority,pcntl_setpriority,pcntl_async_signals,pcntl_unshare,
display_errors = Off
display_startup_errors = Off
doc_root =
enable_dl = Off
engine = On
error_reporting = E_ALL & ~E_DEPRECATED & ~E_STRICT
expose_php = Off
file_uploads = On
ignore_repeated_errors = Off
ignore_repeated_source = Off
implicit_flush = Off
log_errors = On
log_errors_max_len = 1024
max_execution_time = 86400
max_file_uploads = 20
max_input_time = 86400
memory_limit = <%= @php_memory_limit_mb %>M
output_buffering = Off
post_max_size = 30G
precision = 14
register_argc_argv = Off
report_memleaks = On
request_order = "GP"
serialize_precision = -1
short_open_tag = Off
unserialize_callback_func =
upload_max_filesize = 30G
user_dir =
variables_order = "GPCS"
zend.enable_gc = On
zend.exception_ignore_args = On
zlib.output_compression = Off
<% if @customer == "kau" -%>
upload_tmp_dir = /opt/tmp/
<% end %>

[CLI Server]
cli_server.color = On
[Date]
; Nothing here
[filter]
; Nothing here
[iconv]
; Nothing here
[imap]
; Nothing here
[intl]
; Nothing here
[sqlite3]
; Nothing here
[Pcre]
; Nothing here
[Pdo]
; Nothing here
[Pdo_mysql]
pdo_mysql.default_socket=
[Phar]
; Nothing here
[mail function]
SMTP = localhost
smtp_port = 25
mail.add_x_header = Off
[ODBC]
odbc.allow_persistent = On
odbc.check_persistent = On
odbc.max_persistent = -1
odbc.max_links = -1
odbc.defaultlrl = 4096
odbc.defaultbinmode = 1
[MySQLi]
mysqli.max_persistent = -1
mysqli.allow_persistent = On
mysqli.max_links = -1
mysqli.default_port = 3306
mysqli.default_socket =
mysqli.default_host =
mysqli.default_user =
mysqli.default_pw =
mysqli.reconnect = Off
[mysqlnd]
mysqlnd.collect_statistics = On
mysqlnd.collect_memory_statistics = Off
[OCI8]
; Nothing here
[PostgreSQL]
pgsql.allow_persistent = On
pgsql.auto_reset_persistent = Off
pgsql.max_persistent = -1
pgsql.max_links = -1
pgsql.ignore_notice = 0
pgsql.log_notice = 0
[bcmath]
bcmath.scale = 0
[browscap]
; Nothing here
[Session]
session.save_handler = files
session.use_strict_mode = 0
session.use_cookies = 1
session.use_only_cookies = 1
session.name = PHPSESSID
session.auto_start = 0
session.cookie_lifetime = 0
session.cookie_path = /
session.cookie_domain =
session.cookie_httponly =
session.cookie_samesite =
session.serialize_handler = php
session.gc_probability = 0
session.gc_divisor = 1000
session.gc_maxlifetime = 1440
session.referer_check =
session.cache_limiter = nocache
session.cache_expire = 180
session.use_trans_sid = 0
session.sid_length = 26
session.trans_sid_tags = "a=href,area=href,frame=src,form="
session.sid_bits_per_character = 5
[Assertion]
zend.assertions = -1
[COM]
; Nothing here
[mbstring]
; Nothing here
[gd]
; Nothing here
[exif]
; Nothing here
[Tidy]
tidy.clean_output = Off
[soap]
soap.wsdl_cache_enabled=1
soap.wsdl_cache_dir="/tmp"
soap.wsdl_cache_ttl=86400
soap.wsdl_cache_limit = 5
[sysvshm]
; Nothing here
[ldap]
ldap.max_links = -1
[dba]
; Nothing here
[opcache]
opcache.interned_strings_buffer=32
[curl]
; Nothing here
[openssl]
; Nothing here
[ffi]
; Nothing here

2
templates/application/apcu.ini.erb
Normal file
@@ -0,0 +1,2 @@
extension=apcu.so
apc.enable_cli=1

158
templates/application/cli.php.ini.erb
Normal file
@@ -0,0 +1,158 @@
[PHP]
allow_url_fopen = On
allow_url_include = Off
auto_append_file =
auto_globals_jit = On
auto_prepend_file =
default_charset = "UTF-8"
default_mimetype = "text/html"
default_socket_timeout = 60
disable_classes =
disable_functions =
display_errors = Off
display_startup_errors = Off
doc_root =
enable_dl = Off
engine = On
error_reporting = E_ALL & ~E_DEPRECATED & ~E_STRICT
expose_php = On
file_uploads = On
ignore_repeated_errors = Off
ignore_repeated_source = Off
implicit_flush = Off
log_errors = On
log_errors_max_len = 1024
max_execution_time = 86400
max_file_uploads = 20
max_input_time = 86400
memory_limit = -1
output_buffering = Off
post_max_size = 16G
precision = 14
register_argc_argv = Off
report_memleaks = On
request_order = "GP"
serialize_precision = -1
short_open_tag = Off
unserialize_callback_func =
upload_max_filesize = 16G
user_dir =
variables_order = "GPCS"
zend.enable_gc = On
zend.exception_ignore_args = On
zlib.output_compression = Off
[CLI Server]
cli_server.color = On
[Date]
; Nothing here
[filter]
; Nothing here
[iconv]
; Nothing here
[imap]
; Nothing here
[intl]
; Nothing here
[sqlite3]
; Nothing here
[Pcre]
; Nothing here
[Pdo]
; Nothing here
[Pdo_mysql]
pdo_mysql.default_socket=
[Phar]
; Nothing here
[mail function]
SMTP = localhost
smtp_port = 25
mail.add_x_header = Off
[ODBC]
odbc.allow_persistent = On
odbc.check_persistent = On
odbc.max_persistent = -1
odbc.max_links = -1
odbc.defaultlrl = 4096
odbc.defaultbinmode = 1
[MySQLi]
mysqli.max_persistent = -1
mysqli.allow_persistent = On
mysqli.max_links = -1
mysqli.default_port = 3306
mysqli.default_socket =
mysqli.default_host =
mysqli.default_user =
mysqli.default_pw =
mysqli.reconnect = Off
[mysqlnd]
mysqlnd.collect_statistics = On
mysqlnd.collect_memory_statistics = Off
[OCI8]
; Nothing here
[PostgreSQL]
pgsql.allow_persistent = On
pgsql.auto_reset_persistent = Off
pgsql.max_persistent = -1
pgsql.max_links = -1
pgsql.ignore_notice = 0
pgsql.log_notice = 0
[bcmath]
bcmath.scale = 0
[browscap]
; Nothing here
[Session]
session.save_handler = files
session.use_strict_mode = 0
session.use_cookies = 1
session.use_only_cookies = 1
session.name = PHPSESSID
session.auto_start = 0
session.cookie_lifetime = 0
session.cookie_path = /
session.cookie_domain =
session.cookie_httponly =
session.cookie_samesite =
session.serialize_handler = php
session.gc_probability = 0
session.gc_divisor = 1000
session.gc_maxlifetime = 1440
session.referer_check =
session.cache_limiter = nocache
session.cache_expire = 180
session.use_trans_sid = 0
session.sid_length = 26
session.trans_sid_tags = "a=href,area=href,frame=src,form="
session.sid_bits_per_character = 5
[Assertion]
zend.assertions = -1
[COM]
; Nothing here
[mbstring]
; Nothing here
[gd]
; Nothing here
[exif]
; Nothing here
[Tidy]
tidy.clean_output = Off
[soap]
soap.wsdl_cache_enabled=1
soap.wsdl_cache_dir="/tmp"
soap.wsdl_cache_ttl=86400
soap.wsdl_cache_limit = 5
[sysvshm]
; Nothing here
[ldap]
ldap.max_links = -1
[dba]
; Nothing here
[opcache]
opcache.interned_strings_buffer=16
opcache.validate_timestamps=0
opcache.memory_consumption=128
[curl]
; Nothing here
[openssl]
; Nothing here
[ffi]
; Nothing here
@@ -3,7 +3,6 @@
config_php='/var/www/html/config/config.php'
dbhost="<%= @dbhost %>"
mysql_user_password="<%= @mysql_user_password %>"
admin_password="<%= @admin_password %>"
location="<%= @location %>"
bucket="<%= @s3_bucket %>"

@@ -14,6 +13,9 @@ if [[ "${user_input}" == "IKnowWhatIAmDoing" ]]; then
  echo "WARNING: This will delete everything in the database and reinstall Nextcloud."
  echo "You have 10 seconds to abort by hitting CTRL/C"
  sleep 10s
  echo "Setting temp admin password"
  apt update && apt install -y apg
  admin_password="$(apg -m 40 | head -1)"
  echo "Ok, proceeding."
  echo "Dropping database in 3 seconds"
  sleep 3s
@@ -48,6 +50,11 @@ EOF
instanceid=$(grep -E "^ 'instanceid'" ${config_php} | awk -F "'" '{print $4}')
secret=$(grep -E "^ 'secret'" ${config_php} | awk -F "'" '{print $4}')
passwordsalt=$(grep -E "^ 'passwordsalt'" ${config_php} | awk -F "'" '{print $4}')
echo "Now delete the admin user:"
echo "  occ user:delete admin"
echo "and then create a new admin user:"
echo "  /usr/local/bin/add_admin_user <username> <email address>"
echo ""
echo "Please use edit-secrets to add these variables to all Nextcloud servers:"
echo "instanceid: DEC::PKCS7[${instanceid}]!"
echo "secret: DEC::PKCS7[${secret}]!"

@@ -24,7 +24,12 @@ $CONFIG = array (
  'config_is_read_only' => true,
  'csrf.disabled' => true,
  'datadirectory' => '/var/www/html/data',
  'davstorage.request_timeout' => 86401,
<%- if @hostnet -%>
  'dbhost' => '127.0.0.1',
<%- else -%>
  'dbhost' => '<%= @dbhost %>',
<%- end -%>
  'dbname' => '<%= @dbname %>',
  'dbpassword' => '<%= @mysql_user_password %>',
  'dbport' => '3306',
@@ -38,6 +43,7 @@ $CONFIG = array (
<% if @location == 'sunet-test' -%>
  'filelocking.debug' => true,
<% end -%>
  'files_external_allow_create_new_local' => false,
  'forcessl' => true,
  'gs.enabled' => '<%= @gs_enabled %>',
  'gs.federation' => '<%= @gs_federation %>',
@@ -57,11 +63,12 @@ $CONFIG = array (
  'gss.master.url' => '<%= @gss_master_url %>',
  'gss.mode' => '<%= @gss_mode %>',
  'gss.user.discovery.module' => '\\OCA\\GlobalSiteSelector\\UserDiscoveryModules\\ManualUserMapping',
  'gss.username_format' => 'sanitize',
  'installed' => true,
  'instanceid' => '<%= @instanceid %>',
  'integrity.check.disabled' => true,
  'log_type' => 'file',
  'loglevel' => 0,
  'loglevel' => 1,
  'lookup_server' => '<%= @lookup_server %>',
  'mail_domain' => '<%= @mail_domain %>',
  'mail_from_address' => '<%= @mail_from_address %>',
@@ -90,6 +97,7 @@ $CONFIG = array (
    'region' => 'us-east-1',
    'hostname' => '<%= @s3_host %>',
    'port' => '',
    'useMultipartCopy' => false,
    'objectPrefix' => 'urn:oid:',
    'autocreate' => false,
    'use_ssl' => true,
@@ -119,6 +127,24 @@ $CONFIG = array (
    ],
    'timeout' => 1.1
  ],
<% elsif @location == 'gss-prod' -%>
  'redis.cluster' => [
    'failover_mode' => \RedisCluster::FAILOVER_ERROR,
    'password' => '<%= @redis_cluster_password %>',
    'read_timeout' => 0.0,
    'seeds' => [
      'redis1.drive.sunet.se:6379',
      'redis2.drive.sunet.se:6379',
      'redis3.drive.sunet.se:6379',
      'redis1.drive.sunet.se:6380',
      'redis2.drive.sunet.se:6380',
      'redis3.drive.sunet.se:6380',
      'redis1.drive.sunet.se:6381',
      'redis2.drive.sunet.se:6381',
      'redis3.drive.sunet.se:6381'
    ],
    'timeout' => 1.1
  ],
<% elsif @environment == 'test' && ! @is_multinode -%>
  'redis.cluster' => [
    'failover_mode' => \RedisCluster::FAILOVER_ERROR,
@@ -187,18 +213,23 @@ $CONFIG = array (
  'twofactor_enforced_groups' =>
  array (
    0 => 'admin',
<%- index = 1 -%>
<%- @twofactor_enforced_groups.each do |item| -%>
    1 => 'forcemfa',
<%- if @twofactor_enforced_groups -%>
<%- index = 2 -%>
<%- @twofactor_enforced_groups.each do |item| -%>
    <%= index %> => '<%= item %>',
<%- index += 1 -%>
<%- index += 1 -%>
<%- end -%>
<%- end -%>
  ),
  'twofactor_enforced_excluded_groups' =>
  array (
<%- index = 0 -%>
<%- @twofactor_enforced_excluded_groups.each do |item| -%>
<%- if @twofactor_enforced_excluded_groups -%>
<%- index = 0 -%>
<%- @twofactor_enforced_excluded_groups.each do |item| -%>
    <%= index %> => '<%= item %>',
<%- index += 1 -%>
<%- index += 1 -%>
<%- end -%>
<%- end -%>
  ),
  'updatechecker' => false,
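The reworked two-factor block above seeds the enforced-groups array with 'admin' and 'forcemfa' and only loops when the hiera value is actually set, starting the index at 2. A sketch of the rendered fragment, assuming @twofactor_enforced_groups = ['staff', 'students'] (hypothetical values):

    # rendered config.php fragment (illustration only):
    #   'twofactor_enforced_groups' =>
    #   array (
    #     0 => 'admin',
    #     1 => 'forcemfa',
    #     2 => 'staff',
    #     3 => 'students',
    #   ),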
@@ -5,15 +5,21 @@ services:
  app:
    image: docker.sunet.se/drive/nextcloud-custom:<%= @nextcloud_version %>
    restart: always
    container_name: nextcloud_app_1
<%- if @hostnet -%>
    network_mode: host
<%- end -%>
    volumes:
      - /opt/nextcloud/000-default.conf:/etc/apache2/sites-enabled/000-default.conf
      - /opt/nextcloud/mpm_prefork.conf:/etc/apache2/mods-available/mpm_prefork.conf
      - /opt/nextcloud/404.html:/var/www/html/404.html
      - /opt/nextcloud/apache.php.ini:/etc/php/8.0/apache2/php.ini
      - /opt/nextcloud/apcu.ini:/etc/php/8.0/mods-available/apcu.ini
      - /opt/nextcloud/cli.php.ini:/etc/php/8.0/cli/php.ini
      - /opt/nextcloud/complete_reinstall.sh:/complete_reinstall.sh
      - /opt/nextcloud/config.php:/var/www/html/config/config.php
      - /opt/nextcloud/nextcloud.log:/var/www/html/data/nextcloud.log
      - /opt/nextcloud/audit.log:/var/www/html/data/audit.log
      - /opt/nextcloud/rclone.conf:/rclone.conf
<%- if @skeletondirectory -%>
      - /opt/nextcloud/skeleton:<%= @skeletondirectory %>
@@ -24,18 +30,22 @@ services:
<% if @location =~ /^kau/ -%>
      - /mnt:/opt/tmp/
<%- end -%>
<%- if ! @hostnet -%>
    networks:
      - default
      - proxysql_proxysql
<%- end -%>
    dns:
      - 89.46.20.75
      - 89.46.21.29
      - 89.32.32.32
<%- if !@hostnet -%>
    ports:
      - 443:443
    command: apachectl -D FOREGROUND
<%- end -%>
    command: sh -c 'tail -F /var/www/html/data/nextcloud.log /var/www/html/data/audit.log| tee -a /proc/1/fd/2 & apachectl -D FOREGROUND'
    tty: true

<%- if !@hostnet -%>
networks:
  proxysql_proxysql:
    external: true
<%- end -%>

@@ -1,57 +1,58 @@
{
    "/antagning.se$/": "antagning.drive.sunet.se",
    "/bth.se$/": "bth.drive.sunet.se",
    "/chalmers.se$/": "chalmers.drive.sunet.se",
    "/du.se$/": "du.drive.sunet.se",
    "/eduid.se$/": "extern.drive.sunet.se",
    "/esh.se$/": "esh.drive.sunet.se",
    "/fhs.se$/": "fhs.drive.sunet.se",
    "/gih.se$/": "gih.drive.sunet.se",
    "/gu.se$/": "gu.drive.sunet.se",
    "/hb.se$/": "hb.drive.sunet.se",
    "/@shh.se$/": "shh.drive.sunet.se",
    "/@hh.se$/": "hh.drive.sunet.se",
    "/hhs.se$/": "hhs.drive.sunet.se",
    "/hig.se$/": "hig.drive.sunet.se",
    "/his.se$/": "his.drive.sunet.se",
    "/hj.se$/": "hj.drive.sunet.se",
    "/hkr.se$/": "hkr.drive.sunet.se",
    "/hv.se$/": "hv.drive.sunet.se",
    "/irf.se$/": "irf.drive.sunet.se",
    "/kb.se$/": "kb.drive.sunet.se",
    "/ki.se$/": "ki.drive.sunet.se",
    "/kkh.se$/": "kkh.drive.sunet.se",
    "/kmh.se$/": "kmh.drive.sunet.se",
    "/konstfack.se$/": "konstfack.drive.sunet.se",
    "/kth.se$/": "kth.drive.sunet.se",
    "/kva.se$/": "kva.drive.sunet.se",
    "/liu.se$/": "liu.drive.sunet.se",
    "/lnu.se$/": "lnu.drive.sunet.se",
    "/ltu.se$/": "ltu.drive.sunet.se",
    "/lu.se$/": "lu.drive.sunet.se",
    "/mah.se$/": "mau.drive.sunet.se",
    "/mau.se$/": "mau.drive.sunet.se",
    "/mdh.se$/": "mdu.drive.sunet.se",
    "/mdu.se$/": "mdu.drive.sunet.se",
    "/miun.se$/": "miun.drive.sunet.se",
    "/nordunet.se$/": "nordunet.drive.sunet.se",
    "/nrm.se$/": "nrm.drive.sunet.se",
    "/oru.se$/": "oru.drive.sunet.se",
    "/rkh.se$/": "rkh.drive.sunet.se",
    "/sics.se$/": "sics.drive.sunet.se",
    "/slu.se$/": "slu.drive.sunet.se",
    "/smhi.se$/": "smhi.drive.sunet.se",
    "/sp.se$/": "sp.drive.sunet.se",
    "/su.se$/": "su.drive.sunet.se",
    "/sunet.se$/": "sunet.drive.sunet.se",
    "/suni.se$/": "suni.drive.sunet.se",
    "/swamid.se$/": "swamid.drive.sunet.se",
    "/ths.se$/": "ths.drive.sunet.se",
    "/uhr.se$/": "uhr.drive.sunet.se",
    "/umu.se$/": "umu.drive.sunet.se",
    "/uniarts.se$/": "uniarts.drive.sunet.se",
    "/uu.se$/": "uu.drive.sunet.se",
    "/vinnova.se$/": "vinnova.drive.sunet.se",
    "/vr.se$/": "vr.drive.sunet.se",
    "/(\\w+\\.)*antagning.se$/": "antagning.drive.sunet.se",
    "/(\\w+\\.)*bth.se$/": "bth.drive.sunet.se",
    "/(\\w+\\.)*chalmers.se$/": "chalmers.drive.sunet.se",
    "/(\\w+\\.)*du.se$/": "du.drive.sunet.se",
    "/(\\w+\\.)*eduid.se$/": "extern.drive.sunet.se",
    "/(\\w+\\.)*esh.se$/": "esh.drive.sunet.se",
    "/(\\w+\\.)*fhs.se$/": "fhs.drive.sunet.se",
    "/(\\w+\\.)*gih.se$/": "gih.drive.sunet.se",
    "/(\\w+\\.)*gu.se$/": "gu.drive.sunet.se",
    "/(\\w+\\.)*hb.se$/": "hb.drive.sunet.se",
    "/(\\w+\\.)*shh.se$/": "shh.drive.sunet.se",
    "/(\\w+\\.)*hh.se$/": "hh.drive.sunet.se",
    "/(\\w+\\.)*hhs.se$/": "hhs.drive.sunet.se",
    "/(\\w+\\.)*hig.se$/": "hig.drive.sunet.se",
    "/(\\w+\\.)*his.se$/": "his.drive.sunet.se",
    "/(\\w+\\.)*hj.se$/": "hj.drive.sunet.se",
    "/(\\w+\\.)*hkr.se$/": "hkr.drive.sunet.se",
    "/(\\w+\\.)*hv.se$/": "hv.drive.sunet.se",
    "/(\\w+\\.)*irf.se$/": "irf.drive.sunet.se",
    "/(\\w+\\.)*kb.se$/": "kb.drive.sunet.se",
    "/(\\w+\\.)*ki.se$/": "ki.drive.sunet.se",
    "/(\\w+\\.)*kkh.se$/": "kkh.drive.sunet.se",
    "/(\\w+\\.)*kmh.se$/": "kmh.drive.sunet.se",
    "/(\\w+\\.)*konstfack.se$/": "konstfack.drive.sunet.se",
    "/(\\w+\\.)*kth.se$/": "kth.drive.sunet.se",
    "/(\\w+\\.)*kva.se$/": "kva.drive.sunet.se",
    "/(\\w+\\.)*liu.se$/": "liu.drive.sunet.se",
    "/(\\w+\\.)*lnu.se$/": "lnu.drive.sunet.se",
    "/(\\w+\\.)*ltu.se$/": "ltu.drive.sunet.se",
    "/(\\w+\\.)*lu.se$/": "lu.drive.sunet.se",
    "/(\\w+\\.)*mah.se$/": "mau.drive.sunet.se",
    "/(\\w+\\.)*mau.se$/": "mau.drive.sunet.se",
    "/(\\w+\\.)*mdh.se$/": "mdu.drive.sunet.se",
    "/(\\w+\\.)*mdu.se$/": "mdu.drive.sunet.se",
    "/(\\w+\\.)*miun.se$/": "miun.drive.sunet.se",
    "/(\\w+\\.)*nordunet.se$/": "nordunet.drive.sunet.se",
    "/(\\w+\\.)*nordu.net$/": "nordunet.drive.sunet.se",
    "/(\\w+\\.)*nrm.se$/": "nrm.drive.sunet.se",
    "/(\\w+\\.)*oru.se$/": "oru.drive.sunet.se",
    "/(\\w+\\.)*rkh.se$/": "rkh.drive.sunet.se",
    "/(\\w+\\.)*sics.se$/": "sics.drive.sunet.se",
    "/(\\w+\\.)*slu.se$/": "slu.drive.sunet.se",
    "/(\\w+\\.)*smhi.se$/": "smhi.drive.sunet.se",
    "/(\\w+\\.)*sp.se$/": "sp.drive.sunet.se",
    "/(\\w+\\.)*su.se$/": "su.drive.sunet.se",
    "/(\\w+\\.)*sunet.se$/": "sunet.drive.sunet.se",
    "/(\\w+\\.)*suni.se$/": "suni.drive.sunet.se",
    "/(\\w+\\.)*swamid.se$/": "swamid.drive.sunet.se",
    "/(\\w+\\.)*ths.se$/": "ths.drive.sunet.se",
    "/(\\w+\\.)*uhr.se$/": "uhr.drive.sunet.se",
    "/(\\w+\\.)*umu.se$/": "umu.drive.sunet.se",
    "/(\\w+\\.)*uniarts.se$/": "uniarts.drive.sunet.se",
    "/(\\w+\\.)*uu.se$/": "uu.drive.sunet.se",
    "/(\\w+\\.)*vinnova.se$/": "vinnova.drive.sunet.se",
    "/(\\w+\\.)*vr.se$/": "vr.drive.sunet.se",
    "/$/": "extern.drive.sunet.se"
}
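The rewritten keys prepend (\w+\.)* so that sub-domains map explicitly to the same node. A quick way to sanity-check a pattern, assuming the GSS ManualUserMapping module applies these as ordinary regular expressions to the user's mail domain (its exact matching rules are not shown in this diff):

    echo 'student.su.se' | grep -E '(\w+\.)*su.se$' && echo 'maps to su.drive.sunet.se'
    echo 'su.se'         | grep -E '(\w+\.)*su.se$' && echo 'still maps'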
@@ -1,57 +1,58 @@
{
    "/antagning.se$/": "antagning.drive.test.sunet.se",
    "/bth.se$/": "bth.drive.test.sunet.se",
    "/chalmers.se$/": "chalmers.drive.test.sunet.se",
    "/du.se$/": "du.drive.test.sunet.se",
    "/eduid.se$/": "extern.drive.test.sunet.se",
    "/esh.se$/": "esh.drive.test.sunet.se",
    "/fhs.se$/": "fhs.drive.test.sunet.se",
    "/gih.se$/": "gih.drive.test.sunet.se",
    "/gu.se$/": "gu.drive.test.sunet.se",
    "/hb.se$/": "hb.drive.test.sunet.se",
    "/@shh.se$/": "shh.drive.test.sunet.se",
    "/@hh.se$/": "hh.drive.test.sunet.se",
    "/hhs.se$/": "hhs.drive.test.sunet.se",
    "/hig.se$/": "hig.drive.test.sunet.se",
    "/his.se$/": "his.drive.test.sunet.se",
    "/hj.se$/": "hj.drive.test.sunet.se",
    "/hkr.se$/": "hkr.drive.test.sunet.se",
    "/hv.se$/": "hv.drive.test.sunet.se",
    "/irf.se$/": "irf.drive.test.sunet.se",
    "/kb.se$/": "kb.drive.test.sunet.se",
    "/ki.se$/": "ki.drive.test.sunet.se",
    "/kkh.se$/": "kkh.drive.test.sunet.se",
    "/kmh.se$/": "kmh.drive.test.sunet.se",
    "/konstfack.se$/": "konstfack.drive.test.sunet.se",
    "/kth.se$/": "kth.drive.test.sunet.se",
    "/kva.se$/": "kva.drive.test.sunet.se",
    "/liu.se$/": "liu.drive.test.sunet.se",
    "/lnu.se$/": "lnu.drive.test.sunet.se",
    "/ltu.se$/": "ltu.drive.test.sunet.se",
    "/lu.se$/": "lu.drive.test.sunet.se",
    "/mah.se$/": "mau.drive.test.sunet.se",
    "/mau.se$/": "mau.drive.test.sunet.se",
    "/mdh.se$/": "mdu.drive.test.sunet.se",
    "/mdu.se$/": "mdu.drive.test.sunet.se",
    "/miun.se$/": "miun.drive.test.sunet.se",
    "/nordunet.se$/": "nordunet.drive.test.sunet.se",
    "/nrm.se$/": "nrm.drive.test.sunet.se",
    "/oru.se$/": "oru.drive.test.sunet.se",
    "/rkh.se$/": "rkh.drive.test.sunet.se",
    "/sics.se$/": "sics.drive.test.sunet.se",
    "/slu.se$/": "slu.drive.test.sunet.se",
    "/smhi.se$/": "smhi.drive.test.sunet.se",
    "/sp.se$/": "sp.drive.test.sunet.se",
    "/su.se$/": "su.drive.test.sunet.se",
    "/sunet.se$/": "sunet.drive.test.sunet.se",
    "/suni.se$/": "suni.drive.test.sunet.se",
    "/swamid.se$/": "swamid.drive.test.sunet.se",
    "/ths.se$/": "ths.drive.test.sunet.se",
    "/uhr.se$/": "uhr.drive.test.sunet.se",
    "/umu.se$/": "umu.drive.test.sunet.se",
    "/uniarts.se$/": "uniarts.drive.test.sunet.se",
    "/uu.se$/": "uu.drive.test.sunet.se",
    "/vinnova.se$/": "vinnova.drive.test.sunet.se",
    "/vr.se$/": "vr.drive.test.sunet.se",
    "/(\\w+\\.)*antagning.se$/": "antagning.drive.test.sunet.se",
    "/(\\w+\\.)*bth.se$/": "bth.drive.test.sunet.se",
    "/(\\w+\\.)*chalmers.se$/": "chalmers.drive.test.sunet.se",
    "/(\\w+\\.)*du.se$/": "du.drive.test.sunet.se",
    "/(\\w+\\.)*eduid.se$/": "extern.drive.test.sunet.se",
    "/(\\w+\\.)*esh.se$/": "esh.drive.test.sunet.se",
    "/(\\w+\\.)*fhs.se$/": "fhs.drive.test.sunet.se",
    "/(\\w+\\.)*gih.se$/": "gih.drive.test.sunet.se",
    "/(\\w+\\.)*gu.se$/": "gu.drive.test.sunet.se",
    "/(\\w+\\.)*hb.se$/": "hb.drive.test.sunet.se",
    "/(\\w+\\.)*shh.se$/": "shh.drive.test.sunet.se",
    "/(\\w+\\.)*hh.se$/": "hh.drive.test.sunet.se",
    "/(\\w+\\.)*hhs.se$/": "hhs.drive.test.sunet.se",
    "/(\\w+\\.)*hig.se$/": "hig.drive.test.sunet.se",
    "/(\\w+\\.)*his.se$/": "his.drive.test.sunet.se",
    "/(\\w+\\.)*hj.se$/": "hj.drive.test.sunet.se",
    "/(\\w+\\.)*hkr.se$/": "hkr.drive.test.sunet.se",
    "/(\\w+\\.)*hv.se$/": "hv.drive.test.sunet.se",
    "/(\\w+\\.)*irf.se$/": "irf.drive.test.sunet.se",
    "/(\\w+\\.)*kb.se$/": "kb.drive.test.sunet.se",
    "/(\\w+\\.)*ki.se$/": "ki.drive.test.sunet.se",
    "/(\\w+\\.)*kkh.se$/": "kkh.drive.test.sunet.se",
    "/(\\w+\\.)*kmh.se$/": "kmh.drive.test.sunet.se",
    "/(\\w+\\.)*konstfack.se$/": "konstfack.drive.test.sunet.se",
    "/(\\w+\\.)*kth.se$/": "kth.drive.test.sunet.se",
    "/(\\w+\\.)*kva.se$/": "kva.drive.test.sunet.se",
    "/(\\w+\\.)*liu.se$/": "liu.drive.test.sunet.se",
    "/(\\w+\\.)*lnu.se$/": "lnu.drive.test.sunet.se",
    "/(\\w+\\.)*ltu.se$/": "ltu.drive.test.sunet.se",
    "/(\\w+\\.)*lu.se$/": "lu.drive.test.sunet.se",
    "/(\\w+\\.)*mah.se$/": "mau.drive.test.sunet.se",
    "/(\\w+\\.)*mau.se$/": "mau.drive.test.sunet.se",
    "/(\\w+\\.)*mdh.se$/": "mdu.drive.test.sunet.se",
    "/(\\w+\\.)*mdu.se$/": "mdu.drive.test.sunet.se",
    "/(\\w+\\.)*miun.se$/": "miun.drive.test.sunet.se",
    "/(\\w+\\.)*nordunet.se$/": "nordunet.drive.test.sunet.se",
    "/(\\w+\\.)*nordu.net$/": "nordunet.drive.test.sunet.se",
    "/(\\w+\\.)*nrm.se$/": "nrm.drive.test.sunet.se",
    "/(\\w+\\.)*oru.se$/": "oru.drive.test.sunet.se",
    "/(\\w+\\.)*rkh.se$/": "rkh.drive.test.sunet.se",
    "/(\\w+\\.)*sics.se$/": "sics.drive.test.sunet.se",
    "/(\\w+\\.)*slu.se$/": "slu.drive.test.sunet.se",
    "/(\\w+\\.)*smhi.se$/": "smhi.drive.test.sunet.se",
    "/(\\w+\\.)*sp.se$/": "sp.drive.test.sunet.se",
    "/(\\w+\\.)*su.se$/": "su.drive.test.sunet.se",
    "/(\\w+\\.)*sunet.se$/": "sunet.drive.test.sunet.se",
    "/(\\w+\\.)*suni.se$/": "suni.drive.test.sunet.se",
    "/(\\w+\\.)*swamid.se$/": "swamid.drive.test.sunet.se",
    "/(\\w+\\.)*ths.se$/": "ths.drive.test.sunet.se",
    "/(\\w+\\.)*uhr.se$/": "uhr.drive.test.sunet.se",
    "/(\\w+\\.)*umu.se$/": "umu.drive.test.sunet.se",
    "/(\\w+\\.)*uniarts.se$/": "uniarts.drive.test.sunet.se",
    "/(\\w+\\.)*uu.se$/": "uu.drive.test.sunet.se",
    "/(\\w+\\.)*vinnova.se$/": "vinnova.drive.test.sunet.se",
    "/(\\w+\\.)*vr.se$/": "vr.drive.test.sunet.se",
    "/$/": "extern.drive.test.sunet.se"
}

@@ -1,8 +0,0 @@
memory_limit=2048M
max_file_uploads=20
upload_max_filesize=30G
post_max_size=30G
max_execution_time=86400
max_input_time=86400
session.save_handler = <%= @session_save_handler %>
session.save_path = "<%= @session_save_path %>"

@@ -15,18 +15,19 @@ if [[ "x${oc_list}" != "x" ]]; then
  done
fi

if [[ ${1} == 'config:editable' ]] && [[ ${2} == '--on' ]]; then
  docker exec -ti ${MY_VARS} -u www-data ${container} sh -c 'sed "s/config_is_read_only\(.\) => true,/config_is_read_only\1 => false,/" /var/www/html/config/config.php > /var/www/html/config/config.php.tmp'
  docker exec -ti ${MY_VARS} -u www-data ${container} cp /var/www/html/config/config.php.tmp /var/www/html/config/config.php
  docker exec -ti ${MY_VARS} -u www-data ${container} rm /var/www/html/config/config.php.tmp
if [[ ${1} == 'config:editable' ]]; then
  echo "config:editable is deprecated"
  exit 0
fi
if [[ ${1} == 'config:editable' ]] && [[ ${2} == '--off' ]]; then
  docker exec -ti ${MY_VARS} -u www-data ${container} sh -c 'sed "s/config_is_read_only\(.\) => false,/config_is_read_only\1 => true,/" /var/www/html/config/config.php > /var/www/html/config/config.php.tmp'
  docker exec -ti ${MY_VARS} -u www-data ${container} cp /var/www/html/config/config.php.tmp /var/www/html/config/config.php
  docker exec -ti ${MY_VARS} -u www-data ${container} rm /var/www/html/config/config.php.tmp
  exit 0
fi

docker exec ${container} chmod u+w /var/www/html/config/
docker exec -ti ${MY_VARS} -u www-data ${container} sh -c 'sed "s/config_is_read_only\(.\) => true,/config_is_read_only\1 => false,/" /var/www/html/config/config.php > /var/www/html/config/config.php.tmp'
docker exec -ti ${MY_VARS} -u www-data ${container} cp /var/www/html/config/config.php.tmp /var/www/html/config/config.php
docker exec -ti ${MY_VARS} -u www-data ${container} rm /var/www/html/config/config.php.tmp
docker exec -ti ${MY_VARS} -u www-data ${container} php --define apc.enable_cli=1 /var/www/html/occ "$@"
docker exec -ti ${MY_VARS} -u www-data ${container} sh -c 'sed "s/config_is_read_only\(.\) => false,/config_is_read_only\1 => true,/" /var/www/html/config/config.php > /var/www/html/config/config.php.tmp'
docker exec -ti ${MY_VARS} -u www-data ${container} cp /var/www/html/config/config.php.tmp /var/www/html/config/config.php
docker exec -ti ${MY_VARS} -u www-data ${container} rm /var/www/html/config/config.php.tmp
docker exec ${container} chmod u-w /var/www/html/config/
exit 0
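With config:editable deprecated, the occ wrapper now briefly unlocks config.php around every invocation. A condensed sketch of that toggle pattern, assuming the container name nextcloud_app_1 (MY_VARS is set earlier in the real script and is omitted here):

    container="nextcloud_app_1"
    toggle() {  # $1: current value, $2: replacement value for config_is_read_only
        docker exec -u www-data "${container}" sh -c "sed \"s/config_is_read_only\(.\) => ${1},/config_is_read_only\1 => ${2},/\" /var/www/html/config/config.php > /var/www/html/config/config.php.tmp && cp /var/www/html/config/config.php.tmp /var/www/html/config/config.php && rm /var/www/html/config/config.php.tmp"
    }
    docker exec "${container}" chmod u+w /var/www/html/config/
    toggle true false     # make config.php writable
    docker exec -u www-data "${container}" php --define apc.enable_cli=1 /var/www/html/occ "$@"
    toggle false true     # lock it down again
    docker exec "${container}" chmod u-w /var/www/html/config/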
79
templates/application/remount_user_bucket_as_project.sh
Executable file
@@ -0,0 +1,79 @@
#!/bin/bash

mountid="${1}"
user="${2}"
container="${3}"
if [[ -z ${mountid} ]] || [[ -z ${user} ]]; then
  echo "We need a valid mount id and user to proceed"
  echo "Usage: ${0} <mountid> <user> [<container>]"
  exit
fi

if [[ -z ${container} ]]; then
  container="nextcloud_app_1"
fi

occ="/usr/local/bin/occ ${container}"
function get_config {
  ${occ} files_external:config ${mountid} ${1} | tr -d '\n\t\r'
}

echo "Gathering information, hang tight."

echo -n "."
bucket="$(get_config bucket)"
echo -n "."
hostname="$(get_config hostname)"
echo -n "."
key="$(get_config key)"
echo -n "."
region="$(get_config region)"
echo -n "."
secret="$(get_config secret)"
jsonfile="/tmp/${user}-user-bucket.json"
mount_point="${user/@/-}"
mount_point="${mount_point/./-}-user-bucket"

echo "This will remount the user bucket with mountid ${mountid} for ${user} as project bucket with mountpoint ${mount_point}."
read -r -p "Press enter to continue"

echo '
[
    {
        "mount_point": "\/'${mount_point}'",
        "storage": "\\OCA\\Files_External\\Lib\\Storage\\AmazonS3",
        "authentication_type": "amazons3::accesskey",
        "configuration": {
            "bucket": "'${bucket}'",
            "hostname": "'${hostname}'",
            "key": "'${key}'",
            "legacy_auth": false,
            "port": "443",
            "region": "'${region}'",
            "secret": "'${secret}'",
            "storageClass": "",
            "useMultipartCopy": false,
            "use_path_style": true,
            "use_ssl": true
        },
        "options": {
            "encrypt": true,
            "previews": true,
            "enable_sharing": true,
            "filesystem_check_changes": 0,
            "encoding_compatibility": false,
            "readonly": false
        },
        "applicable_users": [
        ],
        "applicable_groups": ["admin"]
    }
]
' > "${jsonfile}"


docker cp ${jsonfile} ${container}:/${jsonfile}
${occ} files_external:import /${jsonfile}
docker exec ${container} rm /${jsonfile}
rm ${jsonfile}
${occ} files_external:delete ${mountid}
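Invocation follows the usage text in the script itself; a hypothetical example (mount id and user are made up):

    # remount user bucket 42 belonging to alice@uu.se as a project bucket,
    # in the default container nextcloud_app_1:
    ./remount_user_bucket_as_project.sh 42 alice@uu.se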
69
templates/jupyter_site/docker-compose.erb.yaml
Normal file
@@ -0,0 +1,69 @@
version: "3.7"

services:
  nginx:
    image: docker.io/nginxproxy/nginx-proxy:latest
    container_name: nginx
    networks:
      - internal_network
      - external_network
    dns:
      - 89.32.32.32
    ports:
      - "80:80"
      - "443:443"
    labels:
      com.github.jrcs.letsencrypt_nginx_proxy_companion.nginx_proxy: "true"
    volumes:
      - /opt/jupyter_site/nginx/certs:/etc/nginx/certs:ro
      - /opt/jupyter_site/nginx/conf:/etc/nginx/conf.d
      - /opt/jupyter_site/nginx/dhparam:/etc/nginx/dhparam
      - /opt/jupyter_site/nginx/html:/usr/share/nginx/html
      - /opt/jupyter_site/nginx/vhost:/etc/nginx/vhost.d
      - /var/run/docker.sock:/tmp/docker.sock:ro
    environment:
      - ENABLE_IPV6=true
    restart: unless-stopped

  acme:
    image: docker.io/nginxproxy/acme-companion:latest
    container_name: acme
    networks:
      - external_network
    dns:
      - 89.32.32.32
    volumes:
      - /opt/jupyter_site/nginx/acme:/etc/acme.sh
      - /opt/jupyter_site/nginx/certs:/etc/nginx/certs:rw
      - /opt/jupyter_site/nginx/conf:/etc/nginx/conf.d
      - /opt/jupyter_site/nginx/dhparam:/etc/nginx/dhparam
      - /opt/jupyter_site/nginx/html:/usr/share/nginx/html
      - /opt/jupyter_site/nginx/vhost:/etc/nginx/vhost.d:rw
      - /var/run/docker.sock:/var/run/docker.sock:ro
    environment:
      - NGINX_PROXY_CONTAINER=nginx
      - DEFAULT_EMAIL=drive@sunet.se
    depends_on:
      - nginx
    restart: unless-stopped

  web:
    image: docker.sunet.se/drive/jupyter-site:<%= @site_version %>
    container_name: web
    restart: always
    networks:
      - internal_network
    ports:
      - "127.0.0.1:3000:3000"
    dns:
      - 89.32.32.32
    environment:
      - VIRTUAL_HOST=<%= @domain %>
      - VIRTUAL_PATH=/
      - VIRTUAL_PORT=3000
      - LETSENCRYPT_HOST=<%= @domain %>

networks:
  external_network:
  internal_network:
    internal: true

@@ -1,16 +1,29 @@

<?php


$CONFIG = [
  'AUTH_KEY' => "<%= @gss_jwt_key %>",
  'DB' => [
    'host' => "<%= @dbhost %>",
    'db' => "lookup" ,
    'user' => "lookup",
    'pass' => "<%= @mysql_user_password %>",
  ],

  'EMAIL_SENDER' => '<%= @email_sender %>',
  'ERROR_VERBOSE' => false,
  'GLOBAL_SCALE' => true,

  'AUTH_KEY' => "<%= @gss_jwt_key %>",
];
  'IP_BLACKLIST' => [
  ],
  'MAX_REQUESTS' => 10000,
  'MAX_SEARCH_PAGE' => 10,
  'PUBLIC_URL' => '<%= @public_url %>',
  'REPLICATION_AUTH' => '<%= @replication_auth %>',
  'REPLICATION_HOSTS' => [
  ],
  'SPAM_BLACKLIST' => [
  ],
  'TWITTER' => [
    'CONSUMER_KEY' => '',
    'CONSUMER_SECRET' => '',
    'ACCESS_TOKEN' => '',
    'ACCESS_TOKEN_SECRET' => '',
  ],
];

@@ -3,22 +3,15 @@ version: '3.2'
services:

  app:
    container_name: lookup_app_1
    image: docker.sunet.se/drive/nextcloud-lookup:<%= @lookup_version %>
    restart: always
    volumes:
      - /opt/lookup/config.php:/var/www/html/config/config.php
    networks:
      - default
      - proxysql_proxysql
    network_mode: host
    dns:
      - 89.46.20.75
      - 89.46.21.29
      - 89.32.32.32
    ports:
      - 443:443
    command: apachectl -D FOREGROUND
    tty: true

networks:
  proxysql_proxysql:
    external: true

1
templates/mariadb/custconfig.json.erb
Normal file
@@ -0,0 +1 @@
<%= @custdata.to_json %>

@@ -4,6 +4,7 @@ services:

  db:
    image: docker.sunet.se/drive/mariadb:<%= @mariadb_version %>
    container_name: mariadb_db_1
    restart: always
    volumes:
      - /etc/mariadb/backups:/backups
@@ -20,6 +21,6 @@ services:
      - MYSQL_ROOT_PASSWORD=<%= @mysql_root_password %>
      - BOOTSTRAP=<%= @bootstrap %>
      - FORCE_BOOTSTRAP=0
    command: "--wsrep_cluster_address=gcomm://<%= @db_ip[0] %>,<%= @db_ip[1] %>,<%= @db_ip[2] %>"
    command: "--wsrep_cluster_address=gcomm://<%= @db_ip.join(',') %>"
    tty: true
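Joining @db_ip instead of hard-coding three indices means the Galera cluster address tracks however many database nodes hiera provides. Rendered output for a hypothetical two-node @db_ip of ['89.46.20.10', '89.46.20.11']:

    # --wsrep_cluster_address=gcomm://89.46.20.10,89.46.20.11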
32
templates/mariadb/genuserdeplists.sh.erb
Normal file
@@ -0,0 +1,32 @@
#!/bin/bash

<% basedir="statistics:drive-server-coms" -%>
<% cupath="/opt/mariadb/statistics/users/" -%>
<% custdata="/opt/mariadb/statistics/custdata.json" -%>
status=0

<% @custdata.each do |cust,data| -%>
#Customer <%= cust %> has no billing departments.
<% if defined?(data[@environment]["billdomains"]) && data[@environment]["billdomains"] -%>
mkdir -p /opt/mariadb/statistics/users/<%= cust %>
chmod '0700' /opt/mariadb/statistics/users/<%= cust %>
<% data[@environment]["billdomains"].each do |dom| -%>
/root/tasks/listusersbydep.sh <%= cust %> <%= dom %> > /opt/mariadb/statistics/users/<%= cust %>/users-<%= dom.gsub(/[.]/, '-') %>.json
if jq . <%= cupath + cust %>/users-<%= dom.gsub(/[.]/, '-') %>.json &>/dev/null
then
  timeout 30s rclone copy -c --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies <%= cupath + cust %>/users-<%= dom.gsub(/[.]/, '-') %>.json <%= basedir%>/<%= cust %>-<%= @environment%>/
  [[ $? -eq 0 ]] || { status=1 ; echo "Error: Upload of user data failed." ; }
else
  echo "Error in json data"
  status=1
fi
<% end -%>
<% end -%>
<% end -%>

if [[ -f <%= custdata %> ]]
then
  timeout 30s rclone copy -c --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies <%= custdata %> <%= basedir%>/
fi

exit ${status}
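The loop above gates every upload on jq parsing the generated file, so malformed JSON marks the run failed instead of being shipped. A runnable sketch of that validate-then-copy pattern with a hypothetical file (the statistics remote name comes from the script's basedir):

    f="/tmp/example.json"
    echo '{"alice": "Alice"}' > "$f"
    if jq . "$f" &>/dev/null; then
        timeout 30s rclone copy -c --use-cookies "$f" statistics:drive-server-coms/example/
    else
        echo "Error in json data"
    fi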
24
templates/mariadb/listusersdep.sh.erb
Normal file
@@ -0,0 +1,24 @@
#!/bin/bash

function usage () {
  printf "Usage:\t%s <customer> <department email domain>\n" "${0##*/}"
  printf "Example:\t%s uu int.uu.se\n" "${0##*/}"
}

[[ -z "${1}" || -z "${2}" || ! "${1}" =~ ^[a-zA-Z0-9.]{1,200}$ || ! "${2}" =~ ^[a-zA-Z0-9.]{1,200}$ ]] && usage && exit 1

depdom="${2}"
customer="${1}"

docker exec mariadb_db_1 /bin/bash -c 'mysql -p${MYSQL_ROOT_PASSWORD} -NB -e '\
$'\'select JSON_PRETTY(JSON_OBJECTAGG(uid,displayname)) from ('\
$' select nextcloud_'"${customer}"$'.oc_accounts.uid,nextcloud_'"${customer}"$'.oc_global_scale_users.displayname'\
$' from nextcloud_'"${customer}"$'.oc_accounts,nextcloud_'"${customer}"$'.oc_global_scale_users'\
$' where nextcloud_'"${customer}"$'.oc_accounts.uid = nextcloud_'"${customer}"$'.oc_global_scale_users.uid'\
$' AND JSON_EXTRACT(nextcloud_'"${customer}"$'.oc_accounts.data, "$.email.value") like "%@'"${depdom}"$'\\\""'\
$' UNION'\
$' select nextcloud_'"${customer}"$'.oc_accounts.uid,nextcloud_'"${customer}"$'.oc_user_saml_users.displayname'\
$' from nextcloud_'"${customer}"$'.oc_accounts,nextcloud_'"${customer}"$'.oc_user_saml_users'\
$' where nextcloud_'"${customer}"$'.oc_accounts.uid = nextcloud_'"${customer}"$'.oc_user_saml_users.uid'\
$' AND JSON_EXTRACT(nextcloud_'"${customer}"$'.oc_accounts.data, "$.email.value") like "%@'"${depdom}"$'\\\"") as users\'' \
| sed 's/\\n/\n/g'

4
templates/mariadb/mysql.erb.sh
Normal file
@@ -0,0 +1,4 @@
#!/bin/bash
pw=$(yq -r '.services.db.environment[0]' /opt/mariadb/docker-compose.yml | awk -F '=' '{print $2}')

docker exec -ti mariadb_db_1 mysql -u root -p"${pw}" "${@}"
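The four-line wrapper above reads the root password out of the compose file with yq, so ad-hoc queries need no credential handling; hypothetical invocations (the install path is an assumption):

    /usr/local/bin/mysql                                              # interactive prompt
    /usr/local/bin/mysql -e 'show status like "wsrep_cluster_size"'   # one-off statement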
@@ -1,6 +1,6 @@
#!/bin/bash

result="$(docker exec mariadb_backup_mariadb_backup_1 mysql -p<%= @mysql_root_password %> -BN -e 'show status like "slave_running"')"
result="$(docker exec -u root mariadb_backup_mariadb_backup_1 mysql -p<%= @mysql_root_password %> -BN -e 'show status like "slave_running"')"
if [[ "${result}" == "Slave_running ON" ]]; then
  echo "OK: Replica running"
  exit 0

@@ -4,6 +4,7 @@ services:

  mariadb_backup:
    image: docker.sunet.se/drive/mariadb
    container_name: mariadb_backup_mariadb_backup_1
    dns:
      - 89.46.20.75
      - 89.46.21.29

@@ -15,9 +15,16 @@ password=$(${dexec} env | grep MYSQL_ROOT_PASSWORD | awk -F '=' '{print $2}')

mysql="${dexec} mysql -p${password}"

users="$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_global_scale_users')"
users="${users}
$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_users')"
if [[ "$(${mysql} -NB -e 'select exists(select * from information_schema.TABLES where TABLE_SCHEMA = "nextcloud" and TABLE_NAME = "oc_global_scale_users")')" == "1" ]]
then
  users="$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_global_scale_users')"
fi
users="${users}"$'\n'"$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_users')"
if [[ "$(${mysql} -NB -e 'select exists(select * from information_schema.TABLES where TABLE_SCHEMA = "nextcloud" and TABLE_NAME = "oc_user_saml_users")')" == "1" ]]
then
  users="${users}"$'\n'"$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_user_saml_users')"
fi
users="$(echo "${users}" | sort | uniq)"

project="statistics"
bucket="drive-server-coms"
@@ -25,7 +32,7 @@ base_dir="${project}:${bucket}"
mountpoint="/opt/statistics"
customer_dir="${mountpoint}/${location}"
mkdir -p "${customer_dir}"
rclone mkdir "${base_dir}/${location}"
rclone mkdir --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies "${base_dir}/${location}"

echo "${users}" | awk 'BEGIN{print "{"} {print t "\""$1"\": \""$2"\""} {t=","} END{print "}"}' | jq . >"${customer_dir}/users.json"
status=0
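The awk stage that turns uid/displayname rows into a JSON object is worth seeing in isolation; a runnable sketch with made-up users:

    printf 'alice Alice\nbob Bob\n' \
        | awk 'BEGIN{print "{"} {print t "\""$1"\": \""$2"\""} {t=","} END{print "}"}' \
        | jq .
    # -> { "alice": "Alice", "bob": "Bob" }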
@@ -1,6 +1,6 @@
[statistics]
type = webdav
url = https://89.46.20.42/remote.php/dav/files/_script/
url = https://89.45.236.246/remote.php/dav/files/_script/
vendor = nextcloud
user = _script
pass = <%= @statistics_secret %>

@@ -3,10 +3,12 @@
config_php='/var/www/html/config/config.php'
dbhost="<%= @dbhost %>"
mysql_user_password="<%= @mysql_user_password %>"
admin_password="<%= @admin_password %>"
location="<%= @location %>"
bucket="<%= @s3_bucket %>"
customer="<%= @customer %>"
echo "Setting temp admin password"
apt update && apt install -y apg
admin_password="$(apg -m 40 | head -1)"

/usr/bin/mysql -e "drop database nextcloud" -u nextcloud -p"${mysql_user_password}" -h "${dbhost}" >/dev/null 2>&1
/usr/bin/mysql -e "create database nextcloud" -u nextcloud -p"${mysql_user_password}" -h "${dbhost}" >/dev/null 2>&1
@@ -35,6 +37,11 @@ EOF
instanceid=$(grep -E "^ 'instanceid'" ${config_php} | awk -F "'" '{print $4}')
secret=$(grep -E "^ 'secret'" ${config_php} | awk -F "'" '{print $4}')
passwordsalt=$(grep -E "^ 'passwordsalt'" ${config_php} | awk -F "'" '{print $4}')
echo "Now delete the admin user:"
echo "  occ <container> user:delete admin"
echo "and then create a new admin user:"
echo "  /usr/local/bin/add_admin_user <username> <email address> <container>"
echo ""
echo "${customer}_instanceid: DEC::PKCS7[${instanceid}]!"
echo "${customer}_secret: DEC::PKCS7[${secret}]!"
echo "${customer}_passwordsalt: DEC::PKCS7[${passwordsalt}]!"

@@ -5,7 +5,7 @@ no_files=30 # Keep this many files as an archive, script is run once a week
# a specific host, but will differ between hosts
sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk '{print $1}') / 2))m

for logfile in $(ls /opt/multinode/*/{nextcloud.log,server/server.log}); do
for logfile in $(ls /opt/multinode/*/{nextcloud.log,audit.log,server/server.log}); do
  if [[ -f ${logfile}.gz.${no_files} ]]; then
    rm ${logfile}.gz.${no_files}
  fi
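The sleep line staggers rotation across hosts by reading the last octet of the machine's MAC address as hex and sleeping half that many minutes. Checking the arithmetic with a made-up octet:

    octet="a4"                 # hypothetical last byte of link/ether
    echo $((16#$octet / 2))    # -> 82, i.e. this host would wait 82 minutes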
@ -13,7 +13,9 @@ services:
|
|||
- /opt/nextcloud/cli.php.ini:/etc/php/8.0/cli/php.ini
|
||||
- <%= @config_php_path %>:/var/www/html/config/config.php
|
||||
- <%= @nextcloud_log_path %>:/var/www/html/data/nextcloud.log
|
||||
- <%= @audit_log_path %>:/var/www/html/data/audit.log
|
||||
- <%= @rclone_conf_path %>:/rclone.conf
|
||||
|
||||
networks:
|
||||
- default
|
||||
- proxysql_proxysql
|
||||
|
@ -23,7 +25,7 @@ services:
|
|||
- 89.32.32.32
|
||||
ports:
|
||||
- <%= @https_port %>:443
|
||||
command: apachectl -D FOREGROUND
|
||||
command: sh -c 'tail -F /var/www/html/data/nextcloud.log /var/www/html/data/audit.log| tee -a /proc/1/fd/2 & apachectl -D FOREGROUND'
|
||||
tty: true
|
||||
|
||||
networks:
|
||||
|
|
2
templates/portal/config.erb.yaml
Normal file
2
templates/portal/config.erb.yaml
Normal file
|
@ -0,0 +1,2 @@
|
|||
---
|
||||
domain: "<%= @domain %>"
|
71
templates/portal/docker-compose.erb.yaml
Normal file
71
templates/portal/docker-compose.erb.yaml
Normal file
|
@ -0,0 +1,71 @@
|
|||
version: "3.7"
|
||||
|
||||
services:
|
||||
nginx:
|
||||
image: docker.io/nginxproxy/nginx-proxy:latest
|
||||
container_name: nginx
|
||||
networks:
|
||||
- internal_network
|
||||
- external_network
|
||||
dns:
|
||||
- 89.32.32.32
|
||||
ports:
|
||||
- "80:80"
|
||||
- "443:443"
|
||||
labels:
|
||||
com.github.jrcs.letsencrypt_nginx_proxy_companion.nginx_proxy: "true"
|
||||
volumes:
|
||||
- /opt/portal/nginx/certs:/etc/nginx/certs:ro
|
||||
- /opt/portal/nginx/conf:/etc/nginx/conf.d
|
||||
- /opt/portal/nginx/dhparam:/etc/nginx/dhparam
|
||||
- /opt/portal/nginx/html:/usr/share/nginx/html
|
||||
- /opt/portal/nginx/vhost:/etc/nginx/vhost.d
|
||||
- /var/run/docker.sock:/tmp/docker.sock:ro
|
||||
environment:
|
||||
- ENABLE_IPV6=true
|
||||
restart: unless-stopped
|
||||
|
||||
acme:
|
||||
image: docker.io/nginxproxy/acme-companion:latest
|
||||
container_name: acme
|
||||
networks:
|
||||
- external_network
|
||||
dns:
|
||||
- 89.32.32.32
|
||||
volumes:
|
||||
- /opt/portal/nginx/acme:/etc/acme.sh
|
||||
- /opt/portal/nginx/certs:/etc/nginx/certs:rw
|
||||
- /opt/portal/nginx/conf:/etc/nginx/conf.d
|
||||
- /opt/portal/nginx/dhparam:/etc/nginx/dhparam
|
||||
- /opt/portal/nginx/html:/usr/share/nginx/html
|
||||
- /opt/portal/nginx/vhost:/etc/nginx/vhost.d:rw
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
environment:
|
||||
- NGINX_PROXY_CONTAINER=nginx
|
||||
- DEFAULT_EMAIL=noc@sunet.se
|
||||
depends_on:
|
||||
- nginx
|
||||
restart: unless-stopped
|
||||
|
||||
portal:
|
||||
image: docker.sunet.se/drive/portal:<%= @portal_version %>
|
||||
container_name: portal
|
||||
restart: always
|
||||
networks:
|
||||
- internal_network
|
||||
ports:
|
||||
- "127.0.0.1:8080:8080"
|
||||
dns:
|
||||
- 89.32.32.32
|
||||
volumes:
|
||||
- /opt/portal/config.yaml:/app/config.yaml
|
||||
environment:
|
||||
- VIRTUAL_HOST=portal.<%= @domain %>
|
||||
- VIRTUAL_PATH=/
|
||||
- VIRTUAL_PORT=8080
|
||||
- LETSENCRYPT_HOST=portal.<%= @domain %>
|
||||
|
||||
networks:
|
||||
external_network:
|
||||
internal_network:
|
||||
internal: true
|
|
@ -4,18 +4,25 @@ services:
|
|||
|
||||
proxysql:
|
||||
image: docker.sunet.se/drive/proxysql:<%= @proxysql_version %>
|
||||
container_name: proxysql_proxysql_1
|
||||
<%- if @hostnet -%>
|
||||
network_mode: host
|
||||
<%- else -%>
|
||||
ports:
|
||||
- 3306:3306
|
||||
- 6032:6032
|
||||
- 6080:6080
|
||||
<%- end -%>
|
||||
environment:
|
||||
INITIALIZE: 1
|
||||
dns:
|
||||
- 89.46.20.75
|
||||
- 89.46.21.29
|
||||
- 89.32.32.32
|
||||
<%- if !@hostnet -%>
|
||||
networks:
|
||||
- proxysql
|
||||
<%- end -%>
|
||||
restart: unless-stopped
|
||||
volumes:
|
||||
- proxysql:/var/lib/proxysql
|
||||
|
@ -25,6 +32,8 @@ services:
|
|||
volumes:
|
||||
proxysql:
|
||||
|
||||
<%- if !@hostnet -%>
|
||||
networks:
|
||||
proxysql:
|
||||
driver: bridge
|
||||
<%- end -%>
|
||||
|
|
|
@@ -1,58 +0,0 @@
# ~/.bashrc: executed by bash(1) for non-login shells.
# see /usr/share/doc/bash/examples/startup-files (in the package bash-doc)
# for examples

# If not running interactively, don't do anything
[ -z "$PS1" ] && return

# don't put duplicate lines in the history. See bash(1) for more options
# ... or force ignoredups and ignorespace
HISTCONTROL=ignoredups:ignorespace

# append to the history file, don't overwrite it
shopt -s histappend

# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
HISTSIZE=1000
HISTFILESIZE=2000

# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
shopt -s checkwinsize

# make less more friendly for non-text input files, see lesspipe(1)
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"

# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
    debian_chroot=$(cat /etc/debian_chroot)
fi

# set a fancy prompt (non-color, unless we know we "want" color)
case "$TERM" in
    xterm-color) color_prompt=yes;;
esac

# uncomment for a colored prompt, if the terminal has the capability; turned
# off by default to not distract the user: the focus in a terminal window
# should be on the output of commands, not on the prompt
#force_color_prompt=yes

if [ -n "$force_color_prompt" ]; then
    if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then
        # We have color support; assume it's compliant with Ecma-48
        # (ISO/IEC-6429). (Lack of such support is extremely rare, and such
        # a case would tend to support setf rather than setaf.)
        color_prompt=yes
    else
        color_prompt=
    fi
fi

if [ "$color_prompt" = yes ]; then
    PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
else
    PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '
fi
unset color_prompt force_color_prompt
alias redis-cli="redis-cli -a $(grep requirepass /opt/redis/node-0/server.conf | awk '{print $2}' | sed 's/"//g') --no-auth-warning"
@@ -3,17 +3,20 @@ version: '3.2'
services:

  reva-server:
#    image: docker.sunet.se/drive/sciencemesh-reva:v2.12.0
    image: michielbdejong/reva:mentix-fixes
    image: docker.sunet.se/drive/sciencemesh-reva:<%= @reva_version %>
    network_mode: host
    security_opt:
      - seccomp:unconfined
    dns:
      - 89.46.20.75
      - 89.46.21.29
      - 89.32.32.32
    environment:
      - CGO_ENABLED=1
    volumes:
      - /opt/reva/revad.toml:/etc/revad/revad.toml
      - /opt/reva/data:/var/tmp/reva
      - /opt/reva/ocm-providers.json:/etc/revad/ocm-providers.json
      - /opt/reva/test/drive.test.sunet.se.crt:/etc/revad/tls/drive.test.sunet.se.crt
      - /opt/reva/test/drive.test.sunet.se.key:/etc/revad/tls/drive.test.sunet.se.key
      - /opt/reva/rclone.conf:/root/.rclone.conf
      - /opt/reva/<%= @environment %>/<%= @domain %>.crt:/etc/revad/tls/<%= @domain %>.crt
      - /opt/reva/<%= @environment %>/<%= @domain %>.key:/etc/revad/tls/<%= @domain %>.key
    restart: always
@@ -1,14 +0,0 @@
[
  { "domain": "mesh.pondersource.org", "services": [
    { "endpoint": { "type": { "name": "OCM" }, "path": "https://mesh.pondersource.org/ocm/" }, "host": "https://mesh.pondersource.org" },
    { "endpoint": { "type": { "name": "Webdav" }, "path": "https://cloud.pondersource.org/remote.php/webdav/" }, "host": "https://cloud.pondersource.org" }
  ] },
  { "domain": "cs3mesh-iop.apps.dcw1.paas.psnc.pl", "services": [
    { "endpoint": { "type": { "name": "OCM" }, "path": "https://cs3mesh-iop.apps.dcw1.paas.psnc.pl/ocm/" }, "host": "https://cs3mesh-iop.apps.dcw1.paas.psnc.pl" },
    { "endpoint": { "type": { "name": "Webdav" }, "path": "https://cs3mesh-drive.apps.dcw1.paas.psnc.pl/remote.php/webdav/" }, "host": "https://cs3mesh-drive.apps.dcw1.paas.psnc.pl" }
  ] },
  { "domain": "<%= @reva_domain %>", "services": [
    { "endpoint": { "type": { "name": "OCM" }, "path": "https://<%= @reva_domain%>/ocm/" }, "host": "https://<%= @reva_domain %>" },
    { "endpoint": { "type": { "name": "Webdav" }, "path": "https://<%= @domain %>/remote.php/webdav/" }, "host": "https://<%= @domain %>" }
  ] }
]
templates/reva/rclone.conf.erb (new file, +6)
@@ -0,0 +1,6 @@
[statistics]
type = webdav
url = https://89.45.236.246/remote.php/dav/files/_script/
vendor = nextcloud
user = _script
pass = <%= @statistics_secret %>
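A sketch of exercising the rendered remote with rclone; the config path matches the compose volume mount above (/opt/reva/rclone.conf), while the target directory is hypothetical:

rclone --config /opt/reva/rclone.conf lsd statistics:
rclone --config /opt/reva/rclone.conf copy ./stats.json statistics:some-dir/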
@@ -1,48 +1,83 @@
[vars]
internal_gateway = "<%= @reva_domain %>"
provider_domain = "<%= @reva_domain %>"
external_reva_endpoint = "https://<%= @reva_domain %>" # append here any route if applicable
efss_sciencemesh_endpoint = "https://<%= @customer %>.<%= @domain %>/index.php/apps/sciencemesh/"
machine_api_key = "<%= @iopsecret %>"
efss_shared_secret = "<%= @shared_secret %>"

[http]
certfile = "/etc/revad/tls/<%= @domain %>.crt"
keyfile = "/etc/revad/tls/<%= @domain %>.key"

[log]
level = "debug"

[shared]
gatewaysvc = "<%= @reva_domain %>:19000"
# gatewaycertfile = "/etc/revad/tls/revanc1.crt"

# [registry]
# driver = "static"
#
# [registry.static]
# services = ["authprovider","userprovider"]
#
# [registry.static.authprovider]
# bearer = ["localhost:0123"]
# basic = ["localhost:1234"]
# publiclink = ["localhost:9876"]

[grpc]
address = "0.0.0.0:19000"
# certfile = "/etc/revad/tls/revanc1.crt"
# keyfile = "/etc/revad/tls/revanc1.key"
gatewaysvc = "{{ vars.internal_gateway }}:19000"

[grpc.services.gateway]
authregistrysvc = "<%= @reva_domain %>:19000"
appprovidersvc = "<%= @reva_domain %>:19000"
appregistry = "<%= @reva_domain %>:19000"
storageregistrysvc = "<%= @reva_domain %>:19000"
preferencessvc = "<%= @reva_domain %>:19000"
userprovidersvc = "<%= @reva_domain %>:19000"
usershareprovidersvc = "<%= @reva_domain %>:19000"
publicshareprovidersvc = "<%= @reva_domain %>:19000"
ocmcoresvc = "<%= @reva_domain %>:19000"
ocmshareprovidersvc = "<%= @reva_domain %>:19000"
ocminvitemanagersvc = "<%= @reva_domain %>:19000"
ocmproviderauthorizersvc = "<%= @reva_domain %>:19000"
commit_share_to_storage_grant = false
datagateway = "https://<%= @reva_domain %>/data"
transfer_expires = 6 # give it a moment
address = ":19000"
authregistrysvc = "{{ grpc.services.authregistry.address }}"
appregistrysvc = "{{ grpc.services.appregistry.address }}"
storageregistrysvc = "{{ grpc.services.storageregistry.address }}"
preferencessvc = "{{ grpc.services.userprovider.address }}"
userprovidersvc = "{{ grpc.services.userprovider.address }}"
usershareprovidersvc = "{{ grpc.services.usershareprovider.address }}"
ocmcoresvc = "{{ grpc.services.ocmcore.address }}"
ocmshareprovidersvc = "{{ grpc.services.ocmshareprovider.address }}"
ocminvitemanagersvc = "{{ grpc.services.ocminvitemanager.address }}"
ocmproviderauthorizersvc = "{{ grpc.services.ocmproviderauthorizer.address }}"
datagateway = "https://{{ http.services.datagateway.address }}/data"

transfer_expires = 6 # give it a moment
commit_share_to_storage_grant = true
commit_share_to_storage_ref = true

[grpc.services.appregistry]
driver = "static"

[grpc.services.appregistry.drivers.static]
mime_types = [
    {"mime_type" = "text/plain", "extension" = "txt", "name" = "Text file", "description" = "Text file", "allow_creation" = true},
    {"mime_type" = "text/markdown", "extension" = "md", "name" = "Markdown file", "description" = "Markdown file", "allow_creation" = true},
    {"mime_type" = "application/vnd.oasis.opendocument.text", "extension" = "odt", "name" = "OpenDocument", "description" = "OpenDocument text document", "default_app" = "Collabora", "allow_creation" = true},
    {"mime_type" = "application/vnd.oasis.opendocument.spreadsheet", "extension" = "ods", "name" = "OpenSpreadsheet", "description" = "OpenDocument spreadsheet document", "default_app" = "Collabora", "allow_creation" = true},
    {"mime_type" = "application/vnd.oasis.opendocument.presentation", "extension" = "odp", "name" = "OpenPresentation", "description" = "OpenDocument presentation document", "default_app" = "Collabora", "allow_creation" = true},
    {"mime_type" = "application/vnd.jupyter", "extension" = "ipynb", "name" = "Jupyter Notebook", "description" = "Jupyter Notebook"}
]


### AUTH PROVIDERS ###

[grpc.services.authregistry]
driver = "static"

[grpc.services.authregistry.drivers.static.rules]
basic = "<%= @reva_domain %>:19000"
basic = "{{ grpc.services.authprovider[0].address }}"
machine = "{{ grpc.services.authprovider[1].address }}"
ocmshares = "{{ grpc.services.authprovider[2].address }}"

[[grpc.services.authprovider]]
auth_manager = "nextcloud"

[grpc.services.authprovider.auth_managers.nextcloud]
endpoint = "{{ vars.efss_sciencemesh_endpoint }}"
shared_secret = "{{ vars.efss_shared_secret }}"
mock_http = false

[[grpc.services.authprovider]]
auth_manager = "machine"

[grpc.services.authprovider.auth_managers.machine]
api_key = "{{ vars.machine_api_key }}"
gateway_addr = "{{ vars.internal_gateway }}:19000"

[[grpc.services.authprovider]]
auth_manager = "ocmshares"


### STORAGE PROVIDERS ###

[grpc.services.storageregistry]
driver = "static"
@@ -51,8 +86,36 @@ driver = "static"
home_provider = "/home"

[grpc.services.storageregistry.drivers.static.rules]
"/home" = {"address" = "<%= @reva_domain %>:19000"}
"123e4567-e89b-12d3-a456-426655440000" = {"address" = "<%= @reva_domain %>:19000"}
"/home" = {"address" = "{{ grpc.services.storageprovider[0].address }}"}
"nextcloud" = {"address" = "{{ grpc.services.storageprovider[0].address }}"}
"/ocm" = {"address" = "{{ grpc.services.storageprovider[1].address }}"}
"ocm" = {"address" = "{{ grpc.services.storageprovider[1].address }}"}

[[grpc.services.storageprovider]]
driver = "nextcloud"
mount_id = "nextcloud"
expose_data_server = true
enable_home_creation = false
data_server_url = "https://localhost:{{ http.services.dataprovider[0].address.port }}/data"

[grpc.services.storageprovider.drivers.nextcloud]
endpoint = "{{ vars.efss_sciencemesh_endpoint }}"
shared_secret = "{{ vars.efss_shared_secret }}"
mock_http = false

[[grpc.services.storageprovider]]
driver = "ocmoutcoming"
mount_id = "ocm"
mount_path = "/ocm"
expose_data_server = true
enable_home_creation = false
data_server_url = "{{ vars.external_reva_endpoint }}/data"

[grpc.services.storageprovider.drivers.ocmoutcoming]
machine_secret = "{{ vars.machine_api_key }}"


### OTHER PROVIDERS ###

[grpc.services.usershareprovider]
driver = "memory"
@@ -61,121 +124,148 @@ driver = "memory"
driver = "nextcloud"

[grpc.services.ocmcore.drivers.nextcloud]
webdav_host = "https://<%= @domain %>/"
endpoint = "https://<%= @domain %>/index.php/apps/sciencemesh/"
shared_secret = "<%= @shared_secret %>"
host = "{{ vars.external_reva_endpoint }}"
endpoint = "{{ vars.efss_sciencemesh_endpoint }}"
shared_secret = "{{ vars.efss_shared_secret }}"
mock_http = false

[grpc.services.ocminvitemanager]
# TODO the driver should be "nextcloud" once it is implemented
driver = "json"
provider_domain = "{{ vars.provider_domain }}"

[grpc.services.ocmshareprovider]
driver = "nextcloud"
provider_domain = "{{ vars.provider_domain }}"
webdav_endpoint = "{{ vars.external_reva_endpoint }}"
webdav_prefix = "{{ vars.external_reva_endpoint }}/remote.php/dav/files"
# TODO the following should become {{ vars.external_reva_endpoint }}/external/{{.Token}}/...
webapp_template = "https://your.revad.org/external/sciencemesh/{{.Token}}/{relative-path-to-shared-resource}"

[grpc.services.ocmshareprovider.drivers.nextcloud]
webdav_host = "https://<%= @domain %>/"
endpoint = "https://<%= @domain %>/index.php/apps/sciencemesh/"
shared_secret = "<%= @shared_secret %>"
webdav_host = "{{ vars.external_reva_endpoint }}"
endpoint = "{{ vars.efss_sciencemesh_endpoint }}"
shared_secret = "{{ vars.efss_shared_secret }}"
mock_http = false
mount_id = "nextcloud"

[grpc.services.ocmproviderauthorizer]
#driver = "mentix"
driver = "open"
driver = "mentix"

[grpc.services.ocmproviderauthorizer.drivers.mentix]
url = "https://iop.sciencemesh.uni-muenster.de/iop/mentix/cs3"
verify_request_hostname = false
verify_request_hostname = true
insecure = false
timeout = 10
refresh = 900

[grpc.services.publicshareprovider]
driver = "memory"

[grpc.services.appprovider]
driver = "demo"
iopsecret = "<%= @iopsecret %>"
wopiurl = "http://0.0.0.0:8880/"
wopibridgeurl = "http://localhost:8000/wopib"

[grpc.services.appregistry]
driver = "static"

[grpc.services.appregistry.static.rules]
"text/plain" = "<%= @reva_domain %>:19000"
"text/markdown" = "<%= @reva_domain %>:19000"
"application/compressed-markdown" = "<%= @reva_domain %>:19000"
"application/vnd.oasis.opendocument.text" = "<%= @reva_domain %>:19000"
"application/vnd.oasis.opendocument.spreadsheet" = "<%= @reva_domain %>:19000"
"application/vnd.oasis.opendocument.presentation" = "<%= @reva_domain %>:19000"

[grpc.services.storageprovider]
driver = "nextcloud"
expose_data_server = true
data_server_url = "https://<%= @reva_domain %>/data"
enable_home_creation = true

[grpc.services.storageprovider.drivers.nextcloud]
endpoint = "https://<%= @domain %>/index.php/apps/sciencemesh/"
shared_secret = "<%= @shared_secret %>"
mock_http = false

[grpc.services.authprovider]
auth_manager = "nextcloud"

[grpc.services.authprovider.auth_managers.nextcloud]
endpoint = "https://<%= @domain %>/index.php/apps/sciencemesh/"
shared_secret = "<%= @shared_secret %>"
mock_http = false
[grpc.services.ocmproviderauthorizer.drivers.json]
# this is used by the docker-based test deployment, not in production
providers = "providers.testnet.json"
verify_request_hostname = true

[grpc.services.userprovider]
driver = "nextcloud"

[grpc.services.userprovider.drivers.nextcloud]
endpoint = "https://<%= @domain %>/index.php/apps/sciencemesh/"
shared_secret = "<%= @shared_secret %>"
endpoint = "{{ vars.efss_sciencemesh_endpoint }}"
shared_secret = "{{ vars.efss_shared_secret }}"
mock_http = false

[http]
enabled_services = ["ocmd"]
enabled_middlewares = ["providerauthorizer", "cors"]
address = "0.0.0.0:443"
certfile = "/etc/revad/tls/drive.test.sunet.se.crt"
keyfile = "/etc/revad/tls/drive.test.sunet.se.key"
[grpc.services.datatx]
txdriver = "rclone"
storagedriver = "json"
remove_transfer_on_cancel = true

[http.services.dataprovider]
[grpc.services.datatx.txdrivers.rclone]
# rclone endpoint
endpoint = "http://rclone.docker"
# basic auth is used
auth_user = "rcloneuser"
auth_pass = "eilohtho9oTahsuongeeTh7reedahPo1Ohwi3aek"
auth_header = "x-access-token"
job_status_check_interval = 2000
job_timeout = 120000
storagedriver = "json"
remove_transfer_job_on_cancel = true

[grpc.services.datatx.storagedrivers.json]
file = ""

[grpc.services.datatx.txdrivers.rclone.storagedrivers.json]
file = ""


### HTTP ENDPOINTS ###

[http.services.appprovider]
address = ":443"
insecure = true

[http.services.datagateway]
address = ":443"

[[http.services.dataprovider]]
driver = "nextcloud"

[http.services.prometheus]
[http.services.sysinfo]

[http.services.dataprovider.drivers.nextcloud]
endpoint = "https://<%= @domain %>/index.php/apps/sciencemesh/"
shared_secret = "<%= @shared_secret %>"
endpoint = "{{ vars.efss_sciencemesh_endpoint }}"
shared_secret = "{{ vars.efss_shared_secret }}"
mock_http = false

[[http.services.dataprovider]]
address = ":443"
driver = "ocmoutcoming"

[http.services.dataprovider.drivers.ocmoutcoming]
machine_secret = "{{ vars.machine_api_key }}"

[http.services.sciencemesh]
address = ":443"
provider_domain = "{{ vars.provider_domain }}"
mesh_directory_url = "https://sciencemesh.cesnet.cz/iop/meshdir"
ocm_mount_point = "/sciencemesh"

[http.services.sciencemesh.smtp_credentials]
disable_auth = false
sender_mail = "noreply@<%= @domain %>"
sender_login = "noreply@<%= @domain %>"
sender_password = "<%= @smtp_credentials %>"
smtp_server = "smtp.sunet.se"
smtp_port = 587

[http.services.ocmprovider]
address = ":443"
ocm_prefix = "ocm"
provider = "Reva for ownCloud/Nextcloud"
endpoint = "{{ vars.external_reva_endpoint }}"
enable_webapp = true
enable_datatx = true

[http.services.ocmd]
address = ":443"
prefix = "ocm"

[http.services.ocmd.config]
host = "<%= @reva_domain %>"
provider = "test-revanc1"

[http.middlewares.providerauthorizer]
#driver = "mentix"
driver = "open"

[http.middlewares.providerauthorizer.drivers.mentix]
url = "https://iop.sciencemesh.uni-muenster.de/iop/mentix/cs3"
verify_request_hostname = false
insecure = false
timeout = 10
refresh = 900
host = "{{ vars.provider_domain }}"

[http.services.ocs]
address = ":443"
prefix = "ocs"

[http.services.ocdav]
prefix = "ocdav"
address = ":443"

[http.services.prometheus]
address = ":443"

[http.services.metrics]
address = ":443"
metrics_data_driver_type = "json"
metrics_data_location = "/etc/revad/metrics.json"
metrics_record_interval = 5000

[http.services.sysinfo]

[http.middlewares.cors]
[http.middlewares.log]
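To sanity-check a revad running with this config, the OCM discovery document is the easiest probe. A sketch, assuming the conventional /ocm-provider discovery path exposed by the ocmprovider service and a placeholder domain (-k because the cert may be internal):

curl -sk https://reva.drive.example.se/ocm-provider | jq .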
templates/script/announce.erb.sh (new executable file, +75)
@@ -0,0 +1,75 @@
#!/bin/bash

VALID_ARGS=$(getopt -o cdghi:m:s: --long create,delete,get,help,id:,message:,subject: -- "$@")
# shellcheck disable=SC2181
if [[ ${?} -ne 0 ]]; then
    exit 1;
fi

usage () {
    echo "${0}: -c|--create -m|--message <'Your announcement goes here'> -s|--subject <Your subject goes here>"
    echo "${0}: -d|--delete -i|--id <announcement_id>"
    echo "${0}: -g|--get"
    exit 1
}

eval set -- "${VALID_ARGS}"
# shellcheck disable=SC2078
while [ : ]; do
    case "$1" in
        -c | --create)
            method='POST'
            shift
            ;;
        -d | --delete)
            method='DELETE'
            shift
            ;;
        -g | --get)
            method='GET'
            shift
            ;;
        -h | --help)
            usage
            ;;
        -i | --id)
            argument="${2}"
            shift 2
            ;;
        -m | --message)
            message="${2}"
            shift 2
            ;;
        -s | --subject)
            subject="${2}"
            shift 2
            ;;
        *)
            break
            ;;
    esac
done

if [[ ${method} == 'DELETE' ]] && [[ -z ${argument} ]]; then
    usage
fi
if [[ ${method} == 'POST' ]]; then
    if [[ -z ${message} ]] || [[ -z ${subject} ]]; then
        usage
    fi
    argument='{"subject":"'${subject}'","message":"'${message}'", "plainMessage":"'${message}'", "groups": [], "userId": "admin", "activities": false, "notifications": true, "emails": false, "comments": false }'
fi

curl_cmd(){
    local method="${1}"
    if [[ ${method} == 'POST' ]] && [[ -n ${2} ]]; then
        local payload=(-d "${2}" -H "Content-Type: application/json")
    elif [[ ${method} == 'DELETE' ]] && [[ -n ${2} ]]; then
        local id="/${2}"
    fi
    local admin_app_password="<%= @admin_app_password %>"
    domain="$(hostname -d)"
    curl -s -X "${method}" -u "admin:${admin_app_password}" "${payload[@]}" -H 'OCS-APIRequest: true' "https://${domain}/ocs/v2.php/apps/announcementcenter/api/v1/announcements${id}"
}
curl_cmd "${method}" "${argument}"
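Hypothetical invocations of the rendered script; the /root/tasks path matches how delete_announcement_with_subject.erb.sh further down calls it:

/root/tasks/announce.sh --create --subject 'Maintenance' --message 'Drive is down 06:00-07:00 CET'
/root/tasks/announce.sh --get
/root/tasks/announce.sh --delete --id 42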
@@ -6,63 +6,78 @@ sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk
number_of_full_to_keep='<%= @full_backup_retention %>'
fork_limit=30 # in GB; if bigger than this number, we fork the backup to its own process
customer="<%= @customer %>"
environment="<%= @environment %>"
declare -A extra_backup_jobs

#<% if @extra_backup_jobs.any? %>
#<% @extra_backup_jobs.each do |client, job| %>
extra_backup_jobs["<%= client %>"]="<%= job %>"
#<% end %>
#<% end %>
#<% if @location.start_with?('common') %>
declare -a sixmonths=('mau')
if [[ " ${sixmonths[*]} " =~ " ${customer} " ]]; then
    number_of_full_to_keep=6
    number_of_full_to_keep=6
fi
declare -a projects
#<% @singlenodes.each do |singlenode| %>
projects+=("<%= @full_project_mapping[singlenode][@environment]['primary_project'] %> <%= @full_project_mapping[singlenode][@environment]['mirror_project'] %>")
projects+=("<%= @full_project_mapping[singlenode][@environment]['primary_project'] %> <%= @full_project_mapping[singlenode][@environment]['mirror_project'] %> <%= singlenode %>")
#<% @full_project_mapping[singlenode][@environment]['assigned'].each do |project| %>
projects+=("<%= project['project'] %> <%= project['mirror_project'] %>")
projects+=("<%= project['project'] %> <%= project['mirror_project'] %> <%= singlenode %>")
#<% end %>
#<% end %>
#<% else %>
declare -a projects=("<%= @primary_project %> <%= @mirror_project %>")
declare -a projects=("<%= @primary_project %> <%= @mirror_project %> ${customer}")
#<% @assigned_projects.each do |project| %>
projects+=("<%= project['project'] %> <%= project['mirror_project'] %>")
projects+=("<%= project['project'] %> <%= project['mirror_project'] %> ${customer}")
#<% end %>
#<% end %>

if [[ ${customer} == 'common' ]]; then
    projects+=("<%= @location %> <%= @location %>-mirror")
    projects+=("<%= @location %> <%= @location %>-mirror ${customer}")
fi


function do_backup {
    local project="${1}"
    local mirror="${2}"
    local bucket="${3}"
    local mirrorbucket="${bucket}-mirror"
    local mountpoint="/opt/backupmounts/${bucket}"
    mkdir -p ${mountpoint}
    rclone mount ${project}:${bucket} ${mountpoint}/ --daemon --allow-other --dir-cache-time 24h
    rclone mkdir ${mirror}:${mirrorbucket}
    duplicity --full-if-older-than 1M --asynchronous-upload --tempdir /mnt --archive-dir /mnt \
        --no-encryption ${mountpoint} rclone://${mirror}:/${mirrorbucket}
    umount ${mountpoint}
    rmdir ${mountpoint}
    # Clean up
    duplicity remove-all-but-n-full ${number_of_full_to_keep} --tempdir /mnt --archive-dir /mnt \
        --force rclone://${mirror}:/${mirrorbucket}
    local project="${1}"
    local mirror="${2}"
    local bucket="${3}"
    local customer="${4}"
    local mirrorbucket="${bucket}-mirror"
    local mountpoint="/opt/backupmounts/${bucket}"
    ps aux | grep duplicity | grep "[^a-zA-Z]${bucket}" > /dev/null
    local oktorun=$? # 1 == this bucket has no other backup process in progress
    if [[ ${oktorun} -ne 0 ]]; then
        mkdir -p ${mountpoint}
        rclone mount ${project}:${bucket} ${mountpoint}/ --daemon --allow-other --dir-cache-time 24h
        rclone mkdir ${mirror}:${mirrorbucket}
        duplicity --full-if-older-than 1M --asynchronous-upload --tempdir /mnt --archive-dir /mnt --no-encryption ${mountpoint} rclone://${mirror}:/${mirrorbucket}
        if [[ "${extra_backup_jobs[${customer}]:+found}" == "found" ]] && [[ -f "${extra_backup_jobs[${customer}]}" ]]; then
            ${extra_backup_jobs[${customer}]} ${project} ${mirror} ${bucket} ${customer} ${environment}
        fi
        umount ${mountpoint}
        rmdir ${mountpoint}
        # Clean up
        duplicity remove-all-but-n-full ${number_of_full_to_keep} --tempdir /mnt --archive-dir /mnt --force rclone://${mirror}:/${mirrorbucket}
    fi
}

for entry in "${projects[@]}"; do
    project=$(echo ${entry} | awk '{print $1}')
    mirror=$(echo ${entry} | awk '{print $2}')
    for bucket in $(rclone lsd ${project}:/ | awk '{print $5}'); do
        maybesize=$(timeout 30s rclone size --json ${project}:${bucket})
    project=$(echo ${entry} | awk '{print $1}')
    mirror=$(echo ${entry} | awk '{print $2}')
    customer=$(echo ${entry} | awk '{print $3}')
    for bucket in $(rclone lsd ${project}:/ | awk '{print $5}'); do
        maybesize=$(timeout 30s rclone size --json ${project}:${bucket})
        if [[ ${?} -eq 124 ]]; then
            size=$((${fork_limit} * 1000000001))
        else
            size=$(echo ${maybesize} | jq -r '.bytes' )
        fi
        # If bucket is above ${fork_limit} GB we fork
        if [[ ${size} -gt $((${fork_limit} * 1000000000)) ]]; then
            do_backup ${project} ${mirror} ${bucket} &
        else
            do_backup ${project} ${mirror} ${bucket}
        fi
    done
        # If bucket is above ${fork_limit} GB we fork
        if [[ ${size} -gt $((${fork_limit} * 1000000000)) ]]; then
            do_backup ${project} ${mirror} ${bucket} ${customer} &
        else
            do_backup ${project} ${mirror} ${bucket} ${customer}
        fi
    done
done
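Each ${projects[@]} entry now carries three whitespace-separated fields instead of two; a sketch of how the loop splits one (values hypothetical):

entry="sunet-prod sunet-prod-mirror sunet"
project=$(echo ${entry} | awk '{print $1}')   # sunet-prod
mirror=$(echo ${entry} | awk '{print $2}')    # sunet-prod-mirror
customer=$(echo ${entry} | awk '{print $3}')  # sunet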
@@ -1,86 +0,0 @@
#!/bin/bash
# Backup all buckets
# We sleep a deterministic amount of time, which will be between 0 and 128 minutes and always the same within
# a specific host, but will differ between hosts
sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk '{print $1}') / 2))m
number_of_full_to_keep="<%= @full_backup_retention %>"
fork_limit=30 # in GB; if bigger than this number, we fork the backup to its own process
split_limit=1000 # in GB; if bigger than this number, we fork backup of each directory to its own process

declare -a projects=("<%= @primary_project %> <%= @mirror_project %>")
#<% @assigned_projects.each do |project| %>
projects+=("<%= project['project'] %> <%= project['mirror_project'] %>")
#<% end %>

function do_huge_backup {
    local project="${1}"
    local mirror="${2}"
    local bucket="${3}"
    declare -a directories
    declare -a empty
    for dir in $(rclone lsd ${project}:${bucket} | awk '{print $NF}'); do
        directories+=("${dir}")
        mountpoint="/opt/backupmounts/${bucket}-${dir}"
        do_backup ${project} ${mirror} ${bucket} ${mountpoint} ${dir} ${empty} &
    done
    mountpoint="/opt/backupmounts/${bucket}"
    do_backup ${project} ${mirror} ${bucket} ${mountpoint} none ${directories[@]} &

}

function do_backup {
    local project="${1}"
    shift
    local mirror="${1}"
    shift
    local bucket="${1}"
    shift
    local mountpoint="${1}"
    shift
    local dire="${1}"
    shift
    declare -a exclude
    exclude=( "${@}" )
    suffix=""
    opts=""
    if [[ "${dire}" != "none" ]]; then
        suffix="/${dire}"
    fi
    if ((${#exclude[@]})); then
        for dir in "${exclude[@]}"; do
            opts="${opts} --exclude /${dir}"
        done
    fi
    local mirrorbucket="${bucket}-mirror"
    mkdir -p ${mountpoint}
    rclone mount ${project}:${bucket}${suffix} ${mountpoint}/ --daemon --allow-other
    rclone mkdir ${mirror}:${mirrorbucket}${suffix}
    duplicity --full-if-older-than 1M --asynchronous-upload --tempdir /mnt --archive-dir /mnt ${opts} \
        --no-encryption ${mountpoint} rclone://${mirror}:/${mirrorbucket}${suffix}
    umount ${mountpoint}
    rmdir ${mountpoint}
    # Clean up
    duplicity remove-all-but-n-full ${number_of_full_to_keep} --tempdir /mnt --archive-dir /mnt \
        --force rclone://${mirror}:/${mirrorbucket}${suffix}
}

for entry in "${projects[@]}"; do
    project=$(echo ${entry} | awk '{print $1}')
    mirror=$(echo ${entry} | awk '{print $2}')
    declare -a empty
    for bucket in $(rclone lsd ${project}:/ | awk '{print $5}'); do
        size=$(rclone size --json ${project}:${bucket} | jq -r '.bytes')
        mirrorbucket="${bucket}-mirror"
        mountpoint="/opt/backupmounts/${bucket}"
        # If bucket is above ${split_limit} we fork and do backup per directory
        if [[ ${size} -gt $((${split_limit} * 1000000000)) ]]; then
            do_huge_backup ${project} ${mirror} ${bucket} &
        # If bucket is above ${fork_limit} we fork and do backup for bucket
        elif [[ ${size} -gt $((${fork_limit} * 1000000000)) ]]; then
            do_backup ${project} ${mirror} ${bucket} ${mountpoint} none ${empty} &
        else
            # If bucket is below ${fork_limit} we do not fork and do backup for bucket
            do_backup ${project} ${mirror} ${bucket} ${mountpoint} none ${empty}
        fi
    done
done
templates/script/backup-hb.erb.sh (new executable file, +8)
@@ -0,0 +1,8 @@
#!/bin/bash
project="${1}"
mirror="${2}"
bucket="${3}"
customer="${4}"
environment="${5}"

rsync -e "ssh -i ${HOME}/.ssh/id_script" -avz "/opt/backupmounts/${bucket}" "sd-${environment}@sd-${environment}-backup.hb.se:~/sd-${environment}/${bucket}"
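This script is what an extra_backup_jobs entry points at, so backup.erb.sh above invokes it with five positional arguments; a hypothetical call (the rendered path and all values are placeholders):

/opt/backup-hb.sh hb-prod hb-prod-mirror somebucket hb prod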
@@ -6,11 +6,14 @@ sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk
number_of_full_to_keep="<%= @full_backup_retention %>"

backup="${1}"
if [[ -z ${backup} ]]; then
    backup="backup1.$(hostname -d)"
fi
if ! [[ ${backup} =~ backup1.*sunet.se$ ]]; then
    echo "Usage: ${0} <fqdn of backup server>"
    echo "Example: ${0} backup1.sunet.drive.sunet.se"
fi
backup_dir="/opt/backups"
backup_dir="/opt/backups/backup-files"
bucket="db-backups"
mirror="<%= @customer %>-<%= @environment %>-mirror"
if [[ ${mirror} =~ common-(test|prod)-mirror ]]; then

@@ -18,16 +21,15 @@ if [[ ${mirror} =~ common-(test|prod)-mirror ]]; then
    bucket="${bucket}-${suffix}"
    backup_dir="${backup_dir}-${suffix}"
fi
mkdir -p ${backup_dir}
echo "Backing up database for ${backup}"
ssh ${backup} "sudo /home/script/bin/backup_db.sh"
echo "Cleaning up old backups for ${backup}"
ssh ${backup} "sudo /home/script/bin/purge_backups.sh /opt/mariadb_backup/backups/"
echo "Copying backups here"
mkdir -p ${backup_dir}
scp script@${backup}:/opt/mariadb_backup/backups/$(date +%Y/%m/%d)/*.gz ${backup_dir}
echo "Copying backups to remote bucket"
rclone mkdir ${mirror}:${bucket}
duplicity --full-if-older-than 1M --tempdir /mnt --archive-dir /mnt --no-encryption ${backup_dir} rclone://${mirror}:/${bucket}
duplicity --allow-source-mismatch --full-if-older-than 1M --tempdir /mnt --archive-dir /mnt --no-encryption ${backup_dir} rclone://${mirror}:/${bucket}
duplicity remove-all-but-n-full ${number_of_full_to_keep} --tempdir /mnt --archive-dir /mnt --force rclone://${mirror}:/${bucket}
echo "cleaning up"
rm -r ${backup_dir}
rm -rf "${backup_dir}"
@@ -6,7 +6,7 @@ sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk
number_of_full_to_keep=7
backup="multinode-db1.$(hostname -d)"
remote_backup_dir="/etc/mariadb/backups"
backup_dir="/opt/backups"
backup_dir="/opt/backups/multinode"
bucket="db-backups-multinode"
mirror="common-<%= @environment %>-mirror"
echo "Backing up all databases for multinode customer"

@@ -16,7 +16,7 @@ mkdir -p ${backup_dir}
scp "script@${backup}:${remote_backup_dir}/mariadb-dump*.sql.gz" "${backup_dir}"
echo "Copying backups to remote bucket"
rclone mkdir "${mirror}:${bucket}"
duplicity --full-if-older-than 1M --tempdir /mnt --archive-dir /mnt --no-encryption "${backup_dir}" "rclone://${mirror}:/${bucket}"
duplicity --allow-source-mismatch --full-if-older-than 1M --tempdir /mnt --archive-dir /mnt --no-encryption "${backup_dir}" "rclone://${mirror}:/${bucket}"
duplicity remove-all-but-n-full "${number_of_full_to_keep}" --tempdir /mnt --archive-dir /mnt --force "rclone://${mirror}:/${bucket}"
echo "cleaning up"
rm -r "${backup_dir}"
@@ -2,7 +2,7 @@

project="${1}"
bucket="${2}"
declare -a sixmonths=('mau')
declare -a sixmonths=('multinode')
output_status="OK"
exit_status=0
problems=""
@@ -7,16 +7,16 @@ include_userbuckets="<%= @include_userbuckets %>"
container="nextcloud_app_1"
yq="/usr/local/bin/yq"
if ! [[ -x ${yq} ]]; then
    pip install yq
    pip install yq
fi

declare -a directories
if [[ -n ${1} ]]; then
    directories=("${@}")
    directories=("${@}")
else
    directories+=("Arbetsmaterial")
    directories+=("Bevarande")
    directories+=("Gallringsbart")
    directories+=("Arbetsmaterial")
    directories+=("Bevarande")
    directories+=("Gallringsbart")
fi

olddir="${PWD}"

@@ -25,31 +25,31 @@ dirty=0
primary=''
declare -a users=( 'admin' )
cd "${tempdir}" || echo "Could not cd to tempdir"
declare -a projects=( "${yq}" -r '.project_mapping.'"${customer}"'.'"${environment}"'.assigned | "\(.[].project)"' /etc/hiera/data/common.yaml )
declare -a projects=( $("${yq}" -r '.project_mapping.'"${customer}"'.'"${environment}"'.assigned | "\(.[].project)"' /etc/hiera/data/common.yaml) )
if [[ "${include_userbuckets}" == "true" ]]; then
    primary=$("${yq}" -r '.project_mapping.'"${customer}"'.'"${environment}"'.primary_project' /etc/hiera/data/common.yaml)
    projects+=( "${primary}" )
    primary=$("${yq}" -r '.project_mapping.'"${customer}"'.'"${environment}"'.primary_project' /etc/hiera/data/common.yaml)
    projects+=( "${primary}" )
fi
for project in "${projects[@]}"; do
    for bucket in $(rclone lsd "${project}:" | awk '{print $NF}' | grep -E -v '^primary'); do
        count=$(rclone size --json "${project}:${bucket}" | jq -r .count)
        if [[ ${count} -gt 0 ]]; then
            echo "Skipping ${project}:${bucket} because it has stuff in it already"
            continue
        fi
        for directory in "${directories[@]}"; do
            dirty=1
    for bucket in $(rclone lsd "${project}:" | awk '{print $NF}' | grep -E -v '^primary'); do
        count=$(rclone size --json "${project}:${bucket}" | jq -r .count)
        if [[ ${count} -gt 0 ]]; then
            echo "Skipping ${project}:${bucket} because it has stuff in it already"
            continue
        fi
        for directory in "${directories[@]}"; do
            dirty=1
            if [[ -n ${primary} ]] && [[ ${project} == "${primary}" ]] ; then
                user=$(echo "${bucket}" | awk -F '-' '{print $1}')
                users+=( "${user}@${eppn_suffix}" )
            fi
            echo "Creating ${project}:${bucket}/${directory} because it looks nice and empty"
            temp="README.md"
            echo "**${directory}**" >"${temp}"
            echo "Var god lämna kvar denna fil/Please leave this file" >>"${temp}"
            rclone --no-traverse move "${temp}" "${project}:${bucket}/${directory}"
        done
    done
            echo "Creating ${project}:${bucket}/${directory} because it looks nice and empty"
            temp="README.md"
            echo "**${directory}**" >"${temp}"
            echo "Var god lämna kvar denna fil/Please leave this file" >>"${temp}"
            rclone --no-traverse move "${temp}" "${project}:${bucket}/${directory}"
        done
    done
done
cd "${olddir}" || echo "could not cd to home dir"
rmdir "${tempdir}"
templates/script/delete_announcement_with_subject.erb.sh (new executable file, +35)
@@ -0,0 +1,35 @@
#!/bin/bash

VALID_ARGS=$(getopt -o s: --long subject: -- "$@")
# shellcheck disable=SC2181
if [[ ${?} -ne 0 ]]; then
    exit 1;
fi

usage () {
    echo "${0}: -s|--subject <subject>"
    exit 1
}

eval set -- "${VALID_ARGS}"
# shellcheck disable=SC2078
while [ : ]; do
    case "$1" in
        -s | --subject)
            subject="${2}"
            shift 2
            ;;
        *)
            break
            ;;
    esac
done

if [[ -z ${subject} ]]; then
    usage
fi

id=$(/root/tasks/announce.sh --get | xmlstarlet sel -t -i '//subject="'"${subject}"'"' -m "/ocs/data/element/id" -v .)
if [[ -n ${id} ]]; then
    /root/tasks/announce.sh --delete --id "${id}"
fi
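A hypothetical run, deleting whichever announcement matches the subject exactly (assuming the script is rendered next to announce.sh under /root/tasks):

/root/tasks/delete_announcement_with_subject.sh --subject 'Maintenance'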
@@ -11,7 +11,7 @@ base_dir="${project}:${bucket}"
stat_dir="/opt/statistics"
customer_dir="${stat_dir}/${location}"
mkdir -p "${customer_dir}"
rclone mkdir "${base_dir}/${location}"
rclone mkdir --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies "${base_dir}/${location}"
echo "${userjson}" | jq . >"${customer_dir}/users.json"
status=${?}
if [[ ${status} -eq 0 ]]; then
templates/script/multinodeannounce.erb.sh (new executable file, +81)
@@ -0,0 +1,81 @@
#!/bin/bash

VALID_ARGS=$(getopt -o cdghi:m:s: --long create,delete,get,help,id:,message:,subject: -- "$@")
# shellcheck disable=SC2181
if [[ ${?} -ne 0 ]]; then
    exit 1;
fi

usage () {
    echo "${0}: -c|--create -m|--message <'Your announcement goes here'> -s|--subject <Your subject goes here>"
    echo "${0}: -d|--delete -i|--id <announcement_id>"
    echo "${0}: -g|--get"
    exit 1
}

eval set -- "${VALID_ARGS}"
# shellcheck disable=SC2078
while [ : ]; do
    case "$1" in
        -c | --create)
            method='POST'
            shift
            ;;
        -d | --delete)
            method='DELETE'
            shift
            ;;
        -g | --get)
            method='GET'
            shift
            ;;
        -h | --help)
            usage
            ;;
        -i | --id)
            argument="${2}"
            shift 2
            ;;
        -m | --message)
            message="${2}"
            shift 2
            ;;
        -s | --subject)
            subject="${2}"
            shift 2
            ;;
        *)
            break
            ;;
    esac
done

if [[ ${method} == 'DELETE' ]] && [[ -z ${argument} ]]; then
    usage
fi
if [[ ${method} == 'POST' ]]; then
    if [[ -z ${message} ]] || [[ -z ${subject} ]]; then
        usage
    fi
    argument='{"subject":"'${subject}'","message":"'${message}'", "plainMessage":"'${message}'", "groups": [], "userId": "admin", "activities": false, "notifications": true, "emails": false, "comments": false }'
fi

curl_cmd(){
    local admin_app_password="${1}"
    local customer="${2}"
    local method="${3}"
    if [[ ${method} == 'POST' ]] && [[ -n ${4} ]]; then
        local payload=(-d "${4}" -H "Content-Type: application/json")
    elif [[ ${method} == 'DELETE' ]] && [[ -n ${4} ]]; then
        local id="/${4}"
    fi
    domain="$(hostname -d)"
    curl -X "${method}" -u "admin:${admin_app_password}" "${payload[@]}" -H 'OCS-APIRequest: true' "https://${customer}.${domain}/ocs/v2.php/apps/announcementcenter/api/v1/announcements${id}"
}

#<%- index = 0 %>
#<%- @multinodes.each do |customer| %>
curl_cmd "<%= @multinode_passwords[index] %>" "<%= customer %>" "${method}" "${argument}"
#<%- index += 1 %>
#<%- end %>
@@ -14,7 +14,7 @@ endpoint = <%= @s3_host_mirror %>
acl = private
[statistics]
type = webdav
url = https://89.46.20.42/remote.php/dav/files/_script/
url = https://89.45.236.246/remote.php/dav/files/_script/
vendor = nextcloud
user = _script
pass = <%= @statistics_secret %>
@@ -1,12 +1,12 @@
#!/usr/bin/env python3.9
#!/usr/bin/env python3
# vim: set filetype=python:

import subprocess
import sys
import time
import urllib.parse

import requests

from drive_utils import (build_fqdn, get_ips_for_hostname, run_remote_command,
                         smoketest_db_node)


@@ -30,7 +30,10 @@ def add_downtime(fqdn: str,
    post_url = 'https://{}/thruk/r/hosts/{}/cmd/{}'.format(
        monitor_host, fqdn, action)
    headers = {'X-Thruk-Auth-Key': apikey}
    requests.post(post_url, data=data, headers=headers)
    try:
        requests.post(post_url, data=data, headers=headers)
    except Exception:
        print("Failed to add downtime for {}".format(fqdn))


def remove_downtime(fqdn: str,
@@ -42,55 +45,82 @@ def remove_downtime(fqdn: str,
    get_url = 'https://{}/thruk/r/hosts?name={}&columns=services'.format(
        monitor_host, fqdn)
    headers = {'X-Thruk-Auth-Key': apikey}
    req = requests.get(get_url, headers=headers)
    action = 'del_active_service_downtimes'
    for service in req.json()[0]['services']:
        post_url = 'https://{}/thruk/r/services/{}/{}/cmd/{}'.format(
            monitor_host, fqdn, urllib.parse.quote(service), action)
        requests.post(post_url, headers=headers)
    try:
        req = requests.get(get_url, headers=headers)
        action = 'del_active_service_downtimes'
        for service in req.json()[0]['services']:
            post_url = 'https://{}/thruk/r/services/{}/{}/cmd/{}'.format(
                monitor_host, fqdn, urllib.parse.quote(service), action)
            requests.post(post_url, headers=headers)
    except Exception:
        print("Failed to remove downtime for {}".format(fqdn))


def main() -> int:
    customer = "<%= @customer %>"
    customers = ["<%= @customer %>"]
    environment = "<%= @environment %>"
    apikey_test = "<%= @apikey_test %>"
    apikey_prod = "<%= @apikey_prod %>"
    user = "script"

    backup_command = ['sudo /home/script/bin/backup_db.sh']
    reboot_command = ['sudo /usr/local/bin/safer_reboot']

    for number in reversed(range(1, 4)):
        fqdn = build_fqdn(customer, environment, number)
        ipv4, _ = get_ips_for_hostname(fqdn)
        ip = ipv4[0]
        print("Upgrading: {} with ip: {}".format(fqdn, ip))
        add_downtime(fqdn, apikey_test)
        add_downtime(fqdn, apikey_prod, monitor_host="monitor.drive.sunet.se")
    if customers[0] == "common":
        customers = ["gss", "lookup", "multinode"]
    for customer in customers:
        backup_type = "backup"
        if customer == "gss":
            backup_type = "gssbackup"
        elif customer == "lookup":
            backup_type = "lookupbackup"
        elif customer == "multinode":
            backup_command = ['sudo /home/script/bin/backup_multinode_db.sh']
            backup_type = "multinode-db"

        run_remote_command(fqdn, reboot_command, user = user)
        success = False
        backup = build_fqdn(customer, environment, 1, backup_type)
        print("\tRunning backup command at {}".format(backup))
        run_remote_command(backup,
                           backup_command,
                           user="script",
                           output=subprocess.DEVNULL)
        for number in reversed(range(1, 4)):
            fqdn = build_fqdn(customer, environment, number)
            ipv4, _ = get_ips_for_hostname(fqdn)
            ip = ipv4[0]
            print("Upgrading: {} with ip: {}".format(fqdn, ip))
            add_downtime(fqdn, apikey_test)
            add_downtime(fqdn,
                         apikey_prod,
                         monitor_host="monitor.drive.sunet.se")

        for testnumber in reversed(range(1, 32, 2)):
            print("\tSleeping for {} seconds before smoketest on {}".format(
                testnumber, fqdn))
            time.sleep(testnumber)
            run_remote_command(fqdn, reboot_command, user=user)
            success = False

        if smoketest_db_node(fqdn, user = user):
            success = True
            for testnumber in reversed(range(1, 32, 2)):
                print(
                    "\tSleeping for {} seconds before smoketest on {}".format(
                        testnumber, fqdn))
                time.sleep(testnumber)

            break
                if smoketest_db_node(fqdn, user=user):
                    success = True

    remove_downtime(fqdn, apikey_test)
    remove_downtime(fqdn, apikey_prod, monitor_host="monitor.drive.sunet.se")
    if success:
        print("Upgrade cycle succeeded on {} ".format(fqdn))
    else:
        print("Smoketest failed on {} after server reboot command".format(
            fqdn))
                    break

        return 5
    print("All {}-servers successfully upgraded for {}".format(
        environment, customer))
            remove_downtime(fqdn, apikey_test)
            remove_downtime(fqdn,
                            apikey_prod,
                            monitor_host="monitor.drive.sunet.se")
            if success:
                print("Upgrade cycle succeeded on {} ".format(fqdn))
            else:
                print("Smoketest failed on {} after server reboot command".
                      format(fqdn))

                return 5
    print("All {}-servers successfully upgraded for {}".format(
        environment, customer))

    return 0
@@ -1,4 +1,4 @@
#!/usr/bin/env python3.9
#!/usr/bin/env python3
# vim: set filetype=python:

import subprocess

@@ -30,7 +30,10 @@ def add_downtime(fqdn: str,
    post_url = 'https://{}/thruk/r/hosts/{}/cmd/{}'.format(
        monitor_host, fqdn, action)
    headers = {'X-Thruk-Auth-Key': apikey}
    requests.post(post_url, data=data, headers=headers)
    try:
        requests.post(post_url, data=data, headers=headers)
    except Exception:
        print("Failed to add downtime for: {}".format(fqdn))


def remove_downtime(fqdn: str,
@@ -42,15 +45,18 @@ def remove_downtime(fqdn: str,
    get_url = 'https://{}/thruk/r/hosts?name={}&columns=services'.format(
        monitor_host, fqdn)
    headers = {'X-Thruk-Auth-Key': apikey}
    req = requests.get(get_url, headers=headers)
    action = 'del_active_service_downtimes'
    for service in req.json()[0]['services']:
        post_url = 'https://{}/thruk/r/services/{}/{}/cmd/{}'.format(
            monitor_host, fqdn, urllib.parse.quote(service), action)
        requests.post(post_url, headers=headers)
    try:
        req = requests.get(get_url, headers=headers)
        action = 'del_active_service_downtimes'
        for service in req.json()[0]['services']:
            post_url = 'https://{}/thruk/r/services/{}/{}/cmd/{}'.format(
                monitor_host, fqdn, urllib.parse.quote(service), action)
            requests.post(post_url, headers=headers)
    except Exception:
        print("Failed to remove downtime for: {}".format(fqdn))


def run_command(command: list[str]) -> tuple:
def run_command(command: list) -> tuple:
    with subprocess.Popen(command,
                          stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE) as proc:

@@ -68,6 +74,7 @@ def main() -> int:
    apikey_test = "<%= @apikey_test %>"
    apikey_prod = "<%= @apikey_prod %>"

    backup_command = ['sudo /home/script/bin/backup_db.sh']
    cosmos_command = ['sudo run-cosmos']
    nc_upgrade_command = 'sudo /usr/local/bin/occ config:editable --on '
    nc_upgrade_command += '&& sudo /usr/local/bin/occ upgrade '

@@ -80,9 +87,18 @@ def main() -> int:
    repair_command += '&& sudo /usr/local/bin/occ db:add-missing-primary-keys'
    reboot_command = ['sudo /usr/local/bin/safer_reboot']
    server_type = "node"
    backup_type = "backup"
    if customer == "common":
        customer = "gss"
        server_type = "gss"
        backup_type = "gssbackup"

    backup = build_fqdn(customer, environment, 1, backup_type)
    print("\tRunning backup command at {}".format(backup))
    run_remote_command(backup,
                       backup_command,
                       user="script",
                       output=subprocess.DEVNULL)

    for number in reversed(range(1, 4)):
        fqdn = build_fqdn(customer, environment, number, server_type)
@@ -11,5 +11,7 @@ else
fi

docker exec ${container} /do_backup.sh ${customer}
chown root:script /opt/mariadb_backup/
chmod 750 /opt/mariadb_backup/
chmod 755 ${backupdir}
chown -R script:root ${backupdir}
templates/scriptreceiver/bashrc.erb.sh (new file, +107)
@@ -0,0 +1,107 @@
# THIS FILE IS MANAGED BY PUPPET
# ~/.bashrc: executed by bash(1) for non-login shells.
# see /usr/share/doc/bash/examples/startup-files (in the package bash-doc)
# for examples

# If not running interactively, don't do anything
[ -z "$PS1" ] && return

# don't put duplicate lines in the history. See bash(1) for more options
# ... or force ignoredups and ignorespace
HISTCONTROL=ignoredups:ignorespace

# append to the history file, don't overwrite it
shopt -s histappend

# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
HISTSIZE=1000
HISTFILESIZE=2000

# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
shopt -s checkwinsize

# make less more friendly for non-text input files, see lesspipe(1)
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"

# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
    debian_chroot=$(cat /etc/debian_chroot)
fi

# set a fancy prompt (non-color, unless we know we "want" color)
case "$TERM" in
    xterm-color) color_prompt=yes ;;
esac

# uncomment for a colored prompt, if the terminal has the capability; turned
# off by default to not distract the user: the focus in a terminal window
# should be on the output of commands, not on the prompt
#force_color_prompt=yes

if [ -n "$force_color_prompt" ]; then
    if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then
        # We have color support; assume it's compliant with Ecma-48
        # (ISO/IEC-6429). (Lack of such support is extremely rare, and such
        # a case would tend to support setf rather than setaf.)
        color_prompt=yes
    else
        color_prompt=
    fi
fi
if [ "$color_prompt" = yes ]; then
    PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@$(hostname -f)\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
else
    PS1='${debian_chroot:+($debian_chroot)}\u@$(hostname -f):\w\$ '
fi
unset color_prompt force_color_prompt

# If this is an xterm set the title to user@host:dir
case "$TERM" in
    xterm* | rxvt*)
        PS1="\[\e]0;${debian_chroot:+($debian_chroot)}\u@$(hostname -f): \w\a\]$PS1"
        ;;
    *) ;;

esac

# enable color support of ls and also add handy aliases
if [ -x /usr/bin/dircolors ]; then
    test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
    alias ls='ls --color=auto'
    #alias dir='dir --color=auto'
    #alias vdir='vdir --color=auto'

    alias grep='grep --color=auto'
    alias fgrep='fgrep --color=auto'
    alias egrep='egrep --color=auto'
fi

# some more ls aliases
alias ll='ls -alF'
alias la='ls -A'
alias l='ls -CF'

if [[ $(hostname) =~ redis ]]; then
    if [ -f /opt/redis/node-0/server.conf ]; then
        alias redis-cli="redis-cli -a $(grep requirepass /opt/redis/node-0/server.conf | awk '{print $2}' | sed 's/"//g') --no-auth-warning"
    else
        alias redis-cli="redis-cli -a $(grep requirepass /opt/redict/node-0/server.conf | awk '{print $2}' | sed 's/"//g') --no-auth-warning"
    fi
fi

# Alias definitions.
# You may want to put all your additions into a separate file like
# ~/.bash_aliases, instead of adding them here directly.
# See /usr/share/doc/bash-doc/examples in the bash-doc package.

if [ -f ~/.bash_aliases ]; then
    . ~/.bash_aliases
fi

# enable programmable completion features (you don't need to enable
# this, if it's already enabled in /etc/bash.bashrc and /etc/profile
# sources /etc/bash.bashrc).
#if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
#    . /etc/bash_completion
#fi
templates/scriptreceiver/check_max_uptime (new executable file, +31)
@@ -0,0 +1,31 @@
#!/usr/bin/env python3
import argparse
import sys

parser = argparse.ArgumentParser()
parser.add_argument('-w', '--warning',
                    help='Warning threshold',
                    required=True)
parser.add_argument('-c', '--critical',
                    help='Critical threshold',
                    required=True)

args = parser.parse_args()
warning = int(args.warning)
critical = int(args.critical)

with open('/proc/uptime', 'r') as f:
    uptime_seconds = float(f.readline().split()[0])
    days = int(uptime_seconds / 86400)

status = "OK"
exit = 0
if days > warning:
    status = "WARNING"
    exit = 1
if days > critical:
    status = "CRITICAL"
    exit = 2

print(f"{status}: uptime {days} days | uptime={days};{warning};{critical};")
sys.exit(exit)
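A sketch of a Nagios-style invocation with thresholds in days; the second line shows the plugin output format the print statement above produces (the 42 is a made-up value):

./check_max_uptime -w 180 -c 365
# OK: uptime 42 days | uptime=42;180;365;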
@@ -38,7 +38,7 @@ preexisting="$(docker exec -u www-data -i "${container}" php --define apc.enable
if [[ -z ${preexisting} ]]; then
    docker exec -u www-data -i "${container}" php --define apc.enable_cli=1 /var/www/html/occ files_external:create "${user_bucket_name}" \
        amazons3 -c bucket="${bucket}" -c key="${key}" -c secret="${secret}" -c hostname="${endpoint}" -c use_ssl=true -c use_path_style=true -c region=us-east-1 \
        amazons3::accesskey --user ${user}
        -c useMultipartCopy=false amazons3::accesskey --user ${user}
    for shareid in $(docker exec -u www-data -i ${container} php --define apc.enable_cli=1 /var/www/html/occ files_external:export ${user} | jq -r '.[].mount_id'); do
        docker exec -u www-data -i ${container} php --define apc.enable_cli=1 /var/www/html/occ files_external:option ${shareid} enable_sharing true
    done
@@ -7,7 +7,7 @@ bucket=${4}
user=${5}
/usr/local/bin/occ files_external:create "${bucket}" \
    amazons3 -c bucket="${bucket}" -c key="${key}" -c secret="${secret}" -c hostname="${endpoint}" -c use_ssl=true -c use_path_style=true -c region=us-east-1 \
    amazons3::accesskey --user "${user}"
    -c useMultipartCopy=false amazons3::accesskey --user "${user}"
for shareid in $(/usr/local/bin/occ files_external:export "${user}" | jq -r '.[].mount_id'); do
    /usr/local/bin/occ files_external:option "${shareid}" enable_sharing true
done
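The shape of the fully rendered occ call after this change, with every value replaced by a placeholder:

/usr/local/bin/occ files_external:create mybucket \
    amazons3 -c bucket=mybucket -c key=AKIA... -c secret=... -c hostname=s3.example.se \
    -c use_ssl=true -c use_path_style=true -c region=us-east-1 \
    -c useMultipartCopy=false amazons3::accesskey --user user@example.se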
templates/scriptreceiver/get_drive_customers.erb.sh (new executable file, +12)
@@ -0,0 +1,12 @@
#!/bin/bash

mode=${1}
if [[ "${mode}" == "multi" ]]; then
    filter='.multinode_mapping| keys | join("\n")'
elif [[ "${mode}" == "single" ]]; then
    filter='.singlenodes| join("\n")'
else
    filter='.fullnodes | join("\n")'
fi
yq -r "${filter}" < /etc/hiera/data/common.yaml
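Hypothetical invocations, one per mode the script handles:

./get_drive_customers.sh          # fullnodes
./get_drive_customers.sh single   # singlenodes
./get_drive_customers.sh multi    # multinode customers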