Compare commits

..

158 commits

Author SHA1 Message Date
1a41a66355
Allow calendar servers to talk to db 2025-04-04 12:31:19 +02:00
da7be10d58
Fix typos 2025-03-26 14:09:24 +01:00
44e398a418
Remove fault file 2025-03-26 12:58:12 +01:00
d903f4f90d
Merge branch 'stable' 2025-03-26 12:57:46 +01:00
ad13e65250
Remove fault file 2025-03-26 12:56:27 +01:00
b63229092c
Add sql for roundcube 2025-03-26 12:56:23 +01:00
4b3c189481
Add sql for roundcube
Signed-off-by: Micke Nordin <kano@sunet.se>
2025-03-26 12:56:19 +01:00
8d57b773a0
Add sql for roundcube 2025-03-26 12:53:28 +01:00
3c815b9a38
Add sql for roundcube
Signed-off-by: Micke Nordin <kano@sunet.se>
2025-03-26 12:52:14 +01:00
55cc71871f
Create dirs 2025-03-25 19:08:44 +01:00
46f1330b5c
Disable bruteforce 2025-03-25 19:07:38 +01:00
d774349950
Disable bruteforce 2025-03-25 19:07:34 +01:00
249fe90c54
Create dirs 2025-03-25 19:01:44 +01:00
87b6ea8ddb
Disable bruteforce 2025-03-24 15:06:39 +01:00
dc39cb5d85
Disable bruteforce 2025-03-24 15:00:35 +01:00
beff4242ac
Merge branch 'main' into stable 2025-03-14 14:37:25 +01:00
cc31a2f038
Remove sysctl file that did not help 2025-03-13 16:16:32 +01:00
6a63031f84
use (new) node3_hosts group for check that only runs on node3 2025-03-13 13:01:45 +01:00
1c3eecdd85
final version of the newline problem 2025-03-13 12:45:45 +01:00
16c4f3650c
and yet another newline 2025-03-13 12:25:21 +01:00
57b90cced4
Merge branch 'main' into testing 2025-03-13 12:21:38 +01:00
4e67d6f1b8
needs newlines 2025-03-13 12:21:18 +01:00
9bb2afa68b
Merge branch 'main' into testing 2025-03-13 12:16:08 +01:00
26ea8e8e8d
first attempt at creating extra_host_groups 2025-03-13 12:15:34 +01:00
5c42f9d7d2
Merge branch 'main' into testing 2025-03-12 11:56:34 +01:00
f87fac3c3b
Remove quotes 2025-03-12 11:56:22 +01:00
582d88d2f1
Merge branch 'main' into testing 2025-03-12 11:53:31 +01:00
4f84f71070
Typo 2025-03-12 11:53:20 +01:00
4caed83e80
Merge branch 'main' into testing 2025-03-12 11:49:47 +01:00
38174166a0
Typo 2025-03-12 11:49:00 +01:00
3b42817c57
Merge branch 'main' into testing 2025-03-12 11:44:53 +01:00
5950751c73
Merge branch 'main' of ssh://platform.sunet.se:22022/Drive/sunetdrive 2025-03-12 11:44:20 +01:00
d010d27f20
Let's gooooo 2025-03-12 11:44:08 +01:00
d5c3486425
don't print empty line 2025-03-11 08:13:35 +01:00
1aea2f5674
apparently "You do not need to declare variables before using them" in python - cleanup 2025-03-11 08:13:33 +01:00
0b3a2d43cb
buckets needs to be reset each loop 2025-03-11 08:13:30 +01:00
4b952bac40
trying to improve output of check_nextcloud_mounts script 2025-03-11 08:13:28 +01:00
e150fa3625
separating multinode from app_type 2025-03-11 08:13:26 +01:00
3579682de8
testing if $is_multinode is the problem - correcter regex 2025-03-11 08:13:23 +01:00
8f090bfcac
testing if $is_multinode is the problem 2025-03-11 08:13:20 +01:00
0e189ad0ef
change check_nextcloud_mounts.py output 2025-03-11 08:13:15 +01:00
e27cf34987
changed wording 2025-03-11 08:13:10 +01:00
17d8d8b2de
changed wording 2025-03-11 08:13:06 +01:00
3fbfe26c66
adding check for nextcloud mounts 2025-03-11 08:13:00 +01:00
4a6d35b984
Merge branch 'main' into stable 2025-03-10 14:50:17 +01:00
ef1eb5b4ad
Merge branch 'testing' of ssh://platform.sunet.se:22022/Drive/sunetdrive into testing 2025-03-10 14:49:51 +01:00
dc5b61c9e6
Merge branch 'main' into testing 2025-03-10 14:49:44 +01:00
33389e842a
Set useMultiPartCopy=true everywhere and switch to nocc 2025-03-10 14:49:29 +01:00
bb7ab09db8
don't print empty line 2025-03-07 08:35:33 +01:00
bc481d44d5
apparently "You do not need to declare variables before using them" in python - cleanup 2025-03-06 16:26:05 +01:00
5619cb8270
buckets needs to be reset each loop 2025-03-06 15:42:32 +01:00
fbe409efff
trying to improve output of check_nextcloud_mounts script 2025-03-06 15:05:00 +01:00
9096bf7c33
separating multinode from app_type 2025-03-05 16:23:42 +01:00
1c38cfbf3c
Merge branch 'main' into testing 2025-03-05 15:39:03 +01:00
d332a810b9
Add webmail 2025-03-05 15:38:44 +01:00
74da008b5e
testing if $is_multinode is the problem - correcter regex 2025-03-05 15:00:14 +01:00
a6eaddbd8f
testing if $is_multinode is the problem 2025-03-05 14:58:01 +01:00
0da62f097d
change check_nextcloud_mounts.py output 2025-03-05 14:15:45 +01:00
ed3d4b1b9c
changed wording 2025-03-05 13:28:33 +01:00
bb44e6da8f
changed wording 2025-03-05 13:21:56 +01:00
de3d18b0a1
adding check for nextcloud mounts 2025-03-05 12:49:32 +01:00
aa6be0ce34
StrictHostKeyChecking=no to avoid problems whith re-install 2025-03-03 15:39:57 +01:00
83fcc67c91
- bucketnames have to be lower case
- output from occ changed so --show-password is needed now
2025-03-03 13:22:40 +01:00
ec4e2bdb38
Merge branch 'main' into stable 2025-02-28 12:50:14 +01:00
a68fea86b8
Don' allocate tty 2025-02-28 12:48:42 +01:00
a1b7a03908
Merge branch 'main' into stable 2025-02-28 12:18:49 +01:00
7363284353
Missed one path 2025-02-28 12:17:03 +01:00
9f0eccdb3d
Merge branch 'main' into stable 2025-02-28 12:07:30 +01:00
be9f1a7152
Use full path 2025-02-28 12:06:24 +01:00
f6cd170e6f
Merge branch 'main' into stable 2025-02-27 12:53:52 +01:00
dcb0ca8e48
Remove scriptherder from count 2025-02-27 12:53:06 +01:00
e0f427fcc8
Merge branch 'main' into stable 2025-02-27 11:20:37 +01:00
7090f7ca5e
Run in prod 2025-02-27 11:15:12 +01:00
705479f29c
Merge branch 'main' into stable 2025-02-27 11:12:20 +01:00
3007fc6de4
shfmt 2025-02-27 11:09:29 +01:00
4a270b9f65
Fix check 2025-02-27 11:08:13 +01:00
b2942339d1
Fix output 2025-02-27 11:03:37 +01:00
e0e1d5148f
Merge branch 'main' into stable 2025-02-26 12:37:44 +01:00
919ff971b4
Also report container 2025-02-26 12:36:58 +01:00
8b216f6730
Merge branch 'main' into stable 2025-02-26 12:33:24 +01:00
434cd6d1e4
Remove gss related manifests and config 2025-02-26 12:32:50 +01:00
b3c42225e1
Remove lookup related manifests and config 2025-02-26 12:32:39 +01:00
06d7fabf4b
Run cronjob in scriptherder and report errors found 2025-02-26 12:30:30 +01:00
800b2c4afc
Lets only sleep if we are not allready running 2025-02-26 12:09:59 +01:00
1654186ddc
Add a deterministic sleep to spread out the load and lets run it at night in test 2025-02-26 12:08:03 +01:00
2c974bd178
Change guard condition 2025-02-26 11:50:34 +01:00
41e9045833
This is the way 2025-02-26 11:13:22 +01:00
7e53313d94
Can I now have it outside? 2025-02-26 11:09:17 +01:00
ebe6f9640d
I am blind 2025-02-26 11:05:31 +01:00
74f96705f1
negation 2025-02-26 11:03:28 +01:00
c7e337f3c4
multinodes are confused 2025-02-26 11:01:46 +01:00
2944823676
Only run on node3 and multinodes 2025-02-26 10:34:44 +01:00
fc0a790bd1
Add scan cronjob to test 2025-02-26 10:32:07 +01:00
a4a9e492f5
Move get_containers to all nodes 2025-02-26 10:05:50 +01:00
cbb291be47
Make sure we know where our backups are located 2025-02-25 20:21:40 +01:00
d760626e0b
Make sure we know where our backups are located 2025-02-25 20:18:52 +01:00
ec6cda87fb Remove gss related manifests and config 2025-02-25 11:55:43 +01:00
00c494193a Remove lookup related manifests and config 2025-02-25 11:49:09 +01:00
76e110ab90
Merge branch 'main' into stable 2025-02-20 17:00:35 +01:00
3315e17346
Add nocc command 2025-02-13 15:33:27 +01:00
f1c816cba1
Remove gss and lookup 2025-02-13 14:12:52 +01:00
331db3e24c
Merge branch 'testing' 2025-02-13 14:10:01 +01:00
a7d737c8c4 Merge pull request 'Enable pretty URLs (https://docs.nextcloud.com/server/latest/admin_manual/installation/source_installation.html\#pretty-urls)' (#3) from pretty_urls into testing
Reviewed-on: #3
2025-02-13 12:53:39 +00:00
2a273c649d
Enable pretty URLs (https://docs.nextcloud.com/server/latest/admin_manual/installation/source_installation.html\#pretty-urls) 2025-02-13 13:50:42 +01:00
96632b111b
Containers changed names 2025-02-10 10:26:31 +01:00
f2774b9c38
Containers changed names 2025-02-10 10:25:59 +01:00
0ded3c46aa
Containers changed names 2025-02-10 10:25:36 +01:00
74343d15ef
Allow long running check for scriptherder 2025-02-03 15:11:45 +01:00
602fb12a7f
Allow long running check for scriptherder 2025-02-03 15:10:57 +01:00
fd0ed43035
Allow long running check for scriptherder 2025-02-03 15:10:28 +01:00
0046349756
Merge branch 'main' into stable 2025-02-03 15:06:16 +01:00
1f30a23a5a
Merge branch 'main' into testing 2025-02-03 15:00:52 +01:00
def0a0a77b
Allow more inc backups 2025-02-03 15:00:28 +01:00
Lars Delhage
98158e1be5
Remove recursive for mysql db dir 2025-01-20 12:29:01 +01:00
78bc2b3f7e
Multinode: Double book keeping
Unfortunatly we must do the same calcultions for multinode
2025-01-20 12:28:59 +01:00
26ee034887
Set expiration for multinode 2025-01-20 12:28:56 +01:00
d891874202
Add full_backup_retention 2025-01-20 12:28:53 +01:00
c11e95f5fe
Only run this on monitor hosts, as it takes a long time 2025-01-20 12:28:49 +01:00
9596f9d210
Multinode: Double book keeping
Unfortunatly we must do the same calcultions for multinode
2025-01-20 12:28:47 +01:00
e57349f011
Set expiration for multinode 2025-01-20 12:28:44 +01:00
02578640a9
Add full_backup_retention 2025-01-20 12:28:42 +01:00
c9bdb766eb
Run as root user 2025-01-20 12:28:39 +01:00
9425d56716
Multinode: Double book keeping
Unfortunatly we must do the same calcultions for multinode
2025-01-20 12:28:37 +01:00
b2d3b80f37
Set expiration for multinode 2025-01-20 12:28:35 +01:00
d67cc24318
Add full_backup_retention 2025-01-20 12:28:32 +01:00
efd05971b1
SATOSA: Always have alwayshttps 2025-01-20 12:28:30 +01:00
8dd5534b0f
Multinode: Double book keeping
Unfortunatly we must do the same calcultions for multinode
2025-01-20 12:28:27 +01:00
d24fa23262
Set expiration for multinode 2025-01-20 12:28:24 +01:00
84fc07cffe
Add full_backup_retention 2025-01-20 12:28:21 +01:00
5b68f26771
Multinode: Double book keeping
Unfortunatly we must do the same calcultions for multinode
2025-01-20 12:28:19 +01:00
dc59842fe8
Set expiration for multinode 2025-01-20 12:28:16 +01:00
a91bb85313
Add full_backup_retention 2025-01-20 12:28:12 +01:00
5f0c24865f
added ability to set trashbin_retention_obligation and versions_retention_obligation in config.php 2025-01-20 12:28:09 +01:00
8ee75b0601
changed nft rules for proxysql - ipv6 2025-01-20 12:28:06 +01:00
62670483c1
changed nft rules for proxysql 2025-01-20 12:28:04 +01:00
ace56103fc
duplicate 2025-01-20 12:28:01 +01:00
4afec74a34
one more 2025-01-20 12:27:58 +01:00
d4d998cf81
Fix notify 2025-01-20 12:27:55 +01:00
9794b03d44
Can not redeclare class 2025-01-20 12:27:52 +01:00
45a44789a4
add back image 2025-01-20 12:27:50 +01:00
4201f18ce6
Fix path 2025-01-20 12:27:47 +01:00
8057a2fa22
convert config to use newer docker_compose class 2025-01-20 12:27:44 +01:00
Lars Delhage
2625269aba
Remove recursive for mysql db dir 2025-01-20 11:53:25 +01:00
38cd097f71
Only run this on monitor hosts, as it takes a long time 2025-01-20 09:00:14 +01:00
ab2d70303f
Run as root user 2025-01-20 08:45:55 +01:00
0154a533ce
SATOSA: Always have alwayshttps 2025-01-16 15:16:33 +01:00
63b780028f
Multinode: Double book keeping
Unfortunatly we must do the same calcultions for multinode
2025-01-16 13:14:37 +01:00
31f4c1eb9b
Set expiration for multinode 2025-01-16 12:47:16 +01:00
b6c0bb2f46
Merge branch 'testing' 2025-01-16 12:41:18 +01:00
9e6e33e137
Add full_backup_retention 2025-01-16 12:41:12 +01:00
977bd84f50
duplicate 2025-01-16 10:23:18 +01:00
c7de56f73f
one more 2025-01-16 10:23:17 +01:00
6cfa2b3da7 Merge branch 'main' into stable 2024-12-24 16:10:24 +01:00
ee6887bfeb Fix redis name 2024-12-24 16:00:34 +01:00
5a04dcdd0f Add back proxysql networ 2024-12-24 16:00:20 +01:00
8927f194d9 Proxysql no longer has external network 2024-12-24 15:59:03 +01:00
04e4592f4c Revert "Use new uptime check"
This reverts commit 60fc3ef307.
2024-12-24 15:59:03 +01:00
45989b8d3d Use new uptime check 2024-12-24 15:59:03 +01:00
34 changed files with 312 additions and 348 deletions

View file

@ -0,0 +1,6 @@
# Raise the hard ceilings for socket receive/send buffers to 64 MiB so
# high-bandwidth / high-latency transfers are not limited by buffer size.
net.core.rmem_max=67108864
net.core.wmem_max=67108864
# TCP buffer autotuning: min / default / max sizes in bytes (max 32 MiB).
net.ipv4.tcp_rmem=4096 87380 33554432
net.ipv4.tcp_wmem=4096 87380 33554432
# BBR relies on packet pacing; the fq qdisc provides it, so set it as the
# default queueing discipline before selecting BBR as congestion control.
net.core.default_qdisc=fq
net.ipv4.tcp_congestion_control=bbr

View file

@ -2,13 +2,7 @@
function sunetdrive::get_customer() >> String {
$hostnameparts = split($facts['networking']['fqdn'],'\.')
if $hostnameparts[1] == 'drive' {
if $hostnameparts[0] =~ /^gss/ {
return 'gss'
} elsif $hostnameparts[0] =~ /^lookup/ {
return 'lookup'
} else {
return 'common'
}
return 'common'
} elsif $hostnameparts[0] =~ /idp-proxy/ {
return 'common'
}

View file

@ -66,15 +66,11 @@ define sunetdrive::app_type (
# These are encrypted values from local.eyaml
$gss_jwt_key = safe_hiera('gss_jwt_key')
$smtppassword = safe_hiera('smtp_password')
#These are global values from common.yaml
$gs_enabled = hiera('gs_enabled')
$gs_federation = hiera('gs_federation')
$gss_master_admin = hiera_array('gss_master_admin')
$gss_master_url = hiera("gss_master_url_${environment}")
$lookup_server = hiera("lookup_server_${environment}")
$mail_domain = hiera("mail_domain_${environment}")
$mail_smtphost = hiera("mail_smtphost_${environment}")
$mail_from_address = hiera("mail_from_address_${environment}")
@ -82,9 +78,6 @@ define sunetdrive::app_type (
$smtpuser = hiera("smtp_user_${environment}")
$tug_office = hiera_array('tug_office')
# This is a global value from common.yaml but overridden in the gss-servers local.yaml
$gss_mode = hiera('gss_mode')
# These are global values from common.yaml but can be overridden in group.yaml
$drive_email_template_text_left = $config['drive_email_template_text_left']
$drive_email_template_plain_text_left = $config['drive_email_template_plain_text_left']
@ -100,6 +93,48 @@ define sunetdrive::app_type (
unless $is_multinode{
user { 'www-data': ensure => present, system => true }
file { '/usr/local/bin/get_containers':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/get_containers'),
mode => '0744',
}
if ($nodenumber == 3) {
file { '/usr/lib/nagios/plugins/check_nextcloud_mounts.py':
ensure => present,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/check_nextcloud_mounts.py'),
mode => '0744',
}
sunet::sudoer {'nagios_run_nextcloud_mounts_command':
user_name => 'nagios',
collection => 'nrpe_nextcloud_mounts_check',
command_line => '/usr/lib/nagios/plugins/check_nextcloud_mounts.py'
}
sunet::nagios::nrpe_command {'check_nextcloud_mounts':
command_line => '/usr/bin/sudo /usr/lib/nagios/plugins/check_nextcloud_mounts.py'
}
}
if ($nodenumber == 3) {
file { '/usr/local/bin/scan_external_mounts':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/scan_external_mounts.sh'),
mode => '0744',
}
sunet::scriptherder::cronjob { 'scriptherder_scan_external_mounts':
cmd => '/usr/local/bin/scan_external_mounts',
hour => '1',
minute => '20',
ok_criteria => ['exit_status=0','max_age=2d'],
warn_criteria => ['exit_status=1','max_age=3d'],
}
}
file { '/opt/nextcloud/cron.sh':
ensure => file,
owner => 'root',
@ -113,16 +148,22 @@ define sunetdrive::app_type (
minute => '*/5',
}
file { '/opt/nextcloud/user-sync.sh':
ensure => file,
ensure => absent,
}
file { '/usr/local/bin/nocc':
ensure => present,
force => true,
owner => 'root',
group => 'root',
mode => '0700',
content => template('sunetdrive/application/user-sync.erb.sh'),
content => template('sunetdrive/application/nocc.erb'),
mode => '0740',
}
-> cron { 'gss_user_sync':
command => '/opt/nextcloud/user-sync.sh',
user => 'root',
minute => '*/5',
file { '/etc/sudoers.d/99-nocc':
ensure => file,
content => "script ALL=(root) NOPASSWD: /usr/local/bin/nocc\n",
mode => '0440',
owner => 'root',
group => 'root',
}
file { '/usr/local/bin/occ':
ensure => present,
@ -314,23 +355,7 @@ define sunetdrive::app_type (
mode => '0744',
}
}
if $location =~ /^gss-test/ {
file { '/opt/nextcloud/mappingfile.json':
ensure => present,
owner => 'www-data',
group => 'root',
content => template('sunetdrive/application/mappingfile-test.json.erb'),
mode => '0644',
}
} elsif $location =~ /^gss/ {
file { '/opt/nextcloud/mappingfile.json':
ensure => present,
owner => 'www-data',
group => 'root',
content => template('sunetdrive/application/mappingfile-prod.json.erb'),
mode => '0644',
}
} elsif $location =~ /^kau/ {
if $location =~ /^kau/ {
file { '/mnt':
ensure => directory,
owner => 'www-data',
@ -380,5 +405,4 @@ define sunetdrive::app_type (
}
}
}
}

View file

@ -15,6 +15,7 @@ define sunetdrive::db_type(
$backup_password = safe_hiera('backup_password')
$proxysql_password = safe_hiera('proxysql_password')
$mysql_user_password = safe_hiera('mysql_user_password')
$roundcube_password = safe_hiera('roundcube_password')
$mariadb_dir = '/etc/mariadb'
$mycnf_path = 'sunetdrive/mariadb/my.cnf.erb'
$server_id = 1000 + Integer($facts['networking']['hostname'][-1])
@ -33,8 +34,10 @@ define sunetdrive::db_type(
$ports = [3306, 4444, 4567, 4568]
if $location =~ /^multinode/ {
$from = $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6 + $config['kube'] + $config['kube_v6']
} elsif $location == 'sunet-test' or $location == 'sunet-prod' {
$from = $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6 + $config['imap'] + $config['imap_v6'] + $config['smtp'] + $config['smtp_v6']
} elsif $location == 'sunet-prod' {
$from = $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6 + $config['imap'] + $config['imap_v6'] + $config['smtp'] + $config['smtp_v6'] + $config['webmail'] + $config['webmail_v6']
} elsif $location == 'sunet-test' {
$from = $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6 + $config['imap'] + $config['imap_v6'] + $config['smtp'] + $config['smtp_v6'] + $config['webmail'] + $config['webmail_v6'] + $config['calendar'] + $config['calendar_v6']
} else {
$from = $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6
}
@ -46,11 +49,7 @@ define sunetdrive::db_type(
sunet::system_user {'mysql': username => 'mysql', group => 'mysql' }
if $location =~ /^lookup/ {
$sql_files = ['02-backup_user.sql', '03-proxysql.sql', '05-lookup.sql']
} else {
$sql_files = ['02-backup_user.sql', '03-proxysql.sql', '04-nextcloud.sql']
}
$sql_files = ['02-backup_user.sql', '03-proxysql.sql', '04-nextcloud.sql', '05-roundcube.sql']
$sql_files.each |$sql_file|{
file { "${mariadb_dir}/init/${sql_file}":
ensure => present,

View file

@ -6,8 +6,6 @@ class sunetdrive::infra_script (
$environment = sunetdrive::get_environment()
$customer = 'common'
$config = hiera_hash($environment)
$gss_backup_server = $config['gss_backup_server']
$lookup_backup_server = $config['lookup_backup_server']
$ssh_config = "Host *.sunet.se
User script
IdentityFile /root/.ssh/id_script"
@ -59,14 +57,16 @@ class sunetdrive::infra_script (
mode => '0700',
}
sunet::scriptherder::cronjob { 'backupgssdb':
cmd => "/root/tasks/backupdb.sh ${gss_backup_server}",
ensure => 'absent',
cmd => 'true',
hour => '2',
minute => '0',
ok_criteria => ['exit_status=0','max_age=2d'],
warn_criteria => ['exit_status=1','max_age=3d'],
}
sunet::scriptherder::cronjob { 'backuplookupdb':
cmd => "/root/tasks/backupdb.sh ${lookup_backup_server}",
ensure => 'absent',
cmd => 'true',
hour => '2',
minute => '0',
ok_criteria => ['exit_status=0','max_age=2d'],

View file

@ -1,47 +0,0 @@
#Class for SUNET-Drive-Lookup-Server
class sunetdrive::lookup (
$bootstrap = undef,
$location = undef
) {
$environment = sunetdrive::get_environment()
$config = lookup($environment, undef, undef, undef)
$public_url = "https://${config['site_name']}"
# Firewall settings
$nextcloud_ip = hiera_array("${location}_app", [])
$tug_office = hiera_array('tug_office')
$dbhost = '127.0.0.1'
$gss_jwt_key = safe_hiera('gss_jwt_key')
$replication_auth = safe_hiera('replication_auth')
$mysql_user_password = safe_hiera('mysql_user_password')
$lookup_version = hiera("lookup_version_${environment}")
$email_sender = $config['email_sender']
#Create users
user { 'www-data': ensure => present, system => true }
file { '/opt/lookup/config.php':
ensure => file,
owner => 'www-data',
group => 'root',
content => template('sunetdrive/lookup/config.php.erb'),
mode => '0644',
}
sunet::docker_compose { 'drive_lookup_docker_compose':
content => template('sunetdrive/lookup/docker-compose_lookup.yml.erb'),
service_name => 'lookup',
compose_dir => '/opt/',
compose_filename => 'docker-compose.yml',
description => 'Lookup server',
}
sunet::misc::ufw_allow { 'https':
from => '0.0.0.0/0',
port => 443,
}
}

View file

@ -14,8 +14,6 @@ class sunetdrive::multinode (
$db_ip = hiera_hash($environment)['db']
$admin_password = hiera('admin_password')
$cluster_admin_password = hiera('cluster_admin_password')
# This is a global value from common.yaml but overridden in the gss-servers local.yaml
$gss_mode = hiera('gss_mode')
$twofactor_enforced_groups = []
$twofactor_enforced_excluded_groups = []
@ -40,6 +38,59 @@ class sunetdrive::multinode (
user { 'www-data': ensure => present, system => true }
sunet::system_user {'mysql': username => 'mysql', group => 'mysql' }
ensure_resource('file', '/opt/nextcloud' , { ensure => directory, recurse => true } )
file { '/usr/local/bin/get_containers':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/get_containers'),
mode => '0744',
}
file { '/usr/lib/nagios/plugins/check_nextcloud_mounts.py':
ensure => present,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/check_nextcloud_mounts.py'),
mode => '0744',
}
sunet::sudoer {'nagios_run_nextcloud_mounts_command':
user_name => 'nagios',
collection => 'nrpe_nextcloud_mounts_check',
command_line => '/usr/lib/nagios/plugins/check_nextcloud_mounts.py'
}
sunet::nagios::nrpe_command {'check_nextcloud_mounts':
command_line => '/usr/bin/sudo /usr/lib/nagios/plugins/check_nextcloud_mounts.py'
}
file { '/usr/local/bin/scan_external_mounts':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/scan_external_mounts.sh'),
mode => '0744',
}
sunet::scriptherder::cronjob { 'scriptherder_scan_external_mounts':
cmd => '/usr/local/bin/scan_external_mounts',
hour => '1',
minute => '20',
ok_criteria => ['exit_status=0','max_age=2d'],
warn_criteria => ['exit_status=1','max_age=3d'],
}
file { '/usr/local/bin/nocc':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/nocc.erb'),
mode => '0740',
}
file { '/etc/sudoers.d/99-nocc':
ensure => file,
content => "script ALL=(root) NOPASSWD: /usr/local/bin/nocc\n",
mode => '0440',
owner => 'root',
group => 'root',
}
file { '/usr/local/bin/occ':
ensure => present,
force => true,
@ -74,14 +125,6 @@ class sunetdrive::multinode (
content => template('sunetdrive/multinode/get_non_paying_customers.erb.sh'),
mode => '0744',
}
file { '/usr/local/bin/get_containers':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/multinode/get_containers'),
mode => '0744',
}
file { '/usr/local/bin/restart_and_prune':
ensure => present,
force => true,
@ -213,7 +256,7 @@ MACAddressPolicy=none'
}
# if $nodenumber == '2' {
# cron { 'add_back_bucket_for_karin_nordgren':
# command => '(/usr/local/bin/occ nextcloud-kmh_app_1 files_external:list karin_nordgren@kmh.se && /home/script/bin/create_bucket.sh nextcloud-kmh_app_1 karin_nordgren@kmh.se karin-nordgren-drive-sunet-se) || /bin/true',
# command => '(/usr/local/bin/occ nextcloud-kmh-app-1 files_external:list karin_nordgren@kmh.se && /home/script/bin/create_bucket.sh nextcloud-kmh-app-1 karin_nordgren@kmh.se karin-nordgren-drive-sunet-se) || /bin/true',
# user => 'root',
# minute => '*/10',
# }
@ -222,7 +265,7 @@ MACAddressPolicy=none'
$customer_config_full = hiera_hash($customer)
$customer_config = $customer_config_full[$environment]
cron { "multinode_cron_${customer}":
command => "/opt/nextcloud/cron.sh nextcloud-${customer}_app_1",
command => "/opt/nextcloud/cron.sh nextcloud-${customer}-app-1",
require => File['/opt/nextcloud/cron.sh'],
user => 'root',
minute => '*/10',
@ -256,10 +299,7 @@ MACAddressPolicy=none'
$gs_enabled = hiera('gs_enabled')
$gs_federation = hiera('gs_federation')
$gss_master_admin = hiera_array('gss_master_admin')
$gss_master_url = hiera("gss_master_url_${environment}")
$https_port = hiera_hash('multinode_mapping')[$customer]['port']
$lookup_server = hiera("lookup_server_${environment}")
$mail_domain = hiera("mail_domain_${environment}")
$mail_from_address = hiera("mail_from_address_${environment}")
$mail_smtphost = hiera("mail_smtphost_${environment}")
@ -312,7 +352,6 @@ MACAddressPolicy=none'
$secret = safe_hiera("${customer}_secret")
$passwordsalt= safe_hiera("${customer}_passwordsalt")
$redis_host_password = safe_hiera("${customer}_redis_host_password")
$gss_jwt_key = safe_hiera('gss_jwt_key')
$smtppassword = safe_hiera('smtp_password')
$extra_config = {

View file

@ -41,7 +41,10 @@ class sunetdrive::multinode_db(){
group => 'root',
mode => '0600',
}
file { '/root/tasks/listusersbydep.sh':
file { '/root/tasks/':
ensure => directory,
}
-> file { '/root/tasks/listusersbydep.sh':
ensure => file,
content => template('sunetdrive/mariadb/listusersdep.sh.erb'),
owner => 'root',
@ -55,7 +58,10 @@ class sunetdrive::multinode_db(){
group => 'root',
mode => '0700',
}
file {'/opt/mariadb/statistics/custdata.json':
file {'/opt/mariadb/statistics/':
ensure => directory,
}
-> file {'/opt/mariadb/statistics/custdata.json':
ensure => file,
content => template('sunetdrive/mariadb/custconfig.json.erb'),
owner => 'root',

View file

@ -254,8 +254,11 @@ class sunetdrive::script (
group => 'root',
mode => '0700',
}
file { '/root/tasks/backupsinglenodedb.sh':
ensure => absent,
file_line { 'FIXME_remove_when_s3_migration_done_in_sto3':
ensure => 'present',
line => '37.156.195.53 s3.sto3.safedc.net',
path => '/etc/hosts',
match => '^37.156.195.53',
}
if $environment == 'test' {
sunet::scriptherder::cronjob { 'reboot-customer':
@ -388,7 +391,7 @@ class sunetdrive::script (
$singlenodes.each | $singlenode| {
$multinode = hiera_hash('multinode_mapping')[$singlenode]['server']
$multinodeserver = "${multinode}.${site_name}"
$nccontainer = "nextcloud-${singlenode}_app_1"
$nccontainer = "nextcloud-${singlenode}-app-1"
sunet::scriptherder::cronjob { "backup${singlenode}db":
ensure => absent,
@ -418,17 +421,17 @@ class sunetdrive::script (
}
}
}
$gss_backup_server = $config['gss_backup_server']
$lookup_backup_server = $config['lookup_backup_server']
sunet::scriptherder::cronjob { 'backupgssdb':
cmd => "/root/tasks/backupdb.sh ${gss_backup_server}",
ensure => 'absent',
cmd => 'true',
hour => '2',
minute => '0',
ok_criteria => ['exit_status=0','max_age=2d'],
warn_criteria => ['exit_status=1','max_age=3d'],
}
sunet::scriptherder::cronjob { 'backuplookupdb':
cmd => "/root/tasks/backupdb.sh ${lookup_backup_server}",
ensure => 'absent',
cmd => 'true',
hour => '2',
minute => '0',
ok_criteria => ['exit_status=0','max_age=2d'],
@ -488,7 +491,6 @@ class sunetdrive::script (
}
sunet::scriptherder::cronjob { 'backupbuckets':
cmd => '/root/tasks/backupbuckets.sh',
environment => ['RICHIR_TEST=true', 'OTHER_RICHIR=false'],
hour => '2',
minute => '0',
ok_criteria => ['exit_status=0','max_age=2d'],

View file

@ -5,7 +5,19 @@ class sunetdrive::scriptreceiver()
sunet::system_user {'script': username => 'script', group => 'script', managehome => true, shell => '/bin/bash' }
# These tasks correspond to a ${task}.erb.sh template
$tasks = ['list_users', 'list_files_for_user', 'create_bucket', 'backup_db', 'purge_backups', 'maintenancemode', 'restart_sunet_service', 'start_sentinel', 'stop_sentinel', 'removeswap', 'backup_multinode_db']
$tasks = [
'list_users',
'list_files_for_user',
'create_bucket',
'backup_db',
'purge_backups',
'maintenancemode',
'restart_sunet_service',
'start_sentinel',
'stop_sentinel',
'removeswap',
'backup_multinode_db'
]
$environment = sunetdrive::get_environment()
$config = hiera_hash($environment)
@ -35,7 +47,9 @@ class sunetdrive::scriptreceiver()
type => 'ssh-ed25519',
key => $script_pub_key,
}
file { '/etc/sysctl.d/gofasta.conf':
ensure => 'absent',
}
file { '/opt/rotate':
ensure => directory,
mode => '0750',

View file

@ -11,6 +11,9 @@ class sunetdrive::sitemonitornaemon() {
$environment = sunetdrive::get_environment()
$influx_passwd = safe_hiera('influx_passwd')
$slack_url = safe_hiera('slack_url')
$extra_host_groups = {
node3_hosts => join($facts['configured_hosts_in_cosmos']['all'].filter |$host| { $host =~ /^node3\./ }, ',')
}
file { '/usr/local/bin/slack_nagios.sh':
ensure => present,
@ -45,9 +48,14 @@ class sunetdrive::sitemonitornaemon() {
content => template('sunetdrive/monitor/sunetdrive_thruk_templates.conf.erb'),
mode => '0644',
}
file { '/etc/naemon/conf.d/sunetdrive_extra_hostgroups.cfg':
ensure => present,
content => template('sunetdrive/monitor/sunetdrive_extra_hostgroups.cfg.erb'),
mode => '0644',
}
nagioscfg::service {'check_scriptherder':
hostgroup_name => ['sunetdrive::nrpe'],
check_command => 'check_nrpe_1arg_to30!check_scriptherder',
check_command => 'check_nrpe_1arg_to300!check_scriptherder',
description => 'Scriptherder Status',
contact_groups => ['naemon-admins'],
}
@ -99,6 +107,12 @@ class sunetdrive::sitemonitornaemon() {
description => 'Status of sarimner interface',
contact_groups => ['alerts']
}
nagioscfg::service {'check_nextcloud_mounts':
hostgroup_name => ['node3_hosts','sunetdrive::multinode'],
check_command => 'check_nrpe_1arg!check_nextcloud_mounts',
description => 'S3 buckets with multiple Nextcloud mounts',
contact_groups => ['alerts']
}
}

View file

@ -0,0 +1,42 @@
#!/usr/bin/env python3
"""Nagios check: warn when an S3 bucket is mounted more than once.

Lists all Nextcloud containers via /usr/local/bin/get_containers, asks each
one for its external mounts with nocc, and reports any bucket that appears
in more than one mount. Prints a one-line status followed by details, and
exits with an accumulated status code (currently always 0 — see comments).
"""
from collections import Counter
import json
import shlex
import subprocess
import sys

# Renamed from `exit` to avoid shadowing the builtin.
exit_code = 0
base_message = "OK: no duplicate mounts"
long_message = ""

# subprocess.run waits for the child (Popen(...).stdout.read() left zombies).
containers = subprocess.run(
    '/usr/local/bin/get_containers',
    stdout=subprocess.PIPE,
    check=False,
).stdout.decode().splitlines()

for i, container in enumerate(containers, start=1):
    buckets = []
    list_command = f"/usr/local/bin/nocc {container} files_external:list --all --show-password --output json"
    command = shlex.split(list_command)
    mount_data_byte = subprocess.run(
        command, stdout=subprocess.PIPE, check=False
    ).stdout
    try:
        mount_data = json.loads(mount_data_byte.decode())
    except json.decoder.JSONDecodeError as err:
        # NOTE(review): original condition kept as-is — it appears intended
        # to suppress the warning only when the *last* of several containers
        # yields invalid JSON (i == 1 or i != len(containers)); confirm intent.
        if i == 1 or i != len(containers):
            base_message = "WARNING: invalid json"
            long_message += f"\ncontainer: {container} - json decode error: {err}"
            # lets do exit 0 for now
            # exit_code = 1
        continue
    for items in mount_data:
        buckets.append(items["configuration"]["bucket"])
    # Counter is directly iterable; no need to convert to dict first.
    for bucket, count in Counter(buckets).items():
        if count > 1:
            base_message = "WARNING: buckets with multiple mounts"
            long_message += f"\ncontainer: {container} - bucket: {bucket} - {count}"
            # lets do exit 0 for now
            # exit_code = 1

print(base_message)
if long_message != "":
    print(long_message.lstrip())
sys.exit(exit_code)

View file

@ -1,10 +1,5 @@
<?php
$CONFIG = array (
'app_install_overwrite' =>
array (
0 => 'globalsiteselector',
),
'apps_paths' =>
array (
0 =>
@ -21,6 +16,7 @@ $CONFIG = array (
),
),
'appstoreenabled' => false,
'auth.bruteforce.protection.enabled' => false,
'config_is_read_only' => true,
'csrf.disabled' => true,
'datadirectory' => '/var/www/html/data',
@ -48,28 +44,12 @@ $CONFIG = array (
'gs.enabled' => '<%= @gs_enabled %>',
'gs.federation' => '<%= @gs_federation %>',
'gs.trustedHosts' => ['*.sunet.se'],
'gss.discovery.manual.mapping.file' => '/var/www/html/mappingfile.json',
'gss.discovery.manual.mapping.parameter' => 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6',
'gss.discovery.manual.mapping.regex' => true,
'gss.jwt.key' => '<%= @gss_jwt_key %>',
'gss.master.admin' =>
array (
<%- index = 0 -%>
<%- @gss_master_admin.each do |item| -%>
<%= index %> => '<%= item %>',
<%- index += 1 -%>
<%- end -%>
),
'gss.master.url' => '<%= @gss_master_url %>',
'gss.mode' => '<%= @gss_mode %>',
'gss.user.discovery.module' => '\\OCA\\GlobalSiteSelector\\UserDiscoveryModules\\ManualUserMapping',
'gss.username_format' => 'sanitize',
'htaccess.RewriteBase' => '/',
'installed' => true,
'instanceid' => '<%= @instanceid %>',
'integrity.check.disabled' => true,
'log_type' => 'file',
'loglevel' => 1,
'lookup_server' => '<%= @lookup_server %>',
'mail_domain' => '<%= @mail_domain %>',
'mail_from_address' => '<%= @mail_from_address %>',
'mail_sendmailmode' => 'smtp',
@ -97,7 +77,7 @@ $CONFIG = array (
'region' => 'us-east-1',
'hostname' => '<%= @s3_host %>',
'port' => '',
'useMultipartCopy' => false,
'useMultipartCopy' => true,
'objectPrefix' => 'urn:oid:',
'autocreate' => false,
'use_ssl' => true,
@ -105,47 +85,11 @@ $CONFIG = array (
'legacy_auth' => false,
),
),
'overwrite.cli.url' => 'https://<%= @site_name %>',
'overwrite.cli.url' => 'https://<%= @site_name %>/',
'overwritehost' => '<%= @site_name %>',
'overwriteprotocol' => 'https',
'passwordsalt' => '<%= @passwordsalt %>',
<% if @location == 'gss-test' -%>
'redis.cluster' => [
'failover_mode' => \RedisCluster::FAILOVER_ERROR,
'password' => '<%= @redis_cluster_password %>',
'read_timeout' => 0.0,
'seeds' => [
'redis1.drive.test.sunet.se:6379',
'redis2.drive.test.sunet.se:6379',
'redis3.drive.test.sunet.se:6379',
'redis1.drive.test.sunet.se:6380',
'redis2.drive.test.sunet.se:6380',
'redis3.drive.test.sunet.se:6380',
'redis1.drive.test.sunet.se:6381',
'redis2.drive.test.sunet.se:6381',
'redis3.drive.test.sunet.se:6381'
],
'timeout' => 1.1
],
<% elsif @location == 'gss-prod' -%>
'redis.cluster' => [
'failover_mode' => \RedisCluster::FAILOVER_ERROR,
'password' => '<%= @redis_cluster_password %>',
'read_timeout' => 0.0,
'seeds' => [
'redis1.drive.sunet.se:6379',
'redis2.drive.sunet.se:6379',
'redis3.drive.sunet.se:6379',
'redis1.drive.sunet.se:6380',
'redis2.drive.sunet.se:6380',
'redis3.drive.sunet.se:6380',
'redis1.drive.sunet.se:6381',
'redis2.drive.sunet.se:6381',
'redis3.drive.sunet.se:6381'
],
'timeout' => 1.1
],
<% elsif @environment == 'test' && ! @is_multinode -%>
<% if @environment == 'test' && ! @is_multinode -%>
'redis.cluster' => [
'failover_mode' => \RedisCluster::FAILOVER_ERROR,
'password' => '<%= @redis_cluster_password %>',
@ -163,7 +107,7 @@ $CONFIG = array (
],
'timeout' => 1.1
],
<% elsif @environment == 'prod' && ! @is_multinode && @location != 'gss-prod' -%>
<% elsif @environment == 'prod' && ! @is_multinode -%>
'redis.cluster' => [
'failover_mode' => \RedisCluster::FAILOVER_ERROR,
'password' => '<%= @redis_cluster_password %>',

View file

@ -24,9 +24,6 @@ services:
<%- if @skeletondirectory -%>
- /opt/nextcloud/skeleton:<%= @skeletondirectory %>
<%- end -%>
<%- if @location =~ /^gss/ -%>
- /opt/nextcloud/mappingfile.json:/var/www/html/mappingfile.json
<%- end -%>
<% if @location =~ /^kau/ -%>
- /mnt:/opt/tmp/
<%- end -%>

21
templates/application/nocc.erb Executable file
View file

@ -0,0 +1,21 @@
#!/bin/bash
# nocc - run the Nextcloud "occ" admin tool inside a docker container.
#
# Usage: nocc [container] <occ arguments...>
#   If the first argument starts with "nextcloud" it is taken as the
#   container name; otherwise the default single-node container is used.
if [[ "${1}" =~ ^nextcloud ]]; then
  container=${1}
  shift
else
  container="nextcloud_app_1"
fi
# Forward every OC_* variable from our environment into the container
# as a "-e NAME=VALUE" pair.
oc_list=$(env | grep 'OC_')
MY_VARS=""
if [[ -n "${oc_list}" ]]; then
  for row in ${oc_list}; do
    MY_VARS="${MY_VARS} -e ${row}"
  done
fi
# MY_VARS must word-split into separate -e arguments, so it is
# intentionally left unquoted.
# shellcheck disable=SC2086
docker exec -i ${MY_VARS} -u www-data "${container}" php --define apc.enable_cli=1 /var/www/html/occ "$@"
# NOTE(review): exit 0 masks the status of docker exec; callers in this
# repo grep the output instead of checking the exit code, so it is kept.
exit 0

View file

@ -52,7 +52,7 @@ echo '
"region": "'${region}'",
"secret": "'${secret}'",
"storageClass": "",
"useMultipartCopy": false,
"useMultipartCopy": true,
"use_path_style": true,
"use_ssl": true
},

View file

@ -0,0 +1,34 @@
#!/bin/bash
# Scan every external (files_external) mount in every Nextcloud container
# on this host and report the mount ids whose scan printed an "Error" line.
# Exits 1 if any mount reported an error, 0 otherwise.
error_ids=""
# Only run if this is the only instance of this script running
# note: since this script forks to run pgrep, we need -eq 2 here
# shellcheck disable=SC2126
if [[ $(pgrep -a -f "${0}" | grep -v scriptherder | wc -l) -eq 2 ]]; then
# We sleep a deterministic amount of time, which will be between 0 and 128 m and always the same within
# a specific host, but will differ between hosts (the delay is derived from
# the first octet of the host's first MAC address, read as hex).
sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk '{print $1}') / 2))m
errors=''
for container in $(/usr/local/bin/get_containers); do
# error_ids gets the container label up front, so the final report also
# names containers whose mounts were all clean.
error_ids="${error_ids} ${container}: "
for id in $(/usr/local/bin/nocc "${container}" files_external:list --all --output json | jq '.[].mount_id' | jq .); do
# Print any "Error" lines from the scan; grep's exit status (checked
# via $? below) tells us whether this mount had errors.
/usr/local/bin/nocc "${container}" files_external:scan "${id}" | grep Error
# shellcheck disable=SC2181
if [[ ${?} -eq 0 ]]; then
errors="${errors} ${id}"
error_ids="${error_ids} ${id}"
fi
done
done
else
# Another copy is running (e.g. a long scan) - bail out quietly.
echo "Another instance of this script is already running, exiting"
pgrep -a -f "${0}" | grep -v scriptherder
exit 0
fi
if [[ -n "${errors}" ]]; then
echo "Errors found in the following mounts: ${error_ids}"
exit 1
fi
echo "No errors found"
exit 0

View file

@ -1,29 +0,0 @@
<?php
// Configuration for the Nextcloud lookup server.
// <%= ... %> placeholders are substituted by the Puppet ERB template.
$CONFIG = [
// Shared key used to authenticate lookup-server API requests.
'AUTH_KEY' => "<%= @gss_jwt_key %>",
// MySQL/MariaDB connection for the "lookup" database.
'DB' => [
'host' => "<%= @dbhost %>",
'db' => "lookup" ,
'user' => "lookup",
'pass' => "<%= @mysql_user_password %>",
],
'EMAIL_SENDER' => '<%= @email_sender %>',
'ERROR_VERBOSE' => false,
'GLOBAL_SCALE' => true,
// Intentionally empty: no IP or spam blocking configured.
'IP_BLACKLIST' => [
],
// NOTE(review): presumably request-rate and search-paging caps --
// confirm against the lookup-server documentation.
'MAX_REQUESTS' => 10000,
'MAX_SEARCH_PAGE' => 10,
'PUBLIC_URL' => '<%= @public_url %>',
'REPLICATION_AUTH' => '<%= @replication_auth %>',
'REPLICATION_HOSTS' => [
],
'SPAM_BLACKLIST' => [
],
// Twitter verification is unused; keys deliberately left empty.
'TWITTER' => [
'CONSUMER_KEY' => '',
'CONSUMER_SECRET' => '',
'ACCESS_TOKEN' => '',
'ACCESS_TOKEN_SECRET' => '',
],
];

View file

@ -1,17 +0,0 @@
# docker-compose definition for the Nextcloud lookup server.
# <%= @lookup_version %> is substituted by the Puppet ERB template.
version: '3.2'
services:
app:
container_name: lookup_app_1
image: docker.sunet.se/drive/nextcloud-lookup:<%= @lookup_version %>
restart: always
volumes:
# Puppet-managed config mounted over the container's config.php.
- /opt/lookup/config.php:/var/www/html/config/config.php
network_mode: host
dns:
- 89.46.20.75
- 89.46.21.29
- 89.32.32.32
command: apachectl -D FOREGROUND
tty: true

View file

@ -1,62 +0,0 @@
-- Schema bootstrap for the Nextcloud lookup-server database.
-- <%= @mysql_user_password %> is substituted by the Puppet ERB template.
SET SQL_MODE = "NO_AUTO_VALUE_ON_ZERO";
SET time_zone = "+00:00";
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
/*!40101 SET NAMES utf8mb4 */;
CREATE DATABASE IF NOT EXISTS `lookup` DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
USE `lookup`;
CREATE USER 'lookup'@'%' IDENTIFIED BY '<%= @mysql_user_password %>';
-- NOTE(review): "GRANT ... IDENTIFIED BY" was removed in MySQL 8; this
-- syntax assumes MariaDB or MySQL 5.x -- confirm the target server.
GRANT ALL PRIVILEGES ON lookup.* TO 'lookup'@'%' IDENTIFIED BY '<%= @mysql_user_password %>';
-- Pending e-mail verification tokens, referencing a `store` row.
DROP TABLE IF EXISTS `emailValidation`;
CREATE TABLE IF NOT EXISTS `emailValidation` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`storeId` int(11) NOT NULL,
`token` varchar(16) COLLATE utf8mb4_unicode_ci NOT NULL,
PRIMARY KEY (`id`),
KEY `token` (`token`),
KEY `storeId` (`storeId`)
) ENGINE=InnoDB AUTO_INCREMENT=16 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
-- Key/value properties per user (k/v pairs with a validation flag).
DROP TABLE IF EXISTS `store`;
CREATE TABLE IF NOT EXISTS `store` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`userId` int(11) NOT NULL,
`k` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
`v` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
`valid` tinyint(1) NOT NULL DEFAULT '0',
PRIMARY KEY (`id`),
-- 191-char index prefixes keep utf8mb4 keys under the legacy 767-byte limit.
KEY `key` (`k`(191)),
KEY `value` (`v`(191)),
KEY `userId` (`userId`)
) ENGINE=InnoDB AUTO_INCREMENT=51 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
-- Known federated users, identified by their federation (cloud) id.
DROP TABLE IF EXISTS `users`;
CREATE TABLE IF NOT EXISTS `users` (
`id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT,
`federationId` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
`timestamp` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (`id`),
KEY `federationId` (`federationId`(191))
) ENGINE=InnoDB AUTO_INCREMENT=15 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
-- Properties queued for verification, with a retry counter.
DROP TABLE IF EXISTS `toVerify`;
CREATE TABLE IF NOT EXISTS `toVerify` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`userId` int(11) NOT NULL,
`storeId` int(11) NOT NULL,
`property` varchar(512) COLLATE utf8mb4_unicode_ci NOT NULL,
`location` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
`tries` int(11) NOT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=16 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;

View file

@ -0,0 +1,3 @@
-- Bootstrap the database and user for Roundcube webmail.
-- <%= @roundcube_password %> is substituted by the Puppet ERB template.
-- IF NOT EXISTS makes the script safe to re-run.
CREATE SCHEMA IF NOT EXISTS roundcubemail;
CREATE USER IF NOT EXISTS 'roundcube'@'%' IDENTIFIED BY '<%= @roundcube_password %>';
-- Grant without IDENTIFIED BY: that clause was removed in MySQL 8 and is
-- redundant here since CREATE USER above already set the password.
GRANT ALL PRIVILEGES ON roundcubemail.* TO 'roundcube'@'%';

View file

@ -0,0 +1,8 @@
<%# Emit one Naemon hostgroup definition per entry in @extra_host_groups,
    a hash of group name => member host list (Naemon expects the members
    value as a comma-separated string). -%>
<% @extra_host_groups.each do |group, members| -%>
# <%= group %>
define hostgroup {
hostgroup_name <%= group %>
alias <%= group %>
members <%= members %>
}
<% end -%>

View file

@ -131,32 +131,6 @@ define host {
<% end -%>
use monitor-site
}
<% if site.match('lookup') %>
define service {
notes_url https://<%= site %>
action_url /grafana/dashboard/script/histou.js?host=$HOSTNAME$&service=$SERVICEDISPLAYNAME$&theme=light&annotations=true
check_command check_https
check_interval 5
check_period 24x7
<% if @environment == 'prod' %>
contacts slack
<% else -%>
contact_groups naemon-admins
<% end -%>
host_name <%= site %>
max_check_attempts 3
notification_interval 60
notification_period 24x7
retry_interval 1
service_description HTTPS
<% if site.match('test') -%>
servicegroups test-sites
<% else -%>
servicegroups prod-sites
<% end -%>
}
<% end -%>
<% unless site.match('lookup') %>
define service {
notes_url https://<%= site %>/status.php
action_url /grafana/dashboard/script/histou.js?host=$HOSTNAME$&service=$SERVICEDISPLAYNAME$&theme=light&annotations=true
@ -164,7 +138,6 @@ define service {
check_interval 5
check_period 24x7
<% cur_cust = site.gsub(/\.drive.*/,'') %>
<% cur_cust = cur_cust.gsub(/drive.*/,'gss') %>
# 'check_nextcloud' command definition
<% if @environment == 'prod' and not site.match('test') and @fullnodes.include?(cur_cust) %>
contacts slack
@ -209,4 +182,3 @@ define service {
<% end -%>
}
<% end -%>
<% end -%>

View file

@ -20,7 +20,7 @@ for project in $(ls ${data_dir}); do
if [[ "${issixmonths}" == "true" ]]; then
number_of_full_to_keep=6
fi
max_num_inc=$((32 * number_of_full_to_keep))
max_num_inc=$((50 * number_of_full_to_keep))
max_num_full=$((2 * number_of_full_to_keep))
tabular_data=$(cat "${data_dir}/${project}/${bucket}.dat")

View file

@ -5,7 +5,7 @@ shift
include_userbuckets="${1}"
shift
environment="<%= @environment %>"
container="nextcloud-${customer}_app_1"
container="nextcloud-${customer}-app-1"
yq="/usr/local/bin/yq"
if ! [[ -x ${yq} ]]; then

View file

@ -1,10 +1,10 @@
#!/bin/bash
customer="${1}"
multinode="${2}"
customer="${1}"
multinode="${2}"
environment="<%= @environment %>"
location="${customer}-${environment}"
userjson=$(ssh "script@${multinode}" "sudo /home/script/bin/list_users.sh nextcloud-${customer}_app_1")
userjson=$(ssh -o StrictHostKeyChecking=no "script@${multinode}" "sudo /home/script/bin/list_users.sh nextcloud-${customer}-app-1")
project="statistics"
bucket="drive-server-coms"
base_dir="${project}:${bucket}"

View file

@ -17,7 +17,7 @@ if [[ "${ENVIRONMENT}" == "prod" ]]; then
fi
if [[ "${CUSTOMER}" == "common" ]]; then
customer=""
types="multinode gss"
types="multinode"
fi
domain="${customer}drive.${env}sunet.se"

View file

@ -43,9 +43,11 @@ for eppn in $(echo "${users}" | jq -r keys[]); do
username=${eppn%@*}
# Remove underscore from username
user=${username//_/-}
# convert user to lower case for bucket naming rules
user_lower=${user,,}
echo "$(date) - Check bucket status for ${eppn}"
bucketname="${user}-${site_name//./-}"
bucketname="${user_lower}-${site_name//./-}"
if ! echo "${buckets}" | grep "${bucketname}" &> /dev/null; then
echo "$(date) - ${eppn} has no mounts configured, adding bucket and mounts..."
${rclone} mkdir "${rcp}:${bucketname}"

View file

@ -67,12 +67,10 @@ def main() -> int:
reboot_command = ['sudo /usr/local/bin/safer_reboot']
if customers[0] == "common":
customers = ["lookup", "multinode"]
customers = ["multinode"]
for customer in customers:
backup_type = "backup"
if customer == "lookup":
backup_type = "lookupbackup"
elif customer == "multinode":
if customer == "multinode":
backup_command = ['sudo /home/script/bin/backup_multinode_db.sh']
backup_type = "multinode-db"

View file

@ -11,7 +11,7 @@ function usage {
exit 1
}
if ! [[ ${container} == 'nextcloud_app_1' ]] && ! [[ ${container} =~ ^nextcloud-[a-z]*_app_1$ ]]; then
if ! [[ ${container} == 'nextcloud_app_1' ]] && ! [[ ${container} =~ ^nextcloud-[a-z]*-app-1$ ]]; then
usage
fi
if ! [[ ${bucket} =~ ^[a-zA-Z0-9]+ ]]; then
@ -26,19 +26,19 @@ echo "$(date): Start executing create_bucket.sh ${1} ${2} ${3}"
rclone_config="/opt/nextcloud/rclone.conf"
if [[ "${container}" != "nextcloud_app_1" ]]; then
customer=$(echo "${container}" | sed -e 's/^nextcloud-//' -e 's/_app_1$//')
customer=$(echo "${container}" | sed -e 's/^nextcloud-//' -e 's/-app-1$//')
rclone_config="/opt/multinode/${customer}/rclone.conf"
fi
key=$(grep access_key_id "${rclone_config}" | awk '{print $3}')
secret=$(grep secret_access_key "${rclone_config}"| awk '{print $3}')
endpoint=$(grep endpoint "${rclone_config}" | awk '{print $3}')
preexisting="$(docker exec -u www-data -i "${container}" php --define apc.enable_cli=1 /var/www/html/occ files_external:list --output json "${user}" | jq -r '.[] | .configuration.bucket' | grep "${bucket}")"
preexisting="$(docker exec -u www-data -i "${container}" php --define apc.enable_cli=1 /var/www/html/occ files_external:list --output json --show-password "${user}" | jq -r '.[] | .configuration.bucket' | grep "${bucket}")"
if [[ -z ${preexisting} ]]; then
docker exec -u www-data -i "${container}" php --define apc.enable_cli=1 /var/www/html/occ files_external:create "${user_bucket_name}" \
amazons3 -c bucket="${bucket}" -c key="${key}" -c secret="${secret}" -c hostname="${endpoint}" -c use_ssl=true -c use_path_style=true -c region=us-east-1 \
-c useMultipartCopy=false amazons3::accesskey --user ${user}
-c useMultipartCopy=true amazons3::accesskey --user ${user}
for shareid in $(docker exec -u www-data -i ${container} php --define apc.enable_cli=1 /var/www/html/occ files_external:export ${user} | jq -r '.[].mount_id'); do
docker exec -u www-data -i ${container} php --define apc.enable_cli=1 /var/www/html/occ files_external:option ${shareid} enable_sharing true
done

View file

@ -7,7 +7,7 @@ bucket=${4}
user=${5}
/usr/local/bin/occ files_external:create "${bucket}" \
amazons3 -c bucket="${bucket}" -c key="${key}" -c secret="${secret}" -c hostname="${endpoint}" -c use_ssl=true -c use_path_style=true -c region=us-east-1 \
-c useMultipartCopy=false amazons3::accesskey --user "${user}"
for shareid in $(/usr/local/bin/occ files_external:export "${user}" | jq -r '.[].mount_id'); do
/usr/local/bin/occ files_external:option "${shareid}" enable_sharing true
-c useMultipartCopy=true amazons3::accesskey --user "${user}"
for shareid in $(/usr/local/bin/nocc files_external:export "${user}" | jq -r '.[].mount_id'); do
/usr/local/bin/nocc files_external:option "${shareid}" enable_sharing true
done

View file

@ -9,7 +9,7 @@ function usage {
exit 1
}
if ! [[ ${container} == 'nextcloud_app_1' ]] && ! [[ ${container} =~ ^nextcloud-[a-z]*_app_1$ ]]; then
if ! [[ ${container} == 'nextcloud_app_1' ]] && ! [[ ${container} =~ ^nextcloud-[a-z]*-app-1$ ]]; then
usage
fi

View file

@ -2,7 +2,7 @@
container=${1}
if ! [[ ${container} == 'nextcloud_app_1' ]] && ! [[ ${container} =~ ^nextcloud-[a-z]*_app_1$ ]]; then
if ! [[ ${container} == 'nextcloud_app_1' ]] && ! [[ ${container} =~ ^nextcloud-[a-z]*-app-1$ ]]; then
echo "Usage: ${0} <nextcloud container name>"
echo "Example : ${0} nextcloud_app_1"
exit 1