Compare commits

...

1005 commits

Author SHA1 Message Date
1a41a66355
Allow calendar servers to talk to db 2025-04-04 12:31:19 +02:00
da7be10d58
Fix typos 2025-03-26 14:09:24 +01:00
44e398a418
Remove fault file 2025-03-26 12:58:12 +01:00
d903f4f90d
Merge branch 'stable' 2025-03-26 12:57:46 +01:00
ad13e65250
Remove fault file 2025-03-26 12:56:27 +01:00
b63229092c
Add sql for roundcube 2025-03-26 12:56:23 +01:00
4b3c189481
Add sql for roundcube
Signed-off-by: Micke Nordin <kano@sunet.se>
2025-03-26 12:56:19 +01:00
8d57b773a0
Add sql for roundcube 2025-03-26 12:53:28 +01:00
3c815b9a38
Add sql for roundcube
Signed-off-by: Micke Nordin <kano@sunet.se>
2025-03-26 12:52:14 +01:00
55cc71871f
Create dirs 2025-03-25 19:08:44 +01:00
46f1330b5c
Disable bruteforce 2025-03-25 19:07:38 +01:00
d774349950
Disable bruteforce 2025-03-25 19:07:34 +01:00
249fe90c54
Create dirs 2025-03-25 19:01:44 +01:00
87b6ea8ddb
Disable bruteforce 2025-03-24 15:06:39 +01:00
dc39cb5d85
Disable bruteforce 2025-03-24 15:00:35 +01:00
beff4242ac
Merge branch 'main' into stable 2025-03-14 14:37:25 +01:00
cc31a2f038
Remove sysctl file that did not help 2025-03-13 16:16:32 +01:00
6a63031f84
use (new) node3_hosts group for check that only runs on node3 2025-03-13 13:01:45 +01:00
1c3eecdd85
final version of the newline problem 2025-03-13 12:45:45 +01:00
16c4f3650c
and yet another newline 2025-03-13 12:25:21 +01:00
57b90cced4
Merge branch 'main' into testing 2025-03-13 12:21:38 +01:00
4e67d6f1b8
needs newlines 2025-03-13 12:21:18 +01:00
9bb2afa68b
Merge branch 'main' into testing 2025-03-13 12:16:08 +01:00
26ea8e8e8d
first attempt at creating extra_host_groups 2025-03-13 12:15:34 +01:00
5c42f9d7d2
Merge branch 'main' into testing 2025-03-12 11:56:34 +01:00
f87fac3c3b
Remove quotes 2025-03-12 11:56:22 +01:00
582d88d2f1
Merge branch 'main' into testing 2025-03-12 11:53:31 +01:00
4f84f71070
Typo 2025-03-12 11:53:20 +01:00
4caed83e80
Merge branch 'main' into testing 2025-03-12 11:49:47 +01:00
38174166a0
Typo 2025-03-12 11:49:00 +01:00
3b42817c57
Merge branch 'main' into testing 2025-03-12 11:44:53 +01:00
5950751c73
Merge branch 'main' of ssh://platform.sunet.se:22022/Drive/sunetdrive 2025-03-12 11:44:20 +01:00
d010d27f20
Let's gooooo 2025-03-12 11:44:08 +01:00
d5c3486425
don't print empty line 2025-03-11 08:13:35 +01:00
1aea2f5674
apparently "You do not need to declare variables before using them" in python - cleanup 2025-03-11 08:13:33 +01:00
0b3a2d43cb
buckets needs to be reset each loop 2025-03-11 08:13:30 +01:00
4b952bac40
trying to improve output of check_nextcloud_mounts script 2025-03-11 08:13:28 +01:00
e150fa3625
separating multinode from app_type 2025-03-11 08:13:26 +01:00
3579682de8
testing if $is_multinode is the problem - correcter regex 2025-03-11 08:13:23 +01:00
8f090bfcac
testing if $is_multinode is the problem 2025-03-11 08:13:20 +01:00
0e189ad0ef
change check_nextcloud_mounts.py output 2025-03-11 08:13:15 +01:00
e27cf34987
changed wording 2025-03-11 08:13:10 +01:00
17d8d8b2de
changed wording 2025-03-11 08:13:06 +01:00
3fbfe26c66
adding check for nextcloud mounts 2025-03-11 08:13:00 +01:00
4a6d35b984
Merge branch 'main' into stable 2025-03-10 14:50:17 +01:00
ef1eb5b4ad
Merge branch 'testing' of ssh://platform.sunet.se:22022/Drive/sunetdrive into testing 2025-03-10 14:49:51 +01:00
dc5b61c9e6
Merge branch 'main' into testing 2025-03-10 14:49:44 +01:00
33389e842a
Set useMultiPartCopy=true everywhere and switch to nocc 2025-03-10 14:49:29 +01:00
bb7ab09db8
don't print empty line 2025-03-07 08:35:33 +01:00
bc481d44d5
apparently "You do not need to declare variables before using them" in python - cleanup 2025-03-06 16:26:05 +01:00
5619cb8270
buckets needs to be reset each loop 2025-03-06 15:42:32 +01:00
fbe409efff
trying to improve output of check_nextcloud_mounts script 2025-03-06 15:05:00 +01:00
9096bf7c33
separating multinode from app_type 2025-03-05 16:23:42 +01:00
1c38cfbf3c
Merge branch 'main' into testing 2025-03-05 15:39:03 +01:00
d332a810b9
Add webmail 2025-03-05 15:38:44 +01:00
74da008b5e
testing if $is_multinode is the problem - correcter regex 2025-03-05 15:00:14 +01:00
a6eaddbd8f
testing if $is_multinode is the problem 2025-03-05 14:58:01 +01:00
0da62f097d
change check_nextcloud_mounts.py output 2025-03-05 14:15:45 +01:00
ed3d4b1b9c
changed wording 2025-03-05 13:28:33 +01:00
bb44e6da8f
changed wording 2025-03-05 13:21:56 +01:00
de3d18b0a1
adding check for nextcloud mounts 2025-03-05 12:49:32 +01:00
aa6be0ce34
StrictHostKeyChecking=no to avoid problems whith re-install 2025-03-03 15:39:57 +01:00
83fcc67c91
- bucketnames have to be lower case
- output from occ changed so --show-password is needed now
2025-03-03 13:22:40 +01:00
ec4e2bdb38
Merge branch 'main' into stable 2025-02-28 12:50:14 +01:00
a68fea86b8
Don' allocate tty 2025-02-28 12:48:42 +01:00
a1b7a03908
Merge branch 'main' into stable 2025-02-28 12:18:49 +01:00
7363284353
Missed one path 2025-02-28 12:17:03 +01:00
9f0eccdb3d
Merge branch 'main' into stable 2025-02-28 12:07:30 +01:00
be9f1a7152
Use full path 2025-02-28 12:06:24 +01:00
f6cd170e6f
Merge branch 'main' into stable 2025-02-27 12:53:52 +01:00
dcb0ca8e48
Remove scriptherder from count 2025-02-27 12:53:06 +01:00
e0f427fcc8
Merge branch 'main' into stable 2025-02-27 11:20:37 +01:00
7090f7ca5e
Run in prod 2025-02-27 11:15:12 +01:00
705479f29c
Merge branch 'main' into stable 2025-02-27 11:12:20 +01:00
3007fc6de4
shfmt 2025-02-27 11:09:29 +01:00
4a270b9f65
Fix check 2025-02-27 11:08:13 +01:00
b2942339d1
Fix output 2025-02-27 11:03:37 +01:00
e0e1d5148f
Merge branch 'main' into stable 2025-02-26 12:37:44 +01:00
919ff971b4
Also report container 2025-02-26 12:36:58 +01:00
8b216f6730
Merge branch 'main' into stable 2025-02-26 12:33:24 +01:00
434cd6d1e4
Remove gss related manifests and config 2025-02-26 12:32:50 +01:00
b3c42225e1
Remove lookup related manifests and config 2025-02-26 12:32:39 +01:00
06d7fabf4b
Run cronjob in scriptherder and report errors found 2025-02-26 12:30:30 +01:00
800b2c4afc
Lets only sleep if we are not allready running 2025-02-26 12:09:59 +01:00
1654186ddc
Add a deterministic sleep to spread out the load and lets run it at night in test 2025-02-26 12:08:03 +01:00
2c974bd178
Change guard condition 2025-02-26 11:50:34 +01:00
41e9045833
This is the way 2025-02-26 11:13:22 +01:00
7e53313d94
Can I now have it outside? 2025-02-26 11:09:17 +01:00
ebe6f9640d
I am blind 2025-02-26 11:05:31 +01:00
74f96705f1
negation 2025-02-26 11:03:28 +01:00
c7e337f3c4
multinodes are confused 2025-02-26 11:01:46 +01:00
2944823676
Only run on node3 and multinodes 2025-02-26 10:34:44 +01:00
fc0a790bd1
Add scan cronjob to test 2025-02-26 10:32:07 +01:00
a4a9e492f5
Move get_containers to all nodes 2025-02-26 10:05:50 +01:00
cbb291be47
Make sure we know where our backups are located 2025-02-25 20:21:40 +01:00
d760626e0b
Make sure we know where our backups are located 2025-02-25 20:18:52 +01:00
ec6cda87fb Remove gss related manifests and config 2025-02-25 11:55:43 +01:00
00c494193a Remove lookup related manifests and config 2025-02-25 11:49:09 +01:00
76e110ab90
Merge branch 'main' into stable 2025-02-20 17:00:35 +01:00
3315e17346
Add nocc command 2025-02-13 15:33:27 +01:00
f1c816cba1
Remove gss and lookup 2025-02-13 14:12:52 +01:00
331db3e24c
Merge branch 'testing' 2025-02-13 14:10:01 +01:00
a7d737c8c4 Merge pull request 'Enable pretty URLs (https://docs.nextcloud.com/server/latest/admin_manual/installation/source_installation.html\#pretty-urls)' (#3) from pretty_urls into testing
Reviewed-on: #3
2025-02-13 12:53:39 +00:00
2a273c649d
Enable pretty URLs (https://docs.nextcloud.com/server/latest/admin_manual/installation/source_installation.html\#pretty-urls) 2025-02-13 13:50:42 +01:00
96632b111b
Containers changed names 2025-02-10 10:26:31 +01:00
f2774b9c38
Containers changed names 2025-02-10 10:25:59 +01:00
0ded3c46aa
Containers changed names 2025-02-10 10:25:36 +01:00
74343d15ef
Allow long running check for scriptherder 2025-02-03 15:11:45 +01:00
602fb12a7f
Allow long running check for scriptherder 2025-02-03 15:10:57 +01:00
fd0ed43035
Allow long running check for scriptherder 2025-02-03 15:10:28 +01:00
0046349756
Merge branch 'main' into stable 2025-02-03 15:06:16 +01:00
1f30a23a5a
Merge branch 'main' into testing 2025-02-03 15:00:52 +01:00
def0a0a77b
Allow more inc backups 2025-02-03 15:00:28 +01:00
Lars Delhage
98158e1be5
Remove recursive for mysql db dir 2025-01-20 12:29:01 +01:00
78bc2b3f7e
Multinode: Double book keeping
Unfortunatly we must do the same calcultions for multinode
2025-01-20 12:28:59 +01:00
26ee034887
Set expiration for multinode 2025-01-20 12:28:56 +01:00
d891874202
Add full_backup_retention 2025-01-20 12:28:53 +01:00
c11e95f5fe
Only run this on monitor hosts, as it takes a long time 2025-01-20 12:28:49 +01:00
9596f9d210
Multinode: Double book keeping
Unfortunatly we must do the same calcultions for multinode
2025-01-20 12:28:47 +01:00
e57349f011
Set expiration for multinode 2025-01-20 12:28:44 +01:00
02578640a9
Add full_backup_retention 2025-01-20 12:28:42 +01:00
c9bdb766eb
Run as root user 2025-01-20 12:28:39 +01:00
9425d56716
Multinode: Double book keeping
Unfortunatly we must do the same calcultions for multinode
2025-01-20 12:28:37 +01:00
b2d3b80f37
Set expiration for multinode 2025-01-20 12:28:35 +01:00
d67cc24318
Add full_backup_retention 2025-01-20 12:28:32 +01:00
efd05971b1
SATOSA: Always have alwayshttps 2025-01-20 12:28:30 +01:00
8dd5534b0f
Multinode: Double book keeping
Unfortunatly we must do the same calcultions for multinode
2025-01-20 12:28:27 +01:00
d24fa23262
Set expiration for multinode 2025-01-20 12:28:24 +01:00
84fc07cffe
Add full_backup_retention 2025-01-20 12:28:21 +01:00
5b68f26771
Multinode: Double book keeping
Unfortunatly we must do the same calcultions for multinode
2025-01-20 12:28:19 +01:00
dc59842fe8
Set expiration for multinode 2025-01-20 12:28:16 +01:00
a91bb85313
Add full_backup_retention 2025-01-20 12:28:12 +01:00
5f0c24865f
added ability to set trashbin_retention_obligation and versions_retention_obligation in config.php 2025-01-20 12:28:09 +01:00
8ee75b0601
changed nft rules for proxysql - ipv6 2025-01-20 12:28:06 +01:00
62670483c1
changed nft rules for proxysql 2025-01-20 12:28:04 +01:00
ace56103fc
duplicate 2025-01-20 12:28:01 +01:00
4afec74a34
one more 2025-01-20 12:27:58 +01:00
d4d998cf81
Fix notify 2025-01-20 12:27:55 +01:00
9794b03d44
Can not redeclare class 2025-01-20 12:27:52 +01:00
45a44789a4
add back image 2025-01-20 12:27:50 +01:00
4201f18ce6
Fix path 2025-01-20 12:27:47 +01:00
8057a2fa22
convert config to use newer docker_compose class 2025-01-20 12:27:44 +01:00
Lars Delhage
09deca0172
Remove recursive for mysql db dir 2025-01-20 11:57:20 +01:00
0e154b0e53
Multinode: Double book keeping
Unfortunatly we must do the same calcultions for multinode
2025-01-20 11:57:17 +01:00
9ee8f6ea5d
Set expiration for multinode 2025-01-20 11:57:13 +01:00
7a977b79a2
Add full_backup_retention 2025-01-20 11:57:08 +01:00
Lars Delhage
2625269aba
Remove recursive for mysql db dir 2025-01-20 11:53:25 +01:00
19015d9061
Only run this on monitor hosts, as it takes a long time 2025-01-20 09:01:00 +01:00
9ac6531700
Multinode: Double book keeping
Unfortunatly we must do the same calcultions for multinode
2025-01-20 09:00:59 +01:00
28a94fca51
Set expiration for multinode 2025-01-20 09:00:58 +01:00
20709f5dba
Add full_backup_retention 2025-01-20 09:00:56 +01:00
38cd097f71
Only run this on monitor hosts, as it takes a long time 2025-01-20 09:00:14 +01:00
8027abaa82
Run as root user 2025-01-20 08:50:47 +01:00
1868c2a0b7
Multinode: Double book keeping
Unfortunatly we must do the same calcultions for multinode
2025-01-20 08:50:46 +01:00
0a415b5129
Set expiration for multinode 2025-01-20 08:50:46 +01:00
250396dbb4
Add full_backup_retention 2025-01-20 08:50:43 +01:00
ab2d70303f
Run as root user 2025-01-20 08:45:55 +01:00
2ba22cd30b
SATOSA: Always have alwayshttps 2025-01-16 15:17:56 +01:00
ec95b0754f
Multinode: Double book keeping
Unfortunatly we must do the same calcultions for multinode
2025-01-16 15:17:55 +01:00
61daa44afa
Set expiration for multinode 2025-01-16 15:17:54 +01:00
9c1e10ff4c
Add full_backup_retention 2025-01-16 15:17:51 +01:00
0154a533ce
SATOSA: Always have alwayshttps 2025-01-16 15:16:33 +01:00
ca8f0d5b89
Multinode: Double book keeping
Unfortunatly we must do the same calcultions for multinode
2025-01-16 13:17:18 +01:00
63b780028f
Multinode: Double book keeping
Unfortunatly we must do the same calcultions for multinode
2025-01-16 13:14:37 +01:00
7e0f744e5d
Set expiration for multinode 2025-01-16 12:48:05 +01:00
1d5d61b4a7
Add full_backup_retention 2025-01-16 12:48:04 +01:00
31f4c1eb9b
Set expiration for multinode 2025-01-16 12:47:16 +01:00
b6c0bb2f46
Merge branch 'testing' 2025-01-16 12:41:18 +01:00
9e6e33e137
Add full_backup_retention 2025-01-16 12:41:12 +01:00
977bd84f50
duplicate 2025-01-16 10:23:18 +01:00
c7de56f73f
one more 2025-01-16 10:23:17 +01:00
2abf9ecf27 Merge pull request 'added ability to set trashbin_retention_obligation and versions_retention_obligation in config.php' (#2) from richir-nextcloud-retention into testing
Reviewed-on: #2
2025-01-16 09:22:30 +00:00
501ca8edfb
added ability to set trashbin_retention_obligation and versions_retention_obligation in config.php 2025-01-16 10:13:59 +01:00
fff058613a
Merge branch 'main' into testing 2025-01-14 16:30:21 +01:00
369492be90
changed nft rules for proxysql - ipv6 2025-01-14 16:30:00 +01:00
856bc3b2c5
Merge branch 'main' into testing 2025-01-14 16:10:39 +01:00
ebdf6f3b46
changed nft rules for proxysql 2025-01-14 16:07:18 +01:00
53e9a65b9f
duplicate 2025-01-14 13:00:59 +01:00
0400e89f36
one more 2025-01-14 12:58:16 +01:00
571535f569
Fix notify 2025-01-14 12:53:43 +01:00
09ee93515c
Fix notify 2025-01-14 12:53:25 +01:00
9f721c38e8
Merge branch 'testing' 2025-01-14 12:49:44 +01:00
d4fae26777
Can not redeclare class 2025-01-14 12:49:18 +01:00
c4f95af173
add back image 2025-01-14 12:43:52 +01:00
e0c4ddcd6e
Fix path 2025-01-14 12:36:12 +01:00
24a5abbefe Merge pull request 'convert config to use newer docker_compose class' (#1) from richir-move-satosa-to-docker-compose into main
Reviewed-on: #1
2025-01-14 11:29:04 +00:00
65a236dcaa
convert config to use newer docker_compose class 2025-01-14 12:26:50 +01:00
6cfa2b3da7 Merge branch 'main' into stable 2024-12-24 16:10:24 +01:00
29aecdfe2a Fix redis name 2024-12-24 16:04:42 +01:00
ee6887bfeb Fix redis name 2024-12-24 16:00:34 +01:00
5a04dcdd0f Add back proxysql networ 2024-12-24 16:00:20 +01:00
8927f194d9 Proxysql no longer has external network 2024-12-24 15:59:03 +01:00
04e4592f4c Revert "Use new uptime check"
This reverts commit 60fc3ef307.
2024-12-24 15:59:03 +01:00
45989b8d3d Use new uptime check 2024-12-24 15:59:03 +01:00
2c58c02864 Merge branch 'stable' 2024-12-24 15:58:31 +01:00
c0c964282b Fix redis name 2024-12-24 15:56:32 +01:00
8f7cd413ff Add netamsk 2024-12-24 11:10:38 +01:00
536a4a11d3 Use other format 2024-12-24 11:05:42 +01:00
2883693967 Add back proxysql networ 2024-12-24 11:05:42 +01:00
86a5d1d307 Proxysql no longer has external network 2024-12-24 11:04:48 +01:00
60c776088e Revert "Use new uptime check"
This reverts commit 60fc3ef307.
2024-12-24 11:04:48 +01:00
af323bdfd9 Use new uptime check 2024-12-24 11:04:48 +01:00
c2db1c8671 Use other format 2024-12-24 11:03:46 +01:00
d9b4ff9f34 Add back proxysql networ 2024-12-24 10:41:08 +01:00
a942487f30 Add new option 2024-12-24 10:41:08 +01:00
67b72f387a Revert "Use new uptime check"
This reverts commit 60fc3ef307.
2024-12-24 10:40:25 +01:00
7c1456702f Use new uptime check 2024-12-24 10:40:25 +01:00
d62724d63d Add back proxysql networ 2024-12-24 10:39:15 +01:00
5c89469f3a Add new option 2024-12-24 10:36:42 +01:00
9ef9bab2cd Allow on all interfaces and on ipv6 2024-12-24 10:12:24 +01:00
32339a3fbf Revert "Use new uptime check"
This reverts commit 60fc3ef307.
2024-12-24 10:12:24 +01:00
601ff83b9f Use new uptime check 2024-12-24 10:12:24 +01:00
67b46d3f75 Allow on all interfaces and on ipv6 2024-12-24 10:11:12 +01:00
11ab90e981 Open ports 2024-12-24 09:53:15 +01:00
0f746f67ce Revert "Use new uptime check"
This reverts commit 60fc3ef307.
2024-12-24 09:53:15 +01:00
b09c79cc3d Use new uptime check 2024-12-24 09:53:15 +01:00
4bc3a128ac Open ports 2024-12-24 09:51:57 +01:00
e31f4c8c61 Proxysql no longer has external network 2024-12-24 09:43:04 +01:00
19473d6d82 Revert "Use new uptime check"
This reverts commit 60fc3ef307.
2024-12-24 09:43:04 +01:00
4561e417c6 Use new uptime check 2024-12-24 09:43:04 +01:00
64d8ad253d Proxysql no longer has external network 2024-12-24 09:42:19 +01:00
b100e18eea Proxysql no longer has external network 2024-12-24 09:37:46 +01:00
80e1034695 Revert "Use new uptime check"
This reverts commit 60fc3ef307.
2024-12-24 09:37:46 +01:00
ab039d217b Use new uptime check 2024-12-24 09:37:46 +01:00
75cbaed902 Proxysql no longer has external network 2024-12-24 09:35:53 +01:00
39285f075f
Remove gss 2024-12-12 14:59:16 +01:00
ba792bd851
Revert "Use new uptime check"
This reverts commit 60fc3ef307.
2024-12-12 14:59:15 +01:00
1c43f32440
Use new uptime check 2024-12-12 14:59:14 +01:00
7f39e96bf9
Remove gss 2024-12-12 14:58:23 +01:00
3e5a76a37d
Revert "Use new uptime check"
This reverts commit 60fc3ef307.
2024-12-12 10:45:23 +01:00
a4b4f4c013
Use new uptime check 2024-12-12 10:45:22 +01:00
c3ce7089ad
Remove ref to gss 2024-12-12 10:35:17 +01:00
6cff186121
Revert "Use new uptime check"
This reverts commit 60fc3ef307.
2024-12-12 10:35:16 +01:00
8e2ba68b80
Use new uptime check 2024-12-12 10:35:15 +01:00
d648c987c7
Remove ref to gss 2024-12-12 10:32:01 +01:00
49c3a69282
Allow backups from root 2024-12-11 16:24:17 +01:00
9a58486d11
Set mariadb version 2024-12-11 16:24:16 +01:00
596f05f099
Run same version as db cluster 2024-12-11 16:24:15 +01:00
849bd7f846
Revert "Use new uptime check"
This reverts commit 60fc3ef307.
2024-12-11 16:24:14 +01:00
c3fbf4c1ce
Use new uptime check 2024-12-11 16:24:13 +01:00
62d172b2f5
Allow backups from root 2024-12-11 16:22:09 +01:00
2c61a00ebd
Set mariadb version 2024-12-11 16:11:12 +01:00
a98fe62a43
Run same version as db cluster 2024-12-11 16:08:04 +01:00
8e5e7fe34f
Revert "Use new uptime check"
This reverts commit 60fc3ef307.
2024-12-11 09:38:52 +01:00
60fc3ef307
Use new uptime check 2024-12-11 09:28:08 +01:00
43081e0955
Merge branch 'main' into stable 2024-11-17 14:35:41 +01:00
99f768708a
Install kopia 2024-11-17 13:57:57 +01:00
30952f7448
Newline 2024-11-13 09:34:57 +01:00
0e83c47dd1
Add container for multinode purposes 2024-11-12 16:45:24 +01:00
b769f1637a
Add container for multinode purposes 2024-11-12 16:44:22 +01:00
25d97470da
Merge branch 'main' into stable 2024-11-12 16:05:20 +01:00
ce745ff6b0
Fix port and add more output 2024-11-12 16:01:43 +01:00
1aae63ba07
Clean up 2024-11-12 15:51:54 +01:00
5913537c34
Remove newlines and such 2024-11-12 15:44:15 +01:00
932d91364e
Script to remount userbuckets as project buckets 2024-11-12 15:33:35 +01:00
812a56cf99
check_replication: Add user to docker exec to be able to check rootless builds. 2024-11-04 10:58:52 +01:00
d5e88dd431
Dummy edit 2024-10-24 12:29:31 +02:00
f27a9c353c
Merge branch 'testing' 2024-10-24 12:19:26 +02:00
bff0aff823
remove admin pass from env 2024-10-24 12:19:05 +02:00
a8060b7774
Decoupling the admin_password from proxysql 2024-10-24 12:18:05 +02:00
dac082f4f5
Fix ntp 2024-10-24 12:18:04 +02:00
48f8f6d690
enable sysstat timer on debian 2024-10-24 12:18:03 +02:00
deabba2e22
Oops 2024-10-24 12:18:03 +02:00
22bfeaf244
Set permissions on redis 2024-10-24 12:18:02 +02:00
d6b4098932
Fix mode on log files 2024-10-24 12:18:01 +02:00
42363a9aaf
Rollback fix 2024-10-24 12:18:00 +02:00
4dd95bdb39
We want full compat 2024-10-24 12:17:59 +02:00
714e2d98f6
Add mysql command to host 2024-10-24 12:17:58 +02:00
64e3746628
Add fix to init script from mandersson 2024-10-24 12:17:57 +02:00
a563e16309
Move nrpe class from opsrepo 2024-10-24 12:17:56 +02:00
124203cd92
Add audit log to other logs 2024-10-24 12:17:55 +02:00
6365549250
Move hee from cosmos-sites.pp 2024-10-24 12:17:54 +02:00
9a367b0955
Decoupling the admin_password from proxysql 2024-10-24 12:15:08 +02:00
a5580567af
Fix ntp 2024-10-22 10:57:31 +02:00
a69ae092d1
enable sysstat timer on debian 2024-10-22 10:57:30 +02:00
df6557a91b
Oops 2024-10-22 10:57:30 +02:00
4dcdf87f32
Set permissions on redis 2024-10-22 10:57:29 +02:00
2258108232
Fix mode on log files 2024-10-22 10:57:28 +02:00
38825adce8
Rollback fix 2024-10-22 10:57:27 +02:00
3baf31c7ed
We want full compat 2024-10-22 10:57:26 +02:00
221c89564a
Add mysql command to host 2024-10-22 10:57:25 +02:00
6d3bca981d
Add fix to init script from mandersson 2024-10-22 10:57:24 +02:00
2e171b1d04
Add audit log to other logs 2024-10-22 10:57:23 +02:00
22785da3e0
Fix mode on log files 2024-10-22 10:57:22 +02:00
67d9e825a5
Rollback fix 2024-10-22 10:57:22 +02:00
b6bd77a52b
We want full compat 2024-10-22 10:57:21 +02:00
38c56a07cd
Add mysql command to host 2024-10-22 10:57:20 +02:00
fa5a34aeba
Add fix to init script from mandersson 2024-10-22 10:57:19 +02:00
76700f595d
Rollback fix 2024-10-22 10:57:18 +02:00
bb8f2d8b67
We want full compat 2024-10-22 10:57:17 +02:00
2f47e6e109
Add mysql command to host 2024-10-22 10:57:16 +02:00
a2ee732d5d
We want full compat 2024-10-22 10:57:15 +02:00
453e72a79d
Add mysql command to host 2024-10-22 10:57:15 +02:00
662d9592fa
format 2024-10-22 10:57:14 +02:00
1a223f2846
Add fix to init script from mandersson 2024-10-22 10:57:13 +02:00
25309ea51f
Add audit log to other logs 2024-10-22 10:57:12 +02:00
72d71a69c7
Create parent dir 2024-10-22 10:57:11 +02:00
28b6584a6c
Dump customer datastructure for statistics logic to be transferred to script1. 2024-10-22 10:57:10 +02:00
d597ec106a
Gen users on department lists by a cronjob. 2024-10-22 10:57:09 +02:00
d4eb4af079
genusersondepartmentlists: Make status count 2024-10-22 10:57:08 +02:00
4e45dd1733
genusersondepartmentlists: Add som logic for error handling 2024-10-22 10:57:07 +02:00
2c7acd1ec6
Lookup statistics secret on multinode-db1 in env test. 2024-10-22 10:57:07 +02:00
992f74d3a1
multinode-db1: Add initial upload logic of deprtment filtered user lists. 2024-10-22 10:57:06 +02:00
2f682f8bd8
multinode-db1: Add logic and template to create userlists by department. 2024-10-22 10:57:05 +02:00
9ee7f6f7b8
puppet-lint --fix 2024-10-22 10:57:04 +02:00
bf3ed2e92f
Fix mode on log files 2024-10-22 10:57:03 +02:00
6d2d863076
Rollback fix 2024-10-22 10:57:02 +02:00
8391219068
We want full compat 2024-10-22 10:57:01 +02:00
f36659353d
Add mysql command to host 2024-10-22 10:57:00 +02:00
2b9e199899
format 2024-10-22 10:56:59 +02:00
71bca06a08
Add fix to init script from mandersson 2024-10-22 10:56:59 +02:00
fb58999b22
Add audit log to other logs 2024-10-22 10:56:58 +02:00
91b9955db2
Move hee from cosmos-sites.pp 2024-10-22 10:56:57 +02:00
1d3970c134
Create parent dir 2024-10-22 10:56:56 +02:00
1086053d50
multinode-db1: Add script to list users by department domain in email. 2024-10-22 10:56:55 +02:00
57b2ca6316
multinode-db1 test: Add rclone and statistics remote 2024-10-22 10:56:54 +02:00
d3d0807d43
Fix ntp 2024-10-22 10:55:45 +02:00
cb13190197
enable sysstat timer on debian 2024-10-22 10:55:44 +02:00
671b5f95df
Oops 2024-10-22 10:55:43 +02:00
dc8ff6a712
Set permissions on redis 2024-10-22 10:55:43 +02:00
124c2f6c5d
Fix mode on log files 2024-10-22 10:55:42 +02:00
142fc78f6b
Rollback fix 2024-10-22 10:55:41 +02:00
4b4d0626cf
We want full compat 2024-10-22 10:55:40 +02:00
e3a729e1a8
Add mysql command to host 2024-10-22 10:55:39 +02:00
8cae1a668a
Add fix to init script from mandersson 2024-10-22 10:55:38 +02:00
7177dfa12e
Add audit log to other logs 2024-10-22 10:55:37 +02:00
76640002c0
Move hee from cosmos-sites.pp 2024-10-22 10:55:36 +02:00
b8a16bd700
Fix ntp 2024-10-22 10:54:57 +02:00
90372ddd01
enable sysstat timer on debian 2024-10-22 10:46:45 +02:00
5487bb9d9c
Oops 2024-10-22 10:46:44 +02:00
d27990988a
Set permissions on redis 2024-10-22 10:46:44 +02:00
12d07a5ab0
Fix mode on log files 2024-10-22 10:46:43 +02:00
3eb87725af
Rollback fix 2024-10-22 10:46:42 +02:00
5c228af37a
We want full compat 2024-10-22 10:46:41 +02:00
a507c0f43f
Add mysql command to host 2024-10-22 10:46:40 +02:00
e8e8a9ef7d
Add fix to init script from mandersson 2024-10-22 10:46:39 +02:00
6a5e2aed62
Add audit log to other logs 2024-10-22 10:46:38 +02:00
374afd5e5a
Fix mode on log files 2024-10-22 10:46:37 +02:00
eecb8a9e34
Rollback fix 2024-10-22 10:46:36 +02:00
1e3d79542c
We want full compat 2024-10-22 10:46:36 +02:00
37d8165cc5
Add mysql command to host 2024-10-22 10:46:35 +02:00
73d96f7f6d
Add fix to init script from mandersson 2024-10-22 10:46:34 +02:00
36131d0b21
Rollback fix 2024-10-22 10:46:33 +02:00
9a419cd214
We want full compat 2024-10-22 10:46:32 +02:00
86fea06b02
Add mysql command to host 2024-10-22 10:46:31 +02:00
b6bfa49bd7
We want full compat 2024-10-22 10:46:30 +02:00
16b645b845
Add mysql command to host 2024-10-22 10:46:29 +02:00
92cd7dc19f
format 2024-10-22 10:46:28 +02:00
5dc6a9e574
Add fix to init script from mandersson 2024-10-22 10:46:28 +02:00
959e0e6228
Add audit log to other logs 2024-10-22 10:46:27 +02:00
e3a511521b
Create parent dir 2024-10-22 10:46:26 +02:00
c28c6ac91b
Dump customer datastructure for statistics logic to be transferred to script1. 2024-10-22 10:46:25 +02:00
4fee7b1042
Gen users on department lists by a cronjob. 2024-10-22 10:46:24 +02:00
cb2a62349d
genusersondepartmentlists: Make status count 2024-10-22 10:46:23 +02:00
06c4f4ec68
genusersondepartmentlists: Add som logic for error handling 2024-10-22 10:46:22 +02:00
da0e74313d
Lookup statistics secret on multinode-db1 in env test. 2024-10-22 10:46:21 +02:00
aea23bbf86
multinode-db1: Add initial upload logic of deprtment filtered user lists. 2024-10-22 10:46:21 +02:00
3abec90157
multinode-db1: Add logic and template to create userlists by department. 2024-10-22 10:46:20 +02:00
57958e6475
puppet-lint --fix 2024-10-22 10:46:19 +02:00
4504de3160
Fix mode on log files 2024-10-22 10:46:18 +02:00
9de3ab2c60
Rollback fix 2024-10-22 10:46:17 +02:00
e8c5a9e8ac
We want full compat 2024-10-22 10:46:16 +02:00
4ec58c3b61
Add mysql command to host 2024-10-22 10:46:15 +02:00
fdb57e1b99
format 2024-10-22 10:46:14 +02:00
9fb353de79
Add fix to init script from mandersson 2024-10-22 10:46:13 +02:00
1cc0e86664
Add audit log to other logs 2024-10-22 10:46:13 +02:00
ce6774487a
Create parent dir 2024-10-22 10:46:12 +02:00
2bed7d8914
multinode-db1: Add script to list users by department domain in email. 2024-10-22 10:46:11 +02:00
7222cf5703
multinode-db1 test: Add rclone and statistics remote 2024-10-22 10:46:10 +02:00
a64b2533bb
enable sysstat timer on debian 2024-10-22 10:32:04 +02:00
6865d6833f
Oops 2024-10-22 10:32:03 +02:00
958ff7daa9
Set permissions on redis 2024-10-22 10:32:03 +02:00
c649a7a6b4
Fix mode on log files 2024-10-22 10:32:02 +02:00
ac9e29d39e
Rollback fix 2024-10-22 10:32:01 +02:00
56f8a5d149
We want full compat 2024-10-22 10:32:00 +02:00
cb35c3bbf7
Add mysql command to host 2024-10-22 10:31:59 +02:00
b280257882
Add fix to init script from mandersson 2024-10-22 10:31:58 +02:00
d3cb9b9e14
Add audit log to other logs 2024-10-22 10:31:57 +02:00
39384f7e40
enable sysstat timer on debian 2024-10-22 10:30:59 +02:00
6969510b5d
Oops 2024-10-21 16:29:12 +02:00
170d1cfc77
Set permissions on redis 2024-10-21 16:29:11 +02:00
7be06a1215
Fix mode on log files 2024-10-21 16:29:10 +02:00
bff3c3620b
Rollback fix 2024-10-21 16:29:09 +02:00
880e0401cc
We want full compat 2024-10-21 16:29:08 +02:00
04061b41c1
Add mysql command to host 2024-10-21 16:29:07 +02:00
5f425cf2bf
Add fix to init script from mandersson 2024-10-21 16:29:06 +02:00
7f7743b538
Add audit log to other logs 2024-10-21 16:29:05 +02:00
8510d4e958
Fix mode on log files 2024-10-21 16:29:05 +02:00
7dad764539
Rollback fix 2024-10-21 16:29:04 +02:00
cebb4929a9
We want full compat 2024-10-21 16:29:03 +02:00
f95fc106c9
Add mysql command to host 2024-10-21 16:29:02 +02:00
64e5e61879
Add fix to init script from mandersson 2024-10-21 16:29:01 +02:00
9a37885da6
Rollback fix 2024-10-21 16:29:00 +02:00
ed08626dc8
We want full compat 2024-10-21 16:28:59 +02:00
f0cb47dd97
Add mysql command to host 2024-10-21 16:28:58 +02:00
29acc076b3
We want full compat 2024-10-21 16:28:58 +02:00
d57135d906
Add mysql command to host 2024-10-21 16:28:57 +02:00
1d6e08b852
format 2024-10-21 16:28:56 +02:00
978d073bde
Add fix to init script from mandersson 2024-10-21 16:28:55 +02:00
8e2f1d032d
Add audit log to other logs 2024-10-21 16:28:54 +02:00
66093b13b8
Create parent dir 2024-10-21 16:28:53 +02:00
1438c94ca5
Dump customer datastructure for statistics logic to be transferred to script1. 2024-10-21 16:28:52 +02:00
5c3fa64086
Gen users on department lists by a cronjob. 2024-10-21 16:28:51 +02:00
2fb9e754cc
genusersondepartmentlists: Make status count 2024-10-21 16:28:50 +02:00
f03e6241bf
genusersondepartmentlists: Add som logic for error handling 2024-10-21 16:28:49 +02:00
3c7fe38458
Lookup statistics secret on multinode-db1 in env test. 2024-10-21 16:28:49 +02:00
6853993a34
multinode-db1: Add initial upload logic of deprtment filtered user lists. 2024-10-21 16:28:48 +02:00
1ab9fbc14e
multinode-db1: Add logic and template to create userlists by department. 2024-10-21 16:28:47 +02:00
d680ed8597
puppet-lint --fix 2024-10-21 16:28:46 +02:00
96734c8736
Fix mode on log files 2024-10-21 16:28:45 +02:00
f948bfb4ae
Rollback fix 2024-10-21 16:28:44 +02:00
b7a17f7520
We want full compat 2024-10-21 16:28:43 +02:00
ecc158b48e
Add mysql command to host 2024-10-21 16:28:42 +02:00
6445c0434e
format 2024-10-21 16:28:41 +02:00
330f167a2c
Add fix to init script from mandersson 2024-10-21 16:28:41 +02:00
a0df922493
Add audit log to other logs 2024-10-21 16:28:40 +02:00
9648aa184c
Create parent dir 2024-10-21 16:28:39 +02:00
1cede4b87c
multinode-db1: Add script to list users by department domain in email. 2024-10-21 16:28:38 +02:00
438e2c24e0
multinode-db1 test: Add rclone and statistics remote 2024-10-21 16:28:37 +02:00
46d1db97bc
Indent 2024-10-21 16:27:30 +02:00
ad0b7abff9
Oops 2024-10-21 16:27:30 +02:00
88c8e159bc
Set permissions on redis 2024-10-21 16:27:29 +02:00
3818772b87
Fix mode on log files 2024-10-21 16:27:28 +02:00
f79d7acb02
Rollback fix 2024-10-21 16:27:27 +02:00
1eaa62a150
We want full compat 2024-10-21 16:27:26 +02:00
a4d0294c10
Add mysql command to host 2024-10-21 16:27:25 +02:00
b24c8ec683
Add fix to init script from mandersson 2024-10-21 16:27:24 +02:00
90569f54d3
Add audit log to other logs 2024-10-21 16:27:23 +02:00
2aba03fe41
Fix mode on log files 2024-10-21 16:27:22 +02:00
52f563692d
Rollback fix 2024-10-21 16:27:22 +02:00
f2abb855ba
We want full compat 2024-10-21 16:27:21 +02:00
4b3e9c0da1
Add mysql command to host 2024-10-21 16:27:20 +02:00
8e982592c2
Add fix to init script from mandersson 2024-10-21 16:27:19 +02:00
2b543373ed
Rollback fix 2024-10-21 16:27:18 +02:00
a089d02cc4
We want full compat 2024-10-21 16:27:17 +02:00
a6d680aaed
Add mysql command to host 2024-10-21 16:27:16 +02:00
af2385dffe
We want full compat 2024-10-21 16:27:15 +02:00
6aa7238ee2
Add mysql command to host 2024-10-21 16:27:15 +02:00
f06aa65ec5
format 2024-10-21 16:27:14 +02:00
f973b83d1b
Add fix to init script from mandersson 2024-10-21 16:27:13 +02:00
a8f2bb9bb6
Add audit log to other logs 2024-10-21 16:27:12 +02:00
751ad087e6
Create parent dir 2024-10-21 16:27:11 +02:00
f886d53f3c
Dump customer datastructure for statistics logic to be transferred to script1. 2024-10-21 16:27:10 +02:00
a51cc9fdc4
Gen users on department lists by a cronjob. 2024-10-21 16:27:09 +02:00
a329a46491
genusersondepartmentlists: Make status count 2024-10-21 16:27:08 +02:00
c9e7d8eb2c
genusersondepartmentlists: Add som logic for error handling 2024-10-21 16:27:07 +02:00
ac85bddc8a
Lookup statistics secret on multinode-db1 in env test. 2024-10-21 16:27:06 +02:00
49f00af783
multinode-db1: Add initial upload logic of deprtment filtered user lists. 2024-10-21 16:27:06 +02:00
d471c7f271
multinode-db1: Add logic and template to create userlists by department. 2024-10-21 16:27:05 +02:00
9b2490e415
puppet-lint --fix 2024-10-21 16:27:04 +02:00
58af85571b
Fix mode on log files 2024-10-21 16:27:03 +02:00
6664271b49
Rollback fix 2024-10-21 16:27:02 +02:00
dc16a41e0f
We want full compat 2024-10-21 16:27:01 +02:00
b1ae0c0465
Add mysql command to host 2024-10-21 16:27:00 +02:00
862f94b4eb
format 2024-10-21 16:26:59 +02:00
1c9365e121
Add fix to init script from mandersson 2024-10-21 16:26:59 +02:00
1ff784e5e4
Create parent dir 2024-10-21 16:26:58 +02:00
3304d408a5
multinode-db1: Add script to list users by department domain in email. 2024-10-21 16:26:57 +02:00
38447ceb37
multinode-db1 test: Add rclone and statistics remote 2024-10-21 16:26:56 +02:00
e0f6d3bafa
Oops 2024-10-21 16:20:25 +02:00
f531a60514
Fix mode on log files 2024-10-21 16:20:24 +02:00
8646f1eda7
Rollback fix 2024-10-21 16:20:23 +02:00
f5f97e1c3a
We want full compat 2024-10-21 16:20:22 +02:00
81341be6e7
Add mysql command to host 2024-10-21 16:20:21 +02:00
51ddfa985c
Add fix to init script from mandersson 2024-10-21 16:20:20 +02:00
158f982aed
Add audit log to other logs 2024-10-21 16:20:19 +02:00
a7fe6cecc7
Oops 2024-10-21 16:19:04 +02:00
45ac19bec8
Fix mode on log files 2024-10-21 16:16:29 +02:00
a74899332e
Rollback fix 2024-10-21 16:16:28 +02:00
d63c586998
We want full compat 2024-10-21 16:16:27 +02:00
beb69036a1
Add mysql command to host 2024-10-21 16:16:26 +02:00
74af996a76
Add fix to init script from mandersson 2024-10-21 16:16:26 +02:00
e76c22f6d5
Add audit log to other logs 2024-10-21 16:16:25 +02:00
b065ac7ae6
Set permissions on redis 2024-10-21 16:15:54 +02:00
019a3d9792
puppet-lint --fix 2024-10-21 16:15:53 +02:00
6224fe3e96
Fix mode on log files 2024-10-21 16:15:52 +02:00
cc2a9e9ebf
Rollback fix 2024-10-21 16:15:51 +02:00
daa647beb9
We want full compat 2024-10-21 16:15:50 +02:00
1ecf6c8128
Add mysql command to host 2024-10-21 16:15:49 +02:00
4111ac7500
Add fix to init script from mandersson 2024-10-21 16:15:49 +02:00
8b0d90e4eb
Add audit log to other logs 2024-10-21 16:15:48 +02:00
e3a66f95df
Set permissions on redis 2024-10-21 16:13:51 +02:00
b096522c36 Merge branch 'testing' 2024-10-09 14:27:29 +02:00
d7e7e260c2 puppet-lint --fix 2024-10-09 14:26:06 +02:00
52869ca089 Fix mode on log files 2024-10-09 14:26:06 +02:00
1a39e0dbec Rollback fix 2024-10-09 14:26:06 +02:00
e0c06d4571 We want full compat 2024-10-09 14:26:06 +02:00
f29e074ded Add mysql command to host 2024-10-09 14:26:06 +02:00
c761c56052 Add fix to init script from mandersson 2024-10-09 14:26:06 +02:00
026c5c5d03 Add audit log to other logs 2024-10-09 14:26:06 +02:00
7460019ec5 puppet-lint --fix 2024-10-09 14:25:34 +02:00
5c0e05394d Fix mode on log files 2024-10-09 14:24:09 +02:00
f2b8406826 Rollback fix 2024-10-09 14:24:09 +02:00
6d78fdde7c We want full compat 2024-10-09 14:24:09 +02:00
15beb333ec Add mysql command to host 2024-10-09 14:24:09 +02:00
8ca506f608 Add fix to init script from mandersson 2024-10-09 14:24:09 +02:00
00d4c96d28 Fix mode on log files 2024-10-09 14:23:08 +02:00
3e98d5f477 Rollback fix 2024-10-07 12:23:08 +02:00
bcbcb6e469 We want full compat 2024-10-07 12:23:08 +02:00
4b13d54f23 Add mysql command to host 2024-10-07 12:23:08 +02:00
60d37feb61 format 2024-10-07 12:23:08 +02:00
301d451c3a Create parent dir 2024-10-07 12:23:08 +02:00
bbee5643d6 multinode-db1: Add script to list users by department domain in email. 2024-10-07 12:23:08 +02:00
01b3c4cf56 multinode-db1 test: Add rclone and statistics remote 2024-10-07 12:23:08 +02:00
d3e0f41ff1 Rollback fix 2024-10-07 12:22:03 +02:00
8b74bd9c75 We want full compat 2024-10-07 12:22:03 +02:00
5c16a783e9 Add mysql command to host 2024-10-07 12:22:03 +02:00
2b590b3f01 Rollback fix 2024-10-07 12:19:25 +02:00
aade6fb2c7 We want full compat 2024-10-04 10:27:40 +02:00
f2c31c79cd Add mysql command to host 2024-10-04 10:27:40 +02:00
b79eb5d4bf format 2024-10-04 10:27:40 +02:00
606b14db17 multinode-db1: Add script to list users by department domain in email. 2024-10-04 10:27:40 +02:00
b4e496bf58 multinode-db1 test: Add rclone and statistics remote 2024-10-04 10:27:40 +02:00
e4093f5e40 We want full compat 2024-10-04 10:05:20 +02:00
b573820906 We want full compat 2024-10-04 10:04:43 +02:00
141f9334f2 Add mysql command to host 2024-10-04 10:01:17 +02:00
07ddb2220a format 2024-10-04 10:01:17 +02:00
722a8bdb68 Add mysql command to host 2024-10-04 10:00:50 +02:00
d832f2a274 format 2024-10-03 16:14:33 +02:00
0bf3ebfc53 Add fix to init script from mandersson 2024-10-03 15:31:26 +02:00
ccdb53310d Add fix to init script from mandersson 2024-10-03 14:18:52 +02:00
07d54eba92 Add fix to init script from mandersson 2024-10-03 14:18:32 +02:00
62bb2da068 Use all adresses 2024-10-03 14:15:24 +02:00
f5297850d9 Add uptime check taht warns if server has been up for too long 2024-10-03 14:14:05 +02:00
652f2fbdf7 multinode-db1: Add script to list users by department domain in email. 2024-10-03 14:14:05 +02:00
66a81768c8 multinode-db1 test: Add rclone and statistics remote 2024-10-03 14:14:05 +02:00
260fd526fe Use all adresses 2024-10-02 17:39:01 +02:00
ac917b9811 Use all adresses 2024-10-02 17:37:35 +02:00
4ec608adf2
twofactor_enforced: Make template work with nil values 2024-10-01 11:51:27 +02:00
b66e072cf7
twofactor_enforced: Change manifest to work with fullnode 2024-10-01 11:50:56 +02:00
e3697719d2
twofactor_enforced: Make template work with nil values 2024-10-01 11:26:10 +02:00
766c337cb2
twofactor_enforced: Change manifest to work with fullnode 2024-10-01 11:25:25 +02:00
24c2fcbdfc
twofactor_enforced: Make template work with nil values 2024-10-01 11:19:43 +02:00
b7e94c2259
twofactor_enforced: Change manifest to work with fullnode 2024-10-01 10:34:37 +02:00
cf0a778781 Add uptime check taht warns if server has been up for too long 2024-09-24 22:31:26 +02:00
15a752a65d Add uptime check taht warns if server has been up for too long 2024-09-24 22:31:01 +02:00
cd1a4ba19c Node3 sunet-prod reinstalled 2024-09-24 18:11:45 +02:00
2c216513d3 multinode-db1: Add script to list users by department domain in email. 2024-09-24 18:11:45 +02:00
8ba567e385 multinode-db1 test: Add rclone and statistics remote 2024-09-24 18:11:45 +02:00
911edbed58 Node3 sunet-prod reinstalled 2024-09-24 18:11:00 +02:00
b4b099b237 Node3 sunet-prod reinstalled 2024-09-24 18:10:38 +02:00
f8dce117a8 Correct hostgroup name 2024-09-24 17:47:32 +02:00
1a63b4884f Correct hostgroup name 2024-09-24 17:47:18 +02:00
eeca6dfbe9 Correct hostgroup name 2024-09-24 17:46:50 +02:00
66bd2e9bab Move nrpe class from opsrepo 2024-09-24 17:33:20 +02:00
473f702943 multinode-db1: Add script to list users by department domain in email. 2024-09-24 17:33:20 +02:00
d135f415d0 multinode-db1 test: Add rclone and statistics remote 2024-09-24 17:33:20 +02:00
dbac473c26 Move nrpe class from opsrepo 2024-09-24 17:30:21 +02:00
7673aca60e Move nrpe class from opsrepo 2024-09-24 17:29:07 +02:00
fc2079e2fa Add audit log to other logs 2024-09-24 16:46:56 +02:00
f339aa2865 multinode-db1: Add script to list users by department domain in email. 2024-09-24 16:46:56 +02:00
705d3c8732 multinode-db1 test: Add rclone and statistics remote 2024-09-24 16:46:56 +02:00
1822ecd683 Add audit log to other logs 2024-09-24 15:36:43 +02:00
74768985cc Add audit log to other logs 2024-09-24 15:36:06 +02:00
b0ded4d0ef Move hee from cosmos-sites.pp 2024-09-23 17:07:21 +02:00
98a02615cc Install netcat on nodes and backup 2024-09-23 17:07:21 +02:00
d0260d7c05 multinode-db1: Add script to list users by department domain in email. 2024-09-23 17:07:21 +02:00
dd022213a6 multinode-db1 test: Add rclone and statistics remote 2024-09-23 17:07:21 +02:00
e20e41edf8 Move hee from cosmos-sites.pp 2024-09-23 17:05:08 +02:00
663021dc61 Move hee from cosmos-sites.pp 2024-09-23 17:03:40 +02:00
59e137e918 Merge branch 'main' into testing 2024-09-18 12:38:39 +02:00
d7c724570f Install netcat on nodes and backup 2024-09-18 12:38:17 +02:00
ffc5170fc0 Create parent dir 2024-09-12 17:29:20 +02:00
ea09557c1a Don't use incudes like this in newer puppet 2024-09-12 17:29:20 +02:00
9364391ad5 multinode-db1: Add script to list users by department domain in email. 2024-09-12 17:29:20 +02:00
6a5000a557 multinode-db1 test: Add rclone and statistics remote 2024-09-12 17:29:20 +02:00
e4ccdde169 Create parent dir 2024-09-12 17:27:36 +02:00
589f62fc0a Create parent dir 2024-09-12 17:27:16 +02:00
6827c0af7c
Dump customer datastructure for statistics logic to be transferred to script1. 2024-09-09 16:22:37 +02:00
bff8d8ab22 Don't use incudes like this in newer puppet 2024-09-09 16:03:55 +02:00
2152b5b4a9 Don't use incudes like this in newer puppet 2024-09-09 16:03:30 +02:00
75c3a77c6d
Gen users on department lists by a cronjob. 2024-09-03 14:33:46 +02:00
eac5b172f2
genusersondepartmentlists: Make status count 2024-09-03 13:43:43 +02:00
05bde6adb6
genusersondepartmentlists: Add som logic for error handling 2024-09-03 12:43:13 +02:00
d8eaa122e3
Lookup statistics secret on multinode-db1 in env test. 2024-09-03 10:06:05 +02:00
7402202b9a
multinode-db1: Add initial upload logic of deprtment filtered user lists. 2024-08-30 15:39:01 +02:00
60cb53e631
multinode-db1: Add logic and template to create userlists by department. 2024-08-30 08:57:17 +02:00
c134f0771f
multinode-db1: Add script to list users by department domain in email. 2024-08-29 13:46:23 +02:00
8acd0c40b8
multinode-db1 test: Add rclone and statistics remote 2024-08-29 11:02:30 +02:00
2c13b2f51d
Merge branch 'main' into stable 2024-08-26 13:26:03 +02:00
04a3c96250
Multinode2: disable cronjob karin bucket. 2024-08-26 13:03:04 +02:00
3651afd778
Initial script to list multinode customer users filtered on mail domain. 2024-08-23 10:31:27 +02:00
728b152f85 Merge branch 'main' into stable 2024-08-14 14:49:32 +02:00
c8051b065a Merge branch 'main' of git+ssh://platform.sunet.se:22022/Drive/sunetdrive 2024-08-14 14:48:47 +02:00
41dd736b20 we have no naemon user 2024-08-14 14:48:33 +02:00
08be347419 Syntax error 2024-07-01 16:45:49 +02:00
9285c155db Alllow running scan jobs 2024-07-01 15:26:34 +02:00
76fe97382d loglevel 1
https://docs.nextcloud.com/server/latest/admin_manual/configuration_server/logging_configuration.html

This should mean we still have all relevant loginfo we need, such as logins and file operations
2024-06-28 10:58:54 +02:00
12ca587858 loglevel 1
https://docs.nextcloud.com/server/latest/admin_manual/configuration_server/logging_configuration.html

This should mean we still have all relevant loginfo we need, such as logins and file operations
2024-06-28 10:57:13 +02:00
cdb1157172 Merge branch 'main' into stable 2024-06-28 10:06:49 +02:00
39b571f3d1 Support redict 2024-06-28 10:05:56 +02:00
d30dabc534 Merge branch 'main' into stable 2024-06-27 09:34:31 +02:00
d353069660 Make a standard about which params a script gets.
project="${1}"
mirror="${2}"
bucket="${3}"
customer="${4}"
environment="${5}"
2024-06-27 09:31:37 +02:00
d696c19242 Add backup script for hb 2024-06-26 12:51:24 +02:00
eef539928e Switch logic 2024-06-26 12:51:24 +02:00
4cf8cf3adb Add backup script for hb 2024-06-26 12:48:50 +02:00
7f7d38c87e Switch logic 2024-06-26 10:19:53 +02:00
adeaea5a37 Fix bug in script 2024-06-26 10:04:41 +02:00
4a45670bec Fix syntax error 2024-06-26 10:04:41 +02:00
3738e703b6 Clean up logic 2024-06-26 10:04:41 +02:00
bac25d27c8 Fix bug in script 2024-06-26 09:58:18 +02:00
42831948e1 Fix syntax error 2024-06-26 09:40:36 +02:00
db92a5d255 Clean up logic 2024-06-26 09:14:59 +02:00
6e5831367e No ports for mode host 2024-06-25 18:09:54 +02:00
1dea001ed9 No ports for mode host 2024-06-25 18:09:13 +02:00
765e0b41cb Use correect variable 2024-06-25 11:23:52 +02:00
4d6e6f67af Use correect variable 2024-06-25 11:19:53 +02:00
f21a489e46 Merge branch 'main' into stable 2024-06-25 10:25:52 +02:00
b20c4c7397 Modify timeout for scriptherder 2024-06-25 10:15:41 +02:00
16496dfded Fix rclone cert issue 2024-06-19 09:10:35 +02:00
a836417db2 Fix rclone cert issue 2024-06-19 09:09:59 +02:00
a76397aafd Use correct variable 2024-06-19 08:59:05 +02:00
ea51f0b56b Trigger extra backup jobs if they exist 2024-06-12 10:55:11 +02:00
6c8a4f430b See if we can get the jobs out 2024-06-12 09:47:05 +02:00
097fce5f87 Merge branch 'main' into testing 2024-06-12 09:40:36 +02:00
5c46f5bc38 Start work on extra_backup_jobs 2024-06-12 09:39:23 +02:00
bd1519430f
Merge makebucket changes from 'main' into testing 2024-06-11 14:16:48 +02:00
5f47ce7fca
Make listusers.sh saml user aware 2024-06-11 14:12:38 +02:00
45321052de Merge branch 'main' into testing 2024-06-05 11:17:32 +02:00
717350780f Typo 2024-06-05 11:17:12 +02:00
a03d4437e2 Merge branch 'main' into stable 2024-05-24 13:31:19 +02:00
35362991d0 Merge branch 'main' into testing 2024-05-24 13:30:50 +02:00
a4d0afe9fd Allow mail servers to connect 2024-05-24 13:30:37 +02:00
c90a6ac9a1 Merge branch 'main' into stable 2024-05-21 19:52:55 +02:00
3ad7d95161 Merge branch 'main' into testing 2024-05-21 19:46:36 +02:00
450f31e2d5 Remove drive resolvers 2024-05-21 19:46:20 +02:00
840d2000a6 Merge branch 'main' into stable 2024-05-21 17:48:21 +02:00
00a2f1f425 Merge branch 'main' into testing 2024-05-21 17:47:56 +02:00
b3d9873176 Switch to boolean 2024-05-21 17:47:42 +02:00
bf5abc073e
Change hostnet to bool 2024-05-21 17:29:59 +02:00
aa2a869203
Move gss prod to redis cluster 2024-05-21 17:14:03 +02:00
5b7c702573 Merge branch 'main' into testing 2024-05-21 12:54:28 +02:00
cbf4086993 Goes together with switch to host networking 2024-05-21 12:54:06 +02:00
5f74eaaab0 Merge branch 'main' into testing 2024-05-20 13:10:54 +02:00
05814cfd24 proxysql network does not exist 2024-05-20 13:10:20 +02:00
4376aad6e2 Merge branch 'main' into testing 2024-05-20 11:47:10 +02:00
c485935f30 Try ensure file 2024-05-20 11:46:58 +02:00
6299dad4fd Update config for lookup server 2024-05-20 11:10:52 +02:00
718bc0b496 Merge branch 'main' into testing 2024-05-20 10:44:56 +02:00
472977fde4 Update config for lookup server 2024-05-20 10:44:37 +02:00
eba9546cdc
Merge branch 'main' into testing 2024-05-15 07:57:11 +02:00
a16c9d68e8
Disable MultipartCopy with user bucket and restore scripts and primary bucket. 2024-05-08 13:31:22 +02:00
2f548f2431 Merge branch 'mandersson-nmhost' into testing 2024-05-07 13:10:19 +02:00
47bf118116 Merge branch 'main' into mandersson-nmhost 2024-05-07 13:09:29 +02:00
72cf4e570c Merge branch 'main' into testing 2024-05-07 10:54:07 +02:00
51a695ed9e Don't fail on problems with downtime for db either 2024-05-07 10:53:44 +02:00
02454ebd39 Merge branch 'main' into testing 2024-05-07 10:51:12 +02:00
cb16f03710 Don't fail on problems with downtime 2024-05-07 10:50:55 +02:00
5ba11977b0 Merge branch 'main' into testing 2024-04-29 16:39:41 +02:00
27d0bc15cc Move config def up 2024-04-29 16:39:23 +02:00
01fc5a9e86 Merge branch 'main' into testing 2024-04-29 16:35:35 +02:00
bd074c73d5 Fix syntax 2024-04-29 16:34:38 +02:00
d33f80273d Fix syntax 2024-04-29 16:30:18 +02:00
4a8a4d642c Fix syntax 2024-04-29 16:29:50 +02:00
df4fe9985b Merge branch 'main' into testing 2024-04-29 16:25:47 +02:00
d13b22e3a2 Make it possible to configure s3 bucket 2024-04-29 16:25:22 +02:00
c774d67745
App and proxysql: Fixes for network_type: host 2024-04-24 21:32:25 +02:00
d5c6b9a5fd
proxysql: Do not create docker network with network_mode: host 2024-04-24 20:52:26 +02:00
8d2c481569
Frontend node: Use netork mode host with dockerhost2 2024-04-24 20:44:41 +02:00
36f163414f Merge branch 'main' into testing 2024-04-22 12:48:45 +02:00
b5b3885c30 Allways make config editable for occ 2024-04-22 12:05:07 +02:00
77ad3ac26d Merge branch 'main' into testing 2024-04-22 11:09:05 +02:00
e72c42924a Disallow external files to mount local storage 2024-04-22 11:08:41 +02:00
069eb7ce93
Merge branch 'main' into testing 2024-04-17 17:42:40 +02:00
d07ed76d36
config.php: Increase davstorage request timeout to support bigger files. 2024-04-17 16:38:09 +02:00
f626156c4c Add jupyter site 2024-03-18 15:17:57 +01:00
b4ce0046de Add jupyter site 2024-03-18 15:17:30 +01:00
314149c7f3
Fix proxysql container name. 2024-03-18 11:10:05 +01:00
65abea7daa
Fix proxysql container name. 2024-03-18 11:08:54 +01:00
181f2e52fe
Hardcode nextcloud container name. 2024-03-18 10:33:00 +01:00
f1cac6ce35
Hardcode nextcloud container name. 2024-03-18 10:30:10 +01:00
49386124c3
Maria Backup: Add fixed container name to mitigate name statndard change docker-compose 2/3 2024-03-18 08:44:29 +01:00
cc6f2c8d7b
Maria Backup: Add fixed container name to mitigate name statndard change docker-compose 2/3 2024-03-18 08:41:11 +01:00
3fce5c6fab
Hardcode name of mariadb container to make name consistent between docker compose 2 and 3. 2024-03-08 11:13:37 +01:00
4f6786363b
Hardcode name of mariadb container to make name consistent between docker compose 2 and 3. 2024-03-08 10:32:02 +01:00
0837f82d87 Merge branch 'main' into testing 2024-03-04 10:58:19 +01:00
10b0f30155 Add support for individual nextcloud versions for multinode 2024-03-04 10:57:42 +01:00
922de25fd8 Merge branch 'main' into testing 2024-03-01 10:13:32 +01:00
1a70154efc Sites moved to i18n 2024-03-01 10:12:22 +01:00
cbb6ce3aa3 Fix typo 2024-02-27 16:35:49 +01:00
7f19e0de62 Fix typo 2024-02-27 16:35:39 +01:00
49243acaa9 Merge branch 'main' into testing 2024-02-27 16:31:45 +01:00
393f2c7034 Open port 80 for acme verification 2024-02-27 16:31:27 +01:00
f10864ff91 Merge branch 'main' into testing 2024-02-27 13:32:46 +01:00
Micke Nordin
f511383a7a Add portal 2024-02-27 11:59:32 +01:00
668c1389d5
Merge branch 'main' into testing 2024-01-08 09:18:10 +01:00
585319b177
HKR ended contract with Sunet Drive 2024-01-08 09:17:40 +01:00
0d505d2482
Add focemfa as forcemfa group to template. 2023-12-17 23:36:11 +01:00
db6bfcc517
Add focemfa as forcemfa group to template. 2023-12-17 23:18:02 +01:00
8aa9c14d35
Multinode: Add support for twofactor_enforced(_excluded)_groups 2023-11-30 11:09:36 +01:00
2cdacfe0f5
Multinode: Add support for twofactor_enforced(_excluded)_groups 2023-11-30 11:05:02 +01:00
Micke Nordin
c425f90b0f Merge branch 'main' into testing 2023-11-22 12:13:09 +01:00
Micke Nordin
7dc9667bec Fix typo 2023-11-22 12:12:57 +01:00
Micke Nordin
3b82d5b0e3 Merge branch 'main' into testing 2023-11-22 11:59:30 +01:00
Micke Nordin
f581835f7c For full node for now 2023-11-22 11:59:15 +01:00
Micke Nordin
31b1bcb92a Merge branch 'main' into testing 2023-11-22 11:53:34 +01:00
Micke Nordin
6265472396 Make it possible to delete announcement with just subject 2023-11-22 11:52:58 +01:00
Micke Nordin
a150d23f1a Merge branch 'main' into testing 2023-11-22 11:04:36 +01:00
Micke Nordin
b40beeb420 Install xmlstarlet 2023-11-22 11:04:20 +01:00
Micke Nordin
300e877b3a Make curl silent 2023-11-22 11:01:52 +01:00
8cf9f0eafa Merge branch 'main' into testing 2023-11-10 12:40:23 +01:00
436ea4f1a5 update template for readability. SD-285 2023-11-10 12:40:00 +01:00
c83e8b2982 Merge branch 'main' into testing 2023-11-07 14:01:27 +01:00
a08f473954 remove subscript of type 2023-11-07 14:01:12 +01:00
2f629c289c Merge branch 'main' into testing 2023-11-01 16:57:18 +01:00
f4eca0b747 Merge in changes from stable 2023-11-01 16:56:45 +01:00
9d5e53c0cc Merge branch 'stable' 2023-11-01 16:55:10 +01:00
ec679013e3 Merge branch 'main' into testing 2023-11-01 16:36:09 +01:00
7aa4a25dbd os.distro.id is the fact name 2023-11-01 16:35:55 +01:00
2ba33ad5d6 Merge branch 'main' into testing 2023-11-01 16:31:58 +01:00
ed65bb0d94 Use other command in debian 2023-11-01 16:31:24 +01:00
689c625b90 Force installation of drive-utils 2023-11-01 15:26:10 +01:00
df731fdb44 Force installation of drive-utils 2023-11-01 15:25:31 +01:00
440bcec0d2 python3.9 -> python3 2023-11-01 15:21:27 +01:00
9e4879d9c8 python3.9 -> python3 2023-11-01 15:20:23 +01:00
3f1ea1b694 Merge branch 'main' into testing 2023-10-31 14:17:22 +01:00
3491a1a5f7 change MDU readme document. SD-285. 2023-10-31 13:49:37 +01:00
76bc7edaed Remove @ from mappingfile in prod and test and update regex to comply with Nextcloud keys 2023-10-24 14:46:37 +01:00
7d25ae0859 Remove @ from mappingfile in prod and update regex to comply with Nextcloud keys 2023-10-24 14:43:45 +01:00
3899dab9b3 Remove @ from mappingfile in test to comply with Nextcloud keys 2023-10-24 10:43:05 +01:00
3cdf7d4084
Revert "4 backslashes in mappingfile-test.json.erb. Does not work."
This reverts commit 14b6535efd.
2023-10-23 15:13:11 +02:00
14b6535efd
Test of adding 4 backslashes in mappingfile-test.json.erb to deal with json handling of backslashes. 2023-10-23 14:49:46 +02:00
0148a727ca Revert mapping file for prod 2023-10-23 12:36:10 +02:00
e06a29a8a1 Revert mapping file for prod 2023-10-23 12:35:24 +02:00
af873e7133 Remove test 2023-10-19 15:01:55 +02:00
65a6b1787d Allow smtp servers 2023-10-19 15:01:55 +02:00
4e496bcb13 Allow imap servers to connect 2023-10-19 15:01:55 +02:00
5c5ed31f13 Remove test 2023-10-19 14:50:06 +02:00
21d9a5e3d0 Remove test 2023-10-19 14:49:46 +02:00
a5f5ddb293 Allow smtp servers 2023-10-18 18:07:33 +02:00
eb0bb65f4a Allow smtp servers 2023-10-18 18:06:11 +02:00
333d712548 Allow imap servers to connect 2023-10-18 09:46:36 +02:00
bdd858f76c Allow imap servers to connect 2023-10-18 09:46:14 +02:00
03fb0a23ac Nextcloud: Implement logs to stdout 2023-10-18 08:43:46 +02:00
81bb47819c Add fuse3 package to script serever.Dependecy to rclone mount. 2023-10-18 08:43:46 +02:00
9e6321e743 backup-all-buckets: Add comment about safeguard 2023-10-18 08:43:46 +02:00
b1ea687f2d backup-all-buckets: Safeguard backup runs if a backup process on that bucket is already in progress. 2023-10-18 08:43:46 +02:00
33d49b7ec0 Change regex to send @.shh.se cases to external node 2023-10-18 08:43:46 +02:00
f988081b39 Improved mappingfile that should work for all cases now 2023-10-18 08:43:22 +02:00
7d755324f8 Typo 2023-10-17 17:07:25 +02:00
b72315b1a2 Merge branch 'testing' 2023-10-17 17:06:49 +02:00
6220d691a3 Use collabora enterprise image 2023-10-17 17:03:43 +02:00
ec9115df06 Nextcloud: Implement logs to stdout 2023-10-17 17:01:50 +02:00
aa1d8710e1
Add fuse3 package to script serever.Dependecy to rclone mount. 2023-10-17 13:20:02 +02:00
6211f3aaea
Add fuse3 package to script serever.Dependecy to rclone mount. 2023-10-17 13:16:31 +02:00
176338f537 Merge branch 'main' into testing 2023-10-17 10:25:55 +02:00
1e9164dcd4 Nextcloud: Implement logs to stdout 2023-10-17 10:25:33 +02:00
e39b6cbaed
Update mappingfile also in production 2023-10-13 13:10:49 +02:00
11607f208c
Change regex to send @.shh.se cases to external node 2023-10-13 13:10:16 +02:00
823e172278
Improved mappingfile that should work for all cases now 2023-10-13 13:09:39 +02:00
6afbcf5beb
listusers.sh: disabel tls verify for rclone mkdir because we use ip address with webdav. 2023-10-13 11:05:02 +02:00
ec6ba465f9 backup-all-buckets: Add comment about safeguard 2023-10-13 10:47:01 +02:00
63631b2b51 backup-all-buckets: Safeguard backup runs if a backup process on that bucket is already in progress. 2023-10-13 10:47:01 +02:00
059bd041e6
backup-all-buckets: Add comment about safeguard 2023-10-12 09:21:57 +02:00
5df8dba25c
backup-all-buckets: Safeguard backup runs if a backup process on that bucket is already in progress. 2023-10-12 09:03:48 +02:00
1e7108297b listusers.sh: disabel tls verify for rclone mkdir because we use ip address with webdav. 2023-10-10 12:34:46 +02:00
cdf16db309 Typo 2023-10-10 12:30:53 +02:00
5472b767c8 Update mappingfile also in production 2023-10-10 12:30:53 +02:00
e990b05401 Change regex to send @.shh.se cases to external node 2023-10-10 12:30:53 +02:00
ca27f46ffe
listusers.sh: disabel tls verify for rclone mkdir because we use ip address with webdav. 2023-10-10 10:48:43 +02:00
e57de0ebd0 Typo 2023-10-10 10:33:28 +02:00
28a1b95114 Merge branch 'main' into testing 2023-10-10 10:27:07 +02:00
88ee8c2b67 Use collabora enterprise image 2023-10-10 10:26:43 +02:00
ade4bd42b3 Update mappingfile also in production 2023-10-09 11:10:46 +01:00
4c597442e1 Change regex to send @.shh.se cases to external node 2023-10-06 09:23:37 +01:00
829f3f01c7 Improved mappingfile that should work for all cases now 2023-10-05 12:10:21 +01:00
7198109d56
Must have a cmd 2023-10-03 17:23:48 +02:00
8752500f4d
Redactore userbucket creation and opt out 2023-10-03 17:12:17 +02:00
0a09f73f12
SU and LNU opts out of userbuckets according to freitag and anders from customer meeting 2023-10-03 17:08:37 +02:00
f6cca5bcdc
Merge branch 'main' into testing 2023-09-28 14:48:47 +02:00
37863c7a17
Use rclone for reva 2023-09-28 14:48:30 +02:00
63a2b363a7
Merge branch 'main' into testing 2023-09-28 11:05:29 +02:00
a21ea6fdd5
Allow connections from kubeservers to multinode db 2023-09-28 11:04:47 +02:00
0dea8dbc91
Merge branch 'main' into testing 2023-09-26 14:59:46 +02:00
0e5653aa0c
Fix 2023-09-26 14:59:34 +02:00
56291a4165
Merge branch 'main' into testing 2023-09-26 14:38:58 +02:00
3bac4ac9f3
Fix 2023-09-26 14:38:44 +02:00
c6a1b2cd66
Merge branch 'main' into testing 2023-09-26 14:26:20 +02:00
39137a4dfc
Typo 2023-09-26 14:26:07 +02:00
216c706457
Fix parameters 2023-09-26 14:25:10 +02:00
c013b861e9
Fix parameters 2023-09-26 14:24:18 +02:00
d5b9bb88fa
Merge branch 'main' into testing 2023-09-26 14:19:17 +02:00
f606d6c1b6
Update config 2023-09-26 14:15:59 +02:00
4a7af7a049
Merge branch 'main' into testing 2023-09-26 09:45:20 +02:00
3d4025e738
Set seccomp:unconfined 2023-09-26 09:44:56 +02:00
b4b90799e6
Update reva version 2023-09-26 09:24:36 +02:00
a44cdc1bb6
Update reva version 2023-09-26 09:23:12 +02:00
d2ced8d1cd
Backups: This script does not seem used and is confusing 2023-09-25 09:34:19 +02:00
b9e35dcf92 Give multinode a sixmonts backup policy. 2023-09-20 10:26:01 +02:00
b5f8abf934
Allow source missmatch after changing bucket name 2023-09-12 16:50:48 +02:00
6d33b53a92
Fix backup directory for multinode script as well 2023-09-12 16:37:36 +02:00
631f3616c6
Create bespoe directory, and clean up afterwards 2023-09-12 16:35:52 +02:00
174c72eb70
BACKUPDB: Fix bug that put backup in a file instead of a directory
We found a bug that moved the backups to a file, but we want that to be
a directory, meaning that all the files were overwritten one by one, and
we were not taking the backups as we though.
2023-09-12 16:19:17 +02:00
b989bd3d54
Hide deprecation warnings in Ubuntu 20.04 2023-09-08 07:39:34 +02:00
234d19b652
Merge branch 'main' into stable 2023-09-02 14:16:48 +02:00
22f5dfac62
Allways backup before restarting 2023-09-02 12:50:08 +02:00
1b60d41dab
Get password 2023-09-02 12:38:34 +02:00
7207ef2bbf
Allways backup the db 2023-09-02 12:30:16 +02:00
c7bd0fa965
Try again 2023-09-02 12:19:33 +02:00
d540c90afa
Try again 2023-09-02 12:15:54 +02:00
b927716aad
Fix comment 2023-09-02 12:12:59 +02:00
ac6430df51
Add announce script to script servers 2023-09-02 12:08:17 +02:00
88b3c1b4fb
Merge branch 'main' into testing 2023-09-02 09:29:06 +02:00
5bc0ddfa00
Add admin password as env variable to image 2023-09-02 09:28:39 +02:00
99c88c0c3b
Dont redeclare 2023-08-22 17:24:38 +02:00
8bfa2d58a9
Merge branch 'main' into stable 2023-08-22 17:15:00 +02:00
3b58bcd2ae
Get drive customers 2023-08-22 17:14:07 +02:00
364a8685a7
Get drive customers 2023-08-22 17:13:02 +02:00
3bd7a44d7a Merge branch 'main' into testing 2023-08-16 12:55:35 +02:00
f56a5c6eae Add get drive customers script to all servers 2023-08-16 12:53:08 +02:00
feb02cc2cc
Merge branch 'main' into testing 2023-07-25 13:38:12 +02:00
58fd21e0aa
Typo 2023-07-25 13:37:56 +02:00
1e0879769c
Merge branch 'main' into testing 2023-07-25 13:35:46 +02:00
3986e69349
See if we can get ipv6 for monitor 2023-07-25 13:35:31 +02:00
d0ca012129
Merge branch 'main' into stable 2023-07-25 08:15:40 +02:00
753512f90f
Merge branch 'main' into testing 2023-07-20 11:24:06 +02:00
598be8c2f1
Add full support for common infra 2023-07-20 11:23:48 +02:00
1cea9c9173
Remove replicate readwrite db 2023-07-18 15:59:47 +02:00
6df092764f
Remove replicate readwrite db 2023-07-18 15:59:08 +02:00
735eaf7067
Merge branch 'main' into testing 2023-07-18 15:53:04 +02:00
54de0adb0f
Remove replicate readwrite db 2023-07-18 15:52:33 +02:00
42b57f3655
Merge branch 'main' into stable 2023-07-18 11:13:19 +02:00
ed9353198d
Merge branch 'main' into testing 2023-07-06 14:51:04 +02:00
1152fd972f
Add 'gss.username_format' => 'sanitize', 2023-07-06 14:50:47 +02:00
604d485d29
Fix gss_mode for multinode 2023-06-28 15:53:13 +02:00
c7072d6dae
Fix gss_mode for multinode 2023-06-28 15:52:52 +02:00
3281824eac
Fix gss_mode for multinode 2023-06-28 15:51:43 +02:00
df1277e04e
Merge branch 'main' into stable 2023-06-15 19:27:15 +02:00
b15bc5ad03
Merge branch 'main' into testing 2023-06-15 19:24:58 +02:00
befc3207d9
Fix conficting bashrc declarations 2023-06-15 19:24:31 +02:00
b166a683a6
Merge branch 'main' into stable 2023-06-15 18:32:53 +02:00
bffb06333d
Merge branch 'main' into testing 2023-06-15 12:01:19 +02:00
bc329be450
Add back safer_reboot 2023-06-15 12:00:47 +02:00
1e23d134f1
Add fqdn to PS1 2023-06-13 14:15:53 +02:00
50dcefdb3d
Merge branch 'main' into stable 2023-06-13 12:17:46 +02:00
ff3a5c143c
Pass on vaiables 2023-06-13 12:16:55 +02:00
e0139b0765
BAckups: Fix db backups
DB backup dir clashed with statistics script storage location, and also
some permission errors.

diff --git a/templates/script/backupdb.erb.sh b/templates/script/backupdb.erb.sh
index b35acbc..1eb7992 100644
--- a/templates/script/backupdb.erb.sh
+++ b/templates/script/backupdb.erb.sh
@@ -6,11 +6,16 @@ sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk
 number_of_full_to_keep="<%= @full_backup_retention %>"

 backup="${1}"
+if [[ -z ${backup} ]]; then
+  backup="backup1.$(hostname -d)"
+fi
 if ! [[ ${backup} =~ backup1.*sunet.se$ ]]; then
 	echo "Usage: ${0} <fqdn of backup server>"
 	echo "Example: ${0} backup1.sunet.drive.sunet.se"
 fi
 backup_dir="/opt/backups"
+backup_dir_long="${backup_dir}/backups"
+mkdir -p ${backup_dir}
 bucket="db-backups"
 mirror="<%= @customer %>-<%= @environment %>-mirror"
 if [[ ${mirror} =~ common-(test|prod)-mirror ]]; then
@@ -23,11 +28,8 @@ ssh ${backup} "sudo /home/script/bin/backup_db.sh"
 echo "Cleaning up old backups for ${backup}"
 ssh ${backup} "sudo /home/script/bin/purge_backups.sh /opt/mariadb_backup/backups/"
 echo "Copying backups here"
-mkdir -p ${backup_dir}
-scp script@${backup}:/opt/mariadb_backup/backups/$(date +%Y/%m/%d)/*.gz ${backup_dir}
+scp script@${backup}:/opt/mariadb_backup/backups/$(date +%Y/%m/%d)/*.gz ${backup_dir_long}
 echo "Copying backups to remote bucket"
 rclone mkdir ${mirror}:${bucket}
 duplicity --full-if-older-than 1M --tempdir /mnt --archive-dir /mnt --no-encryption ${backup_dir} rclone://${mirror}:/${bucket}
 duplicity remove-all-but-n-full ${number_of_full_to_keep} --tempdir /mnt --archive-dir /mnt --force rclone://${mirror}:/${bucket}
-echo "cleaning up"
-rm -r ${backup_dir}
diff --git a/templates/scriptreceiver/backup_db.erb.sh b/templates/scriptreceiver/backup_db.erb.sh
index 025a338..3e15f25 100644
--- a/templates/scriptreceiver/backup_db.erb.sh
+++ b/templates/scriptreceiver/backup_db.erb.sh
@@ -11,5 +11,7 @@ else
 fi

 docker exec ${container} /do_backup.sh ${customer}
+chown root:script /opt/mariadb_backup/
+chmod 750 /opt/mariadb_backup/
 chmod 755 ${backupdir}
 chown -R script:root ${backupdir}
2023-06-13 12:15:19 +02:00
fa93c01846
Fix permissions 2023-06-13 12:09:13 +02:00
a9fdb43bf6
Add a default for backup 2023-06-13 11:59:19 +02:00
95030efba2
Run backup on same dir as before 2023-06-13 11:52:15 +02:00
2119f71008
Backup dir overwritten by data script 2023-06-13 11:46:25 +02:00
08243f555d
Add back index 2023-06-12 11:42:00 +02:00
61c56fd2f2
Pass on variable 2023-06-12 11:29:51 +02:00
b7a26db915
Fix fact 2023-06-12 10:01:03 +02:00
b225e8d364
Update legacy fact name 2023-06-12 09:32:10 +02:00
0f8f27773b
Add support for replicating from multinode dbs 2023-06-12 09:31:41 +02:00
f4ff3fea76
Add nordu.net 2023-06-08 16:43:20 +02:00
e7ffd6fac3
Add nordu.net 2023-06-08 16:43:05 +02:00
8f3becd914
Add nordu.net 2023-06-08 16:33:04 +02:00
3531be828e
Add nordu.net 2023-06-08 16:32:43 +02:00
24b0b1eedc
Revert "Use nce image"
This reverts commit 20471ebaa8.
2023-06-01 11:47:16 +02:00
20471ebaa8
Use nce image 2023-06-01 11:44:50 +02:00
6c22905a04
Merge branch 'stable' 2023-05-22 10:14:53 +02:00
149ae70210
Add dns 2023-05-12 16:27:31 +02:00
402b6e1c4b
Add dns 2023-05-12 16:23:22 +02:00
8593d0bb00
Fix regex in mappingfile-test 2023-05-09 10:56:25 +02:00
3dab127050 Merge branch 'main' into stable 2023-05-08 09:15:12 +02:00
fc4b51b4ac update mapping file to fix problems for shh.se 2023-05-08 09:14:00 +02:00
a14a71f5d6 Merge branch 'main' into stable 2023-05-03 15:32:38 +02:00
52c11917ce modify mappingfile for GSS to help SHH to login. 2023-05-03 15:00:24 +02:00
ca2cf7e3f2
Allow file locks to be debug logged for sunet test 2023-04-27 16:44:06 +02:00
ef7c470244
If at first you don't succeed... 2023-04-26 16:28:40 +02:00
5be9aedc56
Fix so cron job only runs in common 2023-04-26 16:25:29 +02:00
422ebd7ace
Fix backups of multinode in prod 2023-04-26 16:09:19 +02:00
75f75732e7
Fix repair command 2023-04-26 11:22:25 +02:00
8ca66c10ad
Add more repair commands and use tty for occ 2023-04-26 09:26:46 +02:00
7d10d168bd
Fix nc_upgrade_command 2023-04-25 17:48:13 +02:00
036dcf010c
Add gss 2023-04-24 16:39:09 +02:00
3b5d14a189
Try again 2023-04-24 16:34:37 +02:00
e840903eae
Can I force string 2023-04-24 16:30:12 +02:00
db516e3b72
New name 2023-04-24 16:27:21 +02:00
a9695aeee1
Two passes 2023-04-24 16:25:37 +02:00
713f12e1d4
Terminate regex 2023-04-24 16:23:01 +02:00
568cb87ad6
Trim version 2023-04-24 16:21:22 +02:00
709d2165ac
Gsub 2023-04-24 16:13:11 +02:00
37e419cf75
Try regsubst 2023-04-24 16:06:55 +02:00
54868e8518
Yet another try 2023-04-24 15:59:47 +02:00
20d1558866
Yet another try 2023-04-24 15:56:22 +02:00
f1276d52d3
Fingers crossed 2023-04-24 15:51:49 +02:00
9b44a99135
Print debug to file 2023-04-24 15:47:56 +02:00
9ec76cfc7f
Yet another try 2023-04-24 15:39:32 +02:00
8d5471d88e
Fix pip 2023-04-24 15:35:10 +02:00
71fd8a3287
Yet another try 2023-04-24 15:34:02 +02:00
f95675f6ee
Yet another try 2023-04-24 15:27:51 +02:00
e06b7ffb1c
Try to lookup fact yet another way 2023-04-24 15:23:57 +02:00
1045898d40
Different fact syntax 2023-04-24 15:13:10 +02:00
4af4a23d1b
Use new facts in template 2023-04-24 15:06:51 +02:00
31651be3be
Install yq everywhere 2023-04-24 14:51:51 +02:00
2a59d0995c
Add nextcloud version knowledge 2023-04-24 14:48:56 +02:00
c5e5279f0e
Add reference to self 2023-04-24 12:10:30 +02:00
1ce82c89cc
Fix namespacing 2023-04-24 11:20:02 +02:00
824be30b44
Install yq 2023-04-24 11:04:39 +02:00
bbf00f7f4a
More syntax 2023-04-24 10:53:37 +02:00
1632657f46
Fix syntax error 2023-04-24 10:50:47 +02:00
165415b725
Less dangerous pruning script 2023-04-24 10:47:36 +02:00
f2f919a59c
Lower further 2023-04-05 14:20:52 +02:00
801c5a2e85
Lower interned string value 2023-04-05 14:13:00 +02:00
4948ada609
Use facter 2023-04-05 12:26:11 +02:00
e220a1b1c6
Fix htaccess complaint 2023-04-05 08:55:41 +02:00
bf5e5adf10
Apache would not start with 128mb 2023-04-05 08:27:53 +02:00
0b267c07c5
Fix warning about interned strings on some nodes 2023-04-05 08:08:31 +02:00
e3dbe10187
Sort config and add entries to disable spurious warnings 2023-04-04 14:29:20 +02:00
d93cce4fc0
Handle mau 2023-04-03 14:11:28 +02:00
6dfe1496a4
Purge binlogs regularly 2023-03-24 16:26:56 +01:00
8386df91de
Fix for multinode 2023-03-24 14:36:07 +01:00
6de5312e7c
Support editable config 2023-03-24 13:24:23 +01:00
af020c77aa
New image 2023-03-24 11:47:55 +01:00
cdfd3b9126
Clear scriptherder script 2023-03-22 08:28:28 +01:00
4be1448dab
Remove swap 2023-03-22 08:01:18 +01:00
16bf805717
Fix passwords 2023-03-21 18:28:52 +01:00
38807e62a5
Try to configure all customer 2023-03-21 18:26:23 +01:00
238172397b
Correct network settings 2023-03-21 17:26:36 +01:00
d104fd4299
Move prod to proxysql 2023-03-21 17:08:54 +01:00
3c1ee2ba1d
Fix networks 2023-03-20 09:40:19 +01:00
03808354f3
proxysql.conf for prod 2023-03-13 18:54:36 +01:00
4963afd5f6
Handle clean up in a different way 2023-03-13 16:33:51 +01:00
18578817f1
Correct dir for backups 2023-03-13 16:27:36 +01:00
3fb6164501
Typo 2023-03-13 16:23:51 +01:00
392e83188b
Fix backups for new multinode dbs 2023-03-13 16:20:03 +01:00
c82fe2db78
Only create mariadb dirs for prod 2023-03-13 14:46:13 +01:00
b2d70279b1
Remove db stuff from test multinode 2023-03-13 14:32:59 +01:00
810c095bda
Add support for maintenance:mode with readonly config 2023-03-13 14:09:43 +01:00
09d3f72162
Add support for maintenance:mode with readonly config 2023-03-13 13:59:53 +01:00
d94ecc9a26
Fix up container name 2023-03-13 13:26:05 +01:00
284bc8ad8e
Rename default container 2023-03-13 13:23:19 +01:00
09e8655a08
Move all test to multinode-db 2023-03-13 13:05:18 +01:00
76a928c060
Migrate db for antagning 2023-03-13 12:56:42 +01:00
abd56622e7
Fix typo 2023-03-13 12:49:03 +01:00
4bde51f883
Typo 2023-03-13 12:46:20 +01:00
19a02f0bf2
Add cmd for scriptherder cronjob 2023-03-13 12:43:50 +01:00
e9e70c5b29
Add in support for fullnode customers to folders in userbuckets 2023-03-13 12:41:23 +01:00
d09dc50d0f
Enable multinode servers to use external db 2023-03-10 12:50:55 +01:00
25d8d94b70
Fix container name 2023-03-10 10:19:05 +01:00
10c44fe5fc
Fix container name 2023-03-10 10:05:21 +01:00
44e782d793
don't make buckets for UU 2023-03-06 18:00:42 +01:00
8443f738cc
Fix type issue 2023-03-01 13:48:23 +01:00
99a93ec9be
Echo nodenumber 2023-03-01 13:45:32 +01:00
08e3bc8fa1
Echo nodenumber 2023-03-01 13:43:12 +01:00
8c355fcc4d
Add bucket back for when Karin will delete user 2023-03-01 13:36:43 +01:00
5abf4b04f4
Fix multinode cronjob 2023-02-28 18:50:13 +01:00
00256a7259
Use python 3.9 2023-02-28 14:24:11 +01:00
23af9a6a24
Network mode host not compatible with ports 2023-02-28 14:17:49 +01:00
45762b076a
Add proxysql network 2023-02-28 14:06:17 +01:00
988efa23d9
Add monitor password 2023-02-28 13:30:18 +01:00
1f36efdcb4
More parameters for proxysql cnf 2023-02-28 13:12:33 +01:00
b60f548e0a
Try again 2023-02-28 13:09:45 +01:00
09a13f6444
Fix syntax 2023-02-28 13:01:15 +01:00
5d286dd5c0
Dont create proxysql directory 2023-02-28 12:48:48 +01:00
7c49d580a7
Make config optional 2023-02-28 12:44:15 +01:00
e5c55de042
Set up proxysql passwords 2023-02-28 11:50:16 +01:00
fd61340a4a
Use numeric index 2023-02-28 10:55:13 +01:00
36d777ff8f
Create a password array 2023-02-28 10:50:09 +01:00
ceb28450ee
Try to string method 2023-02-28 10:37:01 +01:00
bd9e48eca8
Add variables 2023-02-28 10:31:33 +01:00
2ce8d1c01c
Try some thing 2023-02-28 10:27:22 +01:00
08e8cdcdaa
use @ 2023-02-28 10:23:32 +01:00
7728f8a272
Try multinode proxysql 2023-02-28 10:20:06 +01:00
9dd730fcea
New format for multinode container names 2023-02-28 09:52:56 +01:00
58a2b4f041
Only run autoreboot in test 2023-02-28 09:07:56 +01:00
ed764ad8fd
Containers also renamed 2023-02-28 08:50:39 +01:00
ebf5e458f6
Rename network 2023-02-28 08:40:16 +01:00
f810c1cba7
Rename network 2023-02-28 08:35:05 +01:00
4d36d4b44e
Rename network 2023-02-28 08:30:08 +01:00
6c528254d7
Rename network 2023-02-28 08:29:05 +01:00
88f7c95679
Fix issue with key/secret on multinode 2023-02-27 18:01:38 +01:00
1588d5869b
Remove underscore for user name 2023-02-27 15:18:14 +01:00
5a8bca9de7
Syntax error 2023-02-27 10:19:34 +01:00
b62746ac66
Add multinode_db class 2023-02-27 10:14:26 +01:00
dec2ebd0e7
Multinode needs php_memory_limit set 2023-02-25 08:30:43 +01:00
430521d96d
Update scripts to use rclone -c to use checksums instead of mtime 2023-02-24 10:18:40 +01:00
eec40a23ba
Fix swap once again 2023-02-23 16:52:36 +01:00
2d50ca2dac
Use crypt swap 2023-02-23 16:26:31 +01:00
d378b5f2da
Skip over statistics 2023-02-23 16:07:01 +01:00
746859e70f
Remove reference to script that is gone 2023-02-23 15:48:47 +01:00
3712e6228c
Redis sentinel no longer used 2023-02-23 15:44:55 +01:00
6a1895d48e
Merge branch 'main' into stable 2023-02-23 10:33:39 +01:00
98bf6d47b2
Make php memory limit configurable 2023-02-23 10:33:10 +01:00
d4b91c5400
to -> from 2023-02-21 15:13:02 +01:00
121f231397
to -> from 2023-02-21 15:12:24 +01:00
d4668a5086
ip is called to now 2023-02-21 15:05:47 +01:00
707d5cfe32
ip is called to now 2023-02-21 15:04:56 +01:00
5ac40a8bbe
Merge branch 'main' into stable 2023-02-17 17:04:28 +01:00
b132b363cd
Wrapp in if clause for nftables 2023-02-17 17:03:49 +01:00
20755e1361
Wrong place 2023-02-17 16:39:19 +01:00
e06f327f95
Wrong place 2023-02-17 16:38:28 +01:00
93b5b9352e
Merge branch 'main' into stable 2023-02-17 16:36:20 +01:00
c760a04551
Specify interface name 2023-02-17 16:35:57 +01:00
fc32dfe6fd
Merge branch 'main' into stable 2023-02-17 16:27:43 +01:00
dd6e75cc32
from -> allow_clients 2023-02-17 16:26:22 +01:00
5f1163e257
Missing comma 2023-02-17 16:25:15 +01:00
a16ff712a2
Merge branch 'main' of github.com:SUNET/drive-puppet 2023-02-17 16:25:03 +01:00
599444f537
Missing comma 2023-02-17 16:24:19 +01:00
e013d5baa7
Use ufw from puppet sunet instead 2023-02-17 16:22:22 +01:00
ddc3f673d3
Use ufw from puppet sunet instead 2023-02-17 16:21:48 +01:00
f94b66e3c1
Use ufw from puppet sunet instead 2023-02-17 14:46:22 +01:00
3979367a7d
No longer running with aufs 2023-02-17 13:12:10 +01:00
6dae356334
Fix scriptservers list users as well 2023-02-17 11:09:22 +01:00
edddcbc5da
Use node1 everywhere for statistics 2023-02-17 10:28:18 +01:00
4c014aaa97
Run cron on all servers according to nextcloud [Ticket#9642120] this is safe, also sync users to gss from all 2023-02-15 13:15:34 +01:00
f371fbc34d
SUNET now has correct named redis cluster so gss can use the other 2023-02-15 12:04:05 +01:00
a1ce98a14d
Makeswap script 2023-02-14 12:07:43 +01:00
d4c557b290
Use node3 always 2023-02-14 11:25:16 +01:00
9e18ee62e8
Use ip address and don't check cert 2023-02-13 13:56:54 +01:00
028c358c72
Use ip address and don't check cert 2023-02-13 13:53:28 +01:00
28b54a74a0
Add host header and use ip address 2023-02-13 13:36:00 +01:00
118 changed files with 2895 additions and 1307 deletions

33
facts.d/nc_versions.sh Executable file
View file

@ -0,0 +1,33 @@
#!/bin/bash
# Facter external fact: report the configured Nextcloud version for every
# customer/environment pair, read from the cosmos hiera data in the local
# repo checkout. Emits "nextcloud_version_<env>_<customer>=<version>" lines.
# Only meaningful on monitor hosts; everywhere else exit without output.
[[ $(hostname) =~ monitor ]] || exit 0

repo="/var/cache/cosmos/repo"
common="${repo}/global/overlay/etc/hiera/data/common.yaml"

# print_fact <customer> <environment> <version>
# Prints one fact line. A group-level value of 'null' means the group.yaml
# did not override the version, so fall back to the global default in
# common.yaml (queried via the loop-global ${key}).
function print_fact {
  local fact_customer=${1}
  local fact_environment=${2}
  local fact_version=${3}
  if [[ ${fact_version} == 'null' ]]; then
    fact_version=$(yq -r ".${key}" "${common}")
  fi
  echo "nextcloud_version_${fact_environment}_${fact_customer}=${fact_version}"
}

for environment in test prod; do
  key="nextcloud_version_${environment}"
  # Multinode customers all share the multinode-common group.yaml.
  for customer in $(yq -r '.multinode_mapping | keys| .[]' "${common}"); do
    version=$(yq -r ".${key}" "${repo}/multinode-common/overlay/etc/hiera/data/group.yaml")
    print_fact "${customer}" "${environment}" "${version}"
  done
  # Full nodes each have their own <customer>-common group.yaml.
  for customer in $(yq -r '.fullnodes[]' "${common}"); do
    version=$(yq -r ".${key}" "${repo}/${customer}-common/overlay/etc/hiera/data/group.yaml")
    print_fact "${customer}" "${environment}" "${version}"
  done
done

View file

@ -0,0 +1,6 @@
net.core.rmem_max=67108864
net.core.wmem_max=67108864
net.ipv4.tcp_rmem=4096 87380 33554432
net.ipv4.tcp_wmem=4096 87380 33554432
net.core.default_qdisc=fq
net.ipv4.tcp_congestion_control=bbr

View file

@ -1,14 +1,8 @@
# Let's determine who the customer is by looking at the hostname
function sunetdrive::get_customer() >> String {
$hostnameparts = split($facts['fqdn'],'\.')
$hostnameparts = split($facts['networking']['fqdn'],'\.')
if $hostnameparts[1] == 'drive' {
if $hostnameparts[0] =~ /^gss/ {
return 'gss'
} elsif $hostnameparts[0] =~ /^lookup/ {
return 'lookup'
} else {
return 'common'
}
return 'common'
} elsif $hostnameparts[0] =~ /idp-proxy/ {
return 'common'
}

View file

@ -1,6 +1,6 @@
# Let's determine where we are by looking at the hostname
function sunetdrive::get_environment() >> String {
$hostname = $facts['fqdn']
$hostname = $facts['networking']['fqdn']
if $hostname =~ /^.*\.drive\.sunet\.se$/ {
if $hostname =~ /^.*\.pilot\.drive\.sunet\.se$/ {
return 'pilot'

View file

@ -1,4 +1,4 @@
# Let's determine where we are by looking at the hostname
function sunetdrive::get_node_number() >> Integer {
Integer(regsubst($::fqdn, /^[a-zA-Z\-]+(\d).*$/, '\\1'))
Integer(regsubst($facts['networking']['fqdn'], /^[a-zA-Z\-]+(\d).*$/, '\\1'))
}

View file

@ -5,6 +5,7 @@ define sunetdrive::app_type (
$override_config = undef,
$override_compose = undef
) {
include sunet::packages::netcat_openbsd
# Config from group.yaml and customer specific conf
$environment = sunetdrive::get_environment()
$customer = sunetdrive::get_customer()
@ -14,8 +15,9 @@ define sunetdrive::app_type (
# The config used
$config = $override_config
# Other settings
$admin_password = $config[ 'admin_password' ]
$dbhost = $config[ 'dbhost' ]
$dbname = $config[ 'dbname' ]
$dbuser = $config[ 'dbuser' ]
$instanceid = $config[ 'instanceid' ]
$mysql_user_password = $config[ 'mysql_user_password' ]
$passwordsalt = $config[ 'passwordsalt' ]
@ -28,8 +30,10 @@ define sunetdrive::app_type (
$config = hiera_hash($environment)
$skeletondirectory = $config['skeletondirectory']
# Other settings
$admin_password = safe_hiera('admin_password')
$dbhost = 'proxysql_proxysql_1'
$dbname = 'nextcloud'
$dbuser = 'nextcloud'
$full_backup_retention = hiera('full_backup_retention')
$instanceid = safe_hiera('instanceid')
$mysql_user_password = safe_hiera('mysql_user_password')
$passwordsalt = safe_hiera('passwordsalt')
@ -39,8 +43,8 @@ define sunetdrive::app_type (
$s3_secret = safe_hiera('s3_secret')
$secret = safe_hiera('secret')
}
$twofactor_enforced_groups = hiera_array('twofactor_enforced_groups')
$twofactor_enforced_excluded_groups = hiera_array('twofactor_enforced_excluded_groups')
$twofactor_enforced_groups = $config['twofactor_enforced_groups']
$twofactor_enforced_excluded_groups = $config['twofactor_enforced_excluded_groups']
$nextcloud_version = hiera("nextcloud_version_${environment}")
$nextcloud_version_string = split($nextcloud_version, '[-]')[0]
# Common settings for multinode and full nodes
@ -51,18 +55,22 @@ define sunetdrive::app_type (
$site_name = $config['site_name']
$trusted_domains = $config['trusted_domains']
$trusted_proxies = $config['trusted_proxies']
if $location == 'kau-prod' {
$php_memory_limit_mb = 2048
} else {
$php_memory_limit_mb = 512
}
if $::facts['dockerhost2'] == 'yes' {
$hostnet = true
}
# These are encrypted values from local.eyaml
$gss_jwt_key = safe_hiera('gss_jwt_key')
$smtppassword = safe_hiera('smtp_password')
#These are global values from common.yaml
$gs_enabled = hiera('gs_enabled')
$gs_federation = hiera('gs_federation')
$gss_master_admin = hiera_array('gss_master_admin')
$gss_master_url = hiera("gss_master_url_${environment}")
$lookup_server = hiera("lookup_server_${environment}")
$mail_domain = hiera("mail_domain_${environment}")
$mail_smtphost = hiera("mail_smtphost_${environment}")
$mail_from_address = hiera("mail_from_address_${environment}")
@ -70,17 +78,63 @@ define sunetdrive::app_type (
$smtpuser = hiera("smtp_user_${environment}")
$tug_office = hiera_array('tug_office')
# This is a global value from common.yaml but overridden in the gss-servers local.yaml
$gss_mode = hiera('gss_mode')
# These are global values from common.yaml but can be overridden in group.yaml
$drive_email_template_text_left = $config['drive_email_template_text_left']
$drive_email_template_plain_text_left = $config['drive_email_template_plain_text_left']
$drive_email_template_url_left = $config['drive_email_template_url_left']
$lb_servers = hiera_hash($environment)['lb_servers']
$document_servers = hiera_hash($environment)['document_servers']
# set up cronjob on node3
if $::fqdn[0,5] == 'node3' {
# Calculate some values
$expiration_months = max(12, $full_backup_retention)
$expiration_days_min = $expiration_months * 31
$expiration_days_max = $expiration_months * 31 + 93
unless $is_multinode{
user { 'www-data': ensure => present, system => true }
file { '/usr/local/bin/get_containers':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/get_containers'),
mode => '0744',
}
if ($nodenumber == 3) {
file { '/usr/lib/nagios/plugins/check_nextcloud_mounts.py':
ensure => present,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/check_nextcloud_mounts.py'),
mode => '0744',
}
sunet::sudoer {'nagios_run_nextcloud_mounts_command':
user_name => 'nagios',
collection => 'nrpe_nextcloud_mounts_check',
command_line => '/usr/lib/nagios/plugins/check_nextcloud_mounts.py'
}
sunet::nagios::nrpe_command {'check_nextcloud_mounts':
command_line => '/usr/bin/sudo /usr/lib/nagios/plugins/check_nextcloud_mounts.py'
}
}
if ($nodenumber == 3) {
file { '/usr/local/bin/scan_external_mounts':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/scan_external_mounts.sh'),
mode => '0744',
}
sunet::scriptherder::cronjob { 'scriptherder_scan_external_mounts':
cmd => '/usr/local/bin/scan_external_mounts',
hour => '1',
minute => '20',
ok_criteria => ['exit_status=0','max_age=2d'],
warn_criteria => ['exit_status=1','max_age=3d'],
}
}
file { '/opt/nextcloud/cron.sh':
ensure => file,
owner => 'root',
@ -93,26 +147,24 @@ define sunetdrive::app_type (
user => 'root',
minute => '*/5',
}
if $location =~ /^extern/ {
file { '/opt/nextcloud/user-sync.sh':
ensure => file,
owner => 'root',
group => 'root',
mode => '0700',
content => template('sunetdrive/application/user-sync.erb.sh'),
}
-> cron { 'gss_user_sync':
command => '/opt/nextcloud/user-sync.sh',
user => 'root',
minute => '*/5',
}
file { '/opt/nextcloud/user-sync.sh':
ensure => absent,
}
file { '/usr/local/bin/nocc':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/nocc.erb'),
mode => '0740',
}
file { '/etc/sudoers.d/99-nocc':
ensure => file,
content => "script ALL=(root) NOPASSWD: /usr/local/bin/nocc\n",
mode => '0440',
owner => 'root',
group => 'root',
}
}
#Create users
unless $is_multinode{
user { 'www-data': ensure => present, system => true }
package { 'aufs-tools': ensure => latest, provider => 'apt' }
file { '/usr/local/bin/occ':
ensure => present,
force => true,
@ -136,12 +188,21 @@ define sunetdrive::app_type (
content => template('sunetdrive/application/upgrade23-25.erb.sh'),
mode => '0744',
}
file { '/usr/local/bin/remount_user_bucket_as_project.sh':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/remount_user_bucket_as_project.sh'),
mode => '0744',
}
file { '/opt/rotate/conf.d/nextcloud.conf':
ensure => file,
force => true,
owner => 'root',
group => 'root',
content => "#This file is managed by puppet\n#filename:retention days:maxsize mb\n/opt/nextcloud/nextcloud.log:180:256\n",
content => "#This file is managed by puppet
#filename:retention days:maxsize mb\n/opt/nextcloud/nextcloud.log:180:256\n/opt/nextcloud/audit.log:180:256\n",
mode => '0644',
}
file { '/opt/rotate/conf.d/redis.conf':
@ -190,7 +251,14 @@ define sunetdrive::app_type (
force => true,
owner => 'www-data',
group => 'root',
mode => '0644',
mode => '0640',
}
file { '/opt/nextcloud/audit.log':
ensure => file,
force => true,
owner => 'www-data',
group => 'root',
mode => '0640',
}
file { '/opt/nextcloud/rclone.conf':
ensure => file,
@ -287,23 +355,7 @@ define sunetdrive::app_type (
mode => '0744',
}
}
if $location =~ /^gss-test/ {
file { '/opt/nextcloud/mappingfile.json':
ensure => present,
owner => 'www-data',
group => 'root',
content => template('sunetdrive/application/mappingfile-test.json.erb'),
mode => '0644',
}
} elsif $location =~ /^gss/ {
file { '/opt/nextcloud/mappingfile.json':
ensure => present,
owner => 'www-data',
group => 'root',
content => template('sunetdrive/application/mappingfile-prod.json.erb'),
mode => '0644',
}
} elsif $location =~ /^kau/ {
if $location =~ /^kau/ {
file { '/mnt':
ensure => directory,
owner => 'www-data',
@ -340,10 +392,17 @@ define sunetdrive::app_type (
compose_filename => 'docker-compose.yml',
description => 'Nextcloud application',
}
sunet::misc::ufw_allow { 'https':
from => '0.0.0.0/0',
port => 443,
if $::facts['sunet_nftables_enabled'] == 'yes' {
sunet::nftables::docker_expose { 'https':
allow_clients => ['any'],
port => 443,
iif => 'ens3',
}
} else {
sunet::misc::ufw_allow { 'https':
from => '0.0.0.0/0',
port => 443,
}
}
}
}

View file

@ -38,8 +38,8 @@ define sunetdrive::cloudimage(
resolver => ['130.242.80.14', '130.242.80.99'],
search => $search,
#
repo => $::cosmos_repo_origin_url,
tagpattern => $::cosmos_tag_pattern,
repo => $facts['cosmos_repo_origin_url'],
tagpattern => $facts['cosmos_tag_pattern'],
#
cpus => $cpus,
memory => $memory,

53
manifests/common.pp Normal file
View file

@ -0,0 +1,53 @@
# Common class
# Baseline configuration applied to every sunetdrive host: firewall
# selection (nftables or UFW, driven by the sunet_nftables_enabled fact),
# the sysstat activity collector, and the needrestart package.
class sunetdrive::common {
include sunet::tools
include sunet::motd
include apt
include apparmor
include sunet::packages::jq
# Firewall: fall back to UFW unless the host is flagged for nftables.
if $::facts['sunet_nftables_enabled'] != 'yes' {
warning('Enabling UFW')
include ufw
} else {
# Kubernetes workers/controllers (kubew*/kubec*) get nftables installed
# but left disabled; all other nftables hosts get it enabled.
if $facts['networking']['hostname'] =~ /^kube[wc]/ {
warning('Setting nftables to installed but disabled')
ensure_resource ('class','sunet::nftables::init', { enabled => false })
} else {
warning('Enabling nftables')
ensure_resource ('class','sunet::nftables::init', { })
}
}
package {'sysstat': ensure => 'latest'}
package {'needrestart': ensure => installed}
service {'sysstat': provider => 'systemd'}
# Turn on sysstat data collection (ships disabled by default).
file_line { 'enable_sa':
ensure => 'present',
line => 'ENABLED="true"',
path => '/etc/default/sysstat',
match => 'ENABLED="false"',
require => Package['sysstat'],
}
# Tighten the sampling interval from the packaged 10 minutes to 2 minutes;
# the comment line is updated to match the new schedule below.
file_line { 'sa_cron_comment':
ensure => 'present',
line => '# Activity reports every 2 minutes everyday',
path => '/etc/cron.d/sysstat',
match => '^#\ Activity\ reports\ every\ 10\ minutes\ everyday',
require => Package['sysstat'],
notify => Service['sysstat'],
}
file_line { 'sa_cron':
ensure => 'present',
line => '*/2 * * * * root command -v debian-sa1 > /dev/null && debian-sa1 1 1',
path => '/etc/cron.d/sysstat',
match => '^5-55/10',
require => Package['sysstat'],
notify => Service['sysstat'],
}
# On Debian sysstat collection runs via a systemd timer rather than cron,
# so make sure the timer is enabled and started.
if $::facts['os']['distro']['id'] == 'Debian' {
exec { 'sysstat_systemd_timer':
command => 'systemctl enable --now sysstat-collect.timer',
unless => 'systemctl is-enabled --quiet sysstat-collect.timer',
}
}
}

View file

@ -4,59 +4,52 @@ define sunetdrive::db_type(
$bootstrap=undef,
$location=undef,
$override_config = undef,
$override_compose = undef)
$override_compose = undef,
)
{
# Config from group.yaml
$environment = sunetdrive::get_environment()
$mariadb_version = hiera("mariadb_version_${environment}")
$is_multinode = (($override_config != undef) and ($override_compose != undef))
if $is_multinode {
$config = $override_config
$mysql_root_password = $config['mysql_root_password']
$mysql_user_password = $config['mysql_user_password']
$backup_password = $config['backup_password']
$mariadb_dir = $config['mariadb_dir']
$mycnf_path = $config['mycnf_path']
$server_id = '1000'
} else {
$config = hiera_hash($environment)
$mysql_root_password = safe_hiera('mysql_root_password')
$backup_password = safe_hiera('backup_password')
$proxysql_password = safe_hiera('proxysql_password')
$mysql_user_password = safe_hiera('mysql_user_password')
$mariadb_dir = '/etc/mariadb'
$mycnf_path = 'sunetdrive/mariadb/my.cnf.erb'
$server_id = 1000 + Integer($facts['hostname'][-1])
ensure_resource('file',"${mariadb_dir}", { ensure => directory, recurse => true } )
$dirs = ['datadir', 'init', 'conf', 'backups', 'scripts' ]
$dirs.each |$dir| {
ensure_resource('file',"${mariadb_dir}/${dir}", { ensure => directory, recurse => true } )
}
$config = hiera_hash($environment)
$mysql_root_password = safe_hiera('mysql_root_password')
$backup_password = safe_hiera('backup_password')
$proxysql_password = safe_hiera('proxysql_password')
$mysql_user_password = safe_hiera('mysql_user_password')
$roundcube_password = safe_hiera('roundcube_password')
$mariadb_dir = '/etc/mariadb'
$mycnf_path = 'sunetdrive/mariadb/my.cnf.erb'
$server_id = 1000 + Integer($facts['networking']['hostname'][-1])
ensure_resource('file',$mariadb_dir, { ensure => directory } )
$dirs = ['datadir', 'init', 'conf', 'backups', 'scripts' ]
$dirs.each |$dir| {
ensure_resource('file',"${mariadb_dir}/${dir}", { ensure => directory, recurse => true } )
}
$nextcloud_ip = $config['app']
unless $is_multinode {
$db_ip = $config['db']
$db_ipv6 = $config['db_v6']
$backup_ip = $config['backup']
$backup_ipv6 = $config['backup_v6']
$ports = [3306, 4444, 4567, 4568]
sunet::misc::ufw_allow { 'mariadb_ports':
from => $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6,
port => $ports,
}
sunet::system_user {'mysql': username => 'mysql', group => 'mysql' }
}
if $location =~ /^lookup/ {
$sql_files = ['02-backup_user.sql', '03-proxysql.sql', '05-lookup.sql']
$db_ip = $config['db']
$db_ipv6 = $config['db_v6']
$backup_ip = $config['backup']
$backup_ipv6 = $config['backup_v6']
$ports = [3306, 4444, 4567, 4568]
if $location =~ /^multinode/ {
$from = $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6 + $config['kube'] + $config['kube_v6']
} elsif $location == 'sunet-prod' {
$from = $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6 + $config['imap'] + $config['imap_v6'] + $config['smtp'] + $config['smtp_v6'] + $config['webmail'] + $config['webmail_v6']
} elsif $location == 'sunet-test' {
$from = $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6 + $config['imap'] + $config['imap_v6'] + $config['smtp'] + $config['smtp_v6'] + $config['webmail'] + $config['webmail_v6'] + $config['calendar'] + $config['calendar_v6']
} else {
$sql_files = ['02-backup_user.sql', '03-proxysql.sql', '04-nextcloud.sql']
$from = $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6
}
sunet::misc::ufw_allow { 'mariadb_ports':
from => $from,
port => $ports,
}
sunet::system_user {'mysql': username => 'mysql', group => 'mysql' }
$sql_files = ['02-backup_user.sql', '03-proxysql.sql', '04-nextcloud.sql', '05-roundcube.sql']
$sql_files.each |$sql_file|{
file { "${mariadb_dir}/init/${sql_file}":
ensure => present,
@ -74,6 +67,11 @@ define sunetdrive::db_type(
content => template($mycnf_path),
mode => '0744',
}
file { '/usr/local/bin/purge-binlogs':
ensure => present,
content => template('sunetdrive/mariadb/purge-binlogs.erb.sh'),
mode => '0744',
}
file { "${mariadb_dir}/scripts/run_manual_backup_dump.sh":
ensure => present,
content => template('sunetdrive/mariadb/run_manual_backup_dump.erb.sh'),
@ -84,39 +82,47 @@ define sunetdrive::db_type(
content => template('sunetdrive/mariadb/rename-docker.sh'),
mode => '0744',
}
if $is_multinode {
$docker_compose = $override_compose
} else {
file { '/usr/local/bin/size-test':
ensure => present,
content => template('sunetdrive/mariadb/size-test.erb'),
mode => '0744',
}
file { '/usr/local/bin/status-test':
ensure => present,
content => template('sunetdrive/mariadb/status-test.erb'),
mode => '0744',
}
file { "/etc/sudoers.d/99-size-test":
ensure => file,
content => "script ALL=(root) NOPASSWD: /usr/local/bin/size-test\n",
mode => '0440',
owner => 'root',
group => 'root',
}
file { "/etc/sudoers.d/99-status-test":
ensure => file,
content => "script ALL=(root) NOPASSWD: /usr/local/bin/status-test\n",
mode => '0440',
owner => 'root',
group => 'root',
}
$docker_compose = sunet::docker_compose { 'drive_mariadb_docker_compose':
content => template('sunetdrive/mariadb/docker-compose_mariadb.yml.erb'),
service_name => 'mariadb',
compose_dir => '/opt/',
compose_filename => 'docker-compose.yml',
description => 'Mariadb server',
}
sunet::scriptherder::cronjob { 'purge_binlogs':
cmd => '/usr/local/bin/purge-binlogs',
hour => '6',
minute => '0',
ok_criteria => ['exit_status=0','max_age=2d'],
warn_criteria => ['exit_status=1','max_age=3d'],
}
file { '/usr/local/bin/mysql':
ensure => present,
content => template('sunetdrive/mariadb/mysql.erb.sh'),
mode => '0744',
}
file { '/usr/local/bin/size-test':
ensure => present,
content => template('sunetdrive/mariadb/size-test.erb'),
mode => '0744',
}
file { '/usr/local/bin/status-test':
ensure => present,
content => template('sunetdrive/mariadb/status-test.erb'),
mode => '0744',
}
file { '/etc/sudoers.d/99-size-test':
ensure => file,
content => "script ALL=(root) NOPASSWD: /usr/local/bin/size-test\n",
mode => '0440',
owner => 'root',
group => 'root',
}
file { '/etc/sudoers.d/99-status-test':
ensure => file,
content => "script ALL=(root) NOPASSWD: /usr/local/bin/status-test\n",
mode => '0440',
owner => 'root',
group => 'root',
}
$docker_compose = sunet::docker_compose { 'drive_mariadb_docker_compose':
content => template('sunetdrive/mariadb/docker-compose_mariadb.yml.erb'),
service_name => 'mariadb',
compose_dir => '/opt/',
compose_filename => 'docker-compose.yml',
description => 'Mariadb server',
}
}

View file

@ -1,4 +1,3 @@
include apt
# Wrapper for sunet::dockerhost to do this specific things
class sunetdrive::dockerhost(
String $version = safe_hiera('docker_version'),
@ -8,6 +7,8 @@ class sunetdrive::dockerhost(
String $docker_args = '',
Optional[String] $docker_dns = undef,
String $storage_driver = 'aufs',
Boolean $write_daemon_config = false,
Boolean $enable_ipv6 = false,
) {
if $version == 'NOT_SET_IN_HIERA' {
fail('Docker version not set in Hiera')
@ -26,6 +27,8 @@ class sunetdrive::dockerhost(
storage_driver => $storage_driver,
docker_network => true, # let docker choose a network for the 'docker' bridge
compose_version => $compose_version,
write_daemon_config => $write_daemon_config,
enable_ipv6 => $enable_ipv6,
}
file { '/etc/apt/preferences.d/containerd.io.pref':
ensure => absent,

View file

@ -4,10 +4,8 @@ class sunetdrive::infra_script (
$location = undef
) {
$environment = sunetdrive::get_environment()
$customer = "common"
$customer = 'common'
$config = hiera_hash($environment)
$gss_backup_server = $config['gss_backup_server']
$lookup_backup_server = $config['lookup_backup_server']
$ssh_config = "Host *.sunet.se
User script
IdentityFile /root/.ssh/id_script"
@ -59,14 +57,16 @@ class sunetdrive::infra_script (
mode => '0700',
}
sunet::scriptherder::cronjob { 'backupgssdb':
cmd => "/root/tasks/backupdb.sh ${gss_backup_server}",
ensure => 'absent',
cmd => 'true',
hour => '2',
minute => '0',
ok_criteria => ['exit_status=0','max_age=2d'],
warn_criteria => ['exit_status=1','max_age=3d'],
}
sunet::scriptherder::cronjob { 'backuplookupdb':
cmd => "/root/tasks/backupdb.sh ${lookup_backup_server}",
ensure => 'absent',
cmd => 'true',
hour => '2',
minute => '0',
ok_criteria => ['exit_status=0','max_age=2d'],

38
manifests/jupyter_site.pp Normal file
View file

@ -0,0 +1,38 @@
#Class for SUNET-Drive-portal-Server
# Deploys the jupyter.sunet.dev web site as a docker-compose service
# fronted by nginx, and opens HTTP/HTTPS in the firewall.
class sunetdrive::jupyter_site (
String $site_version = '0.0.1-1'
) {
$domain = 'jupyter.sunet.dev'
sunet::docker_compose { 'portal_compose':
content => template('sunetdrive/jupyter_site/docker-compose.erb.yaml'),
service_name => 'jupyter_site',
compose_dir => '/opt/',
compose_filename => 'docker-compose.yml',
description => 'Web server',
}
# sunet::docker_compose does not create the bind-mount parent directory,
# so create it ourselves before the nginx subdirectories below.
exec { 'workaround_docker_compose_dir':
command => 'mkdir -p /opt/jupyter_site/nginx',
unless => 'test -d /opt/jupyter_site/nginx',
}
# Bind-mounted nginx state/config directories used by the compose file.
$nginx_dirs = ['acme', 'certs', 'conf', 'dhparam', 'html', 'vhost']
$nginx_dirs.each | $dir| {
file { "/opt/jupyter_site/nginx/${dir}":
ensure => directory,
owner => 'root',
group => 'root',
mode => '0751',
}
}
sunet::misc::ufw_allow { 'https':
from => '0.0.0.0/0',
port => 443,
}
# For acme and redirect
sunet::misc::ufw_allow { 'http':
from => '0.0.0.0/0',
port => 80,
}
}

View file

@ -1,42 +0,0 @@
#Class for SUNET-Drive-Lookup-Server
class sunetdrive::lookup (
$bootstrap = undef,
$location = undef
) {
$environment = sunetdrive::get_environment()
# Firewall settings
$nextcloud_ip = hiera_array("${location}_app", [])
$tug_office = hiera_array('tug_office')
$dbhost = 'proxysql_proxysql_1'
$gss_jwt_key = safe_hiera('gss_jwt_key')
$mysql_user_password = safe_hiera('mysql_user_password')
$lookup_version = hiera("lookup_version_${environment}")
#Create users
user { 'www-data': ensure => present, system => true }
file { '/opt/lookup/config.php':
ensure => present,
owner => 'www-data',
group => 'root',
content => template('sunetdrive/lookup/config.php.erb'),
mode => '0644',
}
sunet::docker_compose { 'drive_lookup_docker_compose':
content => template('sunetdrive/lookup/docker-compose_lookup.yml.erb'),
service_name => 'lookup',
compose_dir => '/opt/',
compose_filename => 'docker-compose.yml',
description => 'Lookup server',
}
sunet::misc::ufw_allow { 'https':
from => '0.0.0.0/0',
port => 443,
}
}

View file

@ -4,10 +4,10 @@ class sunetdrive::mariadb (
$location = undef,
$tag_mariadb = undef,
$override_config = undef,
$override_compose = undef
$override_compose = undef,
) {
$quorum_id = $::fqdn
$quorum_id = $facts['networking']['fqdn']
$quorum_password = safe_hiera('quorum_password')
$db = sunetdrive::db_type { 'base_db':
bootstrap => $bootstrap,

View file

@ -1,11 +1,13 @@
# This is a asyncronous replica of the Maria DB Cluster for SUNET Drive
class sunetdrive::mariadb_backup($tag_mariadb=undef, $location=undef) {
include sunet::packages::netcat_openbsd
$dirs = [ 'datadir', 'init', 'conf', 'backups' ]
$dirs.each | $dir | {
ensure_resource('file',"/opt/mariadb_backup/${dir}", { ensure => directory, recurse => true } )
}
# Config from group.yaml
$environment = sunetdrive::get_environment()
$mariadb_version = hiera("mariadb_version_${environment}")
$config = hiera_hash($environment)
$first_db = $config['first_db']

View file

@ -4,11 +4,16 @@ class sunetdrive::multinode (
$location = undef
)
{
$myname = $facts['hostname']
include sunet::packages::yq
$myname = $facts['networking']['hostname']
$is_multinode = true;
$environment = sunetdrive::get_environment()
$lb_servers = hiera_hash($environment)['lb_servers']
$document_servers = hiera_hash($environment)['document_servers']
$nextcloud_ip = hiera_hash($environment)['app']
$db_ip = hiera_hash($environment)['db']
$admin_password = hiera('admin_password')
$cluster_admin_password = hiera('cluster_admin_password')
$twofactor_enforced_groups = []
$twofactor_enforced_excluded_groups = []
@ -22,11 +27,70 @@ class sunetdrive::multinode (
nil
}
}
$php_memory_limit_mb = 512
$nodenumber = $::fqdn[9,1]
$customers = $tempcustomers - nil
$passwords = $allnames.map | $index, $customer | {
hiera("${customer}_mysql_user_password")
}
$transaction_persistent = 1
$monitor_password = hiera('proxysql_password')
user { 'www-data': ensure => present, system => true }
sunet::system_user {'mysql': username => 'mysql', group => 'mysql' }
ensure_resource('file', '/opt/nextcloud' , { ensure => directory, recurse => true } )
file { '/usr/local/bin/get_containers':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/get_containers'),
mode => '0744',
}
file { '/usr/lib/nagios/plugins/check_nextcloud_mounts.py':
ensure => present,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/check_nextcloud_mounts.py'),
mode => '0744',
}
sunet::sudoer {'nagios_run_nextcloud_mounts_command':
user_name => 'nagios',
collection => 'nrpe_nextcloud_mounts_check',
command_line => '/usr/lib/nagios/plugins/check_nextcloud_mounts.py'
}
sunet::nagios::nrpe_command {'check_nextcloud_mounts':
command_line => '/usr/bin/sudo /usr/lib/nagios/plugins/check_nextcloud_mounts.py'
}
file { '/usr/local/bin/scan_external_mounts':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/scan_external_mounts.sh'),
mode => '0744',
}
sunet::scriptherder::cronjob { 'scriptherder_scan_external_mounts':
cmd => '/usr/local/bin/scan_external_mounts',
hour => '1',
minute => '20',
ok_criteria => ['exit_status=0','max_age=2d'],
warn_criteria => ['exit_status=1','max_age=3d'],
}
file { '/usr/local/bin/nocc':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/application/nocc.erb'),
mode => '0740',
}
file { '/etc/sudoers.d/99-nocc':
ensure => file,
content => "script ALL=(root) NOPASSWD: /usr/local/bin/nocc\n",
mode => '0440',
owner => 'root',
group => 'root',
}
file { '/usr/local/bin/occ':
ensure => present,
force => true,
@ -43,19 +107,30 @@ class sunetdrive::multinode (
group => 'root',
}
file { '/usr/local/bin/upgrade23-25.sh':
ensure => absent,
}
file { '/usr/local/bin/get_paying_customers':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/multinode/upgrade23-25.erb.sh'),
content => template('sunetdrive/multinode/get_paying_customers.erb.sh'),
mode => '0744',
}
file { '/usr/local/bin/get_containers':
file { '/usr/local/bin/get_non_paying_customers':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/multinode/get_containers'),
content => template('sunetdrive/multinode/get_non_paying_customers.erb.sh'),
mode => '0744',
}
file { '/usr/local/bin/restart_and_prune':
ensure => present,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/multinode/restart_and_prune.erb.sh'),
mode => '0744',
}
file { '/usr/local/bin/add_admin_user':
@ -74,12 +149,29 @@ class sunetdrive::multinode (
content => template('sunetdrive/multinode/prune.erb.sh'),
mode => '0744',
}
cron { 'multinode_prune':
command => '/opt/nextcloud/prune.sh',
require => File['/opt/nextcloud/prune.sh'],
user => 'root',
minute => '25',
hour => '3',
file { '/opt/proxysql/proxysql.cnf':
ensure => file,
force => true,
owner => 'root',
group => 'root',
content => template('sunetdrive/multinode/proxysql.cnf.erb'),
mode => '0644',
}
sunet::scriptherder::cronjob { 'prune_non_paying':
cmd => '/usr/local/bin/restart_and_prune',
weekday => '1-6',
hour => '2',
minute => '45',
ok_criteria => ['exit_status=0','max_age=3d'],
warn_criteria => ['exit_status=1','max_age=5d'],
}
sunet::scriptherder::cronjob { 'prune_all_paying':
cmd => '/usr/local/bin/restart_and_prune include_paying',
weekday => '0',
hour => '2',
minute => '45',
ok_criteria => ['exit_status=0','max_age=7d'],
warn_criteria => ['exit_status=1','max_age=9d'],
}
file { '/opt/nextcloud/apache.php.ini':
ensure => file,
@ -162,19 +254,36 @@ MACAddressPolicy=none'
hour => '0',
weekday => '0',
}
cron { 'multinode_cron':
command => '/opt/nextcloud/cron.sh',
require => File['/opt/nextcloud/cron.sh'],
user => 'root',
minute => '*/10',
}
# if $nodenumber == '2' {
# cron { 'add_back_bucket_for_karin_nordgren':
# command => '(/usr/local/bin/occ nextcloud-kmh-app-1 files_external:list karin_nordgren@kmh.se && /home/script/bin/create_bucket.sh nextcloud-kmh-app-1 karin_nordgren@kmh.se karin-nordgren-drive-sunet-se) || /bin/true',
# user => 'root',
# minute => '*/10',
# }
# }
$customers.each | $index, $customer | {
$customer_config_full = hiera_hash($customer)
$customer_config = $customer_config_full[$environment]
cron { "multinode_cron_${customer}":
command => "/opt/nextcloud/cron.sh nextcloud-${customer}-app-1",
require => File['/opt/nextcloud/cron.sh'],
user => 'root',
minute => '*/10',
}
if $environment == 'prod' {
$s3_bucket = "primary-${customer}-drive.sunet.se"
if 'primary_bucket' in $customer_config.keys() {
$s3_bucket = $customer_config['primary_bucket']
} else {
$s3_bucket = "primary-${customer}-drive.sunet.se"
}
$site_name = "${customer}.drive.sunet.se"
$trusted_proxies = ['lb1.drive.sunet.se','lb2.drive.sunet.se', 'lb3.drive.sunet.se', 'lb4.drive.sunet.se']
} else {
$s3_bucket = "primary-${customer}-${environment}.sunet.se"
if 'primary_bucket' in $customer_config.keys() {
$s3_bucket = $customer_config['primary_bucket']
} else {
$s3_bucket = "primary-${customer}-${environment}.sunet.se"
}
$site_name = "${customer}.drive.${environment}.sunet.se"
$trusted_proxies = ["lb1.drive.${environment}.sunet.se","lb2.drive.${environment}.sunet.se",
"lb3.drive.${environment}.sunet.se","lb4.drive.${environment}.sunet.se"]
@ -183,33 +292,54 @@ MACAddressPolicy=none'
$apache_error_path = "/opt/multinode/${customer}/404.html"
$config_php_path = "/opt/multinode/${customer}/config.php"
$cron_log_path ="/opt/multinode/${customer}/cron.log"
$customer_config_full = hiera_hash($customer)
$customer_config = $customer_config_full[$environment]
$dbhost = "mariadb${customer}_db_1"
$dbhost = 'proxysql_proxysql_1'
$dbname = "nextcloud_${customer}"
$dbuser = "nextcloud_${customer}"
$gs_enabled = hiera('gs_enabled')
$gs_federation = hiera('gs_federation')
$gss_master_admin = hiera_array('gss_master_admin')
$gss_master_url = hiera("gss_master_url_${environment}")
$https_port = hiera_hash('multinode_mapping')[$customer]['port']
$lookup_server = hiera("lookup_server_${environment}")
$mail_domain = hiera("mail_domain_${environment}")
$mail_from_address = hiera("mail_from_address_${environment}")
$mail_smtphost = hiera("mail_smtphost_${environment}")
$nextcloud_log_path ="/opt/multinode/${customer}/nextcloud.log"
$nextcloud_version = hiera("nextcloud_version_${environment}")
$audit_log_path ="/opt/multinode/${customer}/audit.log"
if $customer_config['nextcloud_version'] {
$nextcloud_version = $customer_config['nextcloud_version']
} else {
$nextcloud_version = hiera("nextcloud_version_${environment}")
}
$nextcloud_version_string = split($nextcloud_version, '[-]')[0]
$rclone_conf_path = "/opt/multinode/${customer}/rclone.conf"
$redis_conf_dir = "/opt/multinode/${customer}/server"
$redis_conf_path = "${redis_conf_dir}/redis.conf"
$redis_host= "redis${customer}_redis-server_1"
if $::facts['sunet_nftables_enabled'] == 'yes' {
$redis_host= "redis-${customer}-redis-server-1"
} else {
$redis_host= "redis-${customer}_redis-server_1"
}
$s3_host = $customer_config['s3_host']
$s3_usepath = hiera('s3_usepath')
$smtpuser = hiera("smtp_user_${environment}")
$trusted_domains = [$site_name, $facts['fqdn'], 'localhost']
$trusted_domains = [$site_name, $facts['networking']['fqdn'], 'localhost']
$tug_office = hiera_array('tug_office')
if $customer_config['twofactor_enforced_groups'] {
$twofactor_enforced_groups = $customer_config['twofactor_enforced_groups']
}
if $customer_config['twofactor_enforced_excluded_groups'] {
$twofactor_enforced_excluded_groups = $customer_config['twofactor_enforced_excluded_groups']
}
if $customer_config['full_backup_retention'] {
$full_backup_retention = $customer_config['full_backup_retention']
} else {
$full_backup_retention = hiera('full_backup_retention')
}
# Calculate some values
$expiration_months = max(12, $full_backup_retention)
$expiration_days_min = $expiration_months * 31
$expiration_days_max = $expiration_months * 31 + 93
# Secrets from local.eyaml
$admin_password = safe_hiera("${customer}_admin_password")
@ -222,13 +352,14 @@ MACAddressPolicy=none'
$secret = safe_hiera("${customer}_secret")
$passwordsalt= safe_hiera("${customer}_passwordsalt")
$redis_host_password = safe_hiera("${customer}_redis_host_password")
$gss_jwt_key = safe_hiera('gss_jwt_key')
$smtppassword = safe_hiera('smtp_password')
$extra_config = {
admin_password => $admin_password,
backup_password => $backup_password,
dbhost => $dbhost,
dbname => $dbname,
dbuser => $dbuser,
drive_email_template_plain_text_left => hiera($environment)['drive_email_template_plain_text_left'],
drive_email_template_text_left => hiera($environment)['drive_email_template_text_left'],
drive_email_template_url_left => hiera($environment)['drive_email_template_url_left'],
@ -241,18 +372,6 @@ MACAddressPolicy=none'
}
$config = deep_merge($customer_config, $extra_config)
ensure_resource('file', "/opt/multinode/${customer}" , { ensure => directory, recurse => true } )
$dirs = ['datadir', 'init', 'conf', 'scripts' ]
$dirs.each |$dir| {
ensure_resource('file',"${config['mariadb_dir']}/${dir}", { ensure => directory, recurse => true } )
}
ensure_resource('file',"${config['mariadb_dir']}/backups", {
ensure => directory,
owner => 'root',
group => 'script',
mode => '0750',
recurse => true
} )
# Use the other sunetdrive classes with overridden config
$db_ip = ['127.0.0.1']
$app_compose = sunet::docker_compose { "drive_${customer}_app_docker_compose":
@ -275,33 +394,12 @@ MACAddressPolicy=none'
description => "Redis cache server for ${customer}",
require => File[$redis_conf_path],
}
$mariadb_compose = sunet::docker_compose { "drive_mariadb_${customer}_compose":
content => template('sunetdrive/multinode/docker-compose_mariadb.yml.erb'),
service_name => "mariadb-${customer}",
compose_dir => "/opt/multinode/${customer}",
compose_filename => 'docker-compose.yml',
description => "Mariadb server for ${customer}",
owner => 'root',
group => 'script',
mode => '0750',
}
file { "/opt/multinode/${customer}/mariadb-${customer}/do_backup.sh":
ensure => present,
content => template('sunetdrive/mariadb_backup/do_backup.erb.sh'),
mode => '0744',
}
sunetdrive::app_type { "app_${customer}":
location => $location,
override_config => $config,
override_compose => $app_compose,
}
sunetdrive::db_type { "db_${customer}":
location => $location,
override_config => $config,
override_compose => $mariadb_compose,
}
file { $redis_conf_dir:
ensure => directory,
recurse => true,
@ -340,6 +438,13 @@ MACAddressPolicy=none'
group => 'root',
mode => '0644',
}
file { $audit_log_path:
ensure => file,
force => true,
owner => 'www-data',
group => 'root',
mode => '0644',
}
file { $rclone_conf_path:
ensure => present,
owner => 'www-data',
@ -355,10 +460,18 @@ MACAddressPolicy=none'
content => template('sunetdrive/multinode/complete_reinstall.erb.sh'),
mode => '0744',
}
# Open ports
sunet::misc::ufw_allow { "https_port_${customer}":
from => '0.0.0.0',
port => $https_port,
if $::facts['sunet_nftables_enabled'] == 'yes' {
$name = "https_port_${customer}"
ensure_resource('sunet::nftables::ufw_allow_compat', $name, {
from => ['0.0.0.0/0', '::/0'],
port => $https_port,
})
} else {
# Open ports
sunet::misc::ufw_allow { "https_port_${customer}":
from => '0.0.0.0',
port => $https_port,
}
}
}
}

80
manifests/multinode_db.pp Normal file
View file

@ -0,0 +1,80 @@
class sunetdrive::multinode_db(){
$is_multinode = true;
$environment = sunetdrive::get_environment()
$allcustomers = hiera_hash('multinode_mapping')
$customers = $allcustomers.keys
$customers.each |$customer| {
file { "/etc/mariadb/backups/${customer}":
ensure => directory,
}
file { "/etc/mariadb/init/04-nextcloud.${customer}.sql":
ensure => present,
content => "CREATE SCHEMA nextcloud_${customer};\nCREATE USER 'nextcloud_${customer}'@'%' IDENTIFIED BY '${hiera("${customer}_mysql_user_password")}';\nGRANT ALL PRIVILEGES ON nextcloud_${customer}.* TO 'nextcloud_${customer}'@'%' IDENTIFIED BY '${hiera("${customer}_mysql_user_password")}';\n",
mode => '0744',
}
}
if $facts["networking"]["fqdn"] =~ /^multinode-db1\.drive\.(test\.){1}sunet\.se$/ {
$statistics_secret = safe_hiera('statistics_secret')
notify { 'hostmessage':
message => 'We are on multinode-db1. Set up statistics environment.',
}
$custdata=$customers.reduce({}) |$memo, $value| {
$memo + {$value => lookup($value)}
}
$rclone_url = 'https://downloads.rclone.org/rclone-current-linux-amd64.deb'
$local_path = '/tmp/rclone-current-linux-amd64.deb'
exec { 'rclone_deb':
command => "/usr/bin/wget -q ${rclone_url} -O ${local_path}",
creates => $local_path,
}
package { 'rclone':
ensure => installed,
provider => dpkg,
source => $local_path,
require => Exec['rclone_deb'],
}
file { '/root/.rclone.conf':
ensure => file,
content => template('sunetdrive/mariadb_backup/rclone.conf.erb'),
owner => 'root',
group => 'root',
mode => '0600',
}
file { '/root/tasks/':
ensure => directory,
}
-> file { '/root/tasks/listusersbydep.sh':
ensure => file,
content => template('sunetdrive/mariadb/listusersdep.sh.erb'),
owner => 'root',
group => 'root',
mode => '0700',
}
file { '/root/tasks/genusersondepartmentlists.sh':
ensure => file,
content => template('sunetdrive/mariadb/genuserdeplists.sh.erb'),
owner => 'root',
group => 'root',
mode => '0700',
}
file {'/opt/mariadb/statistics/':
ensure => directory,
}
-> file {'/opt/mariadb/statistics/custdata.json':
ensure => file,
content => template('sunetdrive/mariadb/custconfig.json.erb'),
owner => 'root',
group => 'root',
mode => '0600',
}
sunet::scriptherder::cronjob { 'genuserdeplists':
cmd => '/root/tasks/genusersondepartmentlists.sh',
hour => '2',
minute => '5',
ok_criteria => ['exit_status=0','max_age=30h'],
warn_criteria => ['exit_status=1', 'max_age=60h'],
}
}
}

79
manifests/nrpe.pp Normal file
View file

@ -0,0 +1,79 @@
# NRPE class
class sunetdrive::nrpe(
$loadw = '15,10,5',
$loadc = '30,25,20',
$procsw = 150,
$procsc = 200,
) {
require apt
class { 'sunet::nagios':
command_timeout => 600,
loadw => $loadw,
loadc => $loadc,
procsw => $procsw,
procsc => $procsc,
}
package {'nagios-plugins-contrib': ensure => latest}
if ($facts['os']['name'] == 'Ubuntu' and versioncmp($facts['os']['release']['full'], '22.04') >= 0 ){
$mem_w = '90'
$mem_c = '95'
} else {
$mem_w = '10'
$mem_c = '5'
}
$checks = ['nrpe_check_memory']
$checks.each |$check| {
ensure_resource("sunet::nagios::${check}", "nagios-nrpe-${check}")
}
sunet::nagios::nrpe_command {'check_entropy':
command_line => '/usr/lib/nagios/plugins/check_entropy -w 256'
}
sunet::nagios::nrpe_command {'check_ntp_time':
command_line => '/usr/lib/nagios/plugins/check_ntp_time -H ntp.se'
}
sunet::nagios::nrpe_command {'check_scriptherder':
command_line => '/usr/local/bin/scriptherder --mode check'
}
sunet::sudoer {'nagios_run_needrestart_command':
user_name => 'nagios',
collection => 'nrpe_needrestart_check',
command_line => '/usr/sbin/needrestart -p -l'
}
sunet::sudoer {'nagios_run_galera_command':
user_name => 'nagios',
collection => 'nrpe_galera_check',
command_line => '/usr/lib/nagios/plugins/check_galera_cluster -w 2 -c 1 -0'
}
sunet::nagios::nrpe_command {'check_galera_cluster':
command_line => '/usr/bin/sudo /usr/lib/nagios/plugins/check_galera_cluster -w 2 -c 1 -0'
}
sunet::sudoer {'nagios_run_proxysql_command':
user_name => 'nagios',
collection => 'nrpe_proxysql_check',
command_line => '/usr/lib/nagios/plugins/check_proxysql_server, /usr/lib/nagios/plugins/check_mysql_server_status'
}
sunet::nagios::nrpe_command {'check_proxysql_server':
command_line => '/usr/bin/sudo /usr/lib/nagios/plugins/check_proxysql_server'
}
sunet::sudoer {'nagios_run_replication_command':
user_name => 'nagios',
collection => 'nrpe_replication_check',
command_line => '/usr/local/bin/check_replication'
}
sunet::nagios::nrpe_command {'check_async_replication':
command_line => '/usr/bin/sudo /usr/local/bin/check_replication'
}
sunet::sudoer {'nagios_run_backup_command':
user_name => 'nagios',
collection => 'nrpe_backup_check',
command_line => '/usr/local/bin/check_backups'
}
sunet::nagios::nrpe_command {'check_backups':
command_line => '/usr/bin/sudo /usr/local/bin/check_backups'
}
sunet::nagios::nrpe_command {'check_mysql_server_status':
command_line => '/usr/bin/sudo /usr/lib/nagios/plugins/check_mysql_server_status'
}
}

View file

@ -3,6 +3,8 @@ class sunetdrive::onlyoffice () {
$environment = sunetdrive::get_environment()
$extra_hosts = hiera_hash($environment)['extra_hosts']
$docker_tag = hiera_hash($environment)['collabora_tag']
$token_name = 'Sunet'
$access_token = safe_hiera('collabora_access_token')
$customers = hiera('fullnodes')
$multinode_customers = keys(hiera_hash('multinode_mapping'))
if $environment == 'prod' {
@ -10,14 +12,18 @@ class sunetdrive::onlyoffice () {
} else {
$domain = 'drive.test.sunet.se'
}
exec { 'collabora_docker_login':
command => "docker login registry.gitlab.collabora.com -u ${token_name} -p ${access_token}",
}
sunet::collabora::docs { 'sunet-onlyoffice':
dns => [ '89.32.32.32' ],
extra_hosts => $extra_hosts,
dns => [ '89.32.32.32' ],
extra_hosts => $extra_hosts,
extra_volumes => ['/opt/collabora/coolwsd.xml:/etc/coolwsd/coolwsd.xml'],
docker_tag => $docker_tag,
docker_tag => $docker_tag,
docker_image => 'registry.gitlab.collabora.com/productivity/collabora-online-nc',
}
file {'/opt/collabora/coolwsd.xml':
ensure => present,
ensure => present,
content => template('sunetdrive/document/coolwsd.xml.erb'),
}
}

48
manifests/portal.pp Normal file
View file

@ -0,0 +1,48 @@
#Class for SUNET-Drive-portal-Server
class sunetdrive::portal (
String $portal_version = '0.0.1-1'
) {
$environment = sunetdrive::get_environment()
if $environment == 'prod' {
$domain = 'drive.sunet.se'
} else {
$domain = 'drive.test.sunet.se'
}
sunet::docker_compose { 'portal_compose':
content => template('sunetdrive/portal/docker-compose.erb.yaml'),
service_name => 'portal',
compose_dir => '/opt/',
compose_filename => 'docker-compose.yml',
description => 'Portal server',
}
exec { 'workaround_docker_compose_dir':
command => 'mkdir -p /opt/portal/nginx',
unless => 'test -d /opt/portal/nginx',
}
$nginx_dirs = ['acme', 'certs', 'conf', 'dhparam', 'html', 'vhost']
$nginx_dirs.each | $dir| {
file { "/opt/portal/nginx/${dir}":
ensure => directory,
owner => 'root',
group => 'root',
mode => '0751',
}
}
file { '/opt/portal/config.yaml':
ensure => present,
content => template('sunetdrive/portal/config.erb.yaml'),
mode => '0644',
}
sunet::misc::ufw_allow { 'https':
from => '0.0.0.0/0',
port => 443,
}
# For acme and redirect
sunet::misc::ufw_allow { 'http':
from => '0.0.0.0/0',
port => 80,
}
}

View file

@ -3,6 +3,8 @@ class sunetdrive::proxysql (
$bootstrap = undef,
$location = undef,
$proxysql_container_name = 'proxysql_proxysql_1',
$manage_config = true,
$manage_network = true,
) {
# Config from group.yaml
@ -10,6 +12,8 @@ class sunetdrive::proxysql (
$config = hiera_hash($environment)
$db_ip = $config['db']
$nextcloud_ip = $config['app']
$nextcloud_ipv6 = $config['app_v6']
$nextcloud_ip_all = $nextcloud_ip + $nextcloud_ipv6
$proxysql_ok_num = length($nextcloud_ip)
$proxysql_warn_num = $proxysql_ok_num - 1
@ -25,6 +29,9 @@ class sunetdrive::proxysql (
$mysql_user = safe_hiera('mysql_user')
$transaction_persistent = 1
if $::facts['dockerhost2'] == 'yes' and $manage_network {
$hostnet = true
}
file { '/usr/local/bin/proxysql':
ensure => file,
@ -54,10 +61,12 @@ class sunetdrive::proxysql (
require => Package['nagios-nrpe-server'],
content => template('sunetdrive/proxysql/check_mysql_server_status.erb'),
}
file { '/opt/proxysql/proxysql.cnf':
ensure => present,
content => template('sunetdrive/proxysql/proxysql.cnf.erb'),
mode => '0644',
if $manage_config {
file { '/opt/proxysql/proxysql.cnf':
ensure => present,
content => template('sunetdrive/proxysql/proxysql.cnf.erb'),
mode => '0644',
}
}
file { '/opt/proxysql/my.cnf':
@ -65,9 +74,22 @@ class sunetdrive::proxysql (
content => template('sunetdrive/proxysql/my.cnf.erb'),
mode => '0644',
}
sunet::misc::ufw_allow { 'stats_ports':
from => $tug_office,
port => 6080,
if $::facts['sunet_nftables_enabled'] == 'yes' {
sunet::nftables::docker_expose { 'stats_ports':
allow_clients => $tug_office,
port => 6080,
iif => 'ens3',
}
sunet::nftables::docker_expose { 'proxysql':
allow_clients => $nextcloud_ip_all,
port => 6032,
iif => 'ens3',
}
} else {
sunet::misc::ufw_allow { 'stats_ports':
from => $tug_office,
port => 6080,
}
}
sunet::docker_compose { 'drive_proxysql_docker_compose':

View file

@ -17,10 +17,10 @@ class sunetdrive::redis_cluster (
content => template('sunetdrive/redis_cluster/reset_cluster.erb.sh'),
mode => '0700',
}
file { '/root/.bashrc':
ensure => present,
content => template('sunetdrive/redis_cluster/bashrc.erb'),
mode => '0644',
exec { 'set_permissions_directory':
command => 'chown -R 999:root /opt/redis/node-*'
}
exec { 'set_permissions_files':
command => 'chown -R 999:999 /opt/redis/node-*/*'
}
}

View file

@ -1,12 +1,16 @@
#Class for SUNET-Drive-Lookup-Server
class sunetdrive::reva (
String $domain = '',
String $reva_domain = ''
String $domain = 'drive.test.sunet.se',
String $customer = 'sunet',
String $reva_domain = "${customer}-reva.${domain}",
String $reva_version = 'v1.26.0',
) {
$environment = sunetdrive::get_environment()
$shared_secret = safe_hiera('shared_secret')
$statistics_secret = safe_hiera('statistics_secret')
$iopsecret = safe_hiera('iopsecret')
$smtp_credentials = safe_hiera('smtp_credentials')
# Firewall settings
#Create users
@ -19,18 +23,17 @@ class sunetdrive::reva (
content => template('sunetdrive/reva/revad.toml.erb'),
mode => '0644',
}
file { '/opt/reva/rclone.conf':
ensure => present,
owner => 'www-data',
group => 'root',
content => template('sunetdrive/reva/rclone.conf.erb'),
mode => '0644',
}
file { '/opt/reva/data':
ensure => directory,
owner => 'www-data',
}
file { '/opt/reva/ocm-providers.json':
ensure => present,
owner => 'www-data',
group => 'root',
content => template('sunetdrive/reva/ocm-providers.json.erb'),
mode => '0644',
}
sunet::docker_compose { 'drive_reva_docker_compose':
content => template('sunetdrive/reva/docker-compose.yml.erb'),
service_name => 'reva',
@ -38,9 +41,12 @@ class sunetdrive::reva (
compose_filename => 'docker-compose.yml',
description => 'Sciencemesh reva server',
}
sunet::misc::ufw_allow { 'https_reva':
from => '0.0.0.0/0',
port => 443,
$ports = [443,19000]
$ports.each | $port|{
sunet::misc::ufw_allow { "reva_${port}":
from => '0.0.0.0/0',
port => $port,
}
}
}

View file

@ -29,16 +29,23 @@ class sunetdrive::satosa($dehydrated_name=undef,$image='docker.sunet.se/satosa',
}
}
}
sunet::docker_run {'satosa':
image => $image,
imagetag => $tag,
volumes => ['/etc/satosa:/etc/satosa','/etc/dehydrated:/etc/dehydrated'],
ports => ['443:8000'],
env => ['METADATA_DIR=/etc/satosa/metadata', 'WORKER_TIMEOUT=120']
$dehydrated_status = $dehydrated_name ? {
undef => 'absent',
default => 'present'
}
file {'/opt/docker_run':
ensure => 'absent',
}
sunet::docker_compose { 'satosa':
content => template('sunetdrive/satosa/docker-compose.yml.erb'),
service_name => 'satosa',
compose_dir => '/opt/',
compose_filename => 'docker-compose.yml',
description => 'Satosa',
}
file {'/etc/satosa/proxy_conf.yaml':
content => inline_template("<%= @merged_conf.to_yaml %>\n"),
notify => Sunet::Docker_run['satosa']
notify => Sunet::Docker_compose['satosa']
}
$plugins = hiera('satosa_config')
sort(keys($plugins)).each |$n| {
@ -46,26 +53,16 @@ class sunetdrive::satosa($dehydrated_name=undef,$image='docker.sunet.se/satosa',
$fn = $plugins[$n]
file { $fn:
content => inline_template("<%= @conf.to_yaml %>\n"),
notify => Sunet::Docker_run['satosa']
notify => Sunet::Docker_compose['satosa']
}
}
ufw::allow { 'satosa-allow-https':
ip => 'any',
sunet::misc::ufw_allow { 'satosa-allow-https':
from => 'any',
port => '443'
}
$dehydrated_status = $dehydrated_name ? {
undef => 'absent',
default => 'present'
}
sunet::docker_run {'alwayshttps':
sunet::misc::ufw_allow { 'satosa-allow-http':
ensure => $dehydrated_status,
image => 'docker.sunet.se/always-https',
ports => ['80:80'],
env => ['ACME_URL=http://acme-c.sunet.se']
}
ufw::allow { 'satosa-allow-http':
ensure => $dehydrated_status,
ip => 'any',
from => 'any',
port => '80'
}
if ($dehydrated_name) {
@ -77,12 +74,6 @@ class sunetdrive::satosa($dehydrated_name=undef,$image='docker.sunet.se/satosa',
cert_file => '/etc/satosa/https.crt'
}
}
file { '/opt/satosa':
ensure => directory,
owner => 'root',
group => 'root',
mode => '0755',
}
-> file { '/opt/satosa/restart.sh':
ensure => file,
owner => 'root',

View file

@ -3,6 +3,8 @@ class sunetdrive::script (
$bootstrap = undef,
$location = undef
) {
include sunet::packages::python3_pip
include sunet::packages::kopia
$environment = sunetdrive::get_environment()
$customer = sunetdrive::get_customer()
$apikey_test = safe_hiera('monitor_apikey_test')
@ -17,7 +19,21 @@ class sunetdrive::script (
$backup_server = $config['backup_server']
$rclone_url = 'https://downloads.rclone.org/rclone-current-linux-amd64.deb'
$local_path = '/tmp/rclone-current-linux-amd64.deb'
$singlenodes = hiera('singlenodes')
$singlenodes = lookup('singlenodes')
$multinodes = keys(lookup('multinode_mapping'))
$extra_backup_jobs = pick($config['extra_backup_jobs'], {})
if $customer == 'mdu' {
$eppn_suffix = 'mdh.se'
$include_userbuckets = 'true'
} elsif $customer == 'uu' {
$eppn_suffix = 'users.uu.se'
$include_userbuckets = 'false'
}
else {
$eppn_suffix = "${customer}.se"
$include_userbuckets = 'false'
}
$ssh_config = "Host *.sunet.se
User script
@ -41,6 +57,17 @@ class sunetdrive::script (
}
$site_name = $config['site_name']
$user_bucket_name = $config['user_bucket_name']
if $config['user_scans'] {
$config['user_scans'].each |$job| {
sunet::scriptherder::cronjob { $job['name']:
cmd => "ssh -t -l script ${job['server']} /usr/bin/sudo /usr/local/bin/occ ${job['container']} files:scan ${job['user']}",
hour => $job['hour'],
minute => $job['minute'],
ok_criteria => ['exit_status=0','max_age=1d'],
warn_criteria => ['exit_status=1','max_age=2d'],
}
}
}
# It is a start that will get us user buckets and primary buckets
$backup_projects = $location
@ -54,11 +81,11 @@ class sunetdrive::script (
source => $local_path,
require => Exec['rclone_deb'],
}
package { 'python3.9':
package { 'fuse3':
ensure => installed,
provider => apt,
}
-> package { 'python3-pip':
package { 'python3':
ensure => installed,
provider => apt,
}
@ -66,11 +93,29 @@ class sunetdrive::script (
ensure => installed,
provider => apt,
}
package { 'xmlstarlet':
ensure => installed,
provider => apt,
}
$drive_version = '0.3.1'
if $facts['os']['distro']['id'] == 'Debian' {
$pip_cmd = 'pip3 install --break-system-packages'
} else {
$pip_cmd = 'python3 -m pip install'
}
exec { 'drive-utils':
command => "python3.9 -m pip install https://pypi.sunet.se/packages/drive-utils-${drive_version}.tar.gz",
unless => "python3.9 -m pip list | grep drive-utils | grep ${drive_version}",
require => Package['python3.9'],
command => "${pip_cmd} https://pypi.sunet.se/packages/drive-utils-${drive_version}.tar.gz",
unless => "python3 -m pip list | grep drive-utils | grep ${drive_version}",
require => Package['python3'],
}
file { '/opt/backups':
ensure => directory,
mode => '0700'
}
file { '/opt/backups/scripts':
ensure => directory,
mode => '0700'
}
file { '/root/.ssh/':
ensure => directory,
@ -139,13 +184,6 @@ class sunetdrive::script (
group => 'root',
mode => '0700',
}
file { '/root/tasks/switch_redis_master_to.sh':
ensure => file,
content => template('sunetdrive/script/switch_redis_master_to.erb.sh'),
owner => 'root',
group => 'root',
mode => '0700',
}
file { '/root/tasks/restart-nextcloud-farm':
ensure => file,
content => template('sunetdrive/script/restart-nextcloud-farm.erb'),
@ -216,6 +254,12 @@ class sunetdrive::script (
group => 'root',
mode => '0700',
}
file_line { 'FIXME_remove_when_s3_migration_done_in_sto3':
ensure => 'present',
line => '37.156.195.53 s3.sto3.safedc.net',
path => '/etc/hosts',
match => '^37.156.195.53',
}
if $environment == 'test' {
sunet::scriptherder::cronjob { 'reboot-customer':
cmd => '/root/tasks/reboot-customer.sh',
@ -225,8 +269,13 @@ class sunetdrive::script (
warn_criteria => ['exit_status=1','max_age=31d'],
}
}
# Opt out of userbuckets
unless $customer in ['extern', 'gih', 'suni', 'common'] {
# Opt out of userbuckets, also customers that ended the contract
if $customer in ['extern', 'gih', 'hkr', 'suni', 'common', 'su', 'lnu'] {
sunet::scriptherder::cronjob { 'makebuckets':
ensure => absent,
cmd => 'bin/true',
}
} else {
sunet::scriptherder::cronjob { 'makebuckets':
cmd => '/root/tasks/makebuckets.sh',
minute => '*/5',
@ -260,31 +309,69 @@ class sunetdrive::script (
}
# Opt in to folder structure in projectbuckets
if $customer in ['gih', 'mdu'] {
sunet::scriptherder::cronjob { 'create_folders_in_project_buckets':
ensure => absent,
cmd => 'true',
}
file { '/root/tasks/create_folders_in_project_buckets.sh':
ensure => absent,
}
file { '/root/tasks/create_folders_in_fullnode_buckets.sh':
ensure => file,
content => template('sunetdrive/script/create_folders_in_project_buckets.erb.sh'),
content => template('sunetdrive/script/create_folders_in_fullnode_buckets.erb.sh'),
owner => 'root',
group => 'root',
mode => '0700',
}
}
if $customer in ['gih'] {
sunet::scriptherder::cronjob { 'create_folders_in_project_buckets':
cmd => '/root/tasks/create_folders_in_project_buckets.sh',
sunet::scriptherder::cronjob { 'create_folders_in_fullnode_buckets':
cmd => '/root/tasks/create_folders_in_fullnode_buckets.sh',
minute => '*/30',
ok_criteria => ['exit_status=0','max_age=1h'],
warn_criteria => ['exit_status=1','max_age=2h'],
}
}
if $customer in ['mdu'] {
sunet::scriptherder::cronjob { 'create_folders_in_project_buckets':
cmd => '/root/tasks/create_folders_in_project_buckets.sh "Arbetsmaterial (work material)" "Bevarande (retention)" "Gallringsbart (disposal)"',
sunet::scriptherder::cronjob { 'create_folders_in_fullnode_buckets':
cmd => '/root/tasks/create_folders_in_fullnode_buckets.sh "Arbetsmaterial (work material)" "Bevarande (retention)" "Gallringsbart (disposal)"',
minute => '*/30',
ok_criteria => ['exit_status=0','max_age=1h'],
warn_criteria => ['exit_status=1','max_age=2h'],
}
}
if $customer == 'common' {
$multinode_passwords = $multinodes.map | $index, $customer | {
safe_hiera("${customer}_admin_app_password")
}
file { '/root/tasks/announce.sh':
ensure => file,
content => template('sunetdrive/script/multinodeannounce.erb.sh'),
owner => 'root',
group => 'root',
mode => '0700',
}
file { '/root/tasks/backupmultinodedb.sh':
ensure => file,
content => template('sunetdrive/script/backupmultinodedb.erb.sh'),
owner => 'root',
group => 'root',
mode => '0700',
}
file { '/opt/backups/scripts/hb.sh':
ensure => file,
content => template('sunetdrive/script/backup-hb.erb.sh'),
owner => 'root',
group => 'root',
mode => '0700',
}
sunet::scriptherder::cronjob { 'backupmultinodedb':
cmd => '/root/tasks/backupmultinodedb.sh',
hour => '2',
minute => '0',
ok_criteria => ['exit_status=0','max_age=2d'],
warn_criteria => ['exit_status=1','max_age=3d'],
}
if $environment == 'prod' {
file { '/root/tasks/aggregate.sh':
ensure => file,
@ -300,33 +387,22 @@ class sunetdrive::script (
ok_criteria => ['exit_status=0','max_age=2d'],
warn_criteria => ['exit_status=1','max_age=3d'],
}
}
file { '/root/tasks/backupsinglenodedb.sh':
ensure => file,
content => template('sunetdrive/script/backupsinglenodedb.erb.sh'),
owner => 'root',
group => 'root',
mode => '0700',
}
$singlenodes.each | $singlenode| {
$multinode = hiera_hash('multinode_mapping')[$singlenode]['server']
$multinodeserver = "${multinode}.${site_name}"
$nccontainer = "nextcloud${singlenode}_app_1"
$nccontainer = "nextcloud-${singlenode}-app-1"
sunet::scriptherder::cronjob { "backup${singlenode}db":
ensure => absent,
cmd => 'true',
}
sunet::scriptherder::cronjob { "listusers_${singlenode}":
cmd => "/root/tasks/listusers.sh ${singlenode} ${multinodeserver}",
minute => '*/5',
ok_criteria => ['exit_status=0','max_age=30m'],
warn_criteria => ['exit_status=1', 'max_age=60m'],
}
sunet::scriptherder::cronjob { "backup${singlenode}db":
cmd => "/root/tasks/backupsinglenodedb.sh ${multinodeserver} ${singlenode}",
hour => '2',
minute => '0',
ok_criteria => ['exit_status=0','max_age=2d'],
warn_criteria => ['exit_status=1','max_age=3d'],
}
if $environment == 'prod' {
sunet::scriptherder::cronjob { "statistics${singlenode}":
cmd => "/root/tasks/usage.sh ${singlenode} ${multinodeserver}",
@ -336,7 +412,7 @@ class sunetdrive::script (
warn_criteria => ['exit_status=1','max_age=3d'],
}
}
unless $singlenode in ['mau'] {
unless $singlenode in ['mau', 'uu'] {
sunet::scriptherder::cronjob { "make${singlenode}buckets":
cmd => "/root/tasks/makebuckets.sh ${multinodeserver} ${nccontainer} ${singlenode}-${environment}",
minute => '*',
@ -345,23 +421,45 @@ class sunetdrive::script (
}
}
}
$gss_backup_server = $config['gss_backup_server']
$lookup_backup_server = $config['lookup_backup_server']
sunet::scriptherder::cronjob { 'backupgssdb':
cmd => "/root/tasks/backupdb.sh ${gss_backup_server}",
ensure => 'absent',
cmd => 'true',
hour => '2',
minute => '0',
ok_criteria => ['exit_status=0','max_age=2d'],
warn_criteria => ['exit_status=1','max_age=3d'],
}
sunet::scriptherder::cronjob { 'backuplookupdb':
cmd => "/root/tasks/backupdb.sh ${lookup_backup_server}",
ensure => 'absent',
cmd => 'true',
hour => '2',
minute => '0',
ok_criteria => ['exit_status=0','max_age=2d'],
warn_criteria => ['exit_status=1','max_age=3d'],
}
} else {
$admin_app_password = safe_hiera('admin_app_password')
file { '/root/tasks/announce.sh':
ensure => file,
content => template('sunetdrive/script/announce.erb.sh'),
owner => 'root',
group => 'root',
mode => '0700',
}
file { '/root/tasks/delete_announcement_with_subject.sh':
ensure => file,
content => template('sunetdrive/script/delete_announcement_with_subject.erb.sh'),
owner => 'root',
group => 'root',
mode => '0700',
}
file { '/root/tasks/backupmultinodedb.sh':
ensure => absent,
}
sunet::scriptherder::cronjob { 'backupmultinodedb':
ensure => absent,
cmd => 'true',
}
sunet::scriptherder::cronjob { 'backupdb':
cmd => "/root/tasks/backupdb.sh ${backup_server}",
hour => '2',

View file

@ -1,10 +1,23 @@
#Class for SUNET-Drive-Script-receiver
class sunetdrive::scriptreceiver()
{
include sunet::packages::yq
sunet::system_user {'script': username => 'script', group => 'script', managehome => true, shell => '/bin/bash' }
# These tasks correspond to a ${task}.erb.sh template
$tasks = ['list_users', 'list_files_for_user', 'create_bucket', 'backup_db', 'purge_backups', 'maintenancemode', 'restart_sunet_service', 'start_sentinel', 'stop_sentinel']
$tasks = [
'list_users',
'list_files_for_user',
'create_bucket',
'backup_db',
'purge_backups',
'maintenancemode',
'restart_sunet_service',
'start_sentinel',
'stop_sentinel',
'removeswap',
'backup_multinode_db'
]
$environment = sunetdrive::get_environment()
$config = hiera_hash($environment)
@ -22,7 +35,6 @@ class sunetdrive::scriptreceiver()
owner => 'script',
group => 'script',
}
$kano_shell = ['89.46.21.246','2001:6b0:6c::1bc']
sunet::misc::ufw_allow { 'script_port':
from => $script_ipv4 + $script_ipv6 + $kano_shell,
@ -35,7 +47,9 @@ class sunetdrive::scriptreceiver()
type => 'ssh-ed25519',
key => $script_pub_key,
}
file { '/etc/sysctl.d/gofasta.conf':
ensure => 'absent',
}
file { '/opt/rotate':
ensure => directory,
mode => '0750',
@ -48,14 +62,28 @@ class sunetdrive::scriptreceiver()
owner => 'root',
group => 'root',
}
file { '/usr/local/bin/safer_reboot':
file { '/usr/local/bin/get_drive_customers':
ensure => file,
content => template('sunetdrive/scriptreceiver/safer_reboot.erb'),
mode => '0740',
content => template('sunetdrive/scriptreceiver/get_drive_customers.erb.sh'),
mode => '0744',
owner => 'root',
group => 'root',
}
file { "/etc/sudoers.d/99-safer_reboot":
file { '/usr/local/bin/safer_reboot':
ensure => file,
content => template('sunetdrive/scriptreceiver/safer_reboot.erb'),
mode => '0744',
owner => 'root',
group => 'root',
}
file { '/root/.bashrc':
ensure => file,
content => template('sunetdrive/scriptreceiver/bashrc.erb.sh'),
mode => '0644',
owner => 'root',
group => 'root',
}
file { '/etc/sudoers.d/99-safer_reboot':
ensure => file,
content => "script ALL=(root) NOPASSWD: /usr/local/bin/safer_reboot\n",
mode => '0440',
@ -90,6 +118,19 @@ class sunetdrive::scriptreceiver()
minute => '*',
hour => '*',
}
file { '/usr/local/bin/clear_scriptherder':
ensure => file,
content => template('sunetdrive/scriptreceiver/clear_scriptherder.erb.sh'),
mode => '0740',
owner => 'root',
group => 'root',
}
file { '/home/script/bin/makeswap.sh':
ensure => absent,
}
file { '/etc/sudoers.d/99-makeswap':
ensure => absent,
}
$tasks.each |String $task| {
file { "/home/script/bin/${task}.sh":
ensure => file,

View file

@ -7,10 +7,13 @@ class sunetdrive::sitemonitornaemon() {
$tls_servers_with_port = hiera_array('tls_servers_with_port')
$nextcloud_version_prod = split(hiera('nextcloud_version_prod'),'[-]')[0]
$nextcloud_version_test = split(hiera('nextcloud_version_test'),'[-]')[0]
$monitorhost = $::fqdn
$monitorhost = $facts['networking']['fqdn']
$environment = sunetdrive::get_environment()
$influx_passwd = safe_hiera('influx_passwd')
$slack_url = safe_hiera('slack_url')
$extra_host_groups = {
node3_hosts => join($facts['configured_hosts_in_cosmos']['all'].filter |$host| { $host =~ /^node3\./ }, ',')
}
file { '/usr/local/bin/slack_nagios.sh':
ensure => present,
@ -42,11 +45,20 @@ class sunetdrive::sitemonitornaemon() {
}
file { '/etc/naemon/conf.d/sunetdrive_thruk_templates.conf':
ensure => present,
owner => 'naemon',
group => 'naemon',
content => template('sunetdrive/monitor/sunetdrive_thruk_templates.conf.erb'),
mode => '0644',
}
file { '/etc/naemon/conf.d/sunetdrive_extra_hostgroups.cfg':
ensure => present,
content => template('sunetdrive/monitor/sunetdrive_extra_hostgroups.cfg.erb'),
mode => '0644',
}
nagioscfg::service {'check_scriptherder':
hostgroup_name => ['sunetdrive::nrpe'],
check_command => 'check_nrpe_1arg_to300!check_scriptherder',
description => 'Scriptherder Status',
contact_groups => ['naemon-admins'],
}
nagioscfg::service {'check_galera_cluster':
hostgroup_name => ['galera_monitor'],
check_command => 'check_nrpe_1arg!check_galera_cluster',
@ -95,6 +107,12 @@ class sunetdrive::sitemonitornaemon() {
description => 'Status of sarimner interface',
contact_groups => ['alerts']
}
nagioscfg::service {'check_nextcloud_mounts':
hostgroup_name => ['node3_hosts','sunetdrive::multinode'],
check_command => 'check_nrpe_1arg!check_nextcloud_mounts',
description => 'S3 buckets with multiple Nextcloud mounts',
contact_groups => ['alerts']
}
}

10
manifests/ubuntu_2004.pp Normal file
View file

@ -0,0 +1,10 @@
# Class for Ubuntu 20.04
# Applies Ubuntu 20.04-specific workarounds; a no-op on every other
# OS/release (guarded by the os facts below).
class sunetdrive::ubuntu_2004() {
  if $facts['os']['name'] == 'Ubuntu' and $facts['os']['distro']['release']['full'] == '20.04' {
    # Hide deprecation warnings for Ubuntu 2004
    # RUBYOPT=-W0 silences Ruby warnings system-wide via /etc/environment,
    # so tools shipped with 20.04's older Ruby stop spamming logs.
    file_line {'env_rubyopt':
      path => '/etc/environment',
      line => 'RUBYOPT=\'-W0\'',
    }
  }
}

View file

@ -7,7 +7,7 @@ LoadModule headers_module /usr/lib/apache2/modules/mod_headers.so
ErrorLog ${APACHE_LOG_DIR}/error.log
CustomLog ${APACHE_LOG_DIR}/access.log combined
<Directory /var/www/nextcloud/>
<Directory /var/www/html/>
Require all granted
AllowOverride All
Options FollowSymLinks MultiViews
@ -16,6 +16,10 @@ LoadModule headers_module /usr/lib/apache2/modules/mod_headers.so
Dav off
</IfModule>
</Directory>
<Directory /var/www/html/data>
Order allow,deny
deny from all
</Directory>
</VirtualHost>
@ -47,5 +51,9 @@ LoadModule headers_module /usr/lib/apache2/modules/mod_headers.so
Dav off
</IfModule>
</Directory>
<Directory /var/www/html/data>
Order allow,deny
deny from all
</Directory>
</VirtualHost>
# vim: syntax=apache ts=4 sw=4 sts=4 sr noet

View file

@ -1,13 +1,14 @@
Hello and welcome to your personal space for research data storage,
Welcome to your personal space for research data storage!
It is important that research data is managed in a secure and careful manner, so that it is protected from damage and destruction. Mälardalen University offers its researchers and doctoral students a secure and stable IT service that makes it possible to store research data. Furthermore, it enables research collaboration internally and externally since researchers can easily share their data within the project or with other individual researchers at MDU in a secure way. This technical infrastructure entails a good collaboration space for researchers, a secure handling of data, the possibility for the researcher to be able to store data in such a way that data management according to the FAIR principles can be complied with at MDU.
Researchers and PhD-students at MDU have access to a free storage space of 200 GB.
To meet the general need for research data storage, MDU has purchased Sunet Drive's S3 research data storage service.  Each researcher will be offered a space of 200GB per person. To simplify administration, all spaces will be appropriated in advance in the same way, regardless of the research domain or total amount of research data.
Your storage space is in the “Your storage space” folder that appears a few minutes after opening SUNET Drive for the first time.
200 GB will not correspond to all of MDU researchers' data storage needs; therefore, the researcher will have the possibility to purchase additional data space via a digital form. The additional purchase function is done by internal billing and the purchased space is then visible at the next login in the researcher's personal space.   
The “Your storage space” folder has an ABG structure which means that there are folders for Arbetsmaterial (work material), Bevarande (retention) and Gallringsbart (disposable).
If there is a need for a specific project space for research data storage and/or sharing, this is also possible. Such a space is created by an administrator after ordering project space through the same form as when purchasing additional storage space.  The project space will then be visible to project members in the personal storage space at the next login.
It is possible to create subfolders within these main folders themselves.
Can a MDU student be included in the research project? It is possible to assign a student a time-limited access/sharing opportunity to specific folders in either the project space or the researcher's personal space. Contact the Data Access Unit (DAU) to enable such a feature.
All research data, except for military material and security-classified information, can be stored on SUNET Drive.
If your data needs to undergo an export control, please refer to the check list on the internal portal.
If you have any questions about the storage space, please contact dau@mdu.se.
A user guide and a FAQ regarding SUNET Drive are available on the internal portal. If you have any questions, please contact dau@mdu.se.

View file

@ -25,7 +25,7 @@ log_errors_max_len = 1024
max_execution_time = 86400
max_file_uploads = 20
max_input_time = 86400
memory_limit = 512M
memory_limit = <%= @php_memory_limit_mb %>M
output_buffering = Off
post_max_size = 30G
precision = 14
@ -151,7 +151,7 @@ ldap.max_links = -1
[dba]
; Nothing here
[opcache]
; Nothing here
opcache.interned_strings_buffer=32
[curl]
; Nothing here
[openssl]

View file

@ -0,0 +1,42 @@
#!/usr/bin/env python3
"""Nagios-style check for S3 buckets mounted more than once.

Lists every Nextcloud container via /usr/local/bin/get_containers, asks
each one for its external mounts (`nocc ... files_external:list`), and
warns when the same bucket appears in more than one mount.  Always exits
0 for now (warnings are reported in the output only; see the commented
assignments below).
"""
from collections import Counter
import json
import shlex
import subprocess
import sys

# Fix: the original bound the name `exit`, shadowing the builtin.
exit_code = 0
base_message = "OK: no duplicate mounts"
long_message = ""

# subprocess.run waits for and reaps the child, unlike the original
# Popen(...).stdout.read(), which never called wait() (zombie risk).
containers = subprocess.run(
    '/usr/local/bin/get_containers',
    stdout=subprocess.PIPE,
    check=False,
).stdout.decode().splitlines()

for i, container in enumerate(containers, start=1):
    buckets = []
    list_command = f"/usr/local/bin/nocc {container} files_external:list --all --show-password --output json"
    command = shlex.split(list_command)
    mount_data_bytes = subprocess.run(
        command, stdout=subprocess.PIPE, check=False
    ).stdout
    try:
        mount_data = json.loads(mount_data_bytes.decode())
    except json.decoder.JSONDecodeError as err:
        # NOTE(review): this suppresses the warning for the *last*
        # container (unless it is also the first) — presumably a known
        # benign failure; confirm the intent before changing it.
        if i == 1 or i != len(containers):
            base_message = "WARNING: invalid json"
            long_message += f"\ncontainer: {container} - json decode error: {err}"
        # lets do exit 0 for now
        # exit_code = 1
        continue
    for items in mount_data:
        buckets.append(items["configuration"]["bucket"])
    # Any bucket seen more than once in this container is reported.
    for bucket, count in Counter(buckets).items():
        if count > 1:
            base_message = "WARNING: buckets with multiple mounts"
            long_message += f"\ncontainer: {container} - bucket: {bucket} - {count}"
            # lets do exit 0 for now
            # exit_code = 1

print(base_message)
if long_message != "":
    print(long_message.lstrip())
sys.exit(exit_code)

View file

@ -147,7 +147,9 @@ ldap.max_links = -1
[dba]
; Nothing here
[opcache]
; Nothing here
opcache.interned_strings_buffer=16
opcache.validate_timestamps=0
opcache.memory_consumption=128
[curl]
; Nothing here
[openssl]

View file

@ -3,7 +3,6 @@
config_php='/var/www/html/config/config.php'
dbhost="<%= @dbhost %>"
mysql_user_password="<%= @mysql_user_password %>"
admin_password="<%= @admin_password %>"
location="<%= @location %>"
bucket="<%= @s3_bucket %>"
@ -14,6 +13,9 @@ if [[ "${user_input}" == "IKnowWhatIAmDoing" ]]; then
echo "WARNING: This will delete everything in the database and reinstall Nextcloud."
echo "You have 10 seconds to abort by hitting CTRL/C"
sleep 10s
echo "Setting temp admin password"
apt update && apt install -y apg
admin_password="$(apg -m 40 | head -1)"
echo "Ok, proceeding."
echo "Dropping database in 3 seconds"
sleep 3s
@ -48,6 +50,11 @@ EOF
instanceid=$(grep -E "^ 'instanceid'" ${config_php} | awk -F "'" '{print $4}')
secret=$(grep -E "^ 'secret'" ${config_php} | awk -F "'" '{print $4}')
passwordsalt=$(grep -E "^ 'passwordsalt'" ${config_php} | awk -F "'" '{print $4}')
echo "Now delete the admin user:"
echo " occ user:delete admin"
echo "and then create a new admin user:"
echo " /usr/local/bin/add_admin_user <username> <email address>"
echo ""
echo "Please use edit-secrets to add these variables to all Nextcloud servers:"
echo "instanceid: DEC::PKCS7[${instanceid}]!"
echo "secret: DEC::PKCS7[${secret}]!"

View file

@ -1,7 +1,5 @@
<?php
$CONFIG = array (
'memcache.local' => '\\OC\\Memcache\\APCu',
'appstoreenabled' => false,
'apps_paths' =>
array (
0 =>
@ -17,145 +15,41 @@ $CONFIG = array (
'writable' => true,
),
),
'memcache.distributed' => '\\OC\\Memcache\\Redis',
'memcache.locking' => '\\OC\\Memcache\\Redis',
<% if @location == 'sunet-test' -%>
'redis.cluster' => [
'seeds' => [
'redis1.drive.test.sunet.se:6379',
'redis2.drive.test.sunet.se:6379',
'redis3.drive.test.sunet.se:6379',
'redis1.drive.test.sunet.se:6380',
'redis2.drive.test.sunet.se:6380',
'redis3.drive.test.sunet.se:6380',
'redis1.drive.test.sunet.se:6381',
'redis2.drive.test.sunet.se:6381',
'redis3.drive.test.sunet.se:6381'
],
'timeout' => 1.1,
'read_timeout' => 0.0,
'password' => '<%= @redis_cluster_password %>',
'failover_mode' => \RedisCluster::FAILOVER_ERROR
],
<% elsif @environment == 'test' && ! @is_multinode && @location != 'gss-test' -%>
'redis.cluster' => [
'seeds' => [
'redis1.<%= @customer %>.drive.test.sunet.se:6379',
'redis2.<%= @customer %>.drive.test.sunet.se:6379',
'redis3.<%= @customer %>.drive.test.sunet.se:6379',
'redis1.<%= @customer %>.drive.test.sunet.se:6380',
'redis2.<%= @customer %>.drive.test.sunet.se:6380',
'redis3.<%= @customer %>.drive.test.sunet.se:6380',
'redis1.<%= @customer %>.drive.test.sunet.se:6381',
'redis2.<%= @customer %>.drive.test.sunet.se:6381',
'redis3.<%= @customer %>.drive.test.sunet.se:6381'
],
'timeout' => 1.1,
'read_timeout' => 0.0,
'password' => '<%= @redis_cluster_password %>',
'failover_mode' => \RedisCluster::FAILOVER_ERROR
],
<% elsif @environment == 'prod' && ! @is_multinode && @location != 'gss-prod' -%>
'redis.cluster' => [
'seeds' => [
'redis1.<%= @customer %>.drive.sunet.se:6379',
'redis2.<%= @customer %>.drive.sunet.se:6379',
'redis3.<%= @customer %>.drive.sunet.se:6379',
'redis1.<%= @customer %>.drive.sunet.se:6380',
'redis2.<%= @customer %>.drive.sunet.se:6380',
'redis3.<%= @customer %>.drive.sunet.se:6380',
'redis1.<%= @customer %>.drive.sunet.se:6381',
'redis2.<%= @customer %>.drive.sunet.se:6381',
'redis3.<%= @customer %>.drive.sunet.se:6381'
],
'timeout' => 1.1,
'read_timeout' => 0.0,
'password' => '<%= @redis_cluster_password %>',
'failover_mode' => \RedisCluster::FAILOVER_ERROR
],
<% else -%>
'redis' =>
array (
'host' => '<%= @redis_host %>',
'password' => '<%= @redis_host_password %>',
'port' => 6379,
),
<% end -%>
'forcessl' => true,
'overwriteprotocol' => 'https',
'objectstore' =>
array (
'class' => '\\OC\\Files\\ObjectStore\\S3',
'arguments' =>
array (
'bucket' => '<%= @s3_bucket %>',
'key' => '<%= @s3_key %>',
'secret' => '<%= @s3_secret %>',
'region' => 'us-east-1',
'hostname' => '<%= @s3_host %>',
'port' => '',
'objectPrefix' => 'urn:oid:',
'autocreate' => false,
'use_ssl' => true,
'use_path_style' => true,
'legacy_auth' => false,
),
),
'csrf.disabled' => true,
'passwordsalt' => '<%= @passwordsalt %>',
'secret' => '<%= @secret %>',
'trusted_domains' =>
array (
<%- index = 0 -%>
<%- @trusted_domains.each do |item| -%>
<%= index %> => '<%= item %>',<% index += 1 %>
<%- end -%>
),
<%- unless @trusted_proxies.empty? -%>
'trusted_proxies' =>
array (
<%- index = 0 -%>
<%- @trusted_proxies.each do |item| -%>
<%= index %> => '<%= item %>',
<%- index += 1 -%>
<%- end -%>
),
<%- end -%>
'appstoreenabled' => false,
'auth.bruteforce.protection.enabled' => false,
'config_is_read_only' => true,
'csrf.disabled' => true,
'datadirectory' => '/var/www/html/data',
'davstorage.request_timeout' => 86401,
<%- if @hostnet -%>
'dbhost' => '127.0.0.1',
<%- else -%>
'dbhost' => '<%= @dbhost %>',
'dbname' => 'nextcloud',
<%- end -%>
'dbname' => '<%= @dbname %>',
'dbpassword' => '<%= @mysql_user_password %>',
'dbport' => '3306',
'dbtableprefix' => 'oc_',
'dbtype' => 'mysql',
'dbuser' => 'nextcloud',
'dbuser' => '<%= @dbuser %>',
'default_phone_region' => 'SE',
'drive_email_template_text_left' => '<%= @drive_email_template_text_left %>',
'drive_email_template_plain_text_left' => '<%= @drive_email_template_plain_text_left %>',
'drive_email_template_url_left' => '<%= @drive_email_template_url_left %>',
<% if @location == 'sunet-test' -%>
'filelocking.debug' => true,
<% end -%>
'files_external_allow_create_new_local' => false,
'forcessl' => true,
'gs.enabled' => '<%= @gs_enabled %>',
'gs.federation' => '<%= @gs_federation %>',
'gs.trustedHosts' => ['*.sunet.se'],
'gss.discovery.manual.mapping.file' => '/var/www/html/mappingfile.json',
'gss.discovery.manual.mapping.parameter' => 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6',
'gss.discovery.manual.mapping.regex' => true,
'gss.jwt.key' => '<%= @gss_jwt_key %>',
'gss.master.admin' =>
array (
<%- index = 0 -%>
<%- @gss_master_admin.each do |item| -%>
<%= index %> => '<%= item %>',
<%- index += 1 -%>
<%- end -%>
),
'gss.master.url' => '<%= @gss_master_url %>',
'gss.mode' => '<%= @gss_mode %>',
'gss.user.discovery.module' => '\\OCA\\GlobalSiteSelector\\UserDiscoveryModules\\ManualUserMapping',
'htaccess.RewriteBase' => '/',
'installed' => true,
'instanceid' => '<%= @instanceid %>',
'integrity.check.disabled' => true,
'log_type' => 'file',
'loglevel' => 0,
'lookup_server' => '<%= @lookup_server %>',
'loglevel' => 1,
'mail_domain' => '<%= @mail_domain %>',
'mail_from_address' => '<%= @mail_from_address %>',
'mail_sendmailmode' => 'smtp',
@ -168,33 +62,126 @@ $CONFIG = array (
'mail_smtpport' => '587',
'mail_smtpsecure' => 'tls',
'mail_template_class' => 'OCA\DriveEmailTemplate\EMailTemplate',
'memcache.distributed' => '\\OC\\Memcache\\Redis',
'memcache.local' => '\\OC\\Memcache\\APCu',
'memcache.locking' => '\\OC\\Memcache\\Redis',
'mysql.utf8mb4' => true,
'objectstore' =>
array (
'class' => '\\OC\\Files\\ObjectStore\\S3',
'arguments' =>
array (
'bucket' => '<%= @s3_bucket %>',
'key' => '<%= @s3_key %>',
'secret' => '<%= @s3_secret %>',
'region' => 'us-east-1',
'hostname' => '<%= @s3_host %>',
'port' => '',
'useMultipartCopy' => true,
'objectPrefix' => 'urn:oid:',
'autocreate' => false,
'use_ssl' => true,
'use_path_style' => true,
'legacy_auth' => false,
),
),
'overwrite.cli.url' => 'https://<%= @site_name %>/',
'overwritehost' => '<%= @site_name %>',
'overwrite.cli.url' => 'https://<%= @site_name %>',
'templatedirectory' => '',
'overwriteprotocol' => 'https',
'passwordsalt' => '<%= @passwordsalt %>',
<% if @environment == 'test' && ! @is_multinode -%>
'redis.cluster' => [
'failover_mode' => \RedisCluster::FAILOVER_ERROR,
'password' => '<%= @redis_cluster_password %>',
'read_timeout' => 0.0,
'seeds' => [
'redis1.<%= @customer %>.drive.test.sunet.se:6379',
'redis2.<%= @customer %>.drive.test.sunet.se:6379',
'redis3.<%= @customer %>.drive.test.sunet.se:6379',
'redis1.<%= @customer %>.drive.test.sunet.se:6380',
'redis2.<%= @customer %>.drive.test.sunet.se:6380',
'redis3.<%= @customer %>.drive.test.sunet.se:6380',
'redis1.<%= @customer %>.drive.test.sunet.se:6381',
'redis2.<%= @customer %>.drive.test.sunet.se:6381',
'redis3.<%= @customer %>.drive.test.sunet.se:6381'
],
'timeout' => 1.1
],
<% elsif @environment == 'prod' && ! @is_multinode -%>
'redis.cluster' => [
'failover_mode' => \RedisCluster::FAILOVER_ERROR,
'password' => '<%= @redis_cluster_password %>',
'read_timeout' => 0.0,
'seeds' => [
'redis1.<%= @customer %>.drive.sunet.se:6379',
'redis2.<%= @customer %>.drive.sunet.se:6379',
'redis3.<%= @customer %>.drive.sunet.se:6379',
'redis1.<%= @customer %>.drive.sunet.se:6380',
'redis2.<%= @customer %>.drive.sunet.se:6380',
'redis3.<%= @customer %>.drive.sunet.se:6380',
'redis1.<%= @customer %>.drive.sunet.se:6381',
'redis2.<%= @customer %>.drive.sunet.se:6381',
'redis3.<%= @customer %>.drive.sunet.se:6381'
],
'timeout' => 1.1
],
<% else -%>
'redis' =>
array (
'host' => '<%= @redis_host %>',
'password' => '<%= @redis_host_password %>',
'port' => 6379,
),
<% end -%>
'secret' => '<%= @secret %>',
'skeletondirectory' => '<%= @skeletondirectory %>',
'templatedirectory' => '',
<% if @environment == 'test' -%>
'trashbin_retention_obligation' => 'auto, 30',
<% end -%>
'trusted_domains' =>
array (
<%- index = 0 -%>
<%- @trusted_domains.each do |item| -%>
<%= index %> => '<%= item %>',<% index += 1 %>
<%- end -%>
),
<%- unless @trusted_proxies.empty? -%>
'trusted_proxies' =>
array (
<%- index = 0 -%>
<%- @trusted_proxies.each do |item| -%>
<%= index %> => '<%= item %>',
<%- index += 1 -%>
<%- end -%>
),
<%- end -%>
'twofactor_enforced' => 'true',
'twofactor_enforced_groups' =>
array (
0 => 'admin',
<%- index = 1 -%>
<%- @twofactor_enforced_groups.each do |item| -%>
<%= index %> => '<%= item %>',
<%- index += 1 -%>
<%- end -%>
),
array (
0 => 'admin',
1 => 'forcemfa',
<%- if @twofactor_enforced_groups -%>
<%- index = 2 -%>
<%- @twofactor_enforced_groups.each do |item| -%>
<%= index %> => '<%= item %>',
<%- index += 1 -%>
<%- end -%>
<%- end -%>
),
'twofactor_enforced_excluded_groups' =>
array (
<%- index = 0 -%>
<%- @twofactor_enforced_excluded_groups.each do |item| -%>
<%= index %> => '<%= item %>',
<%- index += 1 -%>
<%- end -%>
),
array (
<%- if @twofactor_enforced_excluded_groups -%>
<%- index = 0 -%>
<%- @twofactor_enforced_excluded_groups.each do |item| -%>
<%= index %> => '<%= item %>',
<%- index += 1 -%>
<%- end -%>
<%- end -%>
),
'updatechecker' => false,
'version' => '<%= @nextcloud_version_string %>',
'app_install_overwrite' =>
array (
0 => 'globalsiteselector',
),
<% if @environment == 'test' -%>
'versions_retention_obligation' => '<%= @expiration_days_min %>, <%= @expiration_days_max %>',
<% end -%>
);

View file

@ -5,6 +5,10 @@ services:
app:
image: docker.sunet.se/drive/nextcloud-custom:<%= @nextcloud_version %>
restart: always
container_name: nextcloud_app_1
<%- if @hostnet -%>
network_mode: host
<%- end -%>
volumes:
- /opt/nextcloud/000-default.conf:/etc/apache2/sites-enabled/000-default.conf
- /opt/nextcloud/mpm_prefork.conf:/etc/apache2/mods-available/mpm_prefork.conf
@ -15,28 +19,30 @@ services:
- /opt/nextcloud/complete_reinstall.sh:/complete_reinstall.sh
- /opt/nextcloud/config.php:/var/www/html/config/config.php
- /opt/nextcloud/nextcloud.log:/var/www/html/data/nextcloud.log
- /opt/nextcloud/audit.log:/var/www/html/data/audit.log
- /opt/nextcloud/rclone.conf:/rclone.conf
<%- if @skeletondirectory -%>
- /opt/nextcloud/skeleton:<%= @skeletondirectory %>
<%- end -%>
<%- if @location =~ /^gss/ -%>
- /opt/nextcloud/mappingfile.json:/var/www/html/mappingfile.json
<%- end -%>
<% if @location =~ /^kau/ -%>
- /mnt:/opt/tmp/
<%- end -%>
<%- if ! @hostnet -%>
networks:
- default
- proxysql_proxysql
<%- end -%>
dns:
- 89.46.20.75
- 89.46.21.29
- 89.32.32.32
<%- if !@hostnet -%>
ports:
- 443:443
command: apachectl -D FOREGROUND
<%- end -%>
command: sh -c 'tail -F /var/www/html/data/nextcloud.log /var/www/html/data/audit.log| tee -a /proc/1/fd/2 & apachectl -D FOREGROUND'
tty: true
<%- if !@hostnet -%>
networks:
proxysql_proxysql:
external: true
<%- end -%>

View file

@ -1,57 +1,58 @@
{
"/antagning.se$/": "antagning.drive.sunet.se",
"/bth.se$/": "bth.drive.sunet.se",
"/chalmers.se$/": "chalmers.drive.sunet.se",
"/du.se$/": "du.drive.sunet.se",
"/eduid.se$/": "extern.drive.sunet.se",
"/esh.se$/": "esh.drive.sunet.se",
"/fhs.se$/": "fhs.drive.sunet.se",
"/gih.se$/": "gih.drive.sunet.se",
"/gu.se$/": "gu.drive.sunet.se",
"/hb.se$/": "hb.drive.sunet.se",
"/shh.se$/": "shh.drive.sunet.se",
"/hh.se$/": "hh.drive.sunet.se",
"/hhs.se$/": "hhs.drive.sunet.se",
"/hig.se$/": "hig.drive.sunet.se",
"/his.se$/": "his.drive.sunet.se",
"/hj.se$/": "hj.drive.sunet.se",
"/hkr.se$/": "hkr.drive.sunet.se",
"/hv.se$/": "hv.drive.sunet.se",
"/irf.se$/": "irf.drive.sunet.se",
"/kb.se$/": "kb.drive.sunet.se",
"/ki.se$/": "ki.drive.sunet.se",
"/kkh.se$/": "kkh.drive.sunet.se",
"/kmh.se$/": "kmh.drive.sunet.se",
"/konstfack.se$/": "konstfack.drive.sunet.se",
"/kth.se$/": "kth.drive.sunet.se",
"/kva.se$/": "kva.drive.sunet.se",
"/liu.se$/": "liu.drive.sunet.se",
"/lnu.se$/": "lnu.drive.sunet.se",
"/ltu.se$/": "ltu.drive.sunet.se",
"/lu.se$/": "lu.drive.sunet.se",
"/mah.se$/": "mau.drive.sunet.se",
"/mau.se$/": "mau.drive.sunet.se",
"/mdh.se$/": "mdu.drive.sunet.se",
"/mdu.se$/": "mdu.drive.sunet.se",
"/miun.se$/": "miun.drive.sunet.se",
"/nordunet.se$/": "nordunet.drive.sunet.se",
"/nrm.se$/": "nrm.drive.sunet.se",
"/oru.se$/": "oru.drive.sunet.se",
"/rkh.se$/": "rkh.drive.sunet.se",
"/sics.se$/": "sics.drive.sunet.se",
"/slu.se$/": "slu.drive.sunet.se",
"/smhi.se$/": "smhi.drive.sunet.se",
"/sp.se$/": "sp.drive.sunet.se",
"/su.se$/": "su.drive.sunet.se",
"/sunet.se$/": "sunet.drive.sunet.se",
"/suni.se$/": "suni.drive.sunet.se",
"/swamid.se$/": "swamid.drive.sunet.se",
"/ths.se$/": "ths.drive.sunet.se",
"/uhr.se$/": "uhr.drive.sunet.se",
"/umu.se$/": "umu.drive.sunet.se",
"/uniarts.se$/": "uniarts.drive.sunet.se",
"/uu.se$/": "uu.drive.sunet.se",
"/vinnova.se$/": "vinnova.drive.sunet.se",
"/vr.se$/": "vr.drive.sunet.se",
"/(\\w+\\.)*antagning.se$/": "antagning.drive.sunet.se",
"/(\\w+\\.)*bth.se$/": "bth.drive.sunet.se",
"/(\\w+\\.)*chalmers.se$/": "chalmers.drive.sunet.se",
"/(\\w+\\.)*du.se$/": "du.drive.sunet.se",
"/(\\w+\\.)*eduid.se$/": "extern.drive.sunet.se",
"/(\\w+\\.)*esh.se$/": "esh.drive.sunet.se",
"/(\\w+\\.)*fhs.se$/": "fhs.drive.sunet.se",
"/(\\w+\\.)*gih.se$/": "gih.drive.sunet.se",
"/(\\w+\\.)*gu.se$/": "gu.drive.sunet.se",
"/(\\w+\\.)*hb.se$/": "hb.drive.sunet.se",
"/(\\w+\\.)*shh.se$/": "shh.drive.sunet.se",
"/(\\w+\\.)*hh.se$/": "hh.drive.sunet.se",
"/(\\w+\\.)*hhs.se$/": "hhs.drive.sunet.se",
"/(\\w+\\.)*hig.se$/": "hig.drive.sunet.se",
"/(\\w+\\.)*his.se$/": "his.drive.sunet.se",
"/(\\w+\\.)*hj.se$/": "hj.drive.sunet.se",
"/(\\w+\\.)*hkr.se$/": "hkr.drive.sunet.se",
"/(\\w+\\.)*hv.se$/": "hv.drive.sunet.se",
"/(\\w+\\.)*irf.se$/": "irf.drive.sunet.se",
"/(\\w+\\.)*kb.se$/": "kb.drive.sunet.se",
"/(\\w+\\.)*ki.se$/": "ki.drive.sunet.se",
"/(\\w+\\.)*kkh.se$/": "kkh.drive.sunet.se",
"/(\\w+\\.)*kmh.se$/": "kmh.drive.sunet.se",
"/(\\w+\\.)*konstfack.se$/": "konstfack.drive.sunet.se",
"/(\\w+\\.)*kth.se$/": "kth.drive.sunet.se",
"/(\\w+\\.)*kva.se$/": "kva.drive.sunet.se",
"/(\\w+\\.)*liu.se$/": "liu.drive.sunet.se",
"/(\\w+\\.)*lnu.se$/": "lnu.drive.sunet.se",
"/(\\w+\\.)*ltu.se$/": "ltu.drive.sunet.se",
"/(\\w+\\.)*lu.se$/": "lu.drive.sunet.se",
"/(\\w+\\.)*mah.se$/": "mau.drive.sunet.se",
"/(\\w+\\.)*mau.se$/": "mau.drive.sunet.se",
"/(\\w+\\.)*mdh.se$/": "mdu.drive.sunet.se",
"/(\\w+\\.)*mdu.se$/": "mdu.drive.sunet.se",
"/(\\w+\\.)*miun.se$/": "miun.drive.sunet.se",
"/(\\w+\\.)*nordunet.se$/": "nordunet.drive.sunet.se",
"/(\\w+\\.)*nordu.net$/": "nordunet.drive.sunet.se",
"/(\\w+\\.)*nrm.se$/": "nrm.drive.sunet.se",
"/(\\w+\\.)*oru.se$/": "oru.drive.sunet.se",
"/(\\w+\\.)*rkh.se$/": "rkh.drive.sunet.se",
"/(\\w+\\.)*sics.se$/": "sics.drive.sunet.se",
"/(\\w+\\.)*slu.se$/": "slu.drive.sunet.se",
"/(\\w+\\.)*smhi.se$/": "smhi.drive.sunet.se",
"/(\\w+\\.)*sp.se$/": "sp.drive.sunet.se",
"/(\\w+\\.)*su.se$/": "su.drive.sunet.se",
"/(\\w+\\.)*sunet.se$/": "sunet.drive.sunet.se",
"/(\\w+\\.)*suni.se$/": "suni.drive.sunet.se",
"/(\\w+\\.)*swamid.se$/": "swamid.drive.sunet.se",
"/(\\w+\\.)*ths.se$/": "ths.drive.sunet.se",
"/(\\w+\\.)*uhr.se$/": "uhr.drive.sunet.se",
"/(\\w+\\.)*umu.se$/": "umu.drive.sunet.se",
"/(\\w+\\.)*uniarts.se$/": "uniarts.drive.sunet.se",
"/(\\w+\\.)*uu.se$/": "uu.drive.sunet.se",
"/(\\w+\\.)*vinnova.se$/": "vinnova.drive.sunet.se",
"/(\\w+\\.)*vr.se$/": "vr.drive.sunet.se",
"/$/": "extern.drive.sunet.se"
}

View file

@ -1,57 +1,58 @@
{
"/antagning.se$/": "antagning.drive.test.sunet.se",
"/bth.se$/": "bth.drive.test.sunet.se",
"/chalmers.se$/": "chalmers.drive.test.sunet.se",
"/du.se$/": "du.drive.test.sunet.se",
"/eduid.se$/": "extern.drive.test.sunet.se",
"/esh.se$/": "esh.drive.test.sunet.se",
"/fhs.se$/": "fhs.drive.test.sunet.se",
"/gih.se$/": "gih.drive.test.sunet.se",
"/gu.se$/": "gu.drive.test.sunet.se",
"/hb.se$/": "hb.drive.test.sunet.se",
"/shh.se$/": "shh.drive.test.sunet.se",
"/hh.se$/": "hh.drive.test.sunet.se",
"/hhs.se$/": "hhs.drive.test.sunet.se",
"/hig.se$/": "hig.drive.test.sunet.se",
"/his.se$/": "his.drive.test.sunet.se",
"/hj.se$/": "hj.drive.test.sunet.se",
"/hkr.se$/": "hkr.drive.test.sunet.se",
"/hv.se$/": "hv.drive.test.sunet.se",
"/irf.se$/": "irf.drive.test.sunet.se",
"/kb.se$/": "kb.drive.test.sunet.se",
"/ki.se$/": "ki.drive.test.sunet.se",
"/kkh.se$/": "kkh.drive.test.sunet.se",
"/kmh.se$/": "kmh.drive.test.sunet.se",
"/konstfack.se$/": "konstfack.drive.test.sunet.se",
"/kth.se$/": "kth.drive.test.sunet.se",
"/kva.se$/": "kva.drive.test.sunet.se",
"/liu.se$/": "liu.drive.test.sunet.se",
"/lnu.se$/": "lnu.drive.test.sunet.se",
"/ltu.se$/": "ltu.drive.test.sunet.se",
"/lu.se$/": "lu.drive.test.sunet.se",
"/mah.se$/": "mau.drive.test.sunet.se",
"/mau.se$/": "mau.drive.test.sunet.se",
"/mdh.se$/": "mdu.drive.test.sunet.se",
"/mdu.se$/": "mdu.drive.test.sunet.se",
"/miun.se$/": "miun.drive.test.sunet.se",
"/nordunet.se$/": "nordunet.drive.test.sunet.se",
"/nrm.se$/": "nrm.drive.test.sunet.se",
"/oru.se$/": "oru.drive.test.sunet.se",
"/rkh.se$/": "rkh.drive.test.sunet.se",
"/sics.se$/": "sics.drive.test.sunet.se",
"/slu.se$/": "slu.drive.test.sunet.se",
"/smhi.se$/": "smhi.drive.test.sunet.se",
"/sp.se$/": "sp.drive.test.sunet.se",
"/su.se$/": "su.drive.test.sunet.se",
"/sunet.se$/": "sunet.drive.test.sunet.se",
"/suni.se$/": "suni.drive.test.sunet.se",
"/swamid.se$/": "swamid.drive.test.sunet.se",
"/ths.se$/": "ths.drive.test.sunet.se",
"/uhr.se$/": "uhr.drive.test.sunet.se",
"/umu.se$/": "umu.drive.test.sunet.se",
"/uniarts.se$/": "uniarts.drive.test.sunet.se",
"/uu.se$/": "uu.drive.test.sunet.se",
"/vinnova.se$/": "vinnova.drive.test.sunet.se",
"/vr.se$/": "vr.drive.test.sunet.se",
"/(\\w+\\.)*antagning.se$/": "antagning.drive.test.sunet.se",
"/(\\w+\\.)*bth.se$/": "bth.drive.test.sunet.se",
"/(\\w+\\.)*chalmers.se$/": "chalmers.drive.test.sunet.se",
"/(\\w+\\.)*du.se$/": "du.drive.test.sunet.se",
"/(\\w+\\.)*eduid.se$/": "extern.drive.test.sunet.se",
"/(\\w+\\.)*esh.se$/": "esh.drive.test.sunet.se",
"/(\\w+\\.)*fhs.se$/": "fhs.drive.test.sunet.se",
"/(\\w+\\.)*gih.se$/": "gih.drive.test.sunet.se",
"/(\\w+\\.)*gu.se$/": "gu.drive.test.sunet.se",
"/(\\w+\\.)*hb.se$/": "hb.drive.test.sunet.se",
"/(\\w+\\.)*shh.se$/": "shh.drive.test.sunet.se",
"/(\\w+\\.)*hh.se$/": "hh.drive.test.sunet.se",
"/(\\w+\\.)*hhs.se$/": "hhs.drive.test.sunet.se",
"/(\\w+\\.)*hig.se$/": "hig.drive.test.sunet.se",
"/(\\w+\\.)*his.se$/": "his.drive.test.sunet.se",
"/(\\w+\\.)*hj.se$/": "hj.drive.test.sunet.se",
"/(\\w+\\.)*hkr.se$/": "hkr.drive.test.sunet.se",
"/(\\w+\\.)*hv.se$/": "hv.drive.test.sunet.se",
"/(\\w+\\.)*irf.se$/": "irf.drive.test.sunet.se",
"/(\\w+\\.)*kb.se$/": "kb.drive.test.sunet.se",
"/(\\w+\\.)*ki.se$/": "ki.drive.test.sunet.se",
"/(\\w+\\.)*kkh.se$/": "kkh.drive.test.sunet.se",
"/(\\w+\\.)*kmh.se$/": "kmh.drive.test.sunet.se",
"/(\\w+\\.)*konstfack.se$/": "konstfack.drive.test.sunet.se",
"/(\\w+\\.)*kth.se$/": "kth.drive.test.sunet.se",
"/(\\w+\\.)*kva.se$/": "kva.drive.test.sunet.se",
"/(\\w+\\.)*liu.se$/": "liu.drive.test.sunet.se",
"/(\\w+\\.)*lnu.se$/": "lnu.drive.test.sunet.se",
"/(\\w+\\.)*ltu.se$/": "ltu.drive.test.sunet.se",
"/(\\w+\\.)*lu.se$/": "lu.drive.test.sunet.se",
"/(\\w+\\.)*mah.se$/": "mau.drive.test.sunet.se",
"/(\\w+\\.)*mau.se$/": "mau.drive.test.sunet.se",
"/(\\w+\\.)*mdh.se$/": "mdu.drive.test.sunet.se",
"/(\\w+\\.)*mdu.se$/": "mdu.drive.test.sunet.se",
"/(\\w+\\.)*miun.se$/": "miun.drive.test.sunet.se",
"/(\\w+\\.)*nordunet.se$/": "nordunet.drive.test.sunet.se",
"/(\\w+\\.)*nordu.net$/": "nordunet.drive.test.sunet.se",
"/(\\w+\\.)*nrm.se$/": "nrm.drive.test.sunet.se",
"/(\\w+\\.)*oru.se$/": "oru.drive.test.sunet.se",
"/(\\w+\\.)*rkh.se$/": "rkh.drive.test.sunet.se",
"/(\\w+\\.)*sics.se$/": "sics.drive.test.sunet.se",
"/(\\w+\\.)*slu.se$/": "slu.drive.test.sunet.se",
"/(\\w+\\.)*smhi.se$/": "smhi.drive.test.sunet.se",
"/(\\w+\\.)*sp.se$/": "sp.drive.test.sunet.se",
"/(\\w+\\.)*su.se$/": "su.drive.test.sunet.se",
"/(\\w+\\.)*sunet.se$/": "sunet.drive.test.sunet.se",
"/(\\w+\\.)*suni.se$/": "suni.drive.test.sunet.se",
"/(\\w+\\.)*swamid.se$/": "swamid.drive.test.sunet.se",
"/(\\w+\\.)*ths.se$/": "ths.drive.test.sunet.se",
"/(\\w+\\.)*uhr.se$/": "uhr.drive.test.sunet.se",
"/(\\w+\\.)*umu.se$/": "umu.drive.test.sunet.se",
"/(\\w+\\.)*uniarts.se$/": "uniarts.drive.test.sunet.se",
"/(\\w+\\.)*uu.se$/": "uu.drive.test.sunet.se",
"/(\\w+\\.)*vinnova.se$/": "vinnova.drive.test.sunet.se",
"/(\\w+\\.)*vr.se$/": "vr.drive.test.sunet.se",
"/$/": "extern.drive.test.sunet.se"
}

21
templates/application/nocc.erb Executable file
View file

@ -0,0 +1,21 @@
#!/bin/bash
# Wrapper around the Nextcloud "occ" admin tool inside a docker container.
# Usage: nocc [container] <occ arguments...>
# If the first argument starts with "nextcloud" it names the container;
# otherwise the default container "nextcloud_app_1" is used.
if [[ "${1}" =~ ^nextcloud ]]; then
  container="${1}"
  shift
else
  container="nextcloud_app_1"
fi
# Forward every OC_* variable from our environment into the container so
# occ sees the same overrides (e.g. OC_PASS).
oc_list=$(env | grep 'OC_')
if [[ -n "${oc_list}" ]]; then
  for row in ${oc_list}; do
    MY_VARS="${MY_VARS} -e ${row}"
  done
fi
# ${MY_VARS} is intentionally unquoted: it holds repeated "-e VAR=value"
# words that must be split into separate arguments.
# shellcheck disable=SC2086
docker exec -i ${MY_VARS} -u www-data "${container}" php --define apc.enable_cli=1 /var/www/html/occ "$@"
exit 0

View file

@ -7,6 +7,7 @@ else
container="nextcloud_app_1"
fi
oc_list=$(env| grep 'OC_')
if [[ "x${oc_list}" != "x" ]]; then
for row in $(echo "${oc_list}"); do
@ -14,4 +15,19 @@ if [[ "x${oc_list}" != "x" ]]; then
done
fi
if [[ ${1} == 'config:editable' ]]; then
echo "config:editable is deprecated"
exit 0
fi
docker exec ${container} chmod u+w /var/www/html/config/
docker exec -ti ${MY_VARS} -u www-data ${container} sh -c 'sed "s/config_is_read_only\(.\) => true,/config_is_read_only\1 => false,/" /var/www/html/config/config.php > /var/www/html/config/config.php.tmp'
docker exec -ti ${MY_VARS} -u www-data ${container} cp /var/www/html/config/config.php.tmp /var/www/html/config/config.php
docker exec -ti ${MY_VARS} -u www-data ${container} rm /var/www/html/config/config.php.tmp
docker exec -ti ${MY_VARS} -u www-data ${container} php --define apc.enable_cli=1 /var/www/html/occ "$@"
docker exec -ti ${MY_VARS} -u www-data ${container} sh -c 'sed "s/config_is_read_only\(.\) => false,/config_is_read_only\1 => true,/" /var/www/html/config/config.php > /var/www/html/config/config.php.tmp'
docker exec -ti ${MY_VARS} -u www-data ${container} cp /var/www/html/config/config.php.tmp /var/www/html/config/config.php
docker exec -ti ${MY_VARS} -u www-data ${container} rm /var/www/html/config/config.php.tmp
docker exec ${container} chmod u-w /var/www/html/config/
exit 0

View file

@ -0,0 +1,79 @@
#!/bin/bash
mountid="${1}"
user="${2}"
container="${3}"
if [[ -z ${mountid} ]] || [[ -z ${user} ]]; then
echo "We need a valid mount id and user to proceed"
echo "Usage: ${0} <mountid> <user> [<container>]"
exit
fi
if [[ -z ${container} ]]; then
container="nextcloud_app_1"
fi
occ="/usr/local/bin/occ ${container}"
function get_config {
${occ} files_external:config ${mountid} ${1} | tr -d '\n\t\r'
}
echo "Gathering information, hang tight."
echo -n "."
bucket="$(get_config bucket)"
echo -n "."
hostname="$(get_config hostname)"
echo -n "."
key="$(get_config key)"
echo -n "."
region="$(get_config region)"
echo -n "."
secret="$(get_config secret)"
jsonfile="/tmp/${user}-user-bucket.json"
mount_point="${user/@/-}"
mount_point="${mount_point/./-}-user-bucket"
echo "This will remount the user bucket with mountid ${mountid} for ${user} as project bucket with mountpoint ${mount_point}."
read -r -p "Press enter to continue"
echo '
[
{
"mount_point": "\/'${mount_point}'",
"storage": "\\OCA\\Files_External\\Lib\\Storage\\AmazonS3",
"authentication_type": "amazons3::accesskey",
"configuration": {
"bucket": "'${bucket}'",
"hostname": "'${hostname}'",
"key": "'${key}'",
"legacy_auth": false,
"port": "443",
"region": "'${region}'",
"secret": "'${secret}'",
"storageClass": "",
"useMultipartCopy": true,
"use_path_style": true,
"use_ssl": true
},
"options": {
"encrypt": true,
"previews": true,
"enable_sharing": true,
"filesystem_check_changes": 0,
"encoding_compatibility": false,
"readonly": false
},
"applicable_users": [
],
"applicable_groups": ["admin"]
}
]
' > "${jsonfile}"
docker cp ${jsonfile} ${container}:/${jsonfile}
${occ} files_external:import /${jsonfile}
docker exec ${container} rm /${jsonfile}
rm ${jsonfile}
${occ} files_external:delete ${mountid}

View file

@ -0,0 +1,34 @@
#!/bin/bash
# Scan every external-storage mount in every Nextcloud container on this
# host and report mounts whose files_external:scan printed errors.
error_ids=""
# Only run if this is the only instance of this script running
# note: since this script forks to run pgrep, we need -eq 2 here
# shellcheck disable=SC2126
if [[ $(pgrep -a -f "${0}" | grep -v scriptherder | wc -l) -eq 2 ]]; then
  # We sleep a deterministic amount of time, which will be between 0 and 128 m
  # and always the same within a specific host, but will differ between hosts
  # (derived from the host's first MAC address).
  sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk '{print $1}') / 2))m
  errors=''
  for container in $(/usr/local/bin/get_containers); do
    error_ids="${error_ids} ${container}: "
    for id in $(/usr/local/bin/nocc "${container}" files_external:list --all --output json | jq '.[].mount_id'); do
      # grep both prints any "Error" lines for the operator to see and
      # signals their presence via its exit status.
      if /usr/local/bin/nocc "${container}" files_external:scan "${id}" | grep Error; then
        errors="${errors} ${id}"
        error_ids="${error_ids} ${id}"
      fi
    done
  done
else
  echo "Another instance of this script is already running, exiting"
  pgrep -a -f "${0}" | grep -v scriptherder
  exit 0
fi
if [[ -n "${errors}" ]]; then
  echo "Errors found in the following mounts: ${error_ids}"
  exit 1
fi
echo "No errors found"
exit 0

View file

@ -0,0 +1,69 @@
# Jupyter site stack: nginx reverse proxy, ACME (Let's Encrypt) companion,
# and the site web container, with an internal-only backend network.
version: "3.7"
services:
  nginx:
    image: docker.io/nginxproxy/nginx-proxy:latest
    container_name: nginx
    networks:
      - internal_network
      - external_network
    dns:
      - 89.32.32.32
    ports:
      - "80:80"
      - "443:443"
    labels:
      com.github.jrcs.letsencrypt_nginx_proxy_companion.nginx_proxy: "true"
    volumes:
      - /opt/jupyter_site/nginx/certs:/etc/nginx/certs:ro
      - /opt/jupyter_site/nginx/conf:/etc/nginx/conf.d
      - /opt/jupyter_site/nginx/dhparam:/etc/nginx/dhparam
      - /opt/jupyter_site/nginx/html:/usr/share/nginx/html
      - /opt/jupyter_site/nginx/vhost:/etc/nginx/vhost.d
      - /var/run/docker.sock:/tmp/docker.sock:ro
    environment:
      - ENABLE_IPV6=true
    restart: unless-stopped
  acme:
    image: docker.io/nginxproxy/acme-companion:latest
    container_name: acme
    networks:
      - external_network
    dns:
      - 89.32.32.32
    volumes:
      - /opt/jupyter_site/nginx/acme:/etc/acme.sh
      - /opt/jupyter_site/nginx/certs:/etc/nginx/certs:rw
      - /opt/jupyter_site/nginx/conf:/etc/nginx/conf.d
      - /opt/jupyter_site/nginx/dhparam:/etc/nginx/dhparam
      - /opt/jupyter_site/nginx/html:/usr/share/nginx/html
      - /opt/jupyter_site/nginx/vhost:/etc/nginx/vhost.d:rw
      - /var/run/docker.sock:/var/run/docker.sock:ro
    environment:
      - NGINX_PROXY_CONTAINER=nginx
      - DEFAULT_EMAIL=drive@sunet.se
    depends_on:
      - nginx
    restart: unless-stopped
  web:
    image: docker.sunet.se/drive/jupyter-site:<%= @site_version %>
    container_name: web
    restart: always
    networks:
      - internal_network
    ports:
      - "127.0.0.1:3000:3000"
    dns:
      - 89.32.32.32
    environment:
      - VIRTUAL_HOST=<%= @domain %>
      - VIRTUAL_PATH=/
      - VIRTUAL_PORT=3000
      - LETSENCRYPT_HOST=<%= @domain %>
networks:
  external_network:
  internal_network:
    internal: true

View file

@ -1,16 +0,0 @@
<?php
$CONFIG = [
'DB' => [
'host' => "<%= @dbhost %>",
'db' => "lookup" ,
'user' => "lookup",
'pass' => "<%= @mysql_user_password %>",
],
'GLOBAL_SCALE' => true,
'AUTH_KEY' => "<%= @gss_jwt_key %>",
];

View file

@ -1,24 +0,0 @@
version: '3.2'
services:
app:
image: docker.sunet.se/drive/nextcloud-lookup:<%= @lookup_version %>
restart: always
volumes:
- /opt/lookup/config.php:/var/www/html/config/config.php
networks:
- default
- proxysql_proxysql
dns:
- 89.46.20.75
- 89.46.21.29
- 89.32.32.32
ports:
- 443:443
command: apache2-foreground
tty: true
networks:
proxysql_proxysql:
external: true

View file

@ -1,62 +0,0 @@
SET SQL_MODE = "NO_AUTO_VALUE_ON_ZERO";
SET time_zone = "+00:00";
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
/*!40101 SET NAMES utf8mb4 */;
CREATE DATABASE IF NOT EXISTS `lookup` DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
USE `lookup`;
CREATE USER 'lookup'@'%' IDENTIFIED BY '<%= @mysql_user_password %>';
GRANT ALL PRIVILEGES ON lookup.* TO 'lookup'@'%' IDENTIFIED BY '<%= @mysql_user_password %>';
DROP TABLE IF EXISTS `emailValidation`;
CREATE TABLE IF NOT EXISTS `emailValidation` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`storeId` int(11) NOT NULL,
`token` varchar(16) COLLATE utf8mb4_unicode_ci NOT NULL,
PRIMARY KEY (`id`),
KEY `token` (`token`),
KEY `storeId` (`storeId`)
) ENGINE=InnoDB AUTO_INCREMENT=16 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
DROP TABLE IF EXISTS `store`;
CREATE TABLE IF NOT EXISTS `store` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`userId` int(11) NOT NULL,
`k` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
`v` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
`valid` tinyint(1) NOT NULL DEFAULT '0',
PRIMARY KEY (`id`),
KEY `key` (`k`(191)),
KEY `value` (`v`(191)),
KEY `userId` (`userId`)
) ENGINE=InnoDB AUTO_INCREMENT=51 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
DROP TABLE IF EXISTS `users`;
CREATE TABLE IF NOT EXISTS `users` (
`id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT,
`federationId` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
`timestamp` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
PRIMARY KEY (`id`),
KEY `federationId` (`federationId`(191))
) ENGINE=InnoDB AUTO_INCREMENT=15 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
DROP TABLE IF EXISTS `toVerify`;
CREATE TABLE IF NOT EXISTS `toVerify` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`userId` int(11) NOT NULL,
`storeId` int(11) NOT NULL,
`property` varchar(512) COLLATE utf8mb4_unicode_ci NOT NULL,
`location` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
`tries` int(11) NOT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=16 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;

View file

@ -0,0 +1,3 @@
-- Create the Roundcube webmail database and its application user.
-- NOTE(review): the `GRANT ... IDENTIFIED BY` form is MariaDB syntax
-- (removed in MySQL 8.0) and the password clause is redundant with the
-- CREATE USER above — confirm the target server is MariaDB.
CREATE SCHEMA roundcubemail;
CREATE USER 'roundcube'@'%' IDENTIFIED BY '<%= @roundcube_password %>';
GRANT ALL PRIVILEGES ON roundcubemail.* TO 'roundcube'@'%' IDENTIFIED BY '<%= @roundcube_password %>';

View file

@ -0,0 +1 @@
<%= @custdata.to_json %>

View file

@ -4,6 +4,7 @@ services:
db:
image: docker.sunet.se/drive/mariadb:<%= @mariadb_version %>
container_name: mariadb_db_1
restart: always
volumes:
- /etc/mariadb/backups:/backups
@ -20,11 +21,6 @@ services:
- MYSQL_ROOT_PASSWORD=<%= @mysql_root_password %>
- BOOTSTRAP=<%= @bootstrap %>
- FORCE_BOOTSTRAP=0
ports:
- 3306:3306
- 4444:4444
- 4567:4567
- 4568:4568
command: "--wsrep_cluster_address=gcomm://<%= @db_ip[0] %>,<%= @db_ip[1] %>,<%= @db_ip[2] %>"
command: "--wsrep_cluster_address=gcomm://<%= @db_ip.join(',') %>"
tty: true

View file

@ -0,0 +1,32 @@
#!/bin/bash
# Upload per-department user lists, and the customer metadata file, to the
# statistics rclone remote. Rendered from ERB: one dump-and-upload block is
# emitted for every domain under "billdomains" for each customer in the
# current environment.
<% basedir="statistics:drive-server-coms" -%>
<% cupath="/opt/mariadb/statistics/users/" -%>
<% custdata="/opt/mariadb/statistics/custdata.json" -%>
status=0
<% @custdata.each do |cust,data| -%>
#Customer <%= cust %> has no billing departments.
<% if defined?(data[@environment]["billdomains"]) && data[@environment]["billdomains"] -%>
mkdir -p /opt/mariadb/statistics/users/<%= cust %>
chmod '0700' /opt/mariadb/statistics/users/<%= cust %>
<% data[@environment]["billdomains"].each do |dom| -%>
# Dump the department's users, then upload only if the dump is valid JSON.
/root/tasks/listusersbydep.sh <%= cust %> <%= dom %> > /opt/mariadb/statistics/users/<%= cust %>/users-<%= dom.gsub(/[.]/, '-') %>.json
if jq . <%= cupath + cust %>/users-<%= dom.gsub(/[.]/, '-') %>.json &>/dev/null
then
timeout 30s rclone copy -c --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies <%= cupath + cust %>/users-<%= dom.gsub(/[.]/, '-') %>.json <%= basedir%>/<%= cust %>-<%= @environment%>/
[[ $? -eq 0 ]] || { status=1 ; echo "Error: Upload of user data failed." ; }
else
echo "Error in json data"
status=1
fi
<% end -%>
<% end -%>
<% end -%>
# Upload the customer metadata file if it exists on this host.
if [[ -f <%= custdata %> ]]
then
timeout 30s rclone copy -c --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies <%= custdata %> <%= basedir%>/
fi
exit ${status}

View file

@ -0,0 +1,24 @@
#!/bin/bash
# Print a JSON object mapping uid -> displayname for all users of <customer>
# whose account email ends in @<department email domain>. Global-scale users
# are UNIONed with SAML users. The SQL is assembled with $'...' quoting so
# single quotes and escaped double quotes survive the docker exec
# /bin/bash -c layer; the trailing sed turns literal "\n" sequences emitted
# by JSON_PRETTY back into real newlines.
function usage () {
printf "Usage:\t%s <customer> <department email domain>\n" "${0##*/}"
printf "Example:\t%s uu int.uu.se\n" "${0##*/}"
}
# Both arguments are required and restricted to [a-zA-Z0-9.] so they are
# safe to interpolate into the SQL below.
[[ -z "${1}" || -z "${2}" || ! "${1}" =~ ^[a-zA-Z0-9.]{1,200}$ || ! "${2}" =~ ^[a-zA-Z0-9.]{1,200}$ ]] && usage && exit 1
depdom="${2}"
customer="${1}"
docker exec mariadb_db_1 /bin/bash -c 'mysql -p${MYSQL_ROOT_PASSWORD} -NB -e '\
$'\'select JSON_PRETTY(JSON_OBJECTAGG(uid,displayname)) from ('\
$' select nextcloud_'"${customer}"$'.oc_accounts.uid,nextcloud_'"${customer}"$'.oc_global_scale_users.displayname'\
$' from nextcloud_'"${customer}"$'.oc_accounts,nextcloud_'"${customer}"$'.oc_global_scale_users'\
$' where nextcloud_'"${customer}"$'.oc_accounts.uid = nextcloud_'"${customer}"$'.oc_global_scale_users.uid'\
$' AND JSON_EXTRACT(nextcloud_'"${customer}"$'.oc_accounts.data, "$.email.value") like "%@'"${depdom}"$'\\\""'\
$' UNION'\
$' select nextcloud_'"${customer}"$'.oc_accounts.uid,nextcloud_'"${customer}"$'.oc_user_saml_users.displayname'\
$' from nextcloud_'"${customer}"$'.oc_accounts,nextcloud_'"${customer}"$'.oc_user_saml_users'\
$' where nextcloud_'"${customer}"$'.oc_accounts.uid = nextcloud_'"${customer}"$'.oc_user_saml_users.uid'\
$' AND JSON_EXTRACT(nextcloud_'"${customer}"$'.oc_accounts.data, "$.email.value") like "%@'"${depdom}"$'\\\"") as users\'' \
| sed 's/\\n/\n/g'

View file

@ -0,0 +1,4 @@
#!/bin/bash
# Open a root MySQL shell (or run the given mysql arguments) inside the
# mariadb container, reading the root password from the compose file so it
# never has to be typed.
# Select the MYSQL_ROOT_PASSWORD entry explicitly instead of assuming it is
# the first environment item, and keep everything after the first '=' so
# passwords containing '=' survive.
pw=$(yq -r '.services.db.environment[]' /opt/mariadb/docker-compose.yml | grep '^MYSQL_ROOT_PASSWORD=' | cut -d '=' -f 2-)
docker exec -ti mariadb_db_1 mysql -u root -p"${pw}" "${@}"

View file

@ -0,0 +1,3 @@
#!/bin/bash
# Trim MariaDB binary logs older than six hours to keep disk usage bounded.
cutoff="$(date -d '6 hours ago' '+%Y-%m-%d %H:%M:%S')"
docker exec mariadb_db_1 mysql -u root -p'<%= @mysql_root_password %>' -N -B -e "PURGE BINARY LOGS BEFORE '${cutoff}'"

View file

@ -1,6 +1,6 @@
#!/bin/bash
result="$(docker exec mariadbbackup_mariadb_backup_1 mysql -p<%= @mysql_root_password %> -BN -e 'show status like "slave_running"')"
result="$(docker exec -u root mariadb_backup_mariadb_backup_1 mysql -p<%= @mysql_root_password %> -BN -e 'show status like "slave_running"')"
if [[ "${result}" == "Slave_running ON" ]]; then
echo "OK: Replica running"
exit 0

View file

@ -8,10 +8,12 @@ mkdir -p "${backup_dir}"
if [[ -z ${customer} ]]; then
buopts="--slave-info --safe-slave-backup"
dumpopts="--dump-slave"
mysql -p${MYSQL_ROOT_PASSWORD} -e "stop slave"
mysql -p"${MYSQL_ROOT_PASSWORD}" -u root -e "stop slave"
fi
mariadb-backup --backup ${buopts} -u root -p${MYSQL_ROOT_PASSWORD} --stream=xbstream | gzip >"${backup_dir}/${stream_name}"
mysqldump --all-databases --single-transaction ${dumpopts} -u root -p${MYSQL_ROOT_PASSWORD} | gzip >"${backup_dir}/${dump_name}"
# shellcheck disable=SC2086
mariadb-backup --backup ${buopts} -u root -p"${MYSQL_ROOT_PASSWORD}" --stream=xbstream | gzip >"${backup_dir}/${stream_name}"
# shellcheck disable=SC2086
mysqldump --all-databases --single-transaction ${dumpopts} -u root -p"${MYSQL_ROOT_PASSWORD}" | gzip >"${backup_dir}/${dump_name}"
if [[ -z ${customer} ]]; then
mysql -p${MYSQL_ROOT_PASSWORD} -e "start slave"
mysql -p"${MYSQL_ROOT_PASSWORD}" -u root -e "start slave"
fi

View file

@ -3,7 +3,8 @@ version: '3.2'
services:
mariadb_backup:
image: docker.sunet.se/drive/mariadb
image: docker.sunet.se/drive/mariadb:<%= @mariadb_version %>
container_name: mariadb_backup_mariadb_backup_1
dns:
- 89.46.20.75
- 89.46.21.29

View file

@ -3,7 +3,7 @@
container=${1}
customer=${2}
if [[ -z ${container} ]]; then
container='mariadbbackup_mariadb_backup_1'
container='mariadb_backup_mariadb_backup_1'
fi
if [[ -z ${customer} ]]; then
location='<%= @location %>'

View file

@ -3,7 +3,7 @@
container=${1}
customer=${2}
if [[ -z ${container} ]]; then
container='mariadbbackup_mariadb_backup_1'
container='mariadb_backup_mariadb_backup_1'
fi
if [[ -z ${customer} ]]; then
location='<%= @location %>'
@ -13,11 +13,18 @@ dexec="docker exec ${container}"
password=$(${dexec} env | grep MYSQL_ROOT_PASSWORD | awk -F '=' '{print $2}')
mysql="${dexec} mysql -p${password}"
mysql="${dexec} mysql -p${password} -u root"
users="$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_global_scale_users')"
users="${users}
$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_users')"
if [[ "$(${mysql} -NB -e 'select exists(select * from information_schema.TABLES where TABLE_SCHEMA = "nextcloud" and TABLE_NAME = "oc_global_scale_users")')" == "1" ]]
then
users="$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_global_scale_users')"
fi
users="${users}"$'\n'"$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_users')"
if [[ "$(${mysql} -NB -e 'select exists(select * from information_schema.TABLES where TABLE_SCHEMA = "nextcloud" and TABLE_NAME = "oc_user_saml_users")')" == "1" ]]
then
users="${users}"$'\n'"$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_user_saml_users')"
fi
users="$(echo "${users}" | sort | uniq)"
project="statistics"
bucket="drive-server-coms"
@ -25,7 +32,7 @@ base_dir="${project}:${bucket}"
mountpoint="/opt/statistics"
customer_dir="${mountpoint}/${location}"
mkdir -p "${customer_dir}"
rclone mkdir "${base_dir}/${location}"
rclone mkdir --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies "${base_dir}/${location}"
echo "${users}" | awk 'BEGIN{print "{"} {print t "\""$1"\": \""$2"\""} {t=","} END{print "}"}' | jq . >"${customer_dir}/users.json"
status=0
@ -34,7 +41,7 @@ if ! jq . "${customer_dir}/users.json" &>/dev/null; then
fi
if [[ ${status} -eq 0 ]]; then
# something is wrong if we cant copy the file in 30 seconds, so we should note that
if ! timeout 30s rclone copy --webdav-headers "Host=sunet.drive.sunet.se" --use-cookies "${customer_dir}/users.json" "${base_dir}/${location}/"; then
if ! timeout 30s rclone copy -c --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies "${customer_dir}/users.json" "${base_dir}/${location}/"; then
status=1
fi
fi

View file

@ -1,6 +1,6 @@
[statistics]
type = webdav
url = https://37.156.196.13/remote.php/dav/files/_script/
url = https://89.45.236.246/remote.php/dav/files/_script/
vendor = nextcloud
user = _script
pass = <%= @statistics_secret %>

View file

@ -1,3 +1,3 @@
#!/bin/bash
docker exec mariadbbackup_mariadb_backup_1 mysql -u root -p'<%= @mysql_root_password %>' -N -B -e "show status like 'Slave_running'"
docker exec mariadb_backup_mariadb_backup_1 mysql -u root -p'<%= @mysql_root_password %>' -N -B -e "show status like 'Slave_running'"

View file

@ -0,0 +1,8 @@
<%# Render one Naemon hostgroup definition per entry in @extra_host_groups,
    a hash of group name => comma-separated member host list. -%>
<% @extra_host_groups.each do |group, members| -%>
# <%= group %>
define hostgroup {
hostgroup_name <%= group %>
alias <%= group %>
members <%= members %>
}
<% end -%>

View file

@ -131,39 +131,13 @@ define host {
<% end -%>
use monitor-site
}
<% if site.match('lookup') %>
define service {
notes_url https://<%= site %>
action_url /grafana/dashboard/script/histou.js?host=$HOSTNAME$&service=$SERVICEDISPLAYNAME$&theme=light&annotations=true
check_command check_https
check_interval 5
check_period 24x7
<% if @environment == 'prod' %>
contacts slack
<% else -%>
contact_groups naemon-admins
<% end -%>
host_name <%= site %>
max_check_attempts 3
notification_interval 60
notification_period 24x7
retry_interval 1
service_description HTTPS
<% if site.match('test') -%>
servicegroups test-sites
<% else -%>
servicegroups prod-sites
<% end -%>
}
<% end -%>
<% unless site.match('lookup') %>
define service {
notes_url https://<%= site %>/status.php
action_url /grafana/dashboard/script/histou.js?host=$HOSTNAME$&service=$SERVICEDISPLAYNAME$&theme=light&annotations=true
check_command check_nextcloud
check_interval 5
check_period 24x7
<% cur_cust = site.sub('/\.drive.*/','') %>
<% cur_cust = site.gsub(/\.drive.*/,'') %>
# 'check_nextcloud' command definition
<% if @environment == 'prod' and not site.match('test') and @fullnodes.include?(cur_cust) %>
contacts slack
@ -186,10 +160,12 @@ define service {
notes_url https://<%= site %>/status.php
action_url /grafana/dashboard/script/histou.js?host=$HOSTNAME$&service=$SERVICEDISPLAYNAME$&theme=light&annotations=true
<% if site.match('test') -%>
check_command check_nextcloud_version!'<%= @nextcloud_version_test %>'
<% version_variable = 'nextcloud_version_test_' << cur_cust -%>
<% else -%>
check_command check_nextcloud_version!'<%= @nextcloud_version_prod %>'
<% version_variable = 'nextcloud_version_prod_' << cur_cust -%>
<% end -%>
<% version = String(scope.lookupvar(version_variable)).gsub(/-[0-9]+/,'') -%>
check_command check_nextcloud_version!'<%= version %>'
check_interval 5
check_period 24x7
contact_groups naemon-admins
@ -206,4 +182,3 @@ define service {
<% end -%>
}
<% end -%>
<% end -%>

View file

@ -3,10 +3,12 @@
config_php='/var/www/html/config/config.php'
dbhost="<%= @dbhost %>"
mysql_user_password="<%= @mysql_user_password %>"
admin_password="<%= @admin_password %>"
location="<%= @location %>"
bucket="<%= @s3_bucket %>"
customer="<%= @customer %>"
echo "Setting temp admin password"
apt update && apt install -y apg
admin_password="$(apg -m 40 | head -1)"
/usr/bin/mysql -e "drop database nextcloud" -u nextcloud -p"${mysql_user_password}" -h "${dbhost}" >/dev/null 2>&1
/usr/bin/mysql -e "create database nextcloud" -u nextcloud -p"${mysql_user_password}" -h "${dbhost}" >/dev/null 2>&1
@ -35,6 +37,11 @@ EOF
instanceid=$(grep -E "^ 'instanceid'" ${config_php} | awk -F "'" '{print $4}')
secret=$(grep -E "^ 'secret'" ${config_php} | awk -F "'" '{print $4}')
passwordsalt=$(grep -E "^ 'passwordsalt'" ${config_php} | awk -F "'" '{print $4}')
echo "Now delete the admin user:"
echo " occ <container> user:delete admin"
echo "and then create a new admin user:"
echo " /usr/local/bin/add_admin_user <username> <email address> <container>"
echo ""
echo "${customer}_instanceid: DEC::PKCS7[${instanceid}]!"
echo "${customer}_secret: DEC::PKCS7[${secret}]!"
echo "${customer}_passwordsalt: DEC::PKCS7[${passwordsalt}]!"

View file

@ -5,7 +5,7 @@ no_files=30 # Keep this many files as an archive, script is run once a week
# a specific host, but will differ between hosts
sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk '{print $1}') / 2))m
for logfile in $(ls /opt/multinode/*/{nextcloud.log,server/server.log}); do
for logfile in $(ls /opt/multinode/*/{nextcloud.log,audit.log,server/server.log}); do
if [[ -f ${logfile}.gz.${no_files} ]]; then
rm ${logfile}.gz.${no_files}
fi

View file

@ -11,11 +11,10 @@ services:
volumes:
- <%= @redis_conf_dir %>:/data
command: redis-server /data/redis.conf --loglevel verbose
restart: always
networks:
- mariadb<%= @customer %>_<%= @customer %>
- proxysql_proxysql
restart: always
networks:
mariadb<%= @customer %>_<%= @customer %>:
proxysql_proxysql:
external: true

View file

@ -13,19 +13,21 @@ services:
- /opt/nextcloud/cli.php.ini:/etc/php/8.0/cli/php.ini
- <%= @config_php_path %>:/var/www/html/config/config.php
- <%= @nextcloud_log_path %>:/var/www/html/data/nextcloud.log
- <%= @audit_log_path %>:/var/www/html/data/audit.log
- <%= @rclone_conf_path %>:/rclone.conf
networks:
- default
- mariadb<%= @customer %>_<%= @customer %>
- proxysql_proxysql
dns:
- 89.46.20.75
- 89.46.21.29
- 89.32.32.32
ports:
- <%= @https_port %>:443
command: apachectl -D FOREGROUND
command: sh -c 'tail -F /var/www/html/data/nextcloud.log /var/www/html/data/audit.log| tee -a /proc/1/fd/2 & apachectl -D FOREGROUND'
tty: true
networks:
mariadb<%= @customer %>_<%= @customer %>:
proxysql_proxysql:
external: true

View file

@ -0,0 +1,4 @@
#!/bin/bash
# List the multinode customers mapped to this host in common.yaml,
# excluding any customer that also appears under 'singlenodes'.
me=$(hostname -s)
# Build an alternation regex ("cust1|cust2|...") of all singlenode customers.
exclude=$(yq -r '.singlenodes[]' /etc/hiera/data/common.yaml | sed -e 's/^- //' -e 's/$/|/' | tr -d '\n' | sed 's/|$//')
yq -r '.multinode_mapping| to_entries |map({name: .key} + .value)| map(select(.server == "'"${me}"'")) |.[] |.name' /etc/hiera/data/common.yaml |
  grep -Ev "${exclude}"

View file

@ -0,0 +1,5 @@
#!/bin/bash
# List the customers mapped to this host in common.yaml that are also
# listed under 'singlenodes' (the inverse filter of the multinode variant).
me="$(hostname -s)"
# Build an alternation regex ("cust1|cust2|...") of all singlenode customers.
include=$(yq -r '.singlenodes[]' /etc/hiera/data/common.yaml | sed -e 's/^- //' -e 's/$/|/' | tr -d '\n' | sed 's/|$//')
yq -r '.multinode_mapping| to_entries |map({name: .key} + .value)| map(select(.server == "'"${me}"'")) |.[] |.name' /etc/hiera/data/common.yaml |
  grep -E "${include}"

View file

@ -0,0 +1,142 @@
datadir="/var/lib/proxysql"
# ProxySQL admin configuration section
admin_variables=
{
admin_credentials="admin:<%= @admin_password%>;cluster_admin:<%= @cluster_admin_password %>"
mysql_ifaces="0.0.0.0:6032"
refresh_interval=2000
web_enabled=true
web_port=6080
stats_credentials="stats:<%= @admin_password %>"
cluster_username="cluster_admin"
cluster_password="<%= @cluster_admin_password %>"
cluster_check_interval_ms=200
cluster_check_status_frequency=100
cluster_mysql_query_rules_save_to_disk=true
cluster_mysql_servers_save_to_disk=true
cluster_mysql_users_save_to_disk=true
cluster_proxysql_servers_save_to_disk=true
cluster_mysql_query_rules_diffs_before_sync=3
cluster_mysql_servers_diffs_before_sync=3
cluster_mysql_users_diffs_before_sync=3
cluster_proxysql_servers_diffs_before_sync=3
}
# MySQL/MariaDB related section
mysql_variables=
{
threads=4
max_connections=2048
default_query_delay=0
default_query_timeout=36000000
have_compress=true
poll_timeout=2000
interfaces="0.0.0.0:3306;/tmp/proxysql.sock"
default_schema="information_schema"
stacksize=1048576
server_version="10.5.5"
connect_timeout_server=10000
monitor_history=60000
monitor_connect_interval=2000
monitor_ping_interval=2000
ping_interval_server_msec=10000
ping_timeout_server=200
commands_stats=true
sessions_sort=true
monitor_username="proxysql"
monitor_password="<%= @monitor_password %>"
monitor_galera_healthcheck_interval=2000
monitor_galera_healthcheck_timeout=800
}
# Specify all ProxySQL hosts here
proxysql_servers =
(
<%- index = 0 -%>
<%- @nextcloud_ip.each do |appserver| -%>
<%- index += 1 -%>
{
hostname="<%= appserver %>"
port=6032
comment="proxysql<%= index %>"
},
<%- end -%>
)
# HG10 - single-writer
# HF30 - multi-writer
mysql_galera_hostgroups =
(
{
writer_hostgroup=10
backup_writer_hostgroup=20
reader_hostgroup=30
offline_hostgroup=9999
max_writers=1
writer_is_also_reader=1
max_transactions_behind=0
active=1
}
)
# List all MariaDB Galera nodes here
mysql_servers =
(
<%- @db_ip.each do |db| -%>
{
address="<%= db %>"
port=3306
hostgroup=10
max_connections=100
},
<%- end -%>
)
# Default query rules:
# - All writes -> HG10 (single-writer)
# - All reads -> HG30 (multi-writer)
mysql_query_rules =
(
{
rule_id=100
active=1
match_pattern="^SELECT .* FOR UPDATE"
destination_hostgroup=10
apply=1
},
{
rule_id=200
active=1
match_pattern="^SELECT .*"
destination_hostgroup=30
apply=1
},
{
rule_id=300
active=1
match_pattern=".*"
destination_hostgroup=10
apply=1
}
)
# All MySQL user that you want to pass through this instance
# - The MySQL user must be created first in the DB server and grant it to access from this ProxySQL host
mysql_users =
(
<%- index = 0 -%>
<%- @allnames.each do |customer| -%>
{
username="nextcloud_<%= customer %>"
password="<%= @passwords[index] %>"
default_hostgroup=10
transaction_persistent=<%= @transaction_persistent %>
active=1
},
<%- index += 1 -%>
<%- end -%>
)

View file

@ -0,0 +1,21 @@
#!/bin/bash
# Restart each customer's services around a docker prune to reclaim space.
# Usage: cleanup [include_paying]
#   Any non-empty first argument also includes paying customers.
include_paying="${1}"
customers="$(/usr/local/bin/get_non_paying_customers)"
echo "Starting cleanup: $(date)"
if [[ -n ${include_paying} ]]; then
  echo "Including paying customers: $(date)"
  customers="${customers}
$(/usr/local/bin/get_paying_customers)"
fi
# Block automatic cosmos runs while services are down. The EXIT trap
# guarantees the flag file is removed even if the script is interrupted
# or a systemctl/docker command fails mid-loop (previously a failure
# left the flag behind, permanently disabling automatic cosmos).
touch /etc/no-automatic-cosmos
trap 'rm -f /etc/no-automatic-cosmos' EXIT
# Word splitting of ${customers} is intentional: one customer per word.
for customer in ${customers}; do
  echo "Stopping ${customer}: $(date)"
  systemctl stop sunet-{redis,nextcloud}-"${customer}"
  echo "Pruning docker: $(date)"
  docker system prune -af --volumes
  echo "Starting ${customer}: $(date)"
  systemctl start sunet-{redis,nextcloud}-"${customer}"
done
echo "Cleanup done: $(date)"

View file

@ -0,0 +1,2 @@
---
# Portal application configuration (rendered by Puppet/ERB).
# domain: base DNS domain the portal is served under.
domain: "<%= @domain %>"

View file

@ -0,0 +1,71 @@
version: "3.7"
services:
nginx:
image: docker.io/nginxproxy/nginx-proxy:latest
container_name: nginx
networks:
- internal_network
- external_network
dns:
- 89.32.32.32
ports:
- "80:80"
- "443:443"
labels:
com.github.jrcs.letsencrypt_nginx_proxy_companion.nginx_proxy: "true"
volumes:
- /opt/portal/nginx/certs:/etc/nginx/certs:ro
- /opt/portal/nginx/conf:/etc/nginx/conf.d
- /opt/portal/nginx/dhparam:/etc/nginx/dhparam
- /opt/portal/nginx/html:/usr/share/nginx/html
- /opt/portal/nginx/vhost:/etc/nginx/vhost.d
- /var/run/docker.sock:/tmp/docker.sock:ro
environment:
- ENABLE_IPV6=true
restart: unless-stopped
acme:
image: docker.io/nginxproxy/acme-companion:latest
container_name: acme
networks:
- external_network
dns:
- 89.32.32.32
volumes:
- /opt/portal/nginx/acme:/etc/acme.sh
- /opt/portal/nginx/certs:/etc/nginx/certs:rw
- /opt/portal/nginx/conf:/etc/nginx/conf.d
- /opt/portal/nginx/dhparam:/etc/nginx/dhparam
- /opt/portal/nginx/html:/usr/share/nginx/html
- /opt/portal/nginx/vhost:/etc/nginx/vhost.d:rw
- /var/run/docker.sock:/var/run/docker.sock:ro
environment:
- NGINX_PROXY_CONTAINER=nginx
- DEFAULT_EMAIL=noc@sunet.se
depends_on:
- nginx
restart: unless-stopped
portal:
image: docker.sunet.se/drive/portal:<%= @portal_version %>
container_name: portal
restart: always
networks:
- internal_network
ports:
- "127.0.0.1:8080:8080"
dns:
- 89.32.32.32
volumes:
- /opt/portal/config.yaml:/app/config.yaml
environment:
- VIRTUAL_HOST=portal.<%= @domain %>
- VIRTUAL_PATH=/
- VIRTUAL_PORT=8080
- LETSENCRYPT_HOST=portal.<%= @domain %>
networks:
external_network:
internal_network:
internal: true

View file

@ -4,18 +4,25 @@ services:
proxysql:
image: docker.sunet.se/drive/proxysql:<%= @proxysql_version %>
container_name: proxysql_proxysql_1
<%- if @hostnet -%>
network_mode: host
<%- else -%>
ports:
- 3306:3306
- 6032:6032
- 6080:6080
<%- end -%>
environment:
INITIALIZE: 1
dns:
- 89.46.20.75
- 89.46.21.29
- 89.32.32.32
<%- if !@hostnet -%>
networks:
- proxysql
<%- end -%>
restart: unless-stopped
volumes:
- proxysql:/var/lib/proxysql
@ -25,6 +32,8 @@ services:
volumes:
proxysql:
<%- if !@hostnet -%>
networks:
proxysql:
driver: bridge
<%- end -%>

View file

@ -1,8 +1,14 @@
#!/bin/bash
PATH="${PATH}:/usr/local/bin"
restarted="false"
domain=$(hostname -d)
prefix="intern-db"
if [[ ${domain} =~ ^drive ]]; then
prefix="multinode-db"
fi
for index in 1 2 3; do
db_ip=$(host "intern-db${index}.$(hostname -d)" | awk '/has address/ {print $NF}')
db_ip=$(host "${prefix}${index}.${domain}" | awk '/has address/ {print $NF}')
result=$(proxysql "select * from main.mysql_servers where hostname = '${db_ip}' and hostgroup_id = 10")
if [[ -z ${result} ]]; then
query="INSERT INTO main.mysql_servers (hostgroup_id, hostname, max_connections, comment) VALUES( 10, '${db_ip}', 100, 'Inserted by script at $(date)')"

View file

@ -1,58 +0,0 @@
# ~/.bashrc: executed by bash(1) for non-login shells.
# see /usr/share/doc/bash/examples/startup-files (in the package bash-doc)
# for examples
# If not running interactively, don't do anything
[ -z "$PS1" ] && return
# don't put duplicate lines in the history. See bash(1) for more options
# ... or force ignoredups and ignorespace
HISTCONTROL=ignoredups:ignorespace
# append to the history file, don't overwrite it
shopt -s histappend
# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
HISTSIZE=1000
HISTFILESIZE=2000
# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
shopt -s checkwinsize
# make less more friendly for non-text input files, see lesspipe(1)
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# set a fancy prompt (non-color, unless we know we "want" color)
case "$TERM" in
xterm-color) color_prompt=yes;;
esac
# uncomment for a colored prompt, if the terminal has the capability; turned
# off by default to not distract the user: the focus in a terminal window
# should be on the output of commands, not on the prompt
#force_color_prompt=yes
if [ -n "$force_color_prompt" ]; then
if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then
# We have color support; assume it's compliant with Ecma-48
# (ISO/IEC-6429). (Lack of such support is extremely rare, and such
# a case would tend to support setf rather than setaf.)
color_prompt=yes
else
color_prompt=
fi
fi
if [ "$color_prompt" = yes ]; then
PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
else
PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '
fi
unset color_prompt force_color_prompt
alias redis-cli="redis-cli -a $(grep requirepass /opt/redis/node-0/server.conf | awk '{print $2}' | sed 's/"//g') --no-auth-warning"

View file

@ -5,7 +5,7 @@ redis_password="<%= @redis_password %>"
for index in 1 2 3; do
cur_host="redis${index}.$(hostname -d)"
if [[ "${my_host}" == "${cur_host}" ]]; then
ip="$(hostname -I | awk '{print $1}')"
ip="$(facter networking.ip)"
else
ip="$(host "${cur_host}" | grep "has address" | awk '{print $NF}')"
fi

View file

@ -3,17 +3,20 @@ version: '3.2'
services:
reva-server:
# image: docker.sunet.se/drive/sciencemesh-reva:v2.12.0
image: michielbdejong/reva:mentix-fixes
image: docker.sunet.se/drive/sciencemesh-reva:<%= @reva_version %>
network_mode: host
security_opt:
- seccomp:unconfined
dns:
- 89.46.20.75
- 89.46.21.29
- 89.32.32.32
environment:
- CGO_ENABLED=1
volumes:
- /opt/reva/revad.toml:/etc/revad/revad.toml
- /opt/reva/data:/var/tmp/reva
- /opt/reva/ocm-providers.json:/etc/revad/ocm-providers.json
- /opt/reva/test/drive.test.sunet.se.crt:/etc/revad/tls/drive.test.sunet.se.crt
- /opt/reva/test/drive.test.sunet.se.key:/etc/revad/tls/drive.test.sunet.se.key
- /opt/reva/rclone.conf:/root/.rclone.conf
- /opt/reva/<%= @environment %>/<%= @domain %>.crt:/etc/revad/tls/<%= @domain %>.crt
- /opt/reva/<%= @environment %>/<%= @domain %>.key:/etc/revad/tls/<%= @domain %>.key
restart: always

View file

@ -1,14 +0,0 @@
[
{ "domain": "mesh.pondersource.org", "services": [
{ "endpoint": { "type": { "name": "OCM" }, "path": "https://mesh.pondersource.org/ocm/" }, "host": "https://mesh.pondersource.org" },
{ "endpoint": { "type": { "name": "Webdav" }, "path": "https://cloud.pondersource.org/remote.php/webdav/" }, "host": "https://cloud.pondersource.org" }
] },
{ "domain": "cs3mesh-iop.apps.dcw1.paas.psnc.pl", "services": [
{ "endpoint": { "type": { "name": "OCM" }, "path": "https://cs3mesh-iop.apps.dcw1.paas.psnc.pl/ocm/" }, "host": "https://cs3mesh-iop.apps.dcw1.paas.psnc.pl" },
{ "endpoint": { "type": { "name": "Webdav" }, "path": "https://cs3mesh-drive.apps.dcw1.paas.psnc.pl/remote.php/webdav/" }, "host": "https://cs3mesh-drive.apps.dcw1.paas.psnc.pl" }
] },
{ "domain": "<%= @reva_domain %>", "services": [
{ "endpoint": { "type": { "name": "OCM" }, "path": "https://<%= @reva_domain%>/ocm/" }, "host": "https://<%= @reva_domain %>" },
{ "endpoint": { "type": { "name": "Webdav" }, "path": "https://<%= @domain %>/remote.php/webdav/" }, "host": "https://<%= @domain %>" }
] }
]

View file

@ -0,0 +1,6 @@
# rclone remote "statistics": WebDAV access to the statistics Nextcloud
# instance as the _script service user (rendered by Puppet/ERB).
[statistics]
type = webdav
url = https://89.45.236.246/remote.php/dav/files/_script/
vendor = nextcloud
user = _script
pass = <%= @statistics_secret %>

View file

@ -1,48 +1,83 @@
[vars]
internal_gateway = "<%= @reva_domain %>"
provider_domain = "<%= @reva_domain %>"
external_reva_endpoint = "https://<%= @reva_domain %>" # append here any route if applicable
efss_sciencemesh_endpoint = "https://<%= @customer %>.<%= @domain %>/index.php/apps/sciencemesh/"
machine_api_key = "<%= @iopsecret %>"
efss_shared_secret = "<%= @shared_secret %>"
[http]
certfile = "/etc/revad/tls/<%= @domain %>.crt"
keyfile = "/etc/revad/tls/<%= @domain %>.key"
[log]
level = "debug"
[shared]
gatewaysvc = "<%= @reva_domain %>:19000"
# gatewaycertfile = "/etc/revad/tls/revanc1.crt"
# [registry]
# driver = "static"
#
# [registry.static]
# services = ["authprovider","userprovider"]
#
# [registry.static.authprovider]
# bearer = ["localhost:0123"]
# basic = ["localhost:1234"]
# publiclink = ["localhost:9876"]
[grpc]
address = "0.0.0.0:19000"
# certfile = "/etc/revad/tls/revanc1.crt"
# keyfile = "/etc/revad/tls/revanc1.key"
gatewaysvc = "{{ vars.internal_gateway }}:19000"
[grpc.services.gateway]
authregistrysvc = "<%= @reva_domain %>:19000"
appprovidersvc = "<%= @reva_domain %>:19000"
appregistry = "<%= @reva_domain %>:19000"
storageregistrysvc = "<%= @reva_domain %>:19000"
preferencessvc = "<%= @reva_domain %>:19000"
userprovidersvc = "<%= @reva_domain %>:19000"
usershareprovidersvc = "<%= @reva_domain %>:19000"
publicshareprovidersvc = "<%= @reva_domain %>:19000"
ocmcoresvc = "<%= @reva_domain %>:19000"
ocmshareprovidersvc = "<%= @reva_domain %>:19000"
ocminvitemanagersvc = "<%= @reva_domain %>:19000"
ocmproviderauthorizersvc = "<%= @reva_domain %>:19000"
commit_share_to_storage_grant = false
datagateway = "https://<%= @reva_domain %>/data"
transfer_expires = 6 # give it a moment
address = ":19000"
authregistrysvc = "{{ grpc.services.authregistry.address }}"
appregistrysvc = "{{ grpc.services.appregistry.address }}"
storageregistrysvc = "{{ grpc.services.storageregistry.address }}"
preferencessvc = "{{ grpc.services.userprovider.address }}"
userprovidersvc = "{{ grpc.services.userprovider.address }}"
usershareprovidersvc = "{{ grpc.services.usershareprovider.address }}"
ocmcoresvc = "{{ grpc.services.ocmcore.address }}"
ocmshareprovidersvc = "{{ grpc.services.ocmshareprovider.address }}"
ocminvitemanagersvc = "{{ grpc.services.ocminvitemanager.address }}"
ocmproviderauthorizersvc = "{{ grpc.services.ocmproviderauthorizer.address }}"
datagateway = "https://{{ http.services.datagateway.address }}/data"
transfer_expires = 6 # give it a moment
commit_share_to_storage_grant = true
commit_share_to_storage_ref = true
[grpc.services.appregistry]
driver = "static"
[grpc.services.appregistry.drivers.static]
mime_types = [
{"mime_type" = "text/plain", "extension" = "txt", "name" = "Text file", "description" = "Text file", "allow_creation" = true},
{"mime_type" = "text/markdown", "extension" = "md", "name" = "Markdown file", "description" = "Markdown file", "allow_creation" = true},
{"mime_type" = "application/vnd.oasis.opendocument.text", "extension" = "odt", "name" = "OpenDocument", "description" = "OpenDocument text document", "default_app" = "Collabora", "allow_creation" = true},
{"mime_type" = "application/vnd.oasis.opendocument.spreadsheet", "extension" = "ods", "name" = "OpenSpreadsheet", "description" = "OpenDocument spreadsheet document", "default_app" = "Collabora", "allow_creation" = true},
{"mime_type" = "application/vnd.oasis.opendocument.presentation", "extension" = "odp", "name" = "OpenPresentation", "description" = "OpenDocument presentation document", "default_app" = "Collabora", "allow_creation" = true},
{"mime_type" = "application/vnd.jupyter", "extension" = "ipynb", "name" = "Jupyter Notebook", "description" = "Jupyter Notebook"}
]
### AUTH PROVIDERS ###
[grpc.services.authregistry]
driver = "static"
[grpc.services.authregistry.drivers.static.rules]
basic = "<%= @reva_domain %>:19000"
basic = "{{ grpc.services.authprovider[0].address }}"
machine = "{{ grpc.services.authprovider[1].address }}"
ocmshares = "{{ grpc.services.authprovider[2].address }}"
[[grpc.services.authprovider]]
auth_manager = "nextcloud"
[grpc.services.authprovider.auth_managers.nextcloud]
endpoint = "{{ vars.efss_sciencemesh_endpoint }}"
shared_secret = "{{ vars.efss_shared_secret }}"
mock_http = false
[[grpc.services.authprovider]]
auth_manager = "machine"
[grpc.services.authprovider.auth_managers.machine]
api_key = "{{ vars.machine_api_key }}"
gateway_addr = "{{ vars.internal_gateway }}:19000"
[[grpc.services.authprovider]]
auth_manager = "ocmshares"
### STORAGE PROVIDERS ###
[grpc.services.storageregistry]
driver = "static"
@ -51,8 +86,36 @@ driver = "static"
home_provider = "/home"
[grpc.services.storageregistry.drivers.static.rules]
"/home" = {"address" = "<%= @reva_domain %>:19000"}
"123e4567-e89b-12d3-a456-426655440000" = {"address" = "<%= @reva_domain %>:19000"}
"/home" = {"address" = "{{ grpc.services.storageprovider[0].address }}"}
"nextcloud" = {"address" = "{{ grpc.services.storageprovider[0].address }}"}
"/ocm" = {"address" = "{{ grpc.services.storageprovider[1].address }}"}
"ocm" = {"address" = "{{ grpc.services.storageprovider[1].address }}"}
[[grpc.services.storageprovider]]
driver = "nextcloud"
mount_id = "nextcloud"
expose_data_server = true
enable_home_creation = false
data_server_url = "https://localhost:{{ http.services.dataprovider[0].address.port }}/data"
[grpc.services.storageprovider.drivers.nextcloud]
endpoint = "{{ vars.efss_sciencemesh_endpoint }}"
shared_secret = "{{ vars.efss_shared_secret }}"
mock_http = false
[[grpc.services.storageprovider]]
driver = "ocmoutcoming"
mount_id = "ocm"
mount_path = "/ocm"
expose_data_server = true
enable_home_creation = false
data_server_url = "{{ vars.external_reva_endpoint }}/data"
[grpc.services.storageprovider.drivers.ocmoutcoming]
machine_secret = "{{ vars.machine_api_key }}"
### OTHER PROVIDERS ###
[grpc.services.usershareprovider]
driver = "memory"
@ -61,121 +124,148 @@ driver = "memory"
driver = "nextcloud"
[grpc.services.ocmcore.drivers.nextcloud]
webdav_host = "https://<%= @domain %>/"
endpoint = "https://<%= @domain %>/index.php/apps/sciencemesh/"
shared_secret = "<%= @shared_secret %>"
host = "{{ vars.external_reva_endpoint }}"
endpoint = "{{ vars.efss_sciencemesh_endpoint }}"
shared_secret = "{{ vars.efss_shared_secret }}"
mock_http = false
[grpc.services.ocminvitemanager]
# TODO the driver should be "nextcloud" once it is implemented
driver = "json"
provider_domain = "{{ vars.provider_domain }}"
[grpc.services.ocmshareprovider]
driver = "nextcloud"
provider_domain = "{{ vars.provider_domain }}"
webdav_endpoint = "{{ vars.external_reva_endpoint }}"
webdav_prefix = "{{ vars.external_reva_endpoint }}/remote.php/dav/files"
# TODO the following should become {{ vars.external_reva_endpoint }}/external/{{.Token}}/...
webapp_template = "https://your.revad.org/external/sciencemesh/{{.Token}}/{relative-path-to-shared-resource}"
[grpc.services.ocmshareprovider.drivers.nextcloud]
webdav_host = "https://<%= @domain %>/"
endpoint = "https://<%= @domain %>/index.php/apps/sciencemesh/"
shared_secret = "<%= @shared_secret %>"
webdav_host = "{{ vars.external_reva_endpoint }}"
endpoint = "{{ vars.efss_sciencemesh_endpoint }}"
shared_secret = "{{ vars.efss_shared_secret }}"
mock_http = false
mount_id = "nextcloud"
[grpc.services.ocmproviderauthorizer]
#driver = "mentix"
driver = "open"
driver = "mentix"
[grpc.services.ocmproviderauthorizer.drivers.mentix]
url = "https://iop.sciencemesh.uni-muenster.de/iop/mentix/cs3"
verify_request_hostname = false
verify_request_hostname = true
insecure = false
timeout = 10
refresh = 900
[grpc.services.publicshareprovider]
driver = "memory"
[grpc.services.appprovider]
driver = "demo"
iopsecret = "<%= @iopsecret %>"
wopiurl = "http://0.0.0.0:8880/"
wopibridgeurl = "http://localhost:8000/wopib"
[grpc.services.appregistry]
driver = "static"
[grpc.services.appregistry.static.rules]
"text/plain" = "<%= @reva_domain %>:19000"
"text/markdown" = "<%= @reva_domain %>:19000"
"application/compressed-markdown" = "<%= @reva_domain %>:19000"
"application/vnd.oasis.opendocument.text" = "<%= @reva_domain %>:19000"
"application/vnd.oasis.opendocument.spreadsheet" = "<%= @reva_domain %>:19000"
"application/vnd.oasis.opendocument.presentation" = "<%= @reva_domain %>:19000"
[grpc.services.storageprovider]
driver = "nextcloud"
expose_data_server = true
data_server_url = "https://<%= @reva_domain %>/data"
enable_home_creation = true
[grpc.services.storageprovider.drivers.nextcloud]
endpoint = "https://<%= @domain %>/index.php/apps/sciencemesh/"
shared_secret = "<%= @shared_secret %>"
mock_http = false
[grpc.services.authprovider]
auth_manager = "nextcloud"
[grpc.services.authprovider.auth_managers.nextcloud]
endpoint = "https://<%= @domain %>/index.php/apps/sciencemesh/"
shared_secret = "<%= @shared_secret %>"
mock_http = false
[grpc.services.ocmproviderauthorizer.drivers.json]
# this is used by the docker-based test deployment, not in production
providers = "providers.testnet.json"
verify_request_hostname = true
[grpc.services.userprovider]
driver = "nextcloud"
[grpc.services.userprovider.drivers.nextcloud]
endpoint = "https://<%= @domain %>/index.php/apps/sciencemesh/"
shared_secret = "<%= @shared_secret %>"
endpoint = "{{ vars.efss_sciencemesh_endpoint }}"
shared_secret = "{{ vars.efss_shared_secret }}"
mock_http = false
[http]
enabled_services = ["ocmd"]
enabled_middlewares = ["providerauthorizer", "cors"]
address = "0.0.0.0:443"
certfile = "/etc/revad/tls/drive.test.sunet.se.crt"
keyfile = "/etc/revad/tls/drive.test.sunet.se.key"
[grpc.services.datatx]
txdriver = "rclone"
storagedriver = "json"
remove_transfer_on_cancel = true
[http.services.dataprovider]
[grpc.services.datatx.txdrivers.rclone]
# rclone endpoint
endpoint = "http://rclone.docker"
# basic auth is used
auth_user = "rcloneuser"
auth_pass = "eilohtho9oTahsuongeeTh7reedahPo1Ohwi3aek"
auth_header = "x-access-token"
job_status_check_interval = 2000
job_timeout = 120000
storagedriver = "json"
remove_transfer_job_on_cancel = true
[grpc.services.datatx.storagedrivers.json]
file = ""
[grpc.services.datatx.txdrivers.rclone.storagedrivers.json]
file = ""
### HTTP ENDPOINTS ###
[http.services.appprovider]
address = ":443"
insecure = true
[http.services.datagateway]
address = ":443"
[[http.services.dataprovider]]
driver = "nextcloud"
[http.services.prometheus]
[http.services.sysinfo]
[http.services.dataprovider.drivers.nextcloud]
endpoint = "https://<%= @domain %>/index.php/apps/sciencemesh/"
shared_secret = "<%= @shared_secret %>"
endpoint = "{{ vars.efss_sciencemesh_endpoint }}"
shared_secret = "{{ vars.efss_shared_secret }}"
mock_http = false
[[http.services.dataprovider]]
address = ":443"
driver = "ocmoutcoming"
[http.services.dataprovider.drivers.ocmoutcoming]
machine_secret = "{{ vars.machine_api_key }}"
[http.services.sciencemesh]
address = ":443"
provider_domain = "{{ vars.provider_domain }}"
mesh_directory_url = "https://sciencemesh.cesnet.cz/iop/meshdir"
ocm_mount_point = "/sciencemesh"
[http.services.sciencemesh.smtp_credentials]
disable_auth = false
sender_mail = "noreply@<%= @domain %>"
sender_login = "noreply@<%= @domain %>"
sender_password = "<%= @smtp_credentials %>"
smtp_server = "smtp.sunet.se"
smtp_port = 587
[http.services.ocmprovider]
address = ":443"
ocm_prefix = "ocm"
provider = "Reva for ownCloud/Nextcloud"
endpoint = "{{ vars.external_reva_endpoint }}"
enable_webapp = true
enable_datatx = true
[http.services.ocmd]
address = ":443"
prefix = "ocm"
[http.services.ocmd.config]
host = "<%= @reva_domain %>"
provider = "test-revanc1"
[http.middlewares.providerauthorizer]
#driver = "mentix"
driver = "open"
[http.middlewares.providerauthorizer.drivers.mentix]
url = "https://iop.sciencemesh.uni-muenster.de/iop/mentix/cs3"
verify_request_hostname = false
insecure = false
timeout = 10
refresh = 900
host = "{{ vars.provider_domain }}"
[http.services.ocs]
address = ":443"
prefix = "ocs"
[http.services.ocdav]
prefix = "ocdav"
address = ":443"
[http.services.prometheus]
address = ":443"
[http.services.metrics]
address = ":443"
metrics_data_driver_type = "json"
metrics_data_location = "/etc/revad/metrics.json"
metrics_record_interval = 5000
[http.services.sysinfo]
[http.middlewares.cors]
[http.middlewares.log]

View file

@ -0,0 +1,23 @@
services:
satosa:
environment:
- "METADATA_DIR=/etc/satosa/metadata"
- "WORKER_TIMEOUT=120"
dns:
- "89.32.32.32"
image: "<%= @image %><% if @tag %>:<%= @tag %><% end %>"
pull_policy: "always"
ports:
- "443:8000"
volumes:
- "/etc/satosa:/etc/satosa"
- "/etc/dehydrated:/etc/dehydrated"
alwayshttps:
environment:
- "ACME_URL=http://acme-c.sunet.se"
dns:
- "89.32.32.32"
image: "docker.sunet.se/always-https"
pull_policy: "always"
ports:
- "80:80"

View file

@ -27,21 +27,21 @@ Customer;Total GB;Users;Product"
for customer in $(${yq} -r '.fullnodes | .[]' ${commonyaml}); do
product=1 # Prisplan 1
csv="${csv}
$(rclone cat "${billingbucket}/${customer}-usage/${customer}-latest.csv" |
$(rclone cat --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies "${billingbucket}/${customer}-usage/${customer}-latest.csv" |
grep -E -v '^DATE|^Customer' |
sed 's/$/;1/')"
done
for customer in $(${yq} -r '.singlenodes | .[]' ${commonyaml}); do
product=2 # Prisplan 2
csv="${csv}
$(rclone cat "${billingbucket}/${customer}-usage/${customer}-latest.csv" |
$(rclone cat --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies "${billingbucket}/${customer}-usage/${customer}-latest.csv" |
grep -E -v '^DATE|^Customer' |
sed 's/$/;'${product}'/')"
done
echo "${csv}" >"${aggregatefile}"
rclone copy "${aggregatefile}" "${aggregatedir}/"
rclone copy -c --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies "${aggregatefile}" "${aggregatedir}/"
mv "${aggregatefile}" "latest.csv"
rclone move "latest.csv" "${latestdir}/"
rclone move -c --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies "latest.csv" "${latestdir}/"
cd "${olddir}" || (echo "Could not switch back to old dir" && exit 1)
rmdir "${tempdir}"

View file

@ -0,0 +1,75 @@
#!/bin/bash
# Manage announcements in the Nextcloud announcementcenter app via its
# OCS API, authenticating as admin with an app password (rendered by ERB).
#
# Usage:
#   announce -c|--create -m|--message <'text'> -s|--subject <'subject'>
#   announce -d|--delete -i|--id <announcement_id>
#   announce -g|--get
VALID_ARGS=$(getopt -o cdghi:m:s: --long create,delete,get,help,id:,message:,subject: -- "$@")
# shellcheck disable=SC2181
if [[ ${?} -ne 0 ]]; then
    exit 1;
fi
# Print usage and exit non-zero.
usage () {
    echo "${0}: -c|--create -m|--message <'Your announcement goes here'> -s|--subject <Your subject goes here>"
    echo "${0}: -d|--delete -i|--id <announcement_id>"
    echo "${0}: -g|--get"
    exit 1
}
eval set -- "${VALID_ARGS}"
while true; do
    case "$1" in
        -c | --create)
            method='POST'
            shift
            ;;
        -d | --delete)
            method='DELETE'
            shift
            ;;
        -g | --get)
            method='GET'
            shift
            ;;
        -h | --help)
            usage
            ;;
        -i | --id)
            argument="${2}"
            shift 2
            ;;
        -m | --message)
            message="${2}"
            shift 2
            ;;
        -s | --subject)
            subject="${2}"
            shift 2
            ;;
        *)
            break
            ;;
    esac
done
# DELETE needs an id; POST needs both subject and message.
if [[ ${method} == 'DELETE' ]] && [[ -z ${argument} ]]; then
    usage
fi
if [[ ${method} == 'POST' ]]; then
    if [[ -z ${message} ]] || [[ -z ${subject} ]]; then
        usage
    fi
    # NOTE(review): subject/message are interpolated into the JSON body
    # verbatim; a double quote in either will break the payload. Consider
    # building this with jq if available on the host.
    argument='{"subject":"'${subject}'","message":"'${message}'", "plainMessage":"'${message}'", "groups": [], "userId": "admin", "activities": false, "notifications": true, "emails": false, "comments": false }'
fi
# Perform the OCS request.
#   $1 - HTTP method (GET/POST/DELETE)
#   $2 - JSON payload for POST, announcement id for DELETE (optional)
curl_cmd(){
    local method="${1}"
    # Initialize explicitly: previously ${payload[@]} and ${id} were
    # expanded while unset on the GET path, which breaks under `set -u`.
    local -a payload=()
    local id=""
    if [[ ${method} == 'POST' ]] && [[ -n ${2} ]]; then
        payload=(-d "${2}" -H "Content-Type: application/json")
    elif [[ ${method} == 'DELETE' ]] && [[ -n ${2} ]]; then
        id="/${2}"
    fi
    local admin_app_password="<%= @admin_app_password %>"
    domain="$(hostname -d)"
    curl -s -X "${method}" -u "admin:${admin_app_password}" "${payload[@]}" -H 'OCS-APIRequest: true' "https://${domain}/ocs/v2.php/apps/announcementcenter/api/v1/announcements${id}"
}
curl_cmd "${method}" "${argument}"

View file

@ -6,63 +6,78 @@ sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk
number_of_full_to_keep='<%= @full_backup_retention %>'
fork_limit=30 #in GB, if bigger than this number, we fork the backup to it's own process
customer="<%= @customer %>"
environment="<%= @environment %>"
declare -A extra_backup_jobs
#<% if @extra_backup_jobs.any? %>
#<% @extra_backup_jobs.each do |client, job| %>
extra_backup_jobs["<%= client %>"]="<%= job %>"
#<% end %>
#<% end %>
#<% if @location.start_with?('common') %>
declare -a sixmonths=('mau')
if [[ " ${sixmonths[*]} " =~ " ${customer} " ]]; then
number_of_full_to_keep=6
number_of_full_to_keep=6
fi
declare -a projects
#<% @singlenodes.each do |singlenode| %>
projects+=("<%= @full_project_mapping[singlenode][@environment]['primary_project'] %> <%= @full_project_mapping[singlenode][@environment]['mirror_project'] %>")
projects+=("<%= @full_project_mapping[singlenode][@environment]['primary_project'] %> <%= @full_project_mapping[singlenode][@environment]['mirror_project'] %> <%= singlenode %>")
#<% @full_project_mapping[singlenode][@environment]['assigned'].each do |project| %>
projects+=("<%= project['project'] %> <%= project['mirror_project'] %>")
projects+=("<%= project['project'] %> <%= project['mirror_project'] %> <%= singlenode %>")
#<% end %>
#<% end %>
#<% else %>
declare -a projects=("<%= @primary_project %> <%= @mirror_project %>")
declare -a projects=("<%= @primary_project %> <%= @mirror_project %> ${customer}")
#<% @assigned_projects.each do |project| %>
projects+=("<%= project['project'] %> <%= project['mirror_project'] %>")
projects+=("<%= project['project'] %> <%= project['mirror_project'] %> ${customer}")
#<% end %>
#<% end %>
if [[ ${customer} == 'common' ]]; then
projects+=("<%= @location %> <%= @location %>-mirror")
projects+=("<%= @location %> <%= @location %>-mirror ${customer}")
fi
function do_backup {
local project="${1}"
local mirror="${2}"
local bucket="${3}"
local mirrorbucket="${bucket}-mirror"
local mountpoint="/opt/backupmounts/${bucket}"
mkdir -p ${mountpoint}
rclone mount ${project}:${bucket} ${mountpoint}/ --daemon --allow-other --dir-cache-time 24h
rclone mkdir ${mirror}:${mirrorbucket}
duplicity --full-if-older-than 1M --asynchronous-upload --tempdir /mnt --archive-dir /mnt \
--no-encryption ${mountpoint} rclone://${mirror}:/${mirrorbucket}
umount ${mountpoint}
rmdir ${mountpoint}
# Clean up
duplicity remove-all-but-n-full ${number_of_full_to_keep} --tempdir /mnt --archive-dir /mnt \
--force rclone://${mirror}:/${mirrorbucket}
local project="${1}"
local mirror="${2}"
local bucket="${3}"
local customer="${4}"
local mirrorbucket="${bucket}-mirror"
local mountpoint="/opt/backupmounts/${bucket}"
ps aux | grep duplicity | grep "[^a-zA-Z]${bucket}" > /dev/null
local oktorun=$? # 1 == this bucket has no other bakup process in progress
if [[ ${oktorun} -ne 0 ]]; then
mkdir -p ${mountpoint}
rclone mount ${project}:${bucket} ${mountpoint}/ --daemon --allow-other --dir-cache-time 24h
rclone mkdir ${mirror}:${mirrorbucket}
duplicity --full-if-older-than 1M --asynchronous-upload --tempdir /mnt --archive-dir /mnt --no-encryption ${mountpoint} rclone://${mirror}:/${mirrorbucket}
if [[ "${extra_backup_jobs[${customer}]:+found}" == "found" ]] && [[ -f "${extra_backup_jobs[${customer}]}" ]]; then
${extra_backup_jobs[${customer}]} ${project} ${mirror} ${bucket} ${customer} ${environment}
fi
umount ${mountpoint}
rmdir ${mountpoint}
# Clean up
duplicity remove-all-but-n-full ${number_of_full_to_keep} --tempdir /mnt --archive-dir /mnt --force rclone://${mirror}:/${mirrorbucket}
fi
}
for entry in "${projects[@]}"; do
project=$(echo ${entry} | awk '{print $1}')
mirror=$(echo ${entry} | awk '{print $2}')
for bucket in $(rclone lsd ${project}:/ | awk '{print $5}'); do
maybesize=$(timeout 30s rclone size --json ${project}:${bucket})
project=$(echo ${entry} | awk '{print $1}')
mirror=$(echo ${entry} | awk '{print $2}')
customer=$(echo ${entry} | awk '{print $3}')
for bucket in $(rclone lsd ${project}:/ | awk '{print $5}'); do
maybesize=$(timeout 30s rclone size --json ${project}:${bucket})
if [[ ${?} -eq 124 ]]; then
size=$((${fork_limit} * 1000000001))
else
size=$(echo ${maybesize} | jq -r '.bytes' )
fi
# If bucket is above 50 GB we fork
if [[ ${size} -gt $((${fork_limit} * 1000000000)) ]]; then
do_backup ${project} ${mirror} ${bucket} &
else
do_backup ${project} ${mirror} ${bucket}
fi
done
# If bucket is above 50 GB we fork
if [[ ${size} -gt $((${fork_limit} * 1000000000)) ]]; then
do_backup ${project} ${mirror} ${bucket} ${customer} &
else
do_backup ${project} ${mirror} ${bucket} ${customer}
fi
done
done

View file

@ -1,86 +0,0 @@
#!/bin/bash
# Backup all buckets
# We sleep a deterministic amount of time, which will be between 0 an 128 m and allways the same within
# a specific host, but will differ between hosts
sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk '{print $1}') / 2))m
number_of_full_to_keep="<%= @full_backup_retention %>"
fork_limit=30 #in GB, if bigger than this number, we fork the backup to it's own process
split_limit=1000 #in GB, if bigger than this number, we fork backup of each directory to it's own process
declare -a projects=("<%= @primary_project %> <%= @mirror_project %>")
#<% @assigned_projects.each do |project| %>
projects+=("<%= project['project'] %> <%= project['mirror_project'] %>")
#<% end %>
function do_huge_backup {
local project="${1}"
local mirror="${2}"
local bucket="${3}"
declare -a directories
declare -a empty
for dir in $(rclone lsd ${project}:${bucket} | awk '{print $NF}'); do
directories+=("${dir}")
mountpoint="/opt/backupmounts/${bucket}-${dir}"
do_backup ${project} ${mirror} ${bucket} ${mountpoint} ${dir} ${empty} &
done
mountpoint="/opt/backupmounts/${bucket}"
do_backup ${project} ${mirror} ${bucket} ${mountpoint} none ${directories[@]} &
}
function do_backup {
local project="${1}"
shift
local mirror="${1}"
shift
local bucket="${1}"
shift
local mountpoint="${1}"
shift
local dire="${1}"
shift
declare -a exclude
exclude=( "${@}" )
suffix=""
opts=""
if [[ "${dire}" != "none" ]]; then
suffix="/${dire}"
fi
if ((${#exclude[@]})); then
for dir in "${exclude[@]}"; do
opts="${opts} --exclude /${dir}"
done
fi
local mirrorbucket="${bucket}-mirror"
mkdir -p ${mountpoint}
rclone mount ${project}:${bucket}${suffix} ${mountpoint}/ --daemon --allow-other
rclone mkdir ${mirror}:${mirrorbucket}${suffix}
duplicity --full-if-older-than 1M --asynchronous-upload --tempdir /mnt --archive-dir /mnt ${opts} \
--no-encryption ${mountpoint} rclone://${mirror}:/${mirrorbucket}${suffix}
umount ${mountpoint}
rmdir ${mountpoint}
# Clean up
duplicity remove-all-but-n-full ${number_of_full_to_keep} --tempdir /mnt --archive-dir /mnt \
--force rclone://${mirror}:/${mirrorbucket}${suffix}
}
for entry in "${projects[@]}"; do
project=$(echo ${entry} | awk '{print $1}')
mirror=$(echo ${entry} | awk '{print $2}')
declare -a empty
for bucket in $(rclone lsd ${project}:/ | awk '{print $5}'); do
size=$(rclone size --json ${project}:${bucket} | jq -r '.bytes')
mirrorbucket="${bucket}-mirror"
mountpoint="/opt/backupmounts/${bucket}"
# If bucket is above ${split_limit} we fork and do backup per directory
if [[ ${size} -gt $((${split_limit} * 1000000000)) ]]; then
do_huge_backup ${project} ${mirror} ${bucket} &
# If bucket is above ${fork_limit} we fork and do backup for bucket
elif [[ ${size} -gt $((${fork_limit} * 1000000000)) ]]; then
do_backup ${project} ${mirror} ${bucket} ${mountpoint} none ${empty} &
else
# If bucket is below ${fork_limit} we do not fork and do backup for bucket
do_backup ${project} ${mirror} ${bucket} ${mountpoint} none ${empty}
fi
done
done

View file

@ -0,0 +1,8 @@
#!/bin/bash
# Extra backup job: rsync the locally mounted backup bucket to the HB
# backup host over ssh.
# Arguments: project mirror bucket customer environment
# (project, mirror and customer are accepted for interface parity with
# the main backup script; only bucket and environment are used below.)
project="${1}"
mirror="${2}"
bucket="${3}"
customer="${4}"
environment="${5}"
remote_user="sd-${environment}"
remote_host="${remote_user}-backup.hb.se"
rsync -e "ssh -i ${HOME}/.ssh/id_script" -avz "/opt/backupmounts/${bucket}" "${remote_user}@${remote_host}:~/${remote_user}/${bucket}"

View file

@ -6,11 +6,14 @@ sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk
number_of_full_to_keep="<%= @full_backup_retention %>"
backup="${1}"
if [[ -z ${backup} ]]; then
backup="backup1.$(hostname -d)"
fi
if ! [[ ${backup} =~ backup1.*sunet.se$ ]]; then
echo "Usage: ${0} <fqdn of backup server>"
echo "Example: ${0} backup1.sunet.drive.sunet.se"
fi
backup_dir="/opt/backups"
backup_dir="/opt/backups/backup-files"
bucket="db-backups"
mirror="<%= @customer %>-<%= @environment %>-mirror"
if [[ ${mirror} =~ common-(test|prod)-mirror ]]; then
@ -18,16 +21,15 @@ if [[ ${mirror} =~ common-(test|prod)-mirror ]]; then
bucket="${bucket}-${suffix}"
backup_dir="${backup_dir}-${suffix}"
fi
mkdir -p ${backup_dir}
echo "Backing up database for ${backup}"
ssh ${backup} "sudo /home/script/bin/backup_db.sh"
echo "Cleaning up old backups for ${backup}"
ssh ${backup} "sudo /home/script/bin/purge_backups.sh /opt/mariadb_backup/backups/"
echo "Copying backups here"
mkdir -p ${backup_dir}
scp script@${backup}:/opt/mariadb_backup/backups/$(date +%Y/%m/%d)/*.gz ${backup_dir}
echo "Copying backups to remote bucket"
rclone mkdir ${mirror}:${bucket}
duplicity --full-if-older-than 1M --tempdir /mnt --archive-dir /mnt --no-encryption ${backup_dir} rclone://${mirror}:/${bucket}
duplicity --allow-source-mismatch --full-if-older-than 1M --tempdir /mnt --archive-dir /mnt --no-encryption ${backup_dir} rclone://${mirror}:/${bucket}
duplicity remove-all-but-n-full ${number_of_full_to_keep} --tempdir /mnt --archive-dir /mnt --force rclone://${mirror}:/${bucket}
echo "cleaning up"
rm -r ${backup_dir}
rm -rf "${backup_dir}"

View file

@ -0,0 +1,22 @@
#!/bin/bash
# Back up all databases for the multinode customers: dump them on the
# multinode DB host, copy the dumps here, then push them to the common
# mirror bucket with duplicity.  Rendered by Puppet/ERB (<%= ... %>).
#
# We sleep a deterministic amount of time, which will be between 0 and 128 m
# and always the same within a specific host, but will differ between hosts
# (derived from the last octet of the first MAC address).
sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk '{print $1}') / 2))m
# How many full duplicity backup chains to retain on the mirror
number_of_full_to_keep=7
backup="multinode-db1.$(hostname -d)"
remote_backup_dir="/etc/mariadb/backups"
backup_dir="/opt/backups/multinode"
bucket="db-backups-multinode"
mirror="common-<%= @environment %>-mirror"
echo "Backing up all databases for multinode customer"
ssh "${backup}" "sudo /home/script/bin/backup_multinode_db.sh"
echo "Copying backups here"
mkdir -p "${backup_dir}"
# Glob is quoted locally on purpose so it expands on the remote side
scp "script@${backup}:${remote_backup_dir}/mariadb-dump*.sql.gz" "${backup_dir}"
echo "Copying backups to remote bucket"
rclone mkdir "${mirror}:${bucket}"
duplicity --allow-source-mismatch --full-if-older-than 1M --tempdir /mnt --archive-dir /mnt --no-encryption "${backup_dir}" "rclone://${mirror}:/${bucket}"
duplicity remove-all-but-n-full "${number_of_full_to_keep}" --tempdir /mnt --archive-dir /mnt --force "rclone://${mirror}:/${bucket}"
echo "cleaning up"
# -f matches the sibling backup script and avoids failing on a partial copy
rm -rf "${backup_dir}"

View file

@ -25,7 +25,7 @@ if [[ " ${sixmonths[*]} " =~ " ${customer} " ]]; then
else
number_of_full_to_keep=1
fi
container="mariadb${customer}_db_1"
container="mariadb-${customer}_db_1"
backup_dir="/opt/backups"
bucket="db-backups"
mirror="${customer}-<%= @environment %>-mirror"

View file

@ -2,9 +2,7 @@
project="${1}"
bucket="${2}"
number_of_full_to_keep='<%= @full_backup_retention %>'
max_num_inc=$((32 * number_of_full_to_keep))
max_num_full=$((2 * number_of_full_to_keep))
declare -a sixmonths=('multinode')
output_status="OK"
exit_status=0
problems=""
@ -12,6 +10,19 @@ num_problems=0
data_dir='/opt/backups/data'
for project in $(ls ${data_dir}); do
for bucket in $(ls ${data_dir}/${project}/ | sed 's/\.dat$//'); do
issixmonths="false"
for customer in "${sixmonths[@]}"; do
if [[ "${bucket}" =~ ${customer} ]]; then
issixmonths="true"
fi
done
number_of_full_to_keep='<%= @full_backup_retention %>'
if [[ "${issixmonths}" == "true" ]]; then
number_of_full_to_keep=6
fi
max_num_inc=$((50 * number_of_full_to_keep))
max_num_full=$((2 * number_of_full_to_keep))
tabular_data=$(cat "${data_dir}/${project}/${bucket}.dat")
# We warn if there are too many old backups
num_full=$(echo "${tabular_data}" | grep -c full)

View file

@ -3,7 +3,7 @@
project="${1}"
bucket="${2}"
data_dir='/opt/backups/data'
for project in $(rclone listremotes | grep -v 'mirror'); do
for project in $(rclone listremotes | grep -v 'mirror' | grep -v 'statistics'); do
for bucket in $(rclone lsd "${project}" | awk '{print $NF}' | grep -E '\-mirror|db-backups'); do
mkdir -p "${data_dir}/${project}"
duplicity collection-status --log-file /dev/stdout --no-encryption "rclone://${project}${bucket}" | grep -E '^ inc|^ full' > "${data_dir}/${project}/${bucket}.dat"

View file

@ -0,0 +1,60 @@
#!/bin/bash
# Seed newly created (empty) buckets for a customer with a standard set of
# directories, then trigger a Nextcloud files:scan for the affected users.
# Rendered by Puppet/ERB: <%= ... %> placeholders are filled at deploy time.
# Usage: script [dir ...]   (defaults to the three standard directories below)
customer="<%= @customer %>"
environment="<%= @environment %>"
# Suffix appended to bucket-owner user names to form eppn-style ids (user@suffix)
eppn_suffix="<%= @eppn_suffix %>"
include_userbuckets="<%= @include_userbuckets %>"
container="nextcloud_app_1"
yq="/usr/local/bin/yq"
# Bootstrap yq if the binary is missing or not executable
if ! [[ -x ${yq} ]]; then
pip install yq
fi
declare -a directories
# Any command-line arguments override the default directory set
if [[ -n ${1} ]]; then
directories=("${@}")
else
directories+=("Arbetsmaterial")
directories+=("Bevarande")
directories+=("Gallringsbart")
fi
olddir="${PWD}"
tempdir=$(mktemp -d)
# Set to 1 once at least one directory was created, so we know to rescan
dirty=0
primary=''
# Users whose Nextcloud file cache must be rescanned afterwards
declare -a users=( 'admin' )
cd "${tempdir}" || echo "Could not cd to tempdir"
# All projects assigned to this customer/environment in hiera
# (word-splitting of the yq output into the array is intentional here)
declare -a projects=( $("${yq}" -r '.project_mapping.'"${customer}"'.'"${environment}"'.assigned | "\(.[].project)"' /etc/hiera/data/common.yaml) )
# Optionally also seed the per-user buckets in the primary project
if [[ "${include_userbuckets}" == "true" ]]; then
primary=$("${yq}" -r '.project_mapping.'"${customer}"'.'"${environment}"'.primary_project' /etc/hiera/data/common.yaml)
projects+=( "${primary}" )
fi
for project in "${projects[@]}"; do
# Skip buckets whose names start with "primary"
for bucket in $(rclone lsd "${project}:" | awk '{print $NF}' | grep -E -v '^primary'); do
count=$(rclone size --json "${project}:${bucket}" | jq -r .count)
# Only seed buckets that are completely empty
if [[ ${count} -gt 0 ]]; then
echo "Skipping ${project}:${bucket} because it has stuff in it already"
continue
fi
for directory in "${directories[@]}"; do
dirty=1
# User buckets look like <user>-...; remember the owner for the rescan
if [[ -n ${primary} ]] && [[ ${project} == "${primary}" ]] ; then
user=$(echo "${bucket}" | awk -F '-' '{print $1}')
users+=( "${user}@${eppn_suffix}" )
fi
echo "Creating ${project}:${bucket}/${directory} because it looks nice and empty"
temp="README.md"
echo "**${directory}**" >"${temp}"
echo "Var god lämna kvar denna fil/Please leave this file" >>"${temp}"
# The move both uploads the README and creates the directory in one call
rclone --no-traverse move "${temp}" "${project}:${bucket}/${directory}"
done
done
done
cd "${olddir}" || echo "could not cd to home dir"
rmdir "${tempdir}"
# Rescan so Nextcloud notices files created directly in the buckets
if [[ ${dirty} -gt 0 ]]; then
for user in "${users[@]}"; do
ssh -t "node3.$(hostname -d)" -l script -i .ssh/id_script "sudo /usr/local/bin/occ ${container} files:scan ${user}"
done
fi

View file

@ -1,44 +0,0 @@
#!/bin/bash
# (Removed in this diff.) Earlier version of the bucket-seeding script:
# seeds empty buckets of the customer's assigned projects with a standard
# set of directories and rescans only the admin user afterwards.
customer="<%= @customer %>"
environment="<%= @environment %>"
container="nextcloud_app_1"
yq="/usr/local/bin/yq"
# Bootstrap yq if the binary is missing or not executable
if ! [[ -x ${yq} ]]; then
pip install yq
fi
declare -a directories
# Any command-line arguments override the default directory set
if [[ -n ${1} ]]; then
directories=("${@}")
else
directories+=("Arbetsmaterial")
directories+=("Bevarande")
directories+=("Gallringsbart")
fi
olddir="${PWD}"
tempdir=$(mktemp -d)
# Set to 1 once at least one directory was created, so we know to rescan
dirty=0
cd "${tempdir}" || echo "Could not cd to tempdir"
# Iterate over all projects assigned to this customer/environment in hiera
for project in $(${yq} -r '.project_mapping.'"${customer}"'.'"${environment}"'.assigned | "\(.[].project)"' /etc/hiera/data/common.yaml); do
for bucket in $(rclone lsd "${project}:" | awk '{print $NF}'); do
count=$(rclone size --json "${project}:${bucket}" | jq -r .count)
# Only seed buckets that are completely empty
if [[ ${count} -gt 0 ]]; then
echo "Skipping ${project}:${bucket} because it has stuff in it already"
continue
fi
for directory in "${directories[@]}"; do
dirty=1
echo "Creating ${project}:${bucket}/${directory} because it looks nice and empty"
temp="README.md"
echo "**${directory}**" >"${temp}"
echo "Var god lämna kvar denna fil/Please leave this file" >>"${temp}"
# The move both uploads the README and creates the directory in one call
rclone --no-traverse move "${temp}" "${project}:${bucket}/${directory}"
done
done
done
cd "${olddir}" || echo "could not cd to home dir"
rmdir "${tempdir}"
# Rescan the admin user so Nextcloud notices the new files
if [[ ${dirty} -gt 0 ]]; then
ssh -t "node3.$(hostname -d)" -l script -i .ssh/id_script "sudo /usr/local/bin/occ ${container} files:scan admin"
fi

View file

@ -5,7 +5,7 @@ shift
include_userbuckets="${1}"
shift
environment="<%= @environment %>"
container="nextcloud${customer}_app_1"
container="nextcloud-${customer}-app-1"
yq="/usr/local/bin/yq"
if ! [[ -x ${yq} ]]; then

View file

@ -0,0 +1,35 @@
#!/bin/bash
# Delete a Nextcloud announcement identified by its exact subject line.
# Usage: script -s|--subject <subject>
VALID_ARGS=$(getopt -o s: --long subject: -- "$@")
# shellcheck disable=SC2181
if [[ ${?} -ne 0 ]]; then
exit 1;
fi
# Print usage and abort
usage () {
echo "${0}: -s|--subject <subject>"
exit 1
}
# Replace positional parameters with getopt's normalized form
eval set -- "${VALID_ARGS}"
# shellcheck disable=SC2078
while [ : ]; do
case "$1" in
-s | --subject)
subject="${2}"
shift 2
;;
*)
break
;;
esac
done
if [[ -z ${subject} ]]; then
usage
fi
# Look up the id of the announcement whose subject matches exactly,
# then delete it; no-op if no announcement matched.
id=$(/root/tasks/announce.sh --get | xmlstarlet sel -t -i '//subject="'"${subject}"'"' -m "/ocs/data/element/id" -v .)
if [[ -n ${id} ]]; then
/root/tasks/announce.sh --delete --id "${id}"
fi

View file

@ -4,19 +4,19 @@ customer="${1}"
multinode="${2}"
environment="<%= @environment %>"
location="${customer}-${environment}"
userjson=$(ssh "script@${multinode}" "sudo /home/script/bin/list_users.sh nextcloud${customer}_app_1")
userjson=$(ssh -o StrictHostKeyChecking=no "script@${multinode}" "sudo /home/script/bin/list_users.sh nextcloud-${customer}-app-1")
project="statistics"
bucket="drive-server-coms"
base_dir="${project}:${bucket}"
stat_dir="/opt/statistics"
customer_dir="${stat_dir}/${location}"
mkdir -p "${customer_dir}"
rclone mkdir "${base_dir}/${location}"
rclone mkdir --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies "${base_dir}/${location}"
echo "${userjson}" | jq . >"${customer_dir}/users.json"
status=${?}
if [[ ${status} -eq 0 ]]; then
# something is wrong if we cant copy the file in 30 seconds, so we should note that
if ! timeout 30s rclone copy --use-cookies "${customer_dir}/users.json" "${base_dir}/${location}/"; then
if ! timeout 30s rclone copy -c --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies "${customer_dir}/users.json" "${base_dir}/${location}/"; then
status=1
fi
fi

View file

@ -17,7 +17,7 @@ if [[ "${ENVIRONMENT}" == "prod" ]]; then
fi
if [[ "${CUSTOMER}" == "common" ]]; then
customer=""
types="multinode gss"
types="multinode"
fi
domain="${customer}drive.${env}sunet.se"

View file

@ -38,12 +38,16 @@ rclone="rclone --config /root/.rclone.conf"
# These are dynamic
buckets="$(${rclone} lsd "${rcp}:" | awk '{print $NF}')"
users=$(${rclone} cat "statistics:drive-server-coms/${rcp}/users.json" | jq '. | with_entries( select(.key | match("@") ) )')
users=$(${rclone} cat --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies "statistics:drive-server-coms/${rcp}/users.json" | jq '. | with_entries( select(.key | match("@") ) )')
for eppn in $(echo "${users}" | jq -r keys[]); do
user=${eppn%@*}
username=${eppn%@*}
# Remove underscore from username
user=${username//_/-}
# convert user to lower case for bucket naming rules
user_lower=${user,,}
echo "$(date) - Check bucket status for ${eppn}"
bucketname="${user}-${site_name//./-}"
bucketname="${user_lower}-${site_name//./-}"
if ! echo "${buckets}" | grep "${bucketname}" &> /dev/null; then
echo "$(date) - ${eppn} has no mounts configured, adding bucket and mounts..."
${rclone} mkdir "${rcp}:${bucketname}"

View file

@ -27,7 +27,9 @@ fi
site_name="<%= @site_name %>"
rclone="rclone --config /root/.rclone.conf"
user=${eppn%@*}
username=${eppn%@*}
# Remove underscore from username
user=${username//_/-}
bucketname="${user}-${site_name//./-}"

Some files were not shown because too many files have changed in this diff Show more