Compare commits
1007 commits
stable-202
...
main
Author | SHA1 | Date | |
---|---|---|---|
1a41a66355 | |||
da7be10d58 | |||
44e398a418 | |||
d903f4f90d | |||
ad13e65250 | |||
b63229092c | |||
4b3c189481 | |||
8d57b773a0 | |||
3c815b9a38 | |||
55cc71871f | |||
46f1330b5c | |||
d774349950 | |||
249fe90c54 | |||
87b6ea8ddb | |||
dc39cb5d85 | |||
beff4242ac | |||
cc31a2f038 | |||
6a63031f84 | |||
1c3eecdd85 | |||
16c4f3650c | |||
57b90cced4 | |||
4e67d6f1b8 | |||
9bb2afa68b | |||
26ea8e8e8d | |||
5c42f9d7d2 | |||
f87fac3c3b | |||
582d88d2f1 | |||
4f84f71070 | |||
4caed83e80 | |||
38174166a0 | |||
3b42817c57 | |||
5950751c73 | |||
d010d27f20 | |||
d5c3486425 | |||
1aea2f5674 | |||
0b3a2d43cb | |||
4b952bac40 | |||
e150fa3625 | |||
3579682de8 | |||
8f090bfcac | |||
0e189ad0ef | |||
e27cf34987 | |||
17d8d8b2de | |||
3fbfe26c66 | |||
4a6d35b984 | |||
ef1eb5b4ad | |||
dc5b61c9e6 | |||
33389e842a | |||
bb7ab09db8 | |||
bc481d44d5 | |||
5619cb8270 | |||
fbe409efff | |||
9096bf7c33 | |||
1c38cfbf3c | |||
d332a810b9 | |||
74da008b5e | |||
a6eaddbd8f | |||
0da62f097d | |||
ed3d4b1b9c | |||
bb44e6da8f | |||
de3d18b0a1 | |||
aa6be0ce34 | |||
83fcc67c91 | |||
ec4e2bdb38 | |||
a68fea86b8 | |||
a1b7a03908 | |||
7363284353 | |||
9f0eccdb3d | |||
be9f1a7152 | |||
f6cd170e6f | |||
dcb0ca8e48 | |||
e0f427fcc8 | |||
7090f7ca5e | |||
705479f29c | |||
3007fc6de4 | |||
4a270b9f65 | |||
b2942339d1 | |||
e0e1d5148f | |||
919ff971b4 | |||
8b216f6730 | |||
434cd6d1e4 | |||
b3c42225e1 | |||
06d7fabf4b | |||
800b2c4afc | |||
1654186ddc | |||
2c974bd178 | |||
41e9045833 | |||
7e53313d94 | |||
ebe6f9640d | |||
74f96705f1 | |||
c7e337f3c4 | |||
2944823676 | |||
fc0a790bd1 | |||
a4a9e492f5 | |||
cbb291be47 | |||
d760626e0b | |||
ec6cda87fb | |||
00c494193a | |||
76e110ab90 | |||
3315e17346 | |||
f1c816cba1 | |||
331db3e24c | |||
a7d737c8c4 | |||
2a273c649d | |||
96632b111b | |||
f2774b9c38 | |||
0ded3c46aa | |||
74343d15ef | |||
602fb12a7f | |||
fd0ed43035 | |||
0046349756 | |||
1f30a23a5a | |||
def0a0a77b | |||
|
98158e1be5 | ||
78bc2b3f7e | |||
26ee034887 | |||
d891874202 | |||
c11e95f5fe | |||
9596f9d210 | |||
e57349f011 | |||
02578640a9 | |||
c9bdb766eb | |||
9425d56716 | |||
b2d3b80f37 | |||
d67cc24318 | |||
efd05971b1 | |||
8dd5534b0f | |||
d24fa23262 | |||
84fc07cffe | |||
5b68f26771 | |||
dc59842fe8 | |||
a91bb85313 | |||
5f0c24865f | |||
8ee75b0601 | |||
62670483c1 | |||
ace56103fc | |||
4afec74a34 | |||
d4d998cf81 | |||
9794b03d44 | |||
45a44789a4 | |||
4201f18ce6 | |||
8057a2fa22 | |||
|
09deca0172 | ||
0e154b0e53 | |||
9ee8f6ea5d | |||
7a977b79a2 | |||
|
2625269aba | ||
19015d9061 | |||
9ac6531700 | |||
28a94fca51 | |||
20709f5dba | |||
38cd097f71 | |||
8027abaa82 | |||
1868c2a0b7 | |||
0a415b5129 | |||
250396dbb4 | |||
ab2d70303f | |||
2ba22cd30b | |||
ec95b0754f | |||
61daa44afa | |||
9c1e10ff4c | |||
0154a533ce | |||
ca8f0d5b89 | |||
63b780028f | |||
7e0f744e5d | |||
1d5d61b4a7 | |||
31f4c1eb9b | |||
b6c0bb2f46 | |||
9e6e33e137 | |||
977bd84f50 | |||
c7de56f73f | |||
2abf9ecf27 | |||
501ca8edfb | |||
fff058613a | |||
369492be90 | |||
856bc3b2c5 | |||
ebdf6f3b46 | |||
53e9a65b9f | |||
0400e89f36 | |||
571535f569 | |||
09ee93515c | |||
9f721c38e8 | |||
d4fae26777 | |||
c4f95af173 | |||
e0c4ddcd6e | |||
24a5abbefe | |||
65a236dcaa | |||
6cfa2b3da7 | |||
29aecdfe2a | |||
ee6887bfeb | |||
5a04dcdd0f | |||
8927f194d9 | |||
04e4592f4c | |||
45989b8d3d | |||
2c58c02864 | |||
c0c964282b | |||
8f7cd413ff | |||
536a4a11d3 | |||
2883693967 | |||
86a5d1d307 | |||
60c776088e | |||
af323bdfd9 | |||
c2db1c8671 | |||
d9b4ff9f34 | |||
a942487f30 | |||
67b72f387a | |||
7c1456702f | |||
d62724d63d | |||
5c89469f3a | |||
9ef9bab2cd | |||
32339a3fbf | |||
601ff83b9f | |||
67b46d3f75 | |||
11ab90e981 | |||
0f746f67ce | |||
b09c79cc3d | |||
4bc3a128ac | |||
e31f4c8c61 | |||
19473d6d82 | |||
4561e417c6 | |||
64d8ad253d | |||
b100e18eea | |||
80e1034695 | |||
ab039d217b | |||
75cbaed902 | |||
39285f075f | |||
ba792bd851 | |||
1c43f32440 | |||
7f39e96bf9 | |||
3e5a76a37d | |||
a4b4f4c013 | |||
c3ce7089ad | |||
6cff186121 | |||
8e2ba68b80 | |||
d648c987c7 | |||
49c3a69282 | |||
9a58486d11 | |||
596f05f099 | |||
849bd7f846 | |||
c3fbf4c1ce | |||
62d172b2f5 | |||
2c61a00ebd | |||
a98fe62a43 | |||
8e5e7fe34f | |||
60fc3ef307 | |||
43081e0955 | |||
99f768708a | |||
30952f7448 | |||
0e83c47dd1 | |||
b769f1637a | |||
25d97470da | |||
ce745ff6b0 | |||
1aae63ba07 | |||
5913537c34 | |||
932d91364e | |||
812a56cf99 | |||
d5e88dd431 | |||
f27a9c353c | |||
bff0aff823 | |||
a8060b7774 | |||
dac082f4f5 | |||
48f8f6d690 | |||
deabba2e22 | |||
22bfeaf244 | |||
d6b4098932 | |||
42363a9aaf | |||
4dd95bdb39 | |||
714e2d98f6 | |||
64e3746628 | |||
a563e16309 | |||
124203cd92 | |||
6365549250 | |||
9a367b0955 | |||
a5580567af | |||
a69ae092d1 | |||
df6557a91b | |||
4dcdf87f32 | |||
2258108232 | |||
38825adce8 | |||
3baf31c7ed | |||
221c89564a | |||
6d3bca981d | |||
2e171b1d04 | |||
22785da3e0 | |||
67d9e825a5 | |||
b6bd77a52b | |||
38c56a07cd | |||
fa5a34aeba | |||
76700f595d | |||
bb8f2d8b67 | |||
2f47e6e109 | |||
a2ee732d5d | |||
453e72a79d | |||
662d9592fa | |||
1a223f2846 | |||
25309ea51f | |||
72d71a69c7 | |||
28b6584a6c | |||
d597ec106a | |||
d4eb4af079 | |||
4e45dd1733 | |||
2c7acd1ec6 | |||
992f74d3a1 | |||
2f682f8bd8 | |||
9ee7f6f7b8 | |||
bf3ed2e92f | |||
6d2d863076 | |||
8391219068 | |||
f36659353d | |||
2b9e199899 | |||
71bca06a08 | |||
fb58999b22 | |||
91b9955db2 | |||
1d3970c134 | |||
1086053d50 | |||
57b2ca6316 | |||
d3d0807d43 | |||
cb13190197 | |||
671b5f95df | |||
dc8ff6a712 | |||
124c2f6c5d | |||
142fc78f6b | |||
4b4d0626cf | |||
e3a729e1a8 | |||
8cae1a668a | |||
7177dfa12e | |||
76640002c0 | |||
b8a16bd700 | |||
90372ddd01 | |||
5487bb9d9c | |||
d27990988a | |||
12d07a5ab0 | |||
3eb87725af | |||
5c228af37a | |||
a507c0f43f | |||
e8e8a9ef7d | |||
6a5e2aed62 | |||
374afd5e5a | |||
eecb8a9e34 | |||
1e3d79542c | |||
37d8165cc5 | |||
73d96f7f6d | |||
36131d0b21 | |||
9a419cd214 | |||
86fea06b02 | |||
b6bfa49bd7 | |||
16b645b845 | |||
92cd7dc19f | |||
5dc6a9e574 | |||
959e0e6228 | |||
e3a511521b | |||
c28c6ac91b | |||
4fee7b1042 | |||
cb2a62349d | |||
06c4f4ec68 | |||
da0e74313d | |||
aea23bbf86 | |||
3abec90157 | |||
57958e6475 | |||
4504de3160 | |||
9de3ab2c60 | |||
e8c5a9e8ac | |||
4ec58c3b61 | |||
fdb57e1b99 | |||
9fb353de79 | |||
1cc0e86664 | |||
ce6774487a | |||
2bed7d8914 | |||
7222cf5703 | |||
a64b2533bb | |||
6865d6833f | |||
958ff7daa9 | |||
c649a7a6b4 | |||
ac9e29d39e | |||
56f8a5d149 | |||
cb35c3bbf7 | |||
b280257882 | |||
d3cb9b9e14 | |||
39384f7e40 | |||
6969510b5d | |||
170d1cfc77 | |||
7be06a1215 | |||
bff3c3620b | |||
880e0401cc | |||
04061b41c1 | |||
5f425cf2bf | |||
7f7743b538 | |||
8510d4e958 | |||
7dad764539 | |||
cebb4929a9 | |||
f95fc106c9 | |||
64e5e61879 | |||
9a37885da6 | |||
ed08626dc8 | |||
f0cb47dd97 | |||
29acc076b3 | |||
d57135d906 | |||
1d6e08b852 | |||
978d073bde | |||
8e2f1d032d | |||
66093b13b8 | |||
1438c94ca5 | |||
5c3fa64086 | |||
2fb9e754cc | |||
f03e6241bf | |||
3c7fe38458 | |||
6853993a34 | |||
1ab9fbc14e | |||
d680ed8597 | |||
96734c8736 | |||
f948bfb4ae | |||
b7a17f7520 | |||
ecc158b48e | |||
6445c0434e | |||
330f167a2c | |||
a0df922493 | |||
9648aa184c | |||
1cede4b87c | |||
438e2c24e0 | |||
46d1db97bc | |||
ad0b7abff9 | |||
88c8e159bc | |||
3818772b87 | |||
f79d7acb02 | |||
1eaa62a150 | |||
a4d0294c10 | |||
b24c8ec683 | |||
90569f54d3 | |||
2aba03fe41 | |||
52f563692d | |||
f2abb855ba | |||
4b3e9c0da1 | |||
8e982592c2 | |||
2b543373ed | |||
a089d02cc4 | |||
a6d680aaed | |||
af2385dffe | |||
6aa7238ee2 | |||
f06aa65ec5 | |||
f973b83d1b | |||
a8f2bb9bb6 | |||
751ad087e6 | |||
f886d53f3c | |||
a51cc9fdc4 | |||
a329a46491 | |||
c9e7d8eb2c | |||
ac85bddc8a | |||
49f00af783 | |||
d471c7f271 | |||
9b2490e415 | |||
58af85571b | |||
6664271b49 | |||
dc16a41e0f | |||
b1ae0c0465 | |||
862f94b4eb | |||
1c9365e121 | |||
1ff784e5e4 | |||
3304d408a5 | |||
38447ceb37 | |||
e0f6d3bafa | |||
f531a60514 | |||
8646f1eda7 | |||
f5f97e1c3a | |||
81341be6e7 | |||
51ddfa985c | |||
158f982aed | |||
a7fe6cecc7 | |||
45ac19bec8 | |||
a74899332e | |||
d63c586998 | |||
beb69036a1 | |||
74af996a76 | |||
e76c22f6d5 | |||
b065ac7ae6 | |||
019a3d9792 | |||
6224fe3e96 | |||
cc2a9e9ebf | |||
daa647beb9 | |||
1ecf6c8128 | |||
4111ac7500 | |||
8b0d90e4eb | |||
e3a66f95df | |||
b096522c36 | |||
d7e7e260c2 | |||
52869ca089 | |||
1a39e0dbec | |||
e0c06d4571 | |||
f29e074ded | |||
c761c56052 | |||
026c5c5d03 | |||
7460019ec5 | |||
5c0e05394d | |||
f2b8406826 | |||
6d78fdde7c | |||
15beb333ec | |||
8ca506f608 | |||
00d4c96d28 | |||
3e98d5f477 | |||
bcbcb6e469 | |||
4b13d54f23 | |||
60d37feb61 | |||
301d451c3a | |||
bbee5643d6 | |||
01b3c4cf56 | |||
d3e0f41ff1 | |||
8b74bd9c75 | |||
5c16a783e9 | |||
2b590b3f01 | |||
aade6fb2c7 | |||
f2c31c79cd | |||
b79eb5d4bf | |||
606b14db17 | |||
b4e496bf58 | |||
e4093f5e40 | |||
b573820906 | |||
141f9334f2 | |||
07ddb2220a | |||
722a8bdb68 | |||
d832f2a274 | |||
0bf3ebfc53 | |||
ccdb53310d | |||
07d54eba92 | |||
62bb2da068 | |||
f5297850d9 | |||
652f2fbdf7 | |||
66a81768c8 | |||
260fd526fe | |||
ac917b9811 | |||
4ec608adf2 | |||
b66e072cf7 | |||
e3697719d2 | |||
766c337cb2 | |||
24c2fcbdfc | |||
b7e94c2259 | |||
cf0a778781 | |||
15a752a65d | |||
cd1a4ba19c | |||
2c216513d3 | |||
8ba567e385 | |||
911edbed58 | |||
b4b099b237 | |||
f8dce117a8 | |||
1a63b4884f | |||
eeca6dfbe9 | |||
66bd2e9bab | |||
473f702943 | |||
d135f415d0 | |||
dbac473c26 | |||
7673aca60e | |||
fc2079e2fa | |||
f339aa2865 | |||
705d3c8732 | |||
1822ecd683 | |||
74768985cc | |||
b0ded4d0ef | |||
98a02615cc | |||
d0260d7c05 | |||
dd022213a6 | |||
e20e41edf8 | |||
663021dc61 | |||
59e137e918 | |||
d7c724570f | |||
ffc5170fc0 | |||
ea09557c1a | |||
9364391ad5 | |||
6a5000a557 | |||
e4ccdde169 | |||
589f62fc0a | |||
6827c0af7c | |||
bff8d8ab22 | |||
2152b5b4a9 | |||
75c3a77c6d | |||
eac5b172f2 | |||
05bde6adb6 | |||
d8eaa122e3 | |||
7402202b9a | |||
60cb53e631 | |||
c134f0771f | |||
8acd0c40b8 | |||
2c13b2f51d | |||
04a3c96250 | |||
3651afd778 | |||
728b152f85 | |||
c8051b065a | |||
41dd736b20 | |||
08be347419 | |||
9285c155db | |||
76fe97382d | |||
12ca587858 | |||
cdb1157172 | |||
39b571f3d1 | |||
d30dabc534 | |||
d353069660 | |||
d696c19242 | |||
eef539928e | |||
4cf8cf3adb | |||
7f7d38c87e | |||
adeaea5a37 | |||
4a45670bec | |||
3738e703b6 | |||
bac25d27c8 | |||
42831948e1 | |||
db92a5d255 | |||
6e5831367e | |||
1dea001ed9 | |||
765e0b41cb | |||
4d6e6f67af | |||
f21a489e46 | |||
b20c4c7397 | |||
16496dfded | |||
a836417db2 | |||
a76397aafd | |||
ea51f0b56b | |||
6c8a4f430b | |||
097fce5f87 | |||
5c46f5bc38 | |||
bd1519430f | |||
5f47ce7fca | |||
45321052de | |||
717350780f | |||
a03d4437e2 | |||
35362991d0 | |||
a4d0afe9fd | |||
c90a6ac9a1 | |||
3ad7d95161 | |||
450f31e2d5 | |||
840d2000a6 | |||
00a2f1f425 | |||
b3d9873176 | |||
bf5abc073e | |||
aa2a869203 | |||
5b7c702573 | |||
cbf4086993 | |||
5f74eaaab0 | |||
05814cfd24 | |||
4376aad6e2 | |||
c485935f30 | |||
6299dad4fd | |||
718bc0b496 | |||
472977fde4 | |||
eba9546cdc | |||
a16c9d68e8 | |||
2f548f2431 | |||
47bf118116 | |||
72cf4e570c | |||
51a695ed9e | |||
02454ebd39 | |||
cb16f03710 | |||
5ba11977b0 | |||
27d0bc15cc | |||
01fc5a9e86 | |||
bd074c73d5 | |||
d33f80273d | |||
4a8a4d642c | |||
df4fe9985b | |||
d13b22e3a2 | |||
c774d67745 | |||
d5c6b9a5fd | |||
8d2c481569 | |||
36f163414f | |||
b5b3885c30 | |||
77ad3ac26d | |||
e72c42924a | |||
069eb7ce93 | |||
d07ed76d36 | |||
f626156c4c | |||
b4ce0046de | |||
314149c7f3 | |||
65abea7daa | |||
181f2e52fe | |||
f1cac6ce35 | |||
49386124c3 | |||
cc6f2c8d7b | |||
3fce5c6fab | |||
4f6786363b | |||
0837f82d87 | |||
10b0f30155 | |||
922de25fd8 | |||
1a70154efc | |||
cbb6ce3aa3 | |||
7f19e0de62 | |||
49243acaa9 | |||
393f2c7034 | |||
f10864ff91 | |||
|
f511383a7a | ||
668c1389d5 | |||
585319b177 | |||
0d505d2482 | |||
db6bfcc517 | |||
8aa9c14d35 | |||
2cdacfe0f5 | |||
|
c425f90b0f | ||
|
7dc9667bec | ||
|
3b82d5b0e3 | ||
|
f581835f7c | ||
|
31b1bcb92a | ||
|
6265472396 | ||
|
a150d23f1a | ||
|
b40beeb420 | ||
|
300e877b3a | ||
8cf9f0eafa | |||
436ea4f1a5 | |||
c83e8b2982 | |||
a08f473954 | |||
2f629c289c | |||
f4eca0b747 | |||
9d5e53c0cc | |||
ec679013e3 | |||
7aa4a25dbd | |||
2ba33ad5d6 | |||
ed65bb0d94 | |||
689c625b90 | |||
df731fdb44 | |||
440bcec0d2 | |||
9e4879d9c8 | |||
3f1ea1b694 | |||
3491a1a5f7 | |||
76bc7edaed | |||
7d25ae0859 | |||
3899dab9b3 | |||
3cdf7d4084 | |||
14b6535efd | |||
0148a727ca | |||
e06a29a8a1 | |||
af873e7133 | |||
65a6b1787d | |||
4e496bcb13 | |||
5c5ed31f13 | |||
21d9a5e3d0 | |||
a5f5ddb293 | |||
eb0bb65f4a | |||
333d712548 | |||
bdd858f76c | |||
03fb0a23ac | |||
81bb47819c | |||
9e6321e743 | |||
b1ea687f2d | |||
33d49b7ec0 | |||
f988081b39 | |||
7d755324f8 | |||
b72315b1a2 | |||
6220d691a3 | |||
ec9115df06 | |||
aa1d8710e1 | |||
6211f3aaea | |||
176338f537 | |||
1e9164dcd4 | |||
e39b6cbaed | |||
11607f208c | |||
823e172278 | |||
6afbcf5beb | |||
ec6ba465f9 | |||
63631b2b51 | |||
059bd041e6 | |||
5df8dba25c | |||
1e7108297b | |||
cdf16db309 | |||
5472b767c8 | |||
e990b05401 | |||
ca27f46ffe | |||
e57de0ebd0 | |||
28a1b95114 | |||
88ee8c2b67 | |||
ade4bd42b3 | |||
4c597442e1 | |||
829f3f01c7 | |||
7198109d56 | |||
8752500f4d | |||
0a09f73f12 | |||
f6cca5bcdc | |||
37863c7a17 | |||
63a2b363a7 | |||
a21ea6fdd5 | |||
0dea8dbc91 | |||
0e5653aa0c | |||
56291a4165 | |||
3bac4ac9f3 | |||
c6a1b2cd66 | |||
39137a4dfc | |||
216c706457 | |||
c013b861e9 | |||
d5b9bb88fa | |||
f606d6c1b6 | |||
4a7af7a049 | |||
3d4025e738 | |||
b4b90799e6 | |||
a44cdc1bb6 | |||
d2ced8d1cd | |||
b9e35dcf92 | |||
b5f8abf934 | |||
6d33b53a92 | |||
631f3616c6 | |||
174c72eb70 | |||
b989bd3d54 | |||
234d19b652 | |||
22f5dfac62 | |||
1b60d41dab | |||
7207ef2bbf | |||
c7bd0fa965 | |||
d540c90afa | |||
b927716aad | |||
ac6430df51 | |||
88b3c1b4fb | |||
5bc0ddfa00 | |||
99c88c0c3b | |||
8bfa2d58a9 | |||
3b58bcd2ae | |||
364a8685a7 | |||
3bd7a44d7a | |||
f56a5c6eae | |||
feb02cc2cc | |||
58fd21e0aa | |||
1e0879769c | |||
3986e69349 | |||
d0ca012129 | |||
753512f90f | |||
598be8c2f1 | |||
1cea9c9173 | |||
6df092764f | |||
735eaf7067 | |||
54de0adb0f | |||
42b57f3655 | |||
ed9353198d | |||
1152fd972f | |||
604d485d29 | |||
c7072d6dae | |||
3281824eac | |||
df1277e04e | |||
b15bc5ad03 | |||
befc3207d9 | |||
b166a683a6 | |||
bffb06333d | |||
bc329be450 | |||
1e23d134f1 | |||
50dcefdb3d | |||
ff3a5c143c | |||
e0139b0765 | |||
fa93c01846 | |||
a9fdb43bf6 | |||
95030efba2 | |||
2119f71008 | |||
08243f555d | |||
61c56fd2f2 | |||
b7a26db915 | |||
b225e8d364 | |||
0f8f27773b | |||
f4ff3fea76 | |||
e7ffd6fac3 | |||
8f3becd914 | |||
3531be828e | |||
24b0b1eedc | |||
20471ebaa8 | |||
6c22905a04 | |||
149ae70210 | |||
402b6e1c4b | |||
8593d0bb00 | |||
3dab127050 | |||
fc4b51b4ac | |||
a14a71f5d6 | |||
52c11917ce | |||
ca2cf7e3f2 | |||
ef7c470244 | |||
5be9aedc56 | |||
422ebd7ace | |||
75f75732e7 | |||
8ca66c10ad | |||
7d10d168bd | |||
036dcf010c | |||
3b5d14a189 | |||
e840903eae | |||
db516e3b72 | |||
a9695aeee1 | |||
713f12e1d4 | |||
568cb87ad6 | |||
709d2165ac | |||
37e419cf75 | |||
54868e8518 | |||
20d1558866 | |||
f1276d52d3 | |||
9b44a99135 | |||
9ec76cfc7f | |||
8d5471d88e | |||
71fd8a3287 | |||
f95675f6ee | |||
e06b7ffb1c | |||
1045898d40 | |||
4af4a23d1b | |||
31651be3be | |||
2a59d0995c | |||
c5e5279f0e | |||
1ce82c89cc | |||
824be30b44 | |||
bbf00f7f4a | |||
1632657f46 | |||
165415b725 | |||
f2f919a59c | |||
801c5a2e85 | |||
4948ada609 | |||
e220a1b1c6 | |||
bf5e5adf10 | |||
0b267c07c5 | |||
e3dbe10187 | |||
d93cce4fc0 | |||
6dfe1496a4 | |||
8386df91de | |||
6de5312e7c | |||
af020c77aa | |||
cdfd3b9126 | |||
4be1448dab | |||
16bf805717 | |||
38807e62a5 | |||
238172397b | |||
d104fd4299 | |||
3c1ee2ba1d | |||
03808354f3 | |||
4963afd5f6 | |||
18578817f1 | |||
3fb6164501 | |||
392e83188b | |||
c82fe2db78 | |||
b2d70279b1 | |||
810c095bda | |||
09d3f72162 | |||
d94ecc9a26 | |||
284bc8ad8e | |||
09e8655a08 | |||
76a928c060 | |||
abd56622e7 | |||
4bde51f883 | |||
19a02f0bf2 | |||
e9e70c5b29 | |||
d09dc50d0f | |||
25d8d94b70 | |||
10c44fe5fc | |||
44e782d793 | |||
8443f738cc | |||
99a93ec9be | |||
08e3bc8fa1 | |||
8c355fcc4d | |||
5abf4b04f4 | |||
00256a7259 | |||
23af9a6a24 | |||
45762b076a | |||
988efa23d9 | |||
1f36efdcb4 | |||
b60f548e0a | |||
09a13f6444 | |||
5d286dd5c0 | |||
7c49d580a7 | |||
e5c55de042 | |||
fd61340a4a | |||
36d777ff8f | |||
ceb28450ee | |||
bd9e48eca8 | |||
2ce8d1c01c | |||
08e8cdcdaa | |||
7728f8a272 | |||
9dd730fcea | |||
58a2b4f041 | |||
ed764ad8fd | |||
ebf5e458f6 | |||
f810c1cba7 | |||
4d36d4b44e | |||
6c528254d7 | |||
88f7c95679 | |||
1588d5869b | |||
5a8bca9de7 | |||
b62746ac66 | |||
dec2ebd0e7 | |||
430521d96d | |||
eec40a23ba | |||
2d50ca2dac | |||
d378b5f2da | |||
746859e70f | |||
3712e6228c | |||
6a1895d48e | |||
98bf6d47b2 | |||
d4b91c5400 | |||
121f231397 | |||
d4668a5086 | |||
707d5cfe32 | |||
5ac40a8bbe | |||
b132b363cd | |||
20755e1361 | |||
e06f327f95 | |||
93b5b9352e | |||
c760a04551 | |||
fc32dfe6fd | |||
dd6e75cc32 | |||
5f1163e257 | |||
a16ff712a2 | |||
599444f537 | |||
e013d5baa7 | |||
ddc3f673d3 | |||
f94b66e3c1 | |||
3979367a7d | |||
6dae356334 | |||
edddcbc5da | |||
4c014aaa97 | |||
f371fbc34d | |||
a1ce98a14d | |||
d4c557b290 | |||
9e18ee62e8 | |||
028c358c72 | |||
28b54a74a0 | |||
2adea62865 | |||
a1c56de932 |
119 changed files with 2896 additions and 1308 deletions
33
facts.d/nc_versions.sh
Executable file
33
facts.d/nc_versions.sh
Executable file
|
@ -0,0 +1,33 @@
|
|||
#!/bin/bash
#
# facts.d external-fact script: print the Nextcloud version for every
# customer/environment pair, one fact per line, e.g.:
#   nextcloud_version_prod_<customer>=<version>
#
# Versions are read from each customer's hiera group.yaml; when a group
# file has no value (yq prints 'null'), we fall back to the global
# default in common.yaml.

# Only monitor hosts publish these facts; everything else exits quietly.
if ! [[ $(hostname) =~ monitor ]]; then
  exit 0
fi

repo="/var/cache/cosmos/repo"
common="${repo}/global/overlay/etc/hiera/data/common.yaml"

# print_fact <customer> <environment> <version> [<hiera key>]
#
# Print one fact line. A version of 'null' (yq's missing-key marker)
# falls back to the value of <hiera key> in the global common.yaml.
# <hiera key> defaults to "nextcloud_version_<environment>"; previously
# the fallback read the caller's global ${key} implicitly, which made the
# function silently depend on the surrounding loop — the explicit
# parameter (with the same default) removes that coupling without
# changing behaviour.
function print_fact {
  local customer=${1}
  local environment=${2}
  local version=${3}
  local key=${4:-nextcloud_version_${environment}}
  if [[ ${version} != 'null' ]]; then
    echo "nextcloud_version_${environment}_${customer}=${version}"
  else
    echo "nextcloud_version_${environment}_${customer}=$(yq -r ".${key}" "${common}")"
  fi
}

for environment in test prod; do
  key="nextcloud_version_${environment}"
  # Multinode customers all share a single group.yaml.
  for customer in $(yq -r '.multinode_mapping | keys| .[]' "${common}"); do
    group="${repo}/multinode-common/overlay/etc/hiera/data/group.yaml"
    version=$(yq -r ".${key}" "${group}")
    print_fact "${customer}" "${environment}" "${version}"
  done
  # Full nodes each have their own <customer>-common group.yaml.
  for customer in $(yq -r '.fullnodes[]' "${common}"); do
    group="${repo}/${customer}-common/overlay/etc/hiera/data/group.yaml"
    version=$(yq -r ".${key}" "${group}")
    print_fact "${customer}" "${environment}" "${version}"
  done
done
|
6
files/scriptreciver/sysctl-d-gofasta.conf
Normal file
6
files/scriptreciver/sysctl-d-gofasta.conf
Normal file
|
@ -0,0 +1,6 @@
|
|||
# Tune the network stack for high-throughput transfers (gofasta):
# 64 MiB socket buffer ceilings, enlarged TCP autotuning ranges,
# and fq + BBR congestion control.
net.core.rmem_max=67108864
|
||||
net.core.wmem_max=67108864
|
||||
# TCP autotuning: min / default / max buffer sizes in bytes.
net.ipv4.tcp_rmem=4096 87380 33554432
|
||||
net.ipv4.tcp_wmem=4096 87380 33554432
|
||||
# fq qdisc is the recommended pairing for BBR pacing.
net.core.default_qdisc=fq
|
||||
net.ipv4.tcp_congestion_control=bbr
|
|
@ -1,14 +1,8 @@
|
|||
# Let's determine who the customer is by looking at the hostname
|
||||
function sunetdrive::get_customer() >> String {
|
||||
$hostnameparts = split($facts['fqdn'],'\.')
|
||||
$hostnameparts = split($facts['networking']['fqdn'],'\.')
|
||||
if $hostnameparts[1] == 'drive' {
|
||||
if $hostnameparts[0] =~ /^gss/ {
|
||||
return 'gss'
|
||||
} elsif $hostnameparts[0] =~ /^lookup/ {
|
||||
return 'lookup'
|
||||
} else {
|
||||
return 'common'
|
||||
}
|
||||
return 'common'
|
||||
} elsif $hostnameparts[0] =~ /idp-proxy/ {
|
||||
return 'common'
|
||||
}
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
# Let's determine where we are by looking at the hostname
|
||||
function sunetdrive::get_environment() >> String {
|
||||
$hostname = $facts['fqdn']
|
||||
$hostname = $facts['networking']['fqdn']
|
||||
if $hostname =~ /^.*\.drive\.sunet\.se$/ {
|
||||
if $hostname =~ /^.*\.pilot\.drive\.sunet\.se$/ {
|
||||
return 'pilot'
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
# Let's determine where we are by looking at the hostname
|
||||
function sunetdrive::get_node_number() >> Integer {
|
||||
Integer(regsubst($::fqdn, /^[a-zA-Z\-]+(\d).*$/, '\\1'))
|
||||
Integer(regsubst($facts['networking']['fqdn'], /^[a-zA-Z\-]+(\d).*$/, '\\1'))
|
||||
}
|
||||
|
|
|
@ -5,6 +5,7 @@ define sunetdrive::app_type (
|
|||
$override_config = undef,
|
||||
$override_compose = undef
|
||||
) {
|
||||
include sunet::packages::netcat_openbsd
|
||||
# Config from group.yaml and customer specific conf
|
||||
$environment = sunetdrive::get_environment()
|
||||
$customer = sunetdrive::get_customer()
|
||||
|
@ -14,8 +15,9 @@ define sunetdrive::app_type (
|
|||
# The config used
|
||||
$config = $override_config
|
||||
# Other settings
|
||||
$admin_password = $config[ 'admin_password' ]
|
||||
$dbhost = $config[ 'dbhost' ]
|
||||
$dbname = $config[ 'dbname' ]
|
||||
$dbuser = $config[ 'dbuser' ]
|
||||
$instanceid = $config[ 'instanceid' ]
|
||||
$mysql_user_password = $config[ 'mysql_user_password' ]
|
||||
$passwordsalt = $config[ 'passwordsalt' ]
|
||||
|
@ -28,8 +30,10 @@ define sunetdrive::app_type (
|
|||
$config = hiera_hash($environment)
|
||||
$skeletondirectory = $config['skeletondirectory']
|
||||
# Other settings
|
||||
$admin_password = safe_hiera('admin_password')
|
||||
$dbhost = 'proxysql_proxysql_1'
|
||||
$dbname = 'nextcloud'
|
||||
$dbuser = 'nextcloud'
|
||||
$full_backup_retention = hiera('full_backup_retention')
|
||||
$instanceid = safe_hiera('instanceid')
|
||||
$mysql_user_password = safe_hiera('mysql_user_password')
|
||||
$passwordsalt = safe_hiera('passwordsalt')
|
||||
|
@ -39,8 +43,8 @@ define sunetdrive::app_type (
|
|||
$s3_secret = safe_hiera('s3_secret')
|
||||
$secret = safe_hiera('secret')
|
||||
}
|
||||
$twofactor_enforced_groups = hiera_array('twofactor_enforced_groups')
|
||||
$twofactor_enforced_excluded_groups = hiera_array('twofactor_enforced_excluded_groups')
|
||||
$twofactor_enforced_groups = $config['twofactor_enforced_groups']
|
||||
$twofactor_enforced_excluded_groups = $config['twofactor_enforced_excluded_groups']
|
||||
$nextcloud_version = hiera("nextcloud_version_${environment}")
|
||||
$nextcloud_version_string = split($nextcloud_version, '[-]')[0]
|
||||
# Common settings for multinode and full nodes
|
||||
|
@ -51,18 +55,22 @@ define sunetdrive::app_type (
|
|||
$site_name = $config['site_name']
|
||||
$trusted_domains = $config['trusted_domains']
|
||||
$trusted_proxies = $config['trusted_proxies']
|
||||
if $location == 'kau-prod' {
|
||||
$php_memory_limit_mb = 2048
|
||||
} else {
|
||||
$php_memory_limit_mb = 512
|
||||
}
|
||||
if $::facts['dockerhost2'] == 'yes' {
|
||||
$hostnet = true
|
||||
}
|
||||
|
||||
# These are encrypted values from local.eyaml
|
||||
|
||||
$gss_jwt_key = safe_hiera('gss_jwt_key')
|
||||
$smtppassword = safe_hiera('smtp_password')
|
||||
|
||||
#These are global values from common.yaml
|
||||
$gs_enabled = hiera('gs_enabled')
|
||||
$gs_federation = hiera('gs_federation')
|
||||
$gss_master_admin = hiera_array('gss_master_admin')
|
||||
$gss_master_url = hiera("gss_master_url_${environment}")
|
||||
$lookup_server = hiera("lookup_server_${environment}")
|
||||
$mail_domain = hiera("mail_domain_${environment}")
|
||||
$mail_smtphost = hiera("mail_smtphost_${environment}")
|
||||
$mail_from_address = hiera("mail_from_address_${environment}")
|
||||
|
@ -70,17 +78,63 @@ define sunetdrive::app_type (
|
|||
$smtpuser = hiera("smtp_user_${environment}")
|
||||
$tug_office = hiera_array('tug_office')
|
||||
|
||||
# This is a global value from common.yaml but overridden in the gss-servers local.yaml
|
||||
$gss_mode = hiera('gss_mode')
|
||||
|
||||
# These are global values from common.yaml but can be overridden in group.yaml
|
||||
$drive_email_template_text_left = $config['drive_email_template_text_left']
|
||||
$drive_email_template_plain_text_left = $config['drive_email_template_plain_text_left']
|
||||
$drive_email_template_url_left = $config['drive_email_template_url_left']
|
||||
$lb_servers = hiera_hash($environment)['lb_servers']
|
||||
$document_servers = hiera_hash($environment)['document_servers']
|
||||
# set up cronjob on node3
|
||||
if $::fqdn[0,5] == 'node3' {
|
||||
|
||||
# Calculate some values
|
||||
$expiration_months = max(12, $full_backup_retention)
|
||||
$expiration_days_min = $expiration_months * 31
|
||||
$expiration_days_max = $expiration_months * 31 + 93
|
||||
|
||||
unless $is_multinode{
|
||||
user { 'www-data': ensure => present, system => true }
|
||||
|
||||
file { '/usr/local/bin/get_containers':
|
||||
ensure => present,
|
||||
force => true,
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
content => template('sunetdrive/application/get_containers'),
|
||||
mode => '0744',
|
||||
}
|
||||
if ($nodenumber == 3) {
|
||||
file { '/usr/lib/nagios/plugins/check_nextcloud_mounts.py':
|
||||
ensure => present,
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
content => template('sunetdrive/application/check_nextcloud_mounts.py'),
|
||||
mode => '0744',
|
||||
}
|
||||
sunet::sudoer {'nagios_run_nextcloud_mounts_command':
|
||||
user_name => 'nagios',
|
||||
collection => 'nrpe_nextcloud_mounts_check',
|
||||
command_line => '/usr/lib/nagios/plugins/check_nextcloud_mounts.py'
|
||||
}
|
||||
sunet::nagios::nrpe_command {'check_nextcloud_mounts':
|
||||
command_line => '/usr/bin/sudo /usr/lib/nagios/plugins/check_nextcloud_mounts.py'
|
||||
}
|
||||
}
|
||||
if ($nodenumber == 3) {
|
||||
file { '/usr/local/bin/scan_external_mounts':
|
||||
ensure => present,
|
||||
force => true,
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
content => template('sunetdrive/application/scan_external_mounts.sh'),
|
||||
mode => '0744',
|
||||
}
|
||||
sunet::scriptherder::cronjob { 'scriptherder_scan_external_mounts':
|
||||
cmd => '/usr/local/bin/scan_external_mounts',
|
||||
hour => '1',
|
||||
minute => '20',
|
||||
ok_criteria => ['exit_status=0','max_age=2d'],
|
||||
warn_criteria => ['exit_status=1','max_age=3d'],
|
||||
}
|
||||
}
|
||||
file { '/opt/nextcloud/cron.sh':
|
||||
ensure => file,
|
||||
owner => 'root',
|
||||
|
@ -93,26 +147,24 @@ define sunetdrive::app_type (
|
|||
user => 'root',
|
||||
minute => '*/5',
|
||||
}
|
||||
if $location =~ /^extern/ {
|
||||
file { '/opt/nextcloud/user-sync.sh':
|
||||
ensure => file,
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0700',
|
||||
content => template('sunetdrive/application/user-sync.erb.sh'),
|
||||
}
|
||||
-> cron { 'gss_user_sync':
|
||||
command => '/opt/nextcloud/user-sync.sh',
|
||||
user => 'root',
|
||||
minute => '*/5',
|
||||
}
|
||||
file { '/opt/nextcloud/user-sync.sh':
|
||||
ensure => absent,
|
||||
}
|
||||
file { '/usr/local/bin/nocc':
|
||||
ensure => present,
|
||||
force => true,
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
content => template('sunetdrive/application/nocc.erb'),
|
||||
mode => '0740',
|
||||
}
|
||||
file { '/etc/sudoers.d/99-nocc':
|
||||
ensure => file,
|
||||
content => "script ALL=(root) NOPASSWD: /usr/local/bin/nocc\n",
|
||||
mode => '0440',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
}
|
||||
}
|
||||
#Create users
|
||||
unless $is_multinode{
|
||||
user { 'www-data': ensure => present, system => true }
|
||||
package { 'aufs-tools': ensure => latest, provider => 'apt' }
|
||||
|
||||
file { '/usr/local/bin/occ':
|
||||
ensure => present,
|
||||
force => true,
|
||||
|
@ -136,12 +188,21 @@ define sunetdrive::app_type (
|
|||
content => template('sunetdrive/application/upgrade23-25.erb.sh'),
|
||||
mode => '0744',
|
||||
}
|
||||
file { '/usr/local/bin/remount_user_bucket_as_project.sh':
|
||||
ensure => present,
|
||||
force => true,
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
content => template('sunetdrive/application/remount_user_bucket_as_project.sh'),
|
||||
mode => '0744',
|
||||
}
|
||||
file { '/opt/rotate/conf.d/nextcloud.conf':
|
||||
ensure => file,
|
||||
force => true,
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
content => "#This file is managed by puppet\n#filename:retention days:maxsize mb\n/opt/nextcloud/nextcloud.log:180:256\n",
|
||||
content => "#This file is managed by puppet
|
||||
#filename:retention days:maxsize mb\n/opt/nextcloud/nextcloud.log:180:256\n/opt/nextcloud/audit.log:180:256\n",
|
||||
mode => '0644',
|
||||
}
|
||||
file { '/opt/rotate/conf.d/redis.conf':
|
||||
|
@ -190,7 +251,14 @@ define sunetdrive::app_type (
|
|||
force => true,
|
||||
owner => 'www-data',
|
||||
group => 'root',
|
||||
mode => '0644',
|
||||
mode => '0640',
|
||||
}
|
||||
file { '/opt/nextcloud/audit.log':
|
||||
ensure => file,
|
||||
force => true,
|
||||
owner => 'www-data',
|
||||
group => 'root',
|
||||
mode => '0640',
|
||||
}
|
||||
file { '/opt/nextcloud/rclone.conf':
|
||||
ensure => file,
|
||||
|
@ -287,23 +355,7 @@ define sunetdrive::app_type (
|
|||
mode => '0744',
|
||||
}
|
||||
}
|
||||
if $location =~ /^gss-test/ {
|
||||
file { '/opt/nextcloud/mappingfile.json':
|
||||
ensure => present,
|
||||
owner => 'www-data',
|
||||
group => 'root',
|
||||
content => template('sunetdrive/application/mappingfile-test.json.erb'),
|
||||
mode => '0644',
|
||||
}
|
||||
} elsif $location =~ /^gss/ {
|
||||
file { '/opt/nextcloud/mappingfile.json':
|
||||
ensure => present,
|
||||
owner => 'www-data',
|
||||
group => 'root',
|
||||
content => template('sunetdrive/application/mappingfile-prod.json.erb'),
|
||||
mode => '0644',
|
||||
}
|
||||
} elsif $location =~ /^kau/ {
|
||||
if $location =~ /^kau/ {
|
||||
file { '/mnt':
|
||||
ensure => directory,
|
||||
owner => 'www-data',
|
||||
|
@ -340,10 +392,17 @@ define sunetdrive::app_type (
|
|||
compose_filename => 'docker-compose.yml',
|
||||
description => 'Nextcloud application',
|
||||
}
|
||||
sunet::misc::ufw_allow { 'https':
|
||||
from => '0.0.0.0/0',
|
||||
port => 443,
|
||||
if $::facts['sunet_nftables_enabled'] == 'yes' {
|
||||
sunet::nftables::docker_expose { 'https':
|
||||
allow_clients => ['any'],
|
||||
port => 443,
|
||||
iif => 'ens3',
|
||||
}
|
||||
} else {
|
||||
sunet::misc::ufw_allow { 'https':
|
||||
from => '0.0.0.0/0',
|
||||
port => 443,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -38,8 +38,8 @@ define sunetdrive::cloudimage(
|
|||
resolver => ['130.242.80.14', '130.242.80.99'],
|
||||
search => $search,
|
||||
#
|
||||
repo => $::cosmos_repo_origin_url,
|
||||
tagpattern => $::cosmos_tag_pattern,
|
||||
repo => $facts['cosmos_repo_origin_url'],
|
||||
tagpattern => $facts['cosmos_tag_pattern'],
|
||||
#
|
||||
cpus => $cpus,
|
||||
memory => $memory,
|
||||
|
|
53
manifests/common.pp
Normal file
53
manifests/common.pp
Normal file
|
@ -0,0 +1,53 @@
|
|||
|
||||
# Baseline configuration shared by every sunetdrive host: core sunet
# classes, a firewall (nftables where enabled, UFW otherwise), and
# sysstat activity collection at a 2-minute interval.
class sunetdrive::common {
  include sunet::tools
  include sunet::motd
  include apt
  include apparmor
  include sunet::packages::jq

  # Firewall selection is driven by the sunet_nftables_enabled fact.
  if $::facts['sunet_nftables_enabled'] != 'yes' {
    warning('Enabling UFW')
    include ufw
  } else {
    # Kubernetes workers/controllers manage their own rules, so nftables
    # is installed but left disabled there.
    if $facts['networking']['hostname'] =~ /^kube[wc]/ {
      warning('Setting nftables to installed but disabled')
      ensure_resource ('class','sunet::nftables::init', { enabled => false })
    } else {
      warning('Enabling nftables')
      ensure_resource ('class','sunet::nftables::init', { })
    }
  }

  # Activity accounting: sysstat collects, needrestart flags stale services.
  package {'sysstat': ensure => 'latest'}
  package {'needrestart': ensure => installed}
  service {'sysstat': provider => 'systemd'}

  # Turn data collection on in the Debian/Ubuntu default file.
  file_line { 'enable_sa':
    ensure  => 'present',
    line    => 'ENABLED="true"',
    path    => '/etc/default/sysstat',
    match   => 'ENABLED="false"',
    require => Package['sysstat'],
  }
  # Rewrite the stock cron entry (every 10 minutes) to run every 2 minutes;
  # the comment line is updated to match.
  file_line { 'sa_cron_comment':
    ensure  => 'present',
    line    => '# Activity reports every 2 minutes everyday',
    path    => '/etc/cron.d/sysstat',
    match   => '^#\ Activity\ reports\ every\ 10\ minutes\ everyday',
    require => Package['sysstat'],
    notify  => Service['sysstat'],
  }
  file_line { 'sa_cron':
    ensure  => 'present',
    line    => '*/2 * * * * root command -v debian-sa1 > /dev/null && debian-sa1 1 1',
    path    => '/etc/cron.d/sysstat',
    match   => '^5-55/10',
    require => Package['sysstat'],
    notify  => Service['sysstat'],
  }
  # On Debian sysstat collection runs from a systemd timer, not cron;
  # make sure the timer is active.
  if $::facts['os']['distro']['id'] == 'Debian' {
    exec { 'sysstat_systemd_timer':
      command => 'systemctl enable --now sysstat-collect.timer',
      unless  => 'systemctl is-enabled --quiet sysstat-collect.timer',
    }
  }
}
|
|
@ -4,59 +4,52 @@ define sunetdrive::db_type(
|
|||
$bootstrap=undef,
|
||||
$location=undef,
|
||||
$override_config = undef,
|
||||
$override_compose = undef)
|
||||
$override_compose = undef,
|
||||
)
|
||||
{
|
||||
|
||||
# Config from group.yaml
|
||||
$environment = sunetdrive::get_environment()
|
||||
$mariadb_version = hiera("mariadb_version_${environment}")
|
||||
$is_multinode = (($override_config != undef) and ($override_compose != undef))
|
||||
if $is_multinode {
|
||||
$config = $override_config
|
||||
$mysql_root_password = $config['mysql_root_password']
|
||||
$mysql_user_password = $config['mysql_user_password']
|
||||
$backup_password = $config['backup_password']
|
||||
$mariadb_dir = $config['mariadb_dir']
|
||||
$mycnf_path = $config['mycnf_path']
|
||||
$server_id = '1000'
|
||||
} else {
|
||||
$config = hiera_hash($environment)
|
||||
$mysql_root_password = safe_hiera('mysql_root_password')
|
||||
$backup_password = safe_hiera('backup_password')
|
||||
$proxysql_password = safe_hiera('proxysql_password')
|
||||
$mysql_user_password = safe_hiera('mysql_user_password')
|
||||
$mariadb_dir = '/etc/mariadb'
|
||||
$mycnf_path = 'sunetdrive/mariadb/my.cnf.erb'
|
||||
$server_id = 1000 + Integer($facts['hostname'][-1])
|
||||
ensure_resource('file',"${mariadb_dir}", { ensure => directory, recurse => true } )
|
||||
$dirs = ['datadir', 'init', 'conf', 'backups', 'scripts' ]
|
||||
$dirs.each |$dir| {
|
||||
ensure_resource('file',"${mariadb_dir}/${dir}", { ensure => directory, recurse => true } )
|
||||
}
|
||||
$config = hiera_hash($environment)
|
||||
$mysql_root_password = safe_hiera('mysql_root_password')
|
||||
$backup_password = safe_hiera('backup_password')
|
||||
$proxysql_password = safe_hiera('proxysql_password')
|
||||
$mysql_user_password = safe_hiera('mysql_user_password')
|
||||
$roundcube_password = safe_hiera('roundcube_password')
|
||||
$mariadb_dir = '/etc/mariadb'
|
||||
$mycnf_path = 'sunetdrive/mariadb/my.cnf.erb'
|
||||
$server_id = 1000 + Integer($facts['networking']['hostname'][-1])
|
||||
ensure_resource('file',$mariadb_dir, { ensure => directory } )
|
||||
$dirs = ['datadir', 'init', 'conf', 'backups', 'scripts' ]
|
||||
$dirs.each |$dir| {
|
||||
ensure_resource('file',"${mariadb_dir}/${dir}", { ensure => directory, recurse => true } )
|
||||
}
|
||||
|
||||
$nextcloud_ip = $config['app']
|
||||
|
||||
unless $is_multinode {
|
||||
$db_ip = $config['db']
|
||||
$db_ipv6 = $config['db_v6']
|
||||
$backup_ip = $config['backup']
|
||||
$backup_ipv6 = $config['backup_v6']
|
||||
$ports = [3306, 4444, 4567, 4568]
|
||||
|
||||
sunet::misc::ufw_allow { 'mariadb_ports':
|
||||
from => $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6,
|
||||
port => $ports,
|
||||
}
|
||||
sunet::system_user {'mysql': username => 'mysql', group => 'mysql' }
|
||||
}
|
||||
|
||||
|
||||
if $location =~ /^lookup/ {
|
||||
$sql_files = ['02-backup_user.sql', '03-proxysql.sql', '05-lookup.sql']
|
||||
$db_ip = $config['db']
|
||||
$db_ipv6 = $config['db_v6']
|
||||
$backup_ip = $config['backup']
|
||||
$backup_ipv6 = $config['backup_v6']
|
||||
$ports = [3306, 4444, 4567, 4568]
|
||||
if $location =~ /^multinode/ {
|
||||
$from = $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6 + $config['kube'] + $config['kube_v6']
|
||||
} elsif $location == 'sunet-prod' {
|
||||
$from = $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6 + $config['imap'] + $config['imap_v6'] + $config['smtp'] + $config['smtp_v6'] + $config['webmail'] + $config['webmail_v6']
|
||||
} elsif $location == 'sunet-test' {
|
||||
$from = $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6 + $config['imap'] + $config['imap_v6'] + $config['smtp'] + $config['smtp_v6'] + $config['webmail'] + $config['webmail_v6'] + $config['calendar'] + $config['calendar_v6']
|
||||
} else {
|
||||
$sql_files = ['02-backup_user.sql', '03-proxysql.sql', '04-nextcloud.sql']
|
||||
$from = $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6
|
||||
}
|
||||
|
||||
sunet::misc::ufw_allow { 'mariadb_ports':
|
||||
from => $from,
|
||||
port => $ports,
|
||||
}
|
||||
sunet::system_user {'mysql': username => 'mysql', group => 'mysql' }
|
||||
|
||||
|
||||
$sql_files = ['02-backup_user.sql', '03-proxysql.sql', '04-nextcloud.sql', '05-roundcube.sql']
|
||||
$sql_files.each |$sql_file|{
|
||||
file { "${mariadb_dir}/init/${sql_file}":
|
||||
ensure => present,
|
||||
|
@ -74,6 +67,11 @@ define sunetdrive::db_type(
|
|||
content => template($mycnf_path),
|
||||
mode => '0744',
|
||||
}
|
||||
file { '/usr/local/bin/purge-binlogs':
|
||||
ensure => present,
|
||||
content => template('sunetdrive/mariadb/purge-binlogs.erb.sh'),
|
||||
mode => '0744',
|
||||
}
|
||||
file { "${mariadb_dir}/scripts/run_manual_backup_dump.sh":
|
||||
ensure => present,
|
||||
content => template('sunetdrive/mariadb/run_manual_backup_dump.erb.sh'),
|
||||
|
@ -84,39 +82,47 @@ define sunetdrive::db_type(
|
|||
content => template('sunetdrive/mariadb/rename-docker.sh'),
|
||||
mode => '0744',
|
||||
}
|
||||
if $is_multinode {
|
||||
$docker_compose = $override_compose
|
||||
} else {
|
||||
file { '/usr/local/bin/size-test':
|
||||
ensure => present,
|
||||
content => template('sunetdrive/mariadb/size-test.erb'),
|
||||
mode => '0744',
|
||||
}
|
||||
file { '/usr/local/bin/status-test':
|
||||
ensure => present,
|
||||
content => template('sunetdrive/mariadb/status-test.erb'),
|
||||
mode => '0744',
|
||||
}
|
||||
file { "/etc/sudoers.d/99-size-test":
|
||||
ensure => file,
|
||||
content => "script ALL=(root) NOPASSWD: /usr/local/bin/size-test\n",
|
||||
mode => '0440',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
}
|
||||
file { "/etc/sudoers.d/99-status-test":
|
||||
ensure => file,
|
||||
content => "script ALL=(root) NOPASSWD: /usr/local/bin/status-test\n",
|
||||
mode => '0440',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
}
|
||||
$docker_compose = sunet::docker_compose { 'drive_mariadb_docker_compose':
|
||||
content => template('sunetdrive/mariadb/docker-compose_mariadb.yml.erb'),
|
||||
service_name => 'mariadb',
|
||||
compose_dir => '/opt/',
|
||||
compose_filename => 'docker-compose.yml',
|
||||
description => 'Mariadb server',
|
||||
}
|
||||
sunet::scriptherder::cronjob { 'purge_binlogs':
|
||||
cmd => '/usr/local/bin/purge-binlogs',
|
||||
hour => '6',
|
||||
minute => '0',
|
||||
ok_criteria => ['exit_status=0','max_age=2d'],
|
||||
warn_criteria => ['exit_status=1','max_age=3d'],
|
||||
}
|
||||
file { '/usr/local/bin/mysql':
|
||||
ensure => present,
|
||||
content => template('sunetdrive/mariadb/mysql.erb.sh'),
|
||||
mode => '0744',
|
||||
}
|
||||
file { '/usr/local/bin/size-test':
|
||||
ensure => present,
|
||||
content => template('sunetdrive/mariadb/size-test.erb'),
|
||||
mode => '0744',
|
||||
}
|
||||
file { '/usr/local/bin/status-test':
|
||||
ensure => present,
|
||||
content => template('sunetdrive/mariadb/status-test.erb'),
|
||||
mode => '0744',
|
||||
}
|
||||
file { '/etc/sudoers.d/99-size-test':
|
||||
ensure => file,
|
||||
content => "script ALL=(root) NOPASSWD: /usr/local/bin/size-test\n",
|
||||
mode => '0440',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
}
|
||||
file { '/etc/sudoers.d/99-status-test':
|
||||
ensure => file,
|
||||
content => "script ALL=(root) NOPASSWD: /usr/local/bin/status-test\n",
|
||||
mode => '0440',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
}
|
||||
$docker_compose = sunet::docker_compose { 'drive_mariadb_docker_compose':
|
||||
content => template('sunetdrive/mariadb/docker-compose_mariadb.yml.erb'),
|
||||
service_name => 'mariadb',
|
||||
compose_dir => '/opt/',
|
||||
compose_filename => 'docker-compose.yml',
|
||||
description => 'Mariadb server',
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,4 +1,3 @@
|
|||
include apt
|
||||
# Wrapper for sunet::dockerhost to do thiss specific things
|
||||
class sunetdrive::dockerhost(
|
||||
String $version = safe_hiera('docker_version'),
|
||||
|
@ -8,6 +7,8 @@ class sunetdrive::dockerhost(
|
|||
String $docker_args = '',
|
||||
Optional[String] $docker_dns = undef,
|
||||
String $storage_driver = 'aufs',
|
||||
Boolean $write_daemon_config = false,
|
||||
Boolean $enable_ipv6 = false,
|
||||
) {
|
||||
if $version == 'NOT_SET_IN_HIERA' {
|
||||
fail('Docker version not set in Hiera')
|
||||
|
@ -26,6 +27,8 @@ class sunetdrive::dockerhost(
|
|||
storage_driver => $storage_driver,
|
||||
docker_network => true, # let docker choose a network for the 'docker' bridge
|
||||
compose_version => $compose_version,
|
||||
write_daemon_config => $write_daemon_config,
|
||||
enable_ipv6 => $enable_ipv6,
|
||||
}
|
||||
file { '/etc/apt/preferences.d/containerd.io.pref':
|
||||
ensure => absent,
|
||||
|
|
|
@ -4,10 +4,8 @@ class sunetdrive::infra_script (
|
|||
$location = undef
|
||||
) {
|
||||
$environment = sunetdrive::get_environment()
|
||||
$customer = "common"
|
||||
$customer = 'common'
|
||||
$config = hiera_hash($environment)
|
||||
$gss_backup_server = $config['gss_backup_server']
|
||||
$lookup_backup_server = $config['lookup_backup_server']
|
||||
$ssh_config = "Host *.sunet.se
|
||||
User script
|
||||
IdentityFile /root/.ssh/id_script"
|
||||
|
@ -59,14 +57,16 @@ class sunetdrive::infra_script (
|
|||
mode => '0700',
|
||||
}
|
||||
sunet::scriptherder::cronjob { 'backupgssdb':
|
||||
cmd => "/root/tasks/backupdb.sh ${gss_backup_server}",
|
||||
ensure => 'absent',
|
||||
cmd => 'true',
|
||||
hour => '2',
|
||||
minute => '0',
|
||||
ok_criteria => ['exit_status=0','max_age=2d'],
|
||||
warn_criteria => ['exit_status=1','max_age=3d'],
|
||||
}
|
||||
sunet::scriptherder::cronjob { 'backuplookupdb':
|
||||
cmd => "/root/tasks/backupdb.sh ${lookup_backup_server}",
|
||||
ensure => 'absent',
|
||||
cmd => 'true',
|
||||
hour => '2',
|
||||
minute => '0',
|
||||
ok_criteria => ['exit_status=0','max_age=2d'],
|
||||
|
|
38
manifests/jupyter_site.pp
Normal file
38
manifests/jupyter_site.pp
Normal file
|
@ -0,0 +1,38 @@
|
|||
#Class for SUNET-Drive-portal-Server
|
||||
class sunetdrive::jupyter_site (
|
||||
String $site_version = '0.0.1-1'
|
||||
) {
|
||||
|
||||
$domain = 'jupyter.sunet.dev'
|
||||
sunet::docker_compose { 'portal_compose':
|
||||
content => template('sunetdrive/jupyter_site/docker-compose.erb.yaml'),
|
||||
service_name => 'jupyter_site',
|
||||
compose_dir => '/opt/',
|
||||
compose_filename => 'docker-compose.yml',
|
||||
description => 'Web server',
|
||||
}
|
||||
|
||||
exec { 'workaround_docker_compose_dir':
|
||||
command => 'mkdir -p /opt/jupyter_site/nginx',
|
||||
unless => 'test -d /opt/jupyter_site/nginx',
|
||||
}
|
||||
|
||||
$nginx_dirs = ['acme', 'certs', 'conf', 'dhparam', 'html', 'vhost']
|
||||
$nginx_dirs.each | $dir| {
|
||||
file { "/opt/jupyter_site/nginx/${dir}":
|
||||
ensure => directory,
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0751',
|
||||
}
|
||||
}
|
||||
sunet::misc::ufw_allow { 'https':
|
||||
from => '0.0.0.0/0',
|
||||
port => 443,
|
||||
}
|
||||
# For acme and redirect
|
||||
sunet::misc::ufw_allow { 'http':
|
||||
from => '0.0.0.0/0',
|
||||
port => 80,
|
||||
}
|
||||
}
|
|
@ -1,42 +0,0 @@
|
|||
#Class for SUNET-Drive-Lookup-Server
|
||||
class sunetdrive::lookup (
|
||||
$bootstrap = undef,
|
||||
$location = undef
|
||||
) {
|
||||
|
||||
$environment = sunetdrive::get_environment()
|
||||
|
||||
|
||||
# Firewall settings
|
||||
$nextcloud_ip = hiera_array("${location}_app", [])
|
||||
$tug_office = hiera_array('tug_office')
|
||||
|
||||
$dbhost = 'proxysql_proxysql_1'
|
||||
$gss_jwt_key = safe_hiera('gss_jwt_key')
|
||||
$mysql_user_password = safe_hiera('mysql_user_password')
|
||||
$lookup_version = hiera("lookup_version_${environment}")
|
||||
|
||||
#Create users
|
||||
user { 'www-data': ensure => present, system => true }
|
||||
|
||||
file { '/opt/lookup/config.php':
|
||||
ensure => present,
|
||||
owner => 'www-data',
|
||||
group => 'root',
|
||||
content => template('sunetdrive/lookup/config.php.erb'),
|
||||
mode => '0644',
|
||||
}
|
||||
|
||||
sunet::docker_compose { 'drive_lookup_docker_compose':
|
||||
content => template('sunetdrive/lookup/docker-compose_lookup.yml.erb'),
|
||||
service_name => 'lookup',
|
||||
compose_dir => '/opt/',
|
||||
compose_filename => 'docker-compose.yml',
|
||||
description => 'Lookup server',
|
||||
}
|
||||
|
||||
sunet::misc::ufw_allow { 'https':
|
||||
from => '0.0.0.0/0',
|
||||
port => 443,
|
||||
}
|
||||
}
|
|
@ -4,10 +4,10 @@ class sunetdrive::mariadb (
|
|||
$location = undef,
|
||||
$tag_mariadb = undef,
|
||||
$override_config = undef,
|
||||
$override_compose = undef
|
||||
$override_compose = undef,
|
||||
) {
|
||||
|
||||
$quorum_id = $::fqdn
|
||||
$quorum_id = $facts['networking']['fqdn']
|
||||
$quorum_password = safe_hiera('quorum_password')
|
||||
$db = sunetdrive::db_type { 'base_db':
|
||||
bootstrap => $bootstrap,
|
||||
|
|
|
@ -1,11 +1,13 @@
|
|||
# This is a asyncronous replica of the Maria DB Cluster for SUNET Drive
|
||||
class sunetdrive::mariadb_backup($tag_mariadb=undef, $location=undef) {
|
||||
include sunet::packages::netcat_openbsd
|
||||
$dirs = [ 'datadir', 'init', 'conf', 'backups' ]
|
||||
$dirs.each | $dir | {
|
||||
ensure_resource('file',"/opt/mariadb_backup/${dir}", { ensure => directory, recurse => true } )
|
||||
}
|
||||
# Config from group.yaml
|
||||
$environment = sunetdrive::get_environment()
|
||||
$mariadb_version = hiera("mariadb_version_${environment}")
|
||||
$config = hiera_hash($environment)
|
||||
$first_db = $config['first_db']
|
||||
|
||||
|
|
|
@ -4,11 +4,16 @@ class sunetdrive::multinode (
|
|||
$location = undef
|
||||
)
|
||||
{
|
||||
$myname = $facts['hostname']
|
||||
include sunet::packages::yq
|
||||
$myname = $facts['networking']['hostname']
|
||||
$is_multinode = true;
|
||||
$environment = sunetdrive::get_environment()
|
||||
$lb_servers = hiera_hash($environment)['lb_servers']
|
||||
$document_servers = hiera_hash($environment)['document_servers']
|
||||
$nextcloud_ip = hiera_hash($environment)['app']
|
||||
$db_ip = hiera_hash($environment)['db']
|
||||
$admin_password = hiera('admin_password')
|
||||
$cluster_admin_password = hiera('cluster_admin_password')
|
||||
|
||||
$twofactor_enforced_groups = []
|
||||
$twofactor_enforced_excluded_groups = []
|
||||
|
@ -22,11 +27,70 @@ class sunetdrive::multinode (
|
|||
nil
|
||||
}
|
||||
}
|
||||
$php_memory_limit_mb = 512
|
||||
$nodenumber = $::fqdn[9,1]
|
||||
$customers = $tempcustomers - nil
|
||||
$passwords = $allnames.map | $index, $customer | {
|
||||
hiera("${customer}_mysql_user_password")
|
||||
}
|
||||
$transaction_persistent = 1
|
||||
$monitor_password = hiera('proxysql_password')
|
||||
user { 'www-data': ensure => present, system => true }
|
||||
sunet::system_user {'mysql': username => 'mysql', group => 'mysql' }
|
||||
ensure_resource('file', '/opt/nextcloud' , { ensure => directory, recurse => true } )
|
||||
file { '/usr/local/bin/get_containers':
|
||||
ensure => present,
|
||||
force => true,
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
content => template('sunetdrive/application/get_containers'),
|
||||
mode => '0744',
|
||||
}
|
||||
file { '/usr/lib/nagios/plugins/check_nextcloud_mounts.py':
|
||||
ensure => present,
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
content => template('sunetdrive/application/check_nextcloud_mounts.py'),
|
||||
mode => '0744',
|
||||
}
|
||||
sunet::sudoer {'nagios_run_nextcloud_mounts_command':
|
||||
user_name => 'nagios',
|
||||
collection => 'nrpe_nextcloud_mounts_check',
|
||||
command_line => '/usr/lib/nagios/plugins/check_nextcloud_mounts.py'
|
||||
}
|
||||
sunet::nagios::nrpe_command {'check_nextcloud_mounts':
|
||||
command_line => '/usr/bin/sudo /usr/lib/nagios/plugins/check_nextcloud_mounts.py'
|
||||
}
|
||||
file { '/usr/local/bin/scan_external_mounts':
|
||||
ensure => present,
|
||||
force => true,
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
content => template('sunetdrive/application/scan_external_mounts.sh'),
|
||||
mode => '0744',
|
||||
}
|
||||
sunet::scriptherder::cronjob { 'scriptherder_scan_external_mounts':
|
||||
cmd => '/usr/local/bin/scan_external_mounts',
|
||||
hour => '1',
|
||||
minute => '20',
|
||||
ok_criteria => ['exit_status=0','max_age=2d'],
|
||||
warn_criteria => ['exit_status=1','max_age=3d'],
|
||||
}
|
||||
file { '/usr/local/bin/nocc':
|
||||
ensure => present,
|
||||
force => true,
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
content => template('sunetdrive/application/nocc.erb'),
|
||||
mode => '0740',
|
||||
}
|
||||
file { '/etc/sudoers.d/99-nocc':
|
||||
ensure => file,
|
||||
content => "script ALL=(root) NOPASSWD: /usr/local/bin/nocc\n",
|
||||
mode => '0440',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
}
|
||||
file { '/usr/local/bin/occ':
|
||||
ensure => present,
|
||||
force => true,
|
||||
|
@ -43,19 +107,30 @@ class sunetdrive::multinode (
|
|||
group => 'root',
|
||||
}
|
||||
file { '/usr/local/bin/upgrade23-25.sh':
|
||||
ensure => absent,
|
||||
}
|
||||
file { '/usr/local/bin/get_paying_customers':
|
||||
ensure => present,
|
||||
force => true,
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
content => template('sunetdrive/multinode/upgrade23-25.erb.sh'),
|
||||
content => template('sunetdrive/multinode/get_paying_customers.erb.sh'),
|
||||
mode => '0744',
|
||||
}
|
||||
file { '/usr/local/bin/get_containers':
|
||||
file { '/usr/local/bin/get_non_paying_customers':
|
||||
ensure => present,
|
||||
force => true,
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
content => template('sunetdrive/multinode/get_containers'),
|
||||
content => template('sunetdrive/multinode/get_non_paying_customers.erb.sh'),
|
||||
mode => '0744',
|
||||
}
|
||||
file { '/usr/local/bin/restart_and_prune':
|
||||
ensure => present,
|
||||
force => true,
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
content => template('sunetdrive/multinode/restart_and_prune.erb.sh'),
|
||||
mode => '0744',
|
||||
}
|
||||
file { '/usr/local/bin/add_admin_user':
|
||||
|
@ -74,12 +149,29 @@ class sunetdrive::multinode (
|
|||
content => template('sunetdrive/multinode/prune.erb.sh'),
|
||||
mode => '0744',
|
||||
}
|
||||
cron { 'multinode_prune':
|
||||
command => '/opt/nextcloud/prune.sh',
|
||||
require => File['/opt/nextcloud/prune.sh'],
|
||||
user => 'root',
|
||||
minute => '25',
|
||||
hour => '3',
|
||||
file { '/opt/proxysql/proxysql.cnf':
|
||||
ensure => file,
|
||||
force => true,
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
content => template('sunetdrive/multinode/proxysql.cnf.erb'),
|
||||
mode => '0644',
|
||||
}
|
||||
sunet::scriptherder::cronjob { 'prune_non_paying':
|
||||
cmd => '/usr/local/bin/restart_and_prune',
|
||||
weekday => '1-6',
|
||||
hour => '2',
|
||||
minute => '45',
|
||||
ok_criteria => ['exit_status=0','max_age=3d'],
|
||||
warn_criteria => ['exit_status=1','max_age=5d'],
|
||||
}
|
||||
sunet::scriptherder::cronjob { 'prune_all_paying':
|
||||
cmd => '/usr/local/bin/restart_and_prune include_paying',
|
||||
weekday => '0',
|
||||
hour => '2',
|
||||
minute => '45',
|
||||
ok_criteria => ['exit_status=0','max_age=7d'],
|
||||
warn_criteria => ['exit_status=1','max_age=9d'],
|
||||
}
|
||||
file { '/opt/nextcloud/apache.php.ini':
|
||||
ensure => file,
|
||||
|
@ -162,19 +254,36 @@ MACAddressPolicy=none'
|
|||
hour => '0',
|
||||
weekday => '0',
|
||||
}
|
||||
cron { 'multinode_cron':
|
||||
command => '/opt/nextcloud/cron.sh',
|
||||
require => File['/opt/nextcloud/cron.sh'],
|
||||
user => 'root',
|
||||
minute => '*/10',
|
||||
}
|
||||
# if $nodenumber == '2' {
|
||||
# cron { 'add_back_bucket_for_karin_nordgren':
|
||||
# command => '(/usr/local/bin/occ nextcloud-kmh-app-1 files_external:list karin_nordgren@kmh.se && /home/script/bin/create_bucket.sh nextcloud-kmh-app-1 karin_nordgren@kmh.se karin-nordgren-drive-sunet-se) || /bin/true',
|
||||
# user => 'root',
|
||||
# minute => '*/10',
|
||||
# }
|
||||
# }
|
||||
$customers.each | $index, $customer | {
|
||||
$customer_config_full = hiera_hash($customer)
|
||||
$customer_config = $customer_config_full[$environment]
|
||||
cron { "multinode_cron_${customer}":
|
||||
command => "/opt/nextcloud/cron.sh nextcloud-${customer}-app-1",
|
||||
require => File['/opt/nextcloud/cron.sh'],
|
||||
user => 'root',
|
||||
minute => '*/10',
|
||||
}
|
||||
if $environment == 'prod' {
|
||||
$s3_bucket = "primary-${customer}-drive.sunet.se"
|
||||
if 'primary_bucket' in $customer_config.keys() {
|
||||
$s3_bucket = $customer_config['primary_bucket']
|
||||
} else {
|
||||
$s3_bucket = "primary-${customer}-drive.sunet.se"
|
||||
}
|
||||
$site_name = "${customer}.drive.sunet.se"
|
||||
$trusted_proxies = ['lb1.drive.sunet.se','lb2.drive.sunet.se', 'lb3.drive.sunet.se', 'lb4.drive.sunet.se']
|
||||
} else {
|
||||
$s3_bucket = "primary-${customer}-${environment}.sunet.se"
|
||||
if 'primary_bucket' in $customer_config.keys() {
|
||||
$s3_bucket = $customer_config['primary_bucket']
|
||||
} else {
|
||||
$s3_bucket = "primary-${customer}-${environment}.sunet.se"
|
||||
}
|
||||
$site_name = "${customer}.drive.${environment}.sunet.se"
|
||||
$trusted_proxies = ["lb1.drive.${environment}.sunet.se","lb2.drive.${environment}.sunet.se",
|
||||
"lb3.drive.${environment}.sunet.se","lb4.drive.${environment}.sunet.se"]
|
||||
|
@ -183,33 +292,54 @@ MACAddressPolicy=none'
|
|||
$apache_error_path = "/opt/multinode/${customer}/404.html"
|
||||
$config_php_path = "/opt/multinode/${customer}/config.php"
|
||||
$cron_log_path ="/opt/multinode/${customer}/cron.log"
|
||||
$customer_config_full = hiera_hash($customer)
|
||||
$customer_config = $customer_config_full[$environment]
|
||||
$dbhost = "mariadb${customer}_db_1"
|
||||
|
||||
|
||||
$dbhost = 'proxysql_proxysql_1'
|
||||
$dbname = "nextcloud_${customer}"
|
||||
$dbuser = "nextcloud_${customer}"
|
||||
|
||||
$gs_enabled = hiera('gs_enabled')
|
||||
$gs_federation = hiera('gs_federation')
|
||||
$gss_master_admin = hiera_array('gss_master_admin')
|
||||
$gss_master_url = hiera("gss_master_url_${environment}")
|
||||
$https_port = hiera_hash('multinode_mapping')[$customer]['port']
|
||||
$lookup_server = hiera("lookup_server_${environment}")
|
||||
$mail_domain = hiera("mail_domain_${environment}")
|
||||
$mail_from_address = hiera("mail_from_address_${environment}")
|
||||
$mail_smtphost = hiera("mail_smtphost_${environment}")
|
||||
$nextcloud_log_path ="/opt/multinode/${customer}/nextcloud.log"
|
||||
$nextcloud_version = hiera("nextcloud_version_${environment}")
|
||||
$audit_log_path ="/opt/multinode/${customer}/audit.log"
|
||||
if $customer_config['nextcloud_version'] {
|
||||
$nextcloud_version = $customer_config['nextcloud_version']
|
||||
} else {
|
||||
$nextcloud_version = hiera("nextcloud_version_${environment}")
|
||||
}
|
||||
$nextcloud_version_string = split($nextcloud_version, '[-]')[0]
|
||||
$rclone_conf_path = "/opt/multinode/${customer}/rclone.conf"
|
||||
$redis_conf_dir = "/opt/multinode/${customer}/server"
|
||||
$redis_conf_path = "${redis_conf_dir}/redis.conf"
|
||||
$redis_host= "redis${customer}_redis-server_1"
|
||||
if $::facts['sunet_nftables_enabled'] == 'yes' {
|
||||
$redis_host= "redis-${customer}-redis-server-1"
|
||||
} else {
|
||||
$redis_host= "redis-${customer}_redis-server_1"
|
||||
}
|
||||
|
||||
$s3_host = $customer_config['s3_host']
|
||||
$s3_usepath = hiera('s3_usepath')
|
||||
$smtpuser = hiera("smtp_user_${environment}")
|
||||
$trusted_domains = [$site_name, $facts['fqdn'], 'localhost']
|
||||
$trusted_domains = [$site_name, $facts['networking']['fqdn'], 'localhost']
|
||||
$tug_office = hiera_array('tug_office')
|
||||
if $customer_config['twofactor_enforced_groups'] {
|
||||
$twofactor_enforced_groups = $customer_config['twofactor_enforced_groups']
|
||||
}
|
||||
if $customer_config['twofactor_enforced_excluded_groups'] {
|
||||
$twofactor_enforced_excluded_groups = $customer_config['twofactor_enforced_excluded_groups']
|
||||
}
|
||||
if $customer_config['full_backup_retention'] {
|
||||
$full_backup_retention = $customer_config['full_backup_retention']
|
||||
} else {
|
||||
$full_backup_retention = hiera('full_backup_retention')
|
||||
}
|
||||
# Calculate some values
|
||||
$expiration_months = max(12, $full_backup_retention)
|
||||
$expiration_days_min = $expiration_months * 31
|
||||
$expiration_days_max = $expiration_months * 31 + 93
|
||||
|
||||
# Secrets from local.eyaml
|
||||
$admin_password = safe_hiera("${customer}_admin_password")
|
||||
|
@ -222,13 +352,14 @@ MACAddressPolicy=none'
|
|||
$secret = safe_hiera("${customer}_secret")
|
||||
$passwordsalt= safe_hiera("${customer}_passwordsalt")
|
||||
$redis_host_password = safe_hiera("${customer}_redis_host_password")
|
||||
$gss_jwt_key = safe_hiera('gss_jwt_key')
|
||||
$smtppassword = safe_hiera('smtp_password')
|
||||
|
||||
$extra_config = {
|
||||
admin_password => $admin_password,
|
||||
backup_password => $backup_password,
|
||||
dbhost => $dbhost,
|
||||
dbname => $dbname,
|
||||
dbuser => $dbuser,
|
||||
drive_email_template_plain_text_left => hiera($environment)['drive_email_template_plain_text_left'],
|
||||
drive_email_template_text_left => hiera($environment)['drive_email_template_text_left'],
|
||||
drive_email_template_url_left => hiera($environment)['drive_email_template_url_left'],
|
||||
|
@ -241,18 +372,6 @@ MACAddressPolicy=none'
|
|||
}
|
||||
$config = deep_merge($customer_config, $extra_config)
|
||||
ensure_resource('file', "/opt/multinode/${customer}" , { ensure => directory, recurse => true } )
|
||||
$dirs = ['datadir', 'init', 'conf', 'scripts' ]
|
||||
$dirs.each |$dir| {
|
||||
ensure_resource('file',"${config['mariadb_dir']}/${dir}", { ensure => directory, recurse => true } )
|
||||
}
|
||||
|
||||
ensure_resource('file',"${config['mariadb_dir']}/backups", {
|
||||
ensure => directory,
|
||||
owner => 'root',
|
||||
group => 'script',
|
||||
mode => '0750',
|
||||
recurse => true
|
||||
} )
|
||||
# Use the other sunetdrive classes with overridden config
|
||||
$db_ip = ['127.0.0.1']
|
||||
$app_compose = sunet::docker_compose { "drive_${customer}_app_docker_compose":
|
||||
|
@ -275,33 +394,12 @@ MACAddressPolicy=none'
|
|||
description => "Redis cache server for ${customer}",
|
||||
require => File[$redis_conf_path],
|
||||
}
|
||||
$mariadb_compose = sunet::docker_compose { "drive_mariadb_${customer}_compose":
|
||||
content => template('sunetdrive/multinode/docker-compose_mariadb.yml.erb'),
|
||||
service_name => "mariadb-${customer}",
|
||||
compose_dir => "/opt/multinode/${customer}",
|
||||
compose_filename => 'docker-compose.yml',
|
||||
description => "Mariadb server for ${customer}",
|
||||
owner => 'root',
|
||||
group => 'script',
|
||||
mode => '0750',
|
||||
}
|
||||
|
||||
file { "/opt/multinode/${customer}/mariadb-${customer}/do_backup.sh":
|
||||
ensure => present,
|
||||
content => template('sunetdrive/mariadb_backup/do_backup.erb.sh'),
|
||||
mode => '0744',
|
||||
}
|
||||
sunetdrive::app_type { "app_${customer}":
|
||||
location => $location,
|
||||
override_config => $config,
|
||||
override_compose => $app_compose,
|
||||
}
|
||||
|
||||
sunetdrive::db_type { "db_${customer}":
|
||||
location => $location,
|
||||
override_config => $config,
|
||||
override_compose => $mariadb_compose,
|
||||
}
|
||||
file { $redis_conf_dir:
|
||||
ensure => directory,
|
||||
recurse => true,
|
||||
|
@ -340,6 +438,13 @@ MACAddressPolicy=none'
|
|||
group => 'root',
|
||||
mode => '0644',
|
||||
}
|
||||
file { $audit_log_path:
|
||||
ensure => file,
|
||||
force => true,
|
||||
owner => 'www-data',
|
||||
group => 'root',
|
||||
mode => '0644',
|
||||
}
|
||||
file { $rclone_conf_path:
|
||||
ensure => present,
|
||||
owner => 'www-data',
|
||||
|
@ -355,10 +460,18 @@ MACAddressPolicy=none'
|
|||
content => template('sunetdrive/multinode/complete_reinstall.erb.sh'),
|
||||
mode => '0744',
|
||||
}
|
||||
# Open ports
|
||||
sunet::misc::ufw_allow { "https_port_${customer}":
|
||||
from => '0.0.0.0',
|
||||
port => $https_port,
|
||||
if $::facts['sunet_nftables_enabled'] == 'yes' {
|
||||
$name = "https_port_${customer}"
|
||||
ensure_resource('sunet::nftables::ufw_allow_compat', $name, {
|
||||
from => ['0.0.0.0/0', '::/0'],
|
||||
port => $https_port,
|
||||
})
|
||||
} else {
|
||||
# Open ports
|
||||
sunet::misc::ufw_allow { "https_port_${customer}":
|
||||
from => '0.0.0.0',
|
||||
port => $https_port,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
80
manifests/multinode_db.pp
Normal file
80
manifests/multinode_db.pp
Normal file
|
@ -0,0 +1,80 @@
|
|||
class sunetdrive::multinode_db(){
|
||||
$is_multinode = true;
|
||||
$environment = sunetdrive::get_environment()
|
||||
$allcustomers = hiera_hash('multinode_mapping')
|
||||
$customers = $allcustomers.keys
|
||||
|
||||
$customers.each |$customer| {
|
||||
file { "/etc/mariadb/backups/${customer}":
|
||||
ensure => directory,
|
||||
}
|
||||
file { "/etc/mariadb/init/04-nextcloud.${customer}.sql":
|
||||
ensure => present,
|
||||
content => "CREATE SCHEMA nextcloud_${customer};\nCREATE USER 'nextcloud_${customer}'@'%' IDENTIFIED BY '${hiera("${customer}_mysql_user_password")}';\nGRANT ALL PRIVILEGES ON nextcloud_${customer}.* TO 'nextcloud_${customer}'@'%' IDENTIFIED BY '${hiera("${customer}_mysql_user_password")}';\n",
|
||||
mode => '0744',
|
||||
}
|
||||
}
|
||||
if $facts["networking"]["fqdn"] =~ /^multinode-db1\.drive\.(test\.){1}sunet\.se$/ {
|
||||
$statistics_secret = safe_hiera('statistics_secret')
|
||||
notify { 'hostmessage':
|
||||
message => 'We are on multinode-db1. Set up statistics environment.',
|
||||
}
|
||||
$custdata=$customers.reduce({}) |$memo, $value| {
|
||||
$memo + {$value => lookup($value)}
|
||||
}
|
||||
$rclone_url = 'https://downloads.rclone.org/rclone-current-linux-amd64.deb'
|
||||
$local_path = '/tmp/rclone-current-linux-amd64.deb'
|
||||
exec { 'rclone_deb':
|
||||
command => "/usr/bin/wget -q ${rclone_url} -O ${local_path}",
|
||||
creates => $local_path,
|
||||
}
|
||||
package { 'rclone':
|
||||
ensure => installed,
|
||||
provider => dpkg,
|
||||
source => $local_path,
|
||||
require => Exec['rclone_deb'],
|
||||
}
|
||||
file { '/root/.rclone.conf':
|
||||
ensure => file,
|
||||
content => template('sunetdrive/mariadb_backup/rclone.conf.erb'),
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0600',
|
||||
}
|
||||
file { '/root/tasks/':
|
||||
ensure => directory,
|
||||
}
|
||||
-> file { '/root/tasks/listusersbydep.sh':
|
||||
ensure => file,
|
||||
content => template('sunetdrive/mariadb/listusersdep.sh.erb'),
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0700',
|
||||
}
|
||||
file { '/root/tasks/genusersondepartmentlists.sh':
|
||||
ensure => file,
|
||||
content => template('sunetdrive/mariadb/genuserdeplists.sh.erb'),
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0700',
|
||||
}
|
||||
file {'/opt/mariadb/statistics/':
|
||||
ensure => directory,
|
||||
}
|
||||
-> file {'/opt/mariadb/statistics/custdata.json':
|
||||
ensure => file,
|
||||
content => template('sunetdrive/mariadb/custconfig.json.erb'),
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0600',
|
||||
}
|
||||
sunet::scriptherder::cronjob { 'genuserdeplists':
|
||||
cmd => '/root/tasks/genusersondepartmentlists.sh',
|
||||
hour => '2',
|
||||
minute => '5',
|
||||
ok_criteria => ['exit_status=0','max_age=30h'],
|
||||
warn_criteria => ['exit_status=1', 'max_age=60h'],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
79
manifests/nrpe.pp
Normal file
79
manifests/nrpe.pp
Normal file
|
@ -0,0 +1,79 @@
|
|||
|
||||
# NRPE class: configures the Nagios NRPE agent on SUNET Drive hosts.
# Wires up the shared sunet::nagios base class, installs extra plugin
# packages, and declares the NRPE command and sudo definitions needed
# for entropy/NTP/scriptherder checks plus the database-related checks
# (Galera, ProxySQL, async replication, backups).
class sunetdrive::nrpe(
  $loadw  = '15,10,5',   # load average warning thresholds (1,5,15 min)
  $loadc  = '30,25,20',  # load average critical thresholds (1,5,15 min)
  $procsw = 150,         # process count warning threshold
  $procsc = 200,         # process count critical threshold
) {

  require apt

  # Base Nagios/NRPE setup; thresholds are passed straight through.
  class { 'sunet::nagios':
    command_timeout => 600,
    loadw           => $loadw,
    loadc           => $loadc,
    procsw          => $procsw,
    procsc          => $procsc,
  }

  package { 'nagios-plugins-contrib': ensure => latest }

  # Memory thresholds differ by OS generation.
  # NOTE(review): these class variables are not referenced below;
  # presumably consumed via class scope by sunet::nagios::nrpe_check_memory
  # — TODO confirm.
  if ($facts['os']['name'] == 'Ubuntu' and versioncmp($facts['os']['release']['full'], '22.04') >= 0 ){
    $mem_w = '90'
    $mem_c = '95'
  } else {
    $mem_w = '10'
    $mem_c = '5'
  }

  # Declare each check via ensure_resource so duplicate declarations
  # elsewhere do not cause catalog compilation failures.
  $checks = ['nrpe_check_memory']
  $checks.each |$check| {
    ensure_resource("sunet::nagios::${check}", "nagios-nrpe-${check}")
  }

  # Generic host health checks.
  sunet::nagios::nrpe_command {'check_entropy':
    command_line => '/usr/lib/nagios/plugins/check_entropy -w 256'
  }
  sunet::nagios::nrpe_command {'check_ntp_time':
    command_line => '/usr/lib/nagios/plugins/check_ntp_time -H ntp.se'
  }
  sunet::nagios::nrpe_command {'check_scriptherder':
    command_line => '/usr/local/bin/scriptherder --mode check'
  }

  # Allow the nagios user to run needrestart without a password.
  sunet::sudoer {'nagios_run_needrestart_command':
    user_name    => 'nagios',
    collection   => 'nrpe_needrestart_check',
    command_line => '/usr/sbin/needrestart -p -l'
  }

  # Galera cluster health: sudoer grant plus the NRPE command that
  # invokes the plugin through sudo.
  # NOTE(review): the trailing '-0' flag looks unusual — confirm it is
  # the intended option for check_galera_cluster.
  sunet::sudoer {'nagios_run_galera_command':
    user_name    => 'nagios',
    collection   => 'nrpe_galera_check',
    command_line => '/usr/lib/nagios/plugins/check_galera_cluster -w 2 -c 1 -0'
  }
  sunet::nagios::nrpe_command {'check_galera_cluster':
    command_line => '/usr/bin/sudo /usr/lib/nagios/plugins/check_galera_cluster -w 2 -c 1 -0'
  }

  # ProxySQL checks (two plugins share one sudoers collection).
  sunet::sudoer {'nagios_run_proxysql_command':
    user_name    => 'nagios',
    collection   => 'nrpe_proxysql_check',
    command_line => '/usr/lib/nagios/plugins/check_proxysql_server, /usr/lib/nagios/plugins/check_mysql_server_status'
  }
  sunet::nagios::nrpe_command {'check_proxysql_server':
    command_line => '/usr/bin/sudo /usr/lib/nagios/plugins/check_proxysql_server'
  }

  # Asynchronous replication check.
  sunet::sudoer {'nagios_run_replication_command':
    user_name    => 'nagios',
    collection   => 'nrpe_replication_check',
    command_line => '/usr/local/bin/check_replication'
  }
  sunet::nagios::nrpe_command {'check_async_replication':
    command_line => '/usr/bin/sudo /usr/local/bin/check_replication'
  }

  # Backup freshness check.
  sunet::sudoer {'nagios_run_backup_command':
    user_name    => 'nagios',
    collection   => 'nrpe_backup_check',
    command_line => '/usr/local/bin/check_backups'
  }
  sunet::nagios::nrpe_command {'check_backups':
    command_line => '/usr/bin/sudo /usr/local/bin/check_backups'
  }

  # MySQL server status (sudo grant declared in the proxysql collection above).
  sunet::nagios::nrpe_command {'check_mysql_server_status':
    command_line => '/usr/bin/sudo /usr/lib/nagios/plugins/check_mysql_server_status'
  }
}
|
|
@ -3,6 +3,8 @@ class sunetdrive::onlyoffice () {
|
|||
$environment = sunetdrive::get_environment()
|
||||
$extra_hosts = hiera_hash($environment)['extra_hosts']
|
||||
$docker_tag = hiera_hash($environment)['collabora_tag']
|
||||
$token_name = 'Sunet'
|
||||
$access_token = safe_hiera('collabora_access_token')
|
||||
$customers = hiera('fullnodes')
|
||||
$multinode_customers = keys(hiera_hash('multinode_mapping'))
|
||||
if $environment == 'prod' {
|
||||
|
@ -10,14 +12,18 @@ class sunetdrive::onlyoffice () {
|
|||
} else {
|
||||
$domain = 'drive.test.sunet.se'
|
||||
}
|
||||
exec { 'collabora_docker_login':
|
||||
command => "docker login registry.gitlab.collabora.com -u ${token_name} -p ${access_token}",
|
||||
}
|
||||
sunet::collabora::docs { 'sunet-onlyoffice':
|
||||
dns => [ '89.32.32.32' ],
|
||||
extra_hosts => $extra_hosts,
|
||||
dns => [ '89.32.32.32' ],
|
||||
extra_hosts => $extra_hosts,
|
||||
extra_volumes => ['/opt/collabora/coolwsd.xml:/etc/coolwsd/coolwsd.xml'],
|
||||
docker_tag => $docker_tag,
|
||||
docker_tag => $docker_tag,
|
||||
docker_image => 'registry.gitlab.collabora.com/productivity/collabora-online-nc',
|
||||
}
|
||||
file {'/opt/collabora/coolwsd.xml':
|
||||
ensure => present,
|
||||
ensure => present,
|
||||
content => template('sunetdrive/document/coolwsd.xml.erb'),
|
||||
}
|
||||
}
|
||||
|
|
48
manifests/portal.pp
Normal file
48
manifests/portal.pp
Normal file
|
@ -0,0 +1,48 @@
|
|||
#Class for SUNET-Drive-portal-Server
#
# Deploys the SUNET Drive portal as a docker-compose service behind
# nginx, pre-creating the nginx state directories and opening the
# HTTP/HTTPS firewall ports.
class sunetdrive::portal (
  String $portal_version = '0.0.1-1'  # image/package version, consumed by the compose template
) {

  $environment = sunetdrive::get_environment()
  # Domain selection: production vs. everything else (test).
  if $environment == 'prod' {
    $domain = 'drive.sunet.se'
  } else {
    $domain = 'drive.test.sunet.se'
  }

  sunet::docker_compose { 'portal_compose':
    content          => template('sunetdrive/portal/docker-compose.erb.yaml'),
    service_name     => 'portal',
    compose_dir      => '/opt/',
    compose_filename => 'docker-compose.yml',
    description      => 'Portal server',
  }

  # sunet::docker_compose owns /opt/portal, so the nginx subdirectory is
  # created out-of-band. FIX: Puppet exec requires a fully qualified
  # command (or a path attribute) — the previous unqualified
  # 'mkdir -p' would fail without an external Exec default. Using the
  # native 'creates' attribute also avoids shelling out for the check.
  exec { 'workaround_docker_compose_dir':
    command => '/usr/bin/mkdir -p /opt/portal/nginx',
    creates => '/opt/portal/nginx',
  }

  # State directories used by the nginx/acme-companion containers.
  $nginx_dirs = ['acme', 'certs', 'conf', 'dhparam', 'html', 'vhost']
  $nginx_dirs.each | $dir| {
    file { "/opt/portal/nginx/${dir}":
      ensure  => directory,
      owner   => 'root',
      group   => 'root',
      mode    => '0751',
      require => Exec['workaround_docker_compose_dir'],
    }
  }

  # Portal application configuration rendered from hiera-backed template.
  file { '/opt/portal/config.yaml':
    ensure  => present,
    content => template('sunetdrive/portal/config.erb.yaml'),
    mode    => '0644',
  }

  sunet::misc::ufw_allow { 'https':
    from => '0.0.0.0/0',
    port => 443,
  }
  # For acme and redirect
  sunet::misc::ufw_allow { 'http':
    from => '0.0.0.0/0',
    port => 80,
  }
}
|
@ -3,6 +3,8 @@ class sunetdrive::proxysql (
|
|||
$bootstrap = undef,
|
||||
$location = undef,
|
||||
$proxysql_container_name = 'proxysql_proxysql_1',
|
||||
$manage_config = true,
|
||||
$manage_network = true,
|
||||
) {
|
||||
|
||||
# Config from group.yaml
|
||||
|
@ -10,6 +12,8 @@ class sunetdrive::proxysql (
|
|||
$config = hiera_hash($environment)
|
||||
$db_ip = $config['db']
|
||||
$nextcloud_ip = $config['app']
|
||||
$nextcloud_ipv6 = $config['app_v6']
|
||||
$nextcloud_ip_all = $nextcloud_ip + $nextcloud_ipv6
|
||||
$proxysql_ok_num = length($nextcloud_ip)
|
||||
$proxysql_warn_num = $proxysql_ok_num - 1
|
||||
|
||||
|
@ -25,6 +29,9 @@ class sunetdrive::proxysql (
|
|||
$mysql_user = safe_hiera('mysql_user')
|
||||
|
||||
$transaction_persistent = 1
|
||||
if $::facts['dockerhost2'] == 'yes' and $manage_network {
|
||||
$hostnet = true
|
||||
}
|
||||
|
||||
file { '/usr/local/bin/proxysql':
|
||||
ensure => file,
|
||||
|
@ -54,10 +61,12 @@ class sunetdrive::proxysql (
|
|||
require => Package['nagios-nrpe-server'],
|
||||
content => template('sunetdrive/proxysql/check_mysql_server_status.erb'),
|
||||
}
|
||||
file { '/opt/proxysql/proxysql.cnf':
|
||||
ensure => present,
|
||||
content => template('sunetdrive/proxysql/proxysql.cnf.erb'),
|
||||
mode => '0644',
|
||||
if $manage_config {
|
||||
file { '/opt/proxysql/proxysql.cnf':
|
||||
ensure => present,
|
||||
content => template('sunetdrive/proxysql/proxysql.cnf.erb'),
|
||||
mode => '0644',
|
||||
}
|
||||
}
|
||||
|
||||
file { '/opt/proxysql/my.cnf':
|
||||
|
@ -65,9 +74,22 @@ class sunetdrive::proxysql (
|
|||
content => template('sunetdrive/proxysql/my.cnf.erb'),
|
||||
mode => '0644',
|
||||
}
|
||||
sunet::misc::ufw_allow { 'stats_ports':
|
||||
from => $tug_office,
|
||||
port => 6080,
|
||||
if $::facts['sunet_nftables_enabled'] == 'yes' {
|
||||
sunet::nftables::docker_expose { 'stats_ports':
|
||||
allow_clients => $tug_office,
|
||||
port => 6080,
|
||||
iif => 'ens3',
|
||||
}
|
||||
sunet::nftables::docker_expose { 'proxysql':
|
||||
allow_clients => $nextcloud_ip_all,
|
||||
port => 6032,
|
||||
iif => 'ens3',
|
||||
}
|
||||
} else {
|
||||
sunet::misc::ufw_allow { 'stats_ports':
|
||||
from => $tug_office,
|
||||
port => 6080,
|
||||
}
|
||||
}
|
||||
|
||||
sunet::docker_compose { 'drive_proxysql_docker_compose':
|
||||
|
|
|
@ -17,10 +17,10 @@ class sunetdrive::redis_cluster (
|
|||
content => template('sunetdrive/redis_cluster/reset_cluster.erb.sh'),
|
||||
mode => '0700',
|
||||
}
|
||||
file { '/root/.bashrc':
|
||||
ensure => present,
|
||||
content => template('sunetdrive/redis_cluster/bashrc.erb'),
|
||||
mode => '0644',
|
||||
exec { 'set_permissions_directory':
|
||||
command => 'chown -R 999:root /opt/redis/node-*'
|
||||
}
|
||||
exec { 'set_permissions_files':
|
||||
command => 'chown -R 999:999 /opt/redis/node-*/*'
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -1,12 +1,16 @@
|
|||
#Class for SUNET-Drive-Lookup-Server
|
||||
class sunetdrive::reva (
|
||||
String $domain = '',
|
||||
String $reva_domain = ''
|
||||
String $domain = 'drive.test.sunet.se',
|
||||
String $customer = 'sunet',
|
||||
String $reva_domain = "${customer}-reva.${domain}",
|
||||
String $reva_version = 'v1.26.0',
|
||||
) {
|
||||
|
||||
$environment = sunetdrive::get_environment()
|
||||
$shared_secret = safe_hiera('shared_secret')
|
||||
$statistics_secret = safe_hiera('statistics_secret')
|
||||
$iopsecret = safe_hiera('iopsecret')
|
||||
$smtp_credentials = safe_hiera('smtp_credentials')
|
||||
|
||||
# Firewall settings
|
||||
#Create users
|
||||
|
@ -19,18 +23,17 @@ class sunetdrive::reva (
|
|||
content => template('sunetdrive/reva/revad.toml.erb'),
|
||||
mode => '0644',
|
||||
}
|
||||
file { '/opt/reva/rclone.conf':
|
||||
ensure => present,
|
||||
owner => 'www-data',
|
||||
group => 'root',
|
||||
content => template('sunetdrive/reva/rclone.conf.erb'),
|
||||
mode => '0644',
|
||||
}
|
||||
file { '/opt/reva/data':
|
||||
ensure => directory,
|
||||
owner => 'www-data',
|
||||
}
|
||||
file { '/opt/reva/ocm-providers.json':
|
||||
ensure => present,
|
||||
owner => 'www-data',
|
||||
group => 'root',
|
||||
content => template('sunetdrive/reva/ocm-providers.json.erb'),
|
||||
mode => '0644',
|
||||
}
|
||||
|
||||
sunet::docker_compose { 'drive_reva_docker_compose':
|
||||
content => template('sunetdrive/reva/docker-compose.yml.erb'),
|
||||
service_name => 'reva',
|
||||
|
@ -38,9 +41,12 @@ class sunetdrive::reva (
|
|||
compose_filename => 'docker-compose.yml',
|
||||
description => 'Sciencemesh reva server',
|
||||
}
|
||||
|
||||
sunet::misc::ufw_allow { 'https_reva':
|
||||
from => '0.0.0.0/0',
|
||||
port => 443,
|
||||
$ports = [443,19000]
|
||||
$ports.each | $port|{
|
||||
sunet::misc::ufw_allow { "reva_${port}":
|
||||
from => '0.0.0.0/0',
|
||||
port => $port,
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -29,16 +29,23 @@ class sunetdrive::satosa($dehydrated_name=undef,$image='docker.sunet.se/satosa',
|
|||
}
|
||||
}
|
||||
}
|
||||
sunet::docker_run {'satosa':
|
||||
image => $image,
|
||||
imagetag => $tag,
|
||||
volumes => ['/etc/satosa:/etc/satosa','/etc/dehydrated:/etc/dehydrated'],
|
||||
ports => ['443:8000'],
|
||||
env => ['METADATA_DIR=/etc/satosa/metadata', 'WORKER_TIMEOUT=120']
|
||||
$dehydrated_status = $dehydrated_name ? {
|
||||
undef => 'absent',
|
||||
default => 'present'
|
||||
}
|
||||
file {'/opt/docker_run':
|
||||
ensure => 'absent',
|
||||
}
|
||||
sunet::docker_compose { 'satosa':
|
||||
content => template('sunetdrive/satosa/docker-compose.yml.erb'),
|
||||
service_name => 'satosa',
|
||||
compose_dir => '/opt/',
|
||||
compose_filename => 'docker-compose.yml',
|
||||
description => 'Satosa',
|
||||
}
|
||||
file {'/etc/satosa/proxy_conf.yaml':
|
||||
content => inline_template("<%= @merged_conf.to_yaml %>\n"),
|
||||
notify => Sunet::Docker_run['satosa']
|
||||
notify => Sunet::Docker_compose['satosa']
|
||||
}
|
||||
$plugins = hiera('satosa_config')
|
||||
sort(keys($plugins)).each |$n| {
|
||||
|
@ -46,26 +53,16 @@ class sunetdrive::satosa($dehydrated_name=undef,$image='docker.sunet.se/satosa',
|
|||
$fn = $plugins[$n]
|
||||
file { $fn:
|
||||
content => inline_template("<%= @conf.to_yaml %>\n"),
|
||||
notify => Sunet::Docker_run['satosa']
|
||||
notify => Sunet::Docker_compose['satosa']
|
||||
}
|
||||
}
|
||||
ufw::allow { 'satosa-allow-https':
|
||||
ip => 'any',
|
||||
sunet::misc::ufw_allow { 'satosa-allow-https':
|
||||
from => 'any',
|
||||
port => '443'
|
||||
}
|
||||
$dehydrated_status = $dehydrated_name ? {
|
||||
undef => 'absent',
|
||||
default => 'present'
|
||||
}
|
||||
sunet::docker_run {'alwayshttps':
|
||||
sunet::misc::ufw_allow { 'satosa-allow-http':
|
||||
ensure => $dehydrated_status,
|
||||
image => 'docker.sunet.se/always-https',
|
||||
ports => ['80:80'],
|
||||
env => ['ACME_URL=http://acme-c.sunet.se']
|
||||
}
|
||||
ufw::allow { 'satosa-allow-http':
|
||||
ensure => $dehydrated_status,
|
||||
ip => 'any',
|
||||
from => 'any',
|
||||
port => '80'
|
||||
}
|
||||
if ($dehydrated_name) {
|
||||
|
@ -77,12 +74,6 @@ class sunetdrive::satosa($dehydrated_name=undef,$image='docker.sunet.se/satosa',
|
|||
cert_file => '/etc/satosa/https.crt'
|
||||
}
|
||||
}
|
||||
file { '/opt/satosa':
|
||||
ensure => directory,
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0755',
|
||||
}
|
||||
-> file { '/opt/satosa/restart.sh':
|
||||
ensure => file,
|
||||
owner => 'root',
|
||||
|
|
|
@ -3,6 +3,8 @@ class sunetdrive::script (
|
|||
$bootstrap = undef,
|
||||
$location = undef
|
||||
) {
|
||||
include sunet::packages::python3_pip
|
||||
include sunet::packages::kopia
|
||||
$environment = sunetdrive::get_environment()
|
||||
$customer = sunetdrive::get_customer()
|
||||
$apikey_test = safe_hiera('monitor_apikey_test')
|
||||
|
@ -17,7 +19,21 @@ class sunetdrive::script (
|
|||
$backup_server = $config['backup_server']
|
||||
$rclone_url = 'https://downloads.rclone.org/rclone-current-linux-amd64.deb'
|
||||
$local_path = '/tmp/rclone-current-linux-amd64.deb'
|
||||
$singlenodes = hiera('singlenodes')
|
||||
$singlenodes = lookup('singlenodes')
|
||||
$multinodes = keys(lookup('multinode_mapping'))
|
||||
$extra_backup_jobs = pick($config['extra_backup_jobs'], {})
|
||||
|
||||
if $customer == 'mdu' {
|
||||
$eppn_suffix = 'mdh.se'
|
||||
$include_userbuckets = 'true'
|
||||
} elsif $customer == 'uu' {
|
||||
$eppn_suffix = 'users.uu.se'
|
||||
$include_userbuckets = 'false'
|
||||
}
|
||||
else {
|
||||
$eppn_suffix = "${customer}.se"
|
||||
$include_userbuckets = 'false'
|
||||
}
|
||||
|
||||
$ssh_config = "Host *.sunet.se
|
||||
User script
|
||||
|
@ -41,6 +57,17 @@ class sunetdrive::script (
|
|||
}
|
||||
$site_name = $config['site_name']
|
||||
$user_bucket_name = $config['user_bucket_name']
|
||||
if $config['user_scans'] {
|
||||
$config['user_scans'].each |$job| {
|
||||
sunet::scriptherder::cronjob { $job['name']:
|
||||
cmd => "ssh -t -l script ${job['server']} /usr/bin/sudo /usr/local/bin/occ ${job['container']} files:scan ${job['user']}",
|
||||
hour => $job['hour'],
|
||||
minute => $job['minute'],
|
||||
ok_criteria => ['exit_status=0','max_age=1d'],
|
||||
warn_criteria => ['exit_status=1','max_age=2d'],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# It is a start that will get us user buckets and primary buckets
|
||||
$backup_projects = $location
|
||||
|
@ -54,11 +81,11 @@ class sunetdrive::script (
|
|||
source => $local_path,
|
||||
require => Exec['rclone_deb'],
|
||||
}
|
||||
package { 'python3.9':
|
||||
package { 'fuse3':
|
||||
ensure => installed,
|
||||
provider => apt,
|
||||
}
|
||||
-> package { 'python3-pip':
|
||||
package { 'python3':
|
||||
ensure => installed,
|
||||
provider => apt,
|
||||
}
|
||||
|
@ -66,11 +93,29 @@ class sunetdrive::script (
|
|||
ensure => installed,
|
||||
provider => apt,
|
||||
}
|
||||
package { 'xmlstarlet':
|
||||
ensure => installed,
|
||||
provider => apt,
|
||||
}
|
||||
$drive_version = '0.3.1'
|
||||
if $facts['os']['distro']['id'] == 'Debian' {
|
||||
$pip_cmd = 'pip3 install --break-system-packages'
|
||||
} else {
|
||||
$pip_cmd = 'python3 -m pip install'
|
||||
|
||||
}
|
||||
exec { 'drive-utils':
|
||||
command => "python3.9 -m pip install https://pypi.sunet.se/packages/drive-utils-${drive_version}.tar.gz",
|
||||
unless => "python3.9 -m pip list | grep drive-utils | grep ${drive_version}",
|
||||
require => Package['python3.9'],
|
||||
command => "${pip_cmd} https://pypi.sunet.se/packages/drive-utils-${drive_version}.tar.gz",
|
||||
unless => "python3 -m pip list | grep drive-utils | grep ${drive_version}",
|
||||
require => Package['python3'],
|
||||
}
|
||||
file { '/opt/backups':
|
||||
ensure => directory,
|
||||
mode => '0700'
|
||||
}
|
||||
file { '/opt/backups/scripts':
|
||||
ensure => directory,
|
||||
mode => '0700'
|
||||
}
|
||||
file { '/root/.ssh/':
|
||||
ensure => directory,
|
||||
|
@ -139,13 +184,6 @@ class sunetdrive::script (
|
|||
group => 'root',
|
||||
mode => '0700',
|
||||
}
|
||||
file { '/root/tasks/switch_redis_master_to.sh':
|
||||
ensure => file,
|
||||
content => template('sunetdrive/script/switch_redis_master_to.erb.sh'),
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0700',
|
||||
}
|
||||
file { '/root/tasks/restart-nextcloud-farm':
|
||||
ensure => file,
|
||||
content => template('sunetdrive/script/restart-nextcloud-farm.erb'),
|
||||
|
@ -216,6 +254,12 @@ class sunetdrive::script (
|
|||
group => 'root',
|
||||
mode => '0700',
|
||||
}
|
||||
file_line { 'FIXME_remove_when_s3_migration_done_in_sto3':
|
||||
ensure => 'present',
|
||||
line => '37.156.195.53 s3.sto3.safedc.net',
|
||||
path => '/etc/hosts',
|
||||
match => '^37.156.195.53',
|
||||
}
|
||||
if $environment == 'test' {
|
||||
sunet::scriptherder::cronjob { 'reboot-customer':
|
||||
cmd => '/root/tasks/reboot-customer.sh',
|
||||
|
@ -225,8 +269,13 @@ class sunetdrive::script (
|
|||
warn_criteria => ['exit_status=1','max_age=31d'],
|
||||
}
|
||||
}
|
||||
# Opt out of userbuckets
|
||||
unless $customer in ['extern', 'gih', 'suni', 'common'] {
|
||||
# Opt out of userbuckets, also customers that ended the contract
|
||||
if $customer in ['extern', 'gih', 'hkr', 'suni', 'common', 'su', 'lnu'] {
|
||||
sunet::scriptherder::cronjob { 'makebuckets':
|
||||
ensure => absent,
|
||||
cmd => 'bin/true',
|
||||
}
|
||||
} else {
|
||||
sunet::scriptherder::cronjob { 'makebuckets':
|
||||
cmd => '/root/tasks/makebuckets.sh',
|
||||
minute => '*/5',
|
||||
|
@ -260,31 +309,69 @@ class sunetdrive::script (
|
|||
}
|
||||
# Opt in to folder structure in projectbuckets
|
||||
if $customer in ['gih', 'mdu'] {
|
||||
sunet::scriptherder::cronjob { 'create_folders_in_project_buckets':
|
||||
ensure => absent,
|
||||
cmd => 'true',
|
||||
}
|
||||
file { '/root/tasks/create_folders_in_project_buckets.sh':
|
||||
ensure => absent,
|
||||
}
|
||||
file { '/root/tasks/create_folders_in_fullnode_buckets.sh':
|
||||
ensure => file,
|
||||
content => template('sunetdrive/script/create_folders_in_project_buckets.erb.sh'),
|
||||
content => template('sunetdrive/script/create_folders_in_fullnode_buckets.erb.sh'),
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0700',
|
||||
}
|
||||
}
|
||||
if $customer in ['gih'] {
|
||||
sunet::scriptherder::cronjob { 'create_folders_in_project_buckets':
|
||||
cmd => '/root/tasks/create_folders_in_project_buckets.sh',
|
||||
sunet::scriptherder::cronjob { 'create_folders_in_fullnode_buckets':
|
||||
cmd => '/root/tasks/create_folders_in_fullnode_buckets.sh',
|
||||
minute => '*/30',
|
||||
ok_criteria => ['exit_status=0','max_age=1h'],
|
||||
warn_criteria => ['exit_status=1','max_age=2h'],
|
||||
}
|
||||
}
|
||||
if $customer in ['mdu'] {
|
||||
sunet::scriptherder::cronjob { 'create_folders_in_project_buckets':
|
||||
cmd => '/root/tasks/create_folders_in_project_buckets.sh "Arbetsmaterial (work material)" "Bevarande (retention)" "Gallringsbart (disposal)"',
|
||||
sunet::scriptherder::cronjob { 'create_folders_in_fullnode_buckets':
|
||||
cmd => '/root/tasks/create_folders_in_fullnode_buckets.sh "Arbetsmaterial (work material)" "Bevarande (retention)" "Gallringsbart (disposal)"',
|
||||
minute => '*/30',
|
||||
ok_criteria => ['exit_status=0','max_age=1h'],
|
||||
warn_criteria => ['exit_status=1','max_age=2h'],
|
||||
}
|
||||
}
|
||||
if $customer == 'common' {
|
||||
$multinode_passwords = $multinodes.map | $index, $customer | {
|
||||
safe_hiera("${customer}_admin_app_password")
|
||||
}
|
||||
file { '/root/tasks/announce.sh':
|
||||
ensure => file,
|
||||
content => template('sunetdrive/script/multinodeannounce.erb.sh'),
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0700',
|
||||
}
|
||||
file { '/root/tasks/backupmultinodedb.sh':
|
||||
ensure => file,
|
||||
content => template('sunetdrive/script/backupmultinodedb.erb.sh'),
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0700',
|
||||
}
|
||||
file { '/opt/backups/scripts/hb.sh':
|
||||
ensure => file,
|
||||
content => template('sunetdrive/script/backup-hb.erb.sh'),
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0700',
|
||||
}
|
||||
sunet::scriptherder::cronjob { 'backupmultinodedb':
|
||||
cmd => '/root/tasks/backupmultinodedb.sh',
|
||||
hour => '2',
|
||||
minute => '0',
|
||||
ok_criteria => ['exit_status=0','max_age=2d'],
|
||||
warn_criteria => ['exit_status=1','max_age=3d'],
|
||||
}
|
||||
if $environment == 'prod' {
|
||||
file { '/root/tasks/aggregate.sh':
|
||||
ensure => file,
|
||||
|
@ -300,33 +387,22 @@ class sunetdrive::script (
|
|||
ok_criteria => ['exit_status=0','max_age=2d'],
|
||||
warn_criteria => ['exit_status=1','max_age=3d'],
|
||||
}
|
||||
|
||||
}
|
||||
file { '/root/tasks/backupsinglenodedb.sh':
|
||||
ensure => file,
|
||||
content => template('sunetdrive/script/backupsinglenodedb.erb.sh'),
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0700',
|
||||
}
|
||||
$singlenodes.each | $singlenode| {
|
||||
$multinode = hiera_hash('multinode_mapping')[$singlenode]['server']
|
||||
$multinodeserver = "${multinode}.${site_name}"
|
||||
$nccontainer = "nextcloud${singlenode}_app_1"
|
||||
$nccontainer = "nextcloud-${singlenode}-app-1"
|
||||
|
||||
sunet::scriptherder::cronjob { "backup${singlenode}db":
|
||||
ensure => absent,
|
||||
cmd => 'true',
|
||||
}
|
||||
sunet::scriptherder::cronjob { "listusers_${singlenode}":
|
||||
cmd => "/root/tasks/listusers.sh ${singlenode} ${multinodeserver}",
|
||||
minute => '*/5',
|
||||
ok_criteria => ['exit_status=0','max_age=30m'],
|
||||
warn_criteria => ['exit_status=1', 'max_age=60m'],
|
||||
}
|
||||
sunet::scriptherder::cronjob { "backup${singlenode}db":
|
||||
cmd => "/root/tasks/backupsinglenodedb.sh ${multinodeserver} ${singlenode}",
|
||||
hour => '2',
|
||||
minute => '0',
|
||||
ok_criteria => ['exit_status=0','max_age=2d'],
|
||||
warn_criteria => ['exit_status=1','max_age=3d'],
|
||||
}
|
||||
if $environment == 'prod' {
|
||||
sunet::scriptherder::cronjob { "statistics${singlenode}":
|
||||
cmd => "/root/tasks/usage.sh ${singlenode} ${multinodeserver}",
|
||||
|
@ -336,7 +412,7 @@ class sunetdrive::script (
|
|||
warn_criteria => ['exit_status=1','max_age=3d'],
|
||||
}
|
||||
}
|
||||
unless $singlenode in ['mau'] {
|
||||
unless $singlenode in ['mau', 'uu'] {
|
||||
sunet::scriptherder::cronjob { "make${singlenode}buckets":
|
||||
cmd => "/root/tasks/makebuckets.sh ${multinodeserver} ${nccontainer} ${singlenode}-${environment}",
|
||||
minute => '*',
|
||||
|
@ -345,23 +421,45 @@ class sunetdrive::script (
|
|||
}
|
||||
}
|
||||
}
|
||||
$gss_backup_server = $config['gss_backup_server']
|
||||
$lookup_backup_server = $config['lookup_backup_server']
|
||||
sunet::scriptherder::cronjob { 'backupgssdb':
|
||||
cmd => "/root/tasks/backupdb.sh ${gss_backup_server}",
|
||||
ensure => 'absent',
|
||||
cmd => 'true',
|
||||
hour => '2',
|
||||
minute => '0',
|
||||
ok_criteria => ['exit_status=0','max_age=2d'],
|
||||
warn_criteria => ['exit_status=1','max_age=3d'],
|
||||
}
|
||||
sunet::scriptherder::cronjob { 'backuplookupdb':
|
||||
cmd => "/root/tasks/backupdb.sh ${lookup_backup_server}",
|
||||
ensure => 'absent',
|
||||
cmd => 'true',
|
||||
hour => '2',
|
||||
minute => '0',
|
||||
ok_criteria => ['exit_status=0','max_age=2d'],
|
||||
warn_criteria => ['exit_status=1','max_age=3d'],
|
||||
}
|
||||
} else {
|
||||
$admin_app_password = safe_hiera('admin_app_password')
|
||||
file { '/root/tasks/announce.sh':
|
||||
ensure => file,
|
||||
content => template('sunetdrive/script/announce.erb.sh'),
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0700',
|
||||
}
|
||||
file { '/root/tasks/delete_announcement_with_subject.sh':
|
||||
ensure => file,
|
||||
content => template('sunetdrive/script/delete_announcement_with_subject.erb.sh'),
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
mode => '0700',
|
||||
}
|
||||
file { '/root/tasks/backupmultinodedb.sh':
|
||||
ensure => absent,
|
||||
}
|
||||
sunet::scriptherder::cronjob { 'backupmultinodedb':
|
||||
ensure => absent,
|
||||
cmd => 'true',
|
||||
}
|
||||
sunet::scriptherder::cronjob { 'backupdb':
|
||||
cmd => "/root/tasks/backupdb.sh ${backup_server}",
|
||||
hour => '2',
|
||||
|
|
|
@ -1,10 +1,23 @@
|
|||
#Class for SUNET-Drive-Script-receiver
|
||||
class sunetdrive::scriptreceiver()
|
||||
{
|
||||
include sunet::packages::yq
|
||||
sunet::system_user {'script': username => 'script', group => 'script', managehome => true, shell => '/bin/bash' }
|
||||
|
||||
# These tasks correspond to a ${task}.erb.sh template
|
||||
$tasks = ['list_users', 'list_files_for_user', 'create_bucket', 'backup_db', 'purge_backups', 'maintenancemode', 'restart_sunet_service', 'start_sentinel', 'stop_sentinel']
|
||||
$tasks = [
|
||||
'list_users',
|
||||
'list_files_for_user',
|
||||
'create_bucket',
|
||||
'backup_db',
|
||||
'purge_backups',
|
||||
'maintenancemode',
|
||||
'restart_sunet_service',
|
||||
'start_sentinel',
|
||||
'stop_sentinel',
|
||||
'removeswap',
|
||||
'backup_multinode_db'
|
||||
]
|
||||
|
||||
$environment = sunetdrive::get_environment()
|
||||
$config = hiera_hash($environment)
|
||||
|
@ -22,7 +35,6 @@ class sunetdrive::scriptreceiver()
|
|||
owner => 'script',
|
||||
group => 'script',
|
||||
}
|
||||
|
||||
$kano_shell = ['89.46.21.246','2001:6b0:6c::1bc']
|
||||
sunet::misc::ufw_allow { 'script_port':
|
||||
from => $script_ipv4 + $script_ipv6 + $kano_shell,
|
||||
|
@ -35,7 +47,9 @@ class sunetdrive::scriptreceiver()
|
|||
type => 'ssh-ed25519',
|
||||
key => $script_pub_key,
|
||||
}
|
||||
|
||||
file { '/etc/sysctl.d/gofasta.conf':
|
||||
ensure => 'absent',
|
||||
}
|
||||
file { '/opt/rotate':
|
||||
ensure => directory,
|
||||
mode => '0750',
|
||||
|
@ -48,14 +62,28 @@ class sunetdrive::scriptreceiver()
|
|||
owner => 'root',
|
||||
group => 'root',
|
||||
}
|
||||
file { '/usr/local/bin/safer_reboot':
|
||||
file { '/usr/local/bin/get_drive_customers':
|
||||
ensure => file,
|
||||
content => template('sunetdrive/scriptreceiver/safer_reboot.erb'),
|
||||
mode => '0740',
|
||||
content => template('sunetdrive/scriptreceiver/get_drive_customers.erb.sh'),
|
||||
mode => '0744',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
}
|
||||
file { "/etc/sudoers.d/99-safer_reboot":
|
||||
file { '/usr/local/bin/safer_reboot':
|
||||
ensure => file,
|
||||
content => template('sunetdrive/scriptreceiver/safer_reboot.erb'),
|
||||
mode => '0744',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
}
|
||||
file { '/root/.bashrc':
|
||||
ensure => file,
|
||||
content => template('sunetdrive/scriptreceiver/bashrc.erb.sh'),
|
||||
mode => '0644',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
}
|
||||
file { '/etc/sudoers.d/99-safer_reboot':
|
||||
ensure => file,
|
||||
content => "script ALL=(root) NOPASSWD: /usr/local/bin/safer_reboot\n",
|
||||
mode => '0440',
|
||||
|
@ -90,6 +118,19 @@ class sunetdrive::scriptreceiver()
|
|||
minute => '*',
|
||||
hour => '*',
|
||||
}
|
||||
file { '/usr/local/bin/clear_scriptherder':
|
||||
ensure => file,
|
||||
content => template('sunetdrive/scriptreceiver/clear_scriptherder.erb.sh'),
|
||||
mode => '0740',
|
||||
owner => 'root',
|
||||
group => 'root',
|
||||
}
|
||||
file { '/home/script/bin/makeswap.sh':
|
||||
ensure => absent,
|
||||
}
|
||||
file { '/etc/sudoers.d/99-makeswap':
|
||||
ensure => absent,
|
||||
}
|
||||
$tasks.each |String $task| {
|
||||
file { "/home/script/bin/${task}.sh":
|
||||
ensure => file,
|
||||
|
|
|
@ -7,10 +7,13 @@ class sunetdrive::sitemonitornaemon() {
|
|||
$tls_servers_with_port = hiera_array('tls_servers_with_port')
|
||||
$nextcloud_version_prod = split(hiera('nextcloud_version_prod'),'[-]')[0]
|
||||
$nextcloud_version_test = split(hiera('nextcloud_version_test'),'[-]')[0]
|
||||
$monitorhost = $::fqdn
|
||||
$monitorhost = $facts['networking']['fqdn']
|
||||
$environment = sunetdrive::get_environment()
|
||||
$influx_passwd = safe_hiera('influx_passwd')
|
||||
$slack_url = safe_hiera('slack_url')
|
||||
$extra_host_groups = {
|
||||
node3_hosts => join($facts['configured_hosts_in_cosmos']['all'].filter |$host| { $host =~ /^node3\./ }, ',')
|
||||
}
|
||||
|
||||
file { '/usr/local/bin/slack_nagios.sh':
|
||||
ensure => present,
|
||||
|
@ -42,11 +45,20 @@ class sunetdrive::sitemonitornaemon() {
|
|||
}
|
||||
file { '/etc/naemon/conf.d/sunetdrive_thruk_templates.conf':
|
||||
ensure => present,
|
||||
owner => 'naemon',
|
||||
group => 'naemon',
|
||||
content => template('sunetdrive/monitor/sunetdrive_thruk_templates.conf.erb'),
|
||||
mode => '0644',
|
||||
}
|
||||
file { '/etc/naemon/conf.d/sunetdrive_extra_hostgroups.cfg':
|
||||
ensure => present,
|
||||
content => template('sunetdrive/monitor/sunetdrive_extra_hostgroups.cfg.erb'),
|
||||
mode => '0644',
|
||||
}
|
||||
nagioscfg::service {'check_scriptherder':
|
||||
hostgroup_name => ['sunetdrive::nrpe'],
|
||||
check_command => 'check_nrpe_1arg_to300!check_scriptherder',
|
||||
description => 'Scriptherder Status',
|
||||
contact_groups => ['naemon-admins'],
|
||||
}
|
||||
nagioscfg::service {'check_galera_cluster':
|
||||
hostgroup_name => ['galera_monitor'],
|
||||
check_command => 'check_nrpe_1arg!check_galera_cluster',
|
||||
|
@ -95,6 +107,12 @@ class sunetdrive::sitemonitornaemon() {
|
|||
description => 'Status of sarimner interface',
|
||||
contact_groups => ['alerts']
|
||||
}
|
||||
nagioscfg::service {'check_nextcloud_mounts':
|
||||
hostgroup_name => ['node3_hosts','sunetdrive::multinode'],
|
||||
check_command => 'check_nrpe_1arg!check_nextcloud_mounts',
|
||||
description => 'S3 buckets with multiple Nextcloud mounts',
|
||||
contact_groups => ['alerts']
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
|
10
manifests/ubuntu_2004.pp
Normal file
10
manifests/ubuntu_2004.pp
Normal file
|
@ -0,0 +1,10 @@
|
|||
# Class for Ubuntu 20.04
#
# Compatibility shim: on Ubuntu 20.04 hosts only, silence Ruby
# deprecation warnings system-wide by exporting RUBYOPT=-W0 through
# /etc/environment. A no-op on every other platform/release.
class sunetdrive::ubuntu_2004() {
  $is_focal = $facts['os']['name'] == 'Ubuntu' and $facts['os']['distro']['release']['full'] == '20.04'
  if $is_focal {
    # Hide deprecation warnings for Ubuntu 2004
    file_line {'env_rubyopt':
      path => '/etc/environment',
      line => 'RUBYOPT=\'-W0\'',
    }
  }
}
|
|
@ -7,7 +7,7 @@ LoadModule headers_module /usr/lib/apache2/modules/mod_headers.so
|
|||
|
||||
ErrorLog ${APACHE_LOG_DIR}/error.log
|
||||
CustomLog ${APACHE_LOG_DIR}/access.log combined
|
||||
<Directory /var/www/nextcloud/>
|
||||
<Directory /var/www/html/>
|
||||
Require all granted
|
||||
AllowOverride All
|
||||
Options FollowSymLinks MultiViews
|
||||
|
@ -16,6 +16,10 @@ LoadModule headers_module /usr/lib/apache2/modules/mod_headers.so
|
|||
Dav off
|
||||
</IfModule>
|
||||
</Directory>
|
||||
<Directory /var/www/html/data>
|
||||
Order allow,deny
|
||||
deny from all
|
||||
</Directory>
|
||||
</VirtualHost>
|
||||
|
||||
|
||||
|
@ -47,5 +51,9 @@ LoadModule headers_module /usr/lib/apache2/modules/mod_headers.so
|
|||
Dav off
|
||||
</IfModule>
|
||||
</Directory>
|
||||
<Directory /var/www/html/data>
|
||||
Order allow,deny
|
||||
deny from all
|
||||
</Directory>
|
||||
</VirtualHost>
|
||||
# vim: syntax=apache ts=4 sw=4 sts=4 sr noet
|
||||
|
|
|
@ -1,13 +1,14 @@
|
|||
Hello and welcome to your personal space for research data storage,
|
||||
Welcome to your personal space for research data storage!
|
||||
|
||||
It is important that research data is managed in a secure and careful manner, so that it is protected from damage and destruction. Mälardalen University offers its researchers and doctoral students a secure and stable IT service that makes it possible to store research data. Furthermore, it enables research collaboration internally and externally since researchers can easily share their data within the project or with other individual researchers at MDU in a secure way. This technical infrastructure entails a good collaboration space for researchers, a secure handling of data, the possibility for the researcher to be able to store data in such a way that data management according to the FAIR principles can be complied with at MDU.
|
||||
Researchers and PhD-students at MDU have access to a free storage space of 200 GB.
|
||||
|
||||
To meet the general need for research data storage, MDU has purchased Sunet Drive's S3 research data storage service. Each researcher will be offered a space of 200GB per person. To simplify administration, all spaces will be appropriated in advance in the same way, regardless of the research domain or total amount of research data.
|
||||
Your storage space is in the “Your storage space” folder that appears a few minutes after opening SUNET Drive for the first time.
|
||||
|
||||
200 GB will not correspond to all of MDU researchers' data storage needs; therefore, the researcher will have the possibility to purchase additional data space via a digital form. The additional purchase function is done by internal billing and the purchased space is then visible at the next login in the researcher's personal space.
|
||||
The “Your storage space” folder has an ABG structure which means that there are folders for Arbetsmaterial (work material), Bevarande (retention) and Gallringsbart (disposable).
|
||||
|
||||
If there is a need for a specific project space for research data storage and/or sharing, this is also possible. Such a space is created by an administrator after ordering project space through the same form as when purchasing additional storage space. The project space will then be visible to project members in the personal storage space at the next login.
|
||||
It is possible to create subfolders within these main folders themselves.
|
||||
|
||||
Can a MDU student be included in the research project? It is possible to assign a student a time-limited access/sharing opportunity to specific folders in either the project space or the researcher's personal space. Contact the Data Access Unit (DAU) to enable such a feature.
|
||||
All research data, except for military material and security-classified information, can be stored on SUNET Drive.
|
||||
If your data needs to undergo an export control, please refer to the check list on the internal portal.
|
||||
|
||||
If you have any questions about the storage space, please contact dau@mdu.se.
|
||||
A user guide and a FAQ regarding SUNET Drive is available on the internal portal. If you have any questions, please contact dau@mdu.se
|
||||
|
|
|
@ -25,7 +25,7 @@ log_errors_max_len = 1024
|
|||
max_execution_time = 86400
|
||||
max_file_uploads = 20
|
||||
max_input_time = 86400
|
||||
memory_limit = 512M
|
||||
memory_limit = <%= @php_memory_limit_mb %>M
|
||||
output_buffering = Off
|
||||
post_max_size = 30G
|
||||
precision = 14
|
||||
|
@ -151,7 +151,7 @@ ldap.max_links = -1
|
|||
[dba]
|
||||
; Nothing here
|
||||
[opcache]
|
||||
; Nothing here
|
||||
opcache.interned_strings_buffer=32
|
||||
[curl]
|
||||
; Nothing here
|
||||
[openssl]
|
||||
|
|
42
templates/application/check_nextcloud_mounts.py
Normal file
42
templates/application/check_nextcloud_mounts.py
Normal file
|
@ -0,0 +1,42 @@
|
|||
#!/usr/bin/env python3
|
||||
|
||||
from collections import Counter
|
||||
import json
|
||||
import shlex
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
exit = 0
|
||||
base_message = "OK: no duplicate mounts"
|
||||
long_message = ""
|
||||
|
||||
get_containers = subprocess.Popen('/usr/local/bin/get_containers', stdout=subprocess.PIPE).stdout.read()
|
||||
containers = get_containers.decode().splitlines()
|
||||
|
||||
for i, container in enumerate(containers, start=1):
|
||||
buckets = []
|
||||
list_command = f"/usr/local/bin/nocc {container} files_external:list --all --show-password --output json"
|
||||
command = shlex.split(list_command)
|
||||
mount_data_byte = subprocess.Popen(command, stdout=subprocess.PIPE).stdout.read()
|
||||
try:
|
||||
mount_data = json.loads(mount_data_byte.decode())
|
||||
except json.decoder.JSONDecodeError as err:
|
||||
if i == 1 or i != len(containers):
|
||||
base_message = "WARNING: invalid json"
|
||||
long_message += f"\ncontainer: {container} - json decode error: {err}"
|
||||
# lets do exit 0 for now
|
||||
# exit = 1
|
||||
continue
|
||||
for items in mount_data:
|
||||
buckets.append(items["configuration"]["bucket"])
|
||||
bucket_count = dict(Counter(buckets))
|
||||
for k, v in bucket_count.items():
|
||||
if v > 1:
|
||||
base_message = "WARNING: buckets with multiple mounts"
|
||||
long_message += f"\ncontainer: {container} - bucket: {k} - {v}"
|
||||
# lets do exit 0 for now
|
||||
# exit = 1
|
||||
print(base_message)
|
||||
if long_message != "":
|
||||
print(long_message.lstrip())
|
||||
sys.exit(exit)
|
|
@ -147,7 +147,9 @@ ldap.max_links = -1
|
|||
[dba]
|
||||
; Nothing here
|
||||
[opcache]
|
||||
; Nothing here
|
||||
opcache.interned_strings_buffer=16
|
||||
opcache.validate_timestamps=0
|
||||
opcache.memory_consumption=128
|
||||
[curl]
|
||||
; Nothing here
|
||||
[openssl]
|
||||
|
|
|
@ -3,7 +3,6 @@
|
|||
config_php='/var/www/html/config/config.php'
|
||||
dbhost="<%= @dbhost %>"
|
||||
mysql_user_password="<%= @mysql_user_password %>"
|
||||
admin_password="<%= @admin_password %>"
|
||||
location="<%= @location %>"
|
||||
bucket="<%= @s3_bucket %>"
|
||||
|
||||
|
@ -14,6 +13,9 @@ if [[ "${user_input}" == "IKnowWhatIAmDoing" ]]; then
|
|||
echo "WARNING: This will delete everything in the database and reinstall Nextcloud."
|
||||
echo "You have 10 seconds to abort by hitting CTRL/C"
|
||||
sleep 10s
|
||||
echo "Setting temp admin password"
|
||||
apt update && apt install -y apg
|
||||
admin_password="$(apg -m 40 | head -1)"
|
||||
echo "Ok, proceeding."
|
||||
echo "Dropping database in 3 seconds"
|
||||
sleep 3s
|
||||
|
@ -48,6 +50,11 @@ EOF
|
|||
instanceid=$(grep -E "^ 'instanceid'" ${config_php} | awk -F "'" '{print $4}')
|
||||
secret=$(grep -E "^ 'secret'" ${config_php} | awk -F "'" '{print $4}')
|
||||
passwordsalt=$(grep -E "^ 'passwordsalt'" ${config_php} | awk -F "'" '{print $4}')
|
||||
echo "Now delete the admin user:"
|
||||
echo " occ user:delete admin"
|
||||
echo "and then create a new admin user:"
|
||||
echo " /usr/local/bin/add_admin_user <username> <email address>"
|
||||
echo ""
|
||||
echo "Please use edit-secrets to add these variables to all Nextcloud servers:"
|
||||
echo "instanceid: DEC::PKCS7[${instanceid}]!"
|
||||
echo "secret: DEC::PKCS7[${secret}]!"
|
||||
|
|
|
@ -1,7 +1,5 @@
|
|||
<?php
|
||||
$CONFIG = array (
|
||||
'memcache.local' => '\\OC\\Memcache\\APCu',
|
||||
'appstoreenabled' => false,
|
||||
'apps_paths' =>
|
||||
array (
|
||||
0 =>
|
||||
|
@ -17,145 +15,41 @@ $CONFIG = array (
|
|||
'writable' => true,
|
||||
),
|
||||
),
|
||||
'memcache.distributed' => '\\OC\\Memcache\\Redis',
|
||||
'memcache.locking' => '\\OC\\Memcache\\Redis',
|
||||
<% if @location == 'sunet-test' -%>
|
||||
'redis.cluster' => [
|
||||
'seeds' => [
|
||||
'redis1.drive.test.sunet.se:6379',
|
||||
'redis2.drive.test.sunet.se:6379',
|
||||
'redis3.drive.test.sunet.se:6379',
|
||||
'redis1.drive.test.sunet.se:6380',
|
||||
'redis2.drive.test.sunet.se:6380',
|
||||
'redis3.drive.test.sunet.se:6380',
|
||||
'redis1.drive.test.sunet.se:6381',
|
||||
'redis2.drive.test.sunet.se:6381',
|
||||
'redis3.drive.test.sunet.se:6381'
|
||||
],
|
||||
'timeout' => 1.1,
|
||||
'read_timeout' => 0.0,
|
||||
'password' => '<%= @redis_cluster_password %>',
|
||||
'failover_mode' => \RedisCluster::FAILOVER_ERROR
|
||||
],
|
||||
<% elsif @environment == 'test' && ! @is_multinode && @location != 'gss-test' -%>
|
||||
'redis.cluster' => [
|
||||
'seeds' => [
|
||||
'redis1.<%= @customer %>.drive.test.sunet.se:6379',
|
||||
'redis2.<%= @customer %>.drive.test.sunet.se:6379',
|
||||
'redis3.<%= @customer %>.drive.test.sunet.se:6379',
|
||||
'redis1.<%= @customer %>.drive.test.sunet.se:6380',
|
||||
'redis2.<%= @customer %>.drive.test.sunet.se:6380',
|
||||
'redis3.<%= @customer %>.drive.test.sunet.se:6380',
|
||||
'redis1.<%= @customer %>.drive.test.sunet.se:6381',
|
||||
'redis2.<%= @customer %>.drive.test.sunet.se:6381',
|
||||
'redis3.<%= @customer %>.drive.test.sunet.se:6381'
|
||||
],
|
||||
'timeout' => 1.1,
|
||||
'read_timeout' => 0.0,
|
||||
'password' => '<%= @redis_cluster_password %>',
|
||||
'failover_mode' => \RedisCluster::FAILOVER_ERROR
|
||||
],
|
||||
<% elsif @environment == 'prod' && ! @is_multinode && @location != 'gss-prod' -%>
|
||||
'redis.cluster' => [
|
||||
'seeds' => [
|
||||
'redis1.<%= @customer %>.drive.sunet.se:6379',
|
||||
'redis2.<%= @customer %>.drive.sunet.se:6379',
|
||||
'redis3.<%= @customer %>.drive.sunet.se:6379',
|
||||
'redis1.<%= @customer %>.drive.sunet.se:6380',
|
||||
'redis2.<%= @customer %>.drive.sunet.se:6380',
|
||||
'redis3.<%= @customer %>.drive.sunet.se:6380',
|
||||
'redis1.<%= @customer %>.drive.sunet.se:6381',
|
||||
'redis2.<%= @customer %>.drive.sunet.se:6381',
|
||||
'redis3.<%= @customer %>.drive.sunet.se:6381'
|
||||
],
|
||||
'timeout' => 1.1,
|
||||
'read_timeout' => 0.0,
|
||||
'password' => '<%= @redis_cluster_password %>',
|
||||
'failover_mode' => \RedisCluster::FAILOVER_ERROR
|
||||
],
|
||||
<% else -%>
|
||||
'redis' =>
|
||||
array (
|
||||
'host' => '<%= @redis_host %>',
|
||||
'password' => '<%= @redis_host_password %>',
|
||||
'port' => 6379,
|
||||
),
|
||||
<% end -%>
|
||||
'forcessl' => true,
|
||||
'overwriteprotocol' => 'https',
|
||||
'objectstore' =>
|
||||
array (
|
||||
'class' => '\\OC\\Files\\ObjectStore\\S3',
|
||||
'arguments' =>
|
||||
array (
|
||||
'bucket' => '<%= @s3_bucket %>',
|
||||
'key' => '<%= @s3_key %>',
|
||||
'secret' => '<%= @s3_secret %>',
|
||||
'region' => 'us-east-1',
|
||||
'hostname' => '<%= @s3_host %>',
|
||||
'port' => '',
|
||||
'objectPrefix' => 'urn:oid:',
|
||||
'autocreate' => false,
|
||||
'use_ssl' => true,
|
||||
'use_path_style' => true,
|
||||
'legacy_auth' => false,
|
||||
),
|
||||
),
|
||||
'csrf.disabled' => true,
|
||||
'passwordsalt' => '<%= @passwordsalt %>',
|
||||
'secret' => '<%= @secret %>',
|
||||
'trusted_domains' =>
|
||||
array (
|
||||
<%- index = 0 -%>
|
||||
<%- @trusted_domains.each do |item| -%>
|
||||
<%= index %> => '<%= item %>',<% index += 1 %>
|
||||
<%- end -%>
|
||||
),
|
||||
<%- unless @trusted_proxies.empty? -%>
|
||||
'trusted_proxies' =>
|
||||
array (
|
||||
<%- index = 0 -%>
|
||||
<%- @trusted_proxies.each do |item| -%>
|
||||
<%= index %> => '<%= item %>',
|
||||
<%- index += 1 -%>
|
||||
<%- end -%>
|
||||
),
|
||||
<%- end -%>
|
||||
'appstoreenabled' => false,
|
||||
'auth.bruteforce.protection.enabled' => false,
|
||||
'config_is_read_only' => true,
|
||||
'csrf.disabled' => true,
|
||||
'datadirectory' => '/var/www/html/data',
|
||||
'davstorage.request_timeout' => 86401,
|
||||
<%- if @hostnet -%>
|
||||
'dbhost' => '127.0.0.1',
|
||||
<%- else -%>
|
||||
'dbhost' => '<%= @dbhost %>',
|
||||
'dbname' => 'nextcloud',
|
||||
<%- end -%>
|
||||
'dbname' => '<%= @dbname %>',
|
||||
'dbpassword' => '<%= @mysql_user_password %>',
|
||||
'dbport' => '3306',
|
||||
'dbtableprefix' => 'oc_',
|
||||
'dbtype' => 'mysql',
|
||||
'dbuser' => 'nextcloud',
|
||||
'dbuser' => '<%= @dbuser %>',
|
||||
'default_phone_region' => 'SE',
|
||||
'drive_email_template_text_left' => '<%= @drive_email_template_text_left %>',
|
||||
'drive_email_template_plain_text_left' => '<%= @drive_email_template_plain_text_left %>',
|
||||
'drive_email_template_url_left' => '<%= @drive_email_template_url_left %>',
|
||||
<% if @location == 'sunet-test' -%>
|
||||
'filelocking.debug' => true,
|
||||
<% end -%>
|
||||
'files_external_allow_create_new_local' => false,
|
||||
'forcessl' => true,
|
||||
'gs.enabled' => '<%= @gs_enabled %>',
|
||||
'gs.federation' => '<%= @gs_federation %>',
|
||||
'gs.trustedHosts' => ['*.sunet.se'],
|
||||
'gss.discovery.manual.mapping.file' => '/var/www/html/mappingfile.json',
|
||||
'gss.discovery.manual.mapping.parameter' => 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6',
|
||||
'gss.discovery.manual.mapping.regex' => true,
|
||||
'gss.jwt.key' => '<%= @gss_jwt_key %>',
|
||||
'gss.master.admin' =>
|
||||
array (
|
||||
<%- index = 0 -%>
|
||||
<%- @gss_master_admin.each do |item| -%>
|
||||
<%= index %> => '<%= item %>',
|
||||
<%- index += 1 -%>
|
||||
<%- end -%>
|
||||
),
|
||||
'gss.master.url' => '<%= @gss_master_url %>',
|
||||
'gss.mode' => '<%= @gss_mode %>',
|
||||
'gss.user.discovery.module' => '\\OCA\\GlobalSiteSelector\\UserDiscoveryModules\\ManualUserMapping',
|
||||
'htaccess.RewriteBase' => '/',
|
||||
'installed' => true,
|
||||
'instanceid' => '<%= @instanceid %>',
|
||||
'integrity.check.disabled' => true,
|
||||
'log_type' => 'file',
|
||||
'loglevel' => 0,
|
||||
'lookup_server' => '<%= @lookup_server %>',
|
||||
'loglevel' => 1,
|
||||
'mail_domain' => '<%= @mail_domain %>',
|
||||
'mail_from_address' => '<%= @mail_from_address %>',
|
||||
'mail_sendmailmode' => 'smtp',
|
||||
|
@ -168,33 +62,126 @@ $CONFIG = array (
|
|||
'mail_smtpport' => '587',
|
||||
'mail_smtpsecure' => 'tls',
|
||||
'mail_template_class' => 'OCA\DriveEmailTemplate\EMailTemplate',
|
||||
'memcache.distributed' => '\\OC\\Memcache\\Redis',
|
||||
'memcache.local' => '\\OC\\Memcache\\APCu',
|
||||
'memcache.locking' => '\\OC\\Memcache\\Redis',
|
||||
'mysql.utf8mb4' => true,
|
||||
'objectstore' =>
|
||||
array (
|
||||
'class' => '\\OC\\Files\\ObjectStore\\S3',
|
||||
'arguments' =>
|
||||
array (
|
||||
'bucket' => '<%= @s3_bucket %>',
|
||||
'key' => '<%= @s3_key %>',
|
||||
'secret' => '<%= @s3_secret %>',
|
||||
'region' => 'us-east-1',
|
||||
'hostname' => '<%= @s3_host %>',
|
||||
'port' => '',
|
||||
'useMultipartCopy' => true,
|
||||
'objectPrefix' => 'urn:oid:',
|
||||
'autocreate' => false,
|
||||
'use_ssl' => true,
|
||||
'use_path_style' => true,
|
||||
'legacy_auth' => false,
|
||||
),
|
||||
),
|
||||
'overwrite.cli.url' => 'https://<%= @site_name %>/',
|
||||
'overwritehost' => '<%= @site_name %>',
|
||||
'overwrite.cli.url' => 'https://<%= @site_name %>',
|
||||
'templatedirectory' => '',
|
||||
'overwriteprotocol' => 'https',
|
||||
'passwordsalt' => '<%= @passwordsalt %>',
|
||||
<% if @environment == 'test' && ! @is_multinode -%>
|
||||
'redis.cluster' => [
|
||||
'failover_mode' => \RedisCluster::FAILOVER_ERROR,
|
||||
'password' => '<%= @redis_cluster_password %>',
|
||||
'read_timeout' => 0.0,
|
||||
'seeds' => [
|
||||
'redis1.<%= @customer %>.drive.test.sunet.se:6379',
|
||||
'redis2.<%= @customer %>.drive.test.sunet.se:6379',
|
||||
'redis3.<%= @customer %>.drive.test.sunet.se:6379',
|
||||
'redis1.<%= @customer %>.drive.test.sunet.se:6380',
|
||||
'redis2.<%= @customer %>.drive.test.sunet.se:6380',
|
||||
'redis3.<%= @customer %>.drive.test.sunet.se:6380',
|
||||
'redis1.<%= @customer %>.drive.test.sunet.se:6381',
|
||||
'redis2.<%= @customer %>.drive.test.sunet.se:6381',
|
||||
'redis3.<%= @customer %>.drive.test.sunet.se:6381'
|
||||
],
|
||||
'timeout' => 1.1
|
||||
],
|
||||
<% elsif @environment == 'prod' && ! @is_multinode -%>
|
||||
'redis.cluster' => [
|
||||
'failover_mode' => \RedisCluster::FAILOVER_ERROR,
|
||||
'password' => '<%= @redis_cluster_password %>',
|
||||
'read_timeout' => 0.0,
|
||||
'seeds' => [
|
||||
'redis1.<%= @customer %>.drive.sunet.se:6379',
|
||||
'redis2.<%= @customer %>.drive.sunet.se:6379',
|
||||
'redis3.<%= @customer %>.drive.sunet.se:6379',
|
||||
'redis1.<%= @customer %>.drive.sunet.se:6380',
|
||||
'redis2.<%= @customer %>.drive.sunet.se:6380',
|
||||
'redis3.<%= @customer %>.drive.sunet.se:6380',
|
||||
'redis1.<%= @customer %>.drive.sunet.se:6381',
|
||||
'redis2.<%= @customer %>.drive.sunet.se:6381',
|
||||
'redis3.<%= @customer %>.drive.sunet.se:6381'
|
||||
],
|
||||
'timeout' => 1.1
|
||||
],
|
||||
<% else -%>
|
||||
'redis' =>
|
||||
array (
|
||||
'host' => '<%= @redis_host %>',
|
||||
'password' => '<%= @redis_host_password %>',
|
||||
'port' => 6379,
|
||||
),
|
||||
<% end -%>
|
||||
'secret' => '<%= @secret %>',
|
||||
'skeletondirectory' => '<%= @skeletondirectory %>',
|
||||
'templatedirectory' => '',
|
||||
<% if @environment == 'test' -%>
|
||||
'trashbin_retention_obligation' => 'auto, 30',
|
||||
<% end -%>
|
||||
'trusted_domains' =>
|
||||
array (
|
||||
<%- index = 0 -%>
|
||||
<%- @trusted_domains.each do |item| -%>
|
||||
<%= index %> => '<%= item %>',<% index += 1 %>
|
||||
<%- end -%>
|
||||
),
|
||||
<%- unless @trusted_proxies.empty? -%>
|
||||
'trusted_proxies' =>
|
||||
array (
|
||||
<%- index = 0 -%>
|
||||
<%- @trusted_proxies.each do |item| -%>
|
||||
<%= index %> => '<%= item %>',
|
||||
<%- index += 1 -%>
|
||||
<%- end -%>
|
||||
),
|
||||
<%- end -%>
|
||||
'twofactor_enforced' => 'true',
|
||||
'twofactor_enforced_groups' =>
|
||||
array (
|
||||
0 => 'admin',
|
||||
<%- index = 1 -%>
|
||||
<%- @twofactor_enforced_groups.each do |item| -%>
|
||||
<%= index %> => '<%= item %>',
|
||||
<%- index += 1 -%>
|
||||
<%- end -%>
|
||||
),
|
||||
array (
|
||||
0 => 'admin',
|
||||
1 => 'forcemfa',
|
||||
<%- if @twofactor_enforced_groups -%>
|
||||
<%- index = 2 -%>
|
||||
<%- @twofactor_enforced_groups.each do |item| -%>
|
||||
<%= index %> => '<%= item %>',
|
||||
<%- index += 1 -%>
|
||||
<%- end -%>
|
||||
<%- end -%>
|
||||
),
|
||||
'twofactor_enforced_excluded_groups' =>
|
||||
array (
|
||||
<%- index = 0 -%>
|
||||
<%- @twofactor_enforced_excluded_groups.each do |item| -%>
|
||||
<%= index %> => '<%= item %>',
|
||||
<%- index += 1 -%>
|
||||
<%- end -%>
|
||||
),
|
||||
array (
|
||||
<%- if @twofactor_enforced_excluded_groups -%>
|
||||
<%- index = 0 -%>
|
||||
<%- @twofactor_enforced_excluded_groups.each do |item| -%>
|
||||
<%= index %> => '<%= item %>',
|
||||
<%- index += 1 -%>
|
||||
<%- end -%>
|
||||
<%- end -%>
|
||||
),
|
||||
'updatechecker' => false,
|
||||
'version' => '<%= @nextcloud_version_string %>',
|
||||
'app_install_overwrite' =>
|
||||
array (
|
||||
0 => 'globalsiteselector',
|
||||
),
|
||||
|
||||
<% if @environment == 'test' -%>
|
||||
'versions_retention_obligation' => '<%= @expiration_days_min %>, <%= @expiration_days_max %>',
|
||||
<% end -%>
|
||||
);
|
||||
|
|
|
@ -5,6 +5,10 @@ services:
|
|||
app:
|
||||
image: docker.sunet.se/drive/nextcloud-custom:<%= @nextcloud_version %>
|
||||
restart: always
|
||||
container_name: nextcloud_app_1
|
||||
<%- if @hostnet -%>
|
||||
network_mode: host
|
||||
<%- end -%>
|
||||
volumes:
|
||||
- /opt/nextcloud/000-default.conf:/etc/apache2/sites-enabled/000-default.conf
|
||||
- /opt/nextcloud/mpm_prefork.conf:/etc/apache2/mods-available/mpm_prefork.conf
|
||||
|
@ -15,28 +19,30 @@ services:
|
|||
- /opt/nextcloud/complete_reinstall.sh:/complete_reinstall.sh
|
||||
- /opt/nextcloud/config.php:/var/www/html/config/config.php
|
||||
- /opt/nextcloud/nextcloud.log:/var/www/html/data/nextcloud.log
|
||||
- /opt/nextcloud/audit.log:/var/www/html/data/audit.log
|
||||
- /opt/nextcloud/rclone.conf:/rclone.conf
|
||||
<%- if @skeletondirectory -%>
|
||||
- /opt/nextcloud/skeleton:<%= @skeletondirectory %>
|
||||
<%- end -%>
|
||||
<%- if @location =~ /^gss/ -%>
|
||||
- /opt/nextcloud/mappingfile.json:/var/www/html/mappingfile.json
|
||||
<%- end -%>
|
||||
<% if @location =~ /^kau/ -%>
|
||||
- /mnt:/opt/tmp/
|
||||
<%- end -%>
|
||||
<%- if ! @hostnet -%>
|
||||
networks:
|
||||
- default
|
||||
- proxysql_proxysql
|
||||
<%- end -%>
|
||||
dns:
|
||||
- 89.46.20.75
|
||||
- 89.46.21.29
|
||||
- 89.32.32.32
|
||||
<%- if !@hostnet -%>
|
||||
ports:
|
||||
- 443:443
|
||||
command: apachectl -D FOREGROUND
|
||||
<%- end -%>
|
||||
command: sh -c 'tail -F /var/www/html/data/nextcloud.log /var/www/html/data/audit.log| tee -a /proc/1/fd/2 & apachectl -D FOREGROUND'
|
||||
tty: true
|
||||
|
||||
<%- if !@hostnet -%>
|
||||
networks:
|
||||
proxysql_proxysql:
|
||||
external: true
|
||||
<%- end -%>
|
||||
|
|
|
@ -1,57 +1,58 @@
|
|||
{
|
||||
"/antagning.se$/": "antagning.drive.sunet.se",
|
||||
"/bth.se$/": "bth.drive.sunet.se",
|
||||
"/chalmers.se$/": "chalmers.drive.sunet.se",
|
||||
"/du.se$/": "du.drive.sunet.se",
|
||||
"/eduid.se$/": "extern.drive.sunet.se",
|
||||
"/esh.se$/": "esh.drive.sunet.se",
|
||||
"/fhs.se$/": "fhs.drive.sunet.se",
|
||||
"/gih.se$/": "gih.drive.sunet.se",
|
||||
"/gu.se$/": "gu.drive.sunet.se",
|
||||
"/hb.se$/": "hb.drive.sunet.se",
|
||||
"/shh.se$/": "shh.drive.sunet.se",
|
||||
"/hh.se$/": "hh.drive.sunet.se",
|
||||
"/hhs.se$/": "hhs.drive.sunet.se",
|
||||
"/hig.se$/": "hig.drive.sunet.se",
|
||||
"/his.se$/": "his.drive.sunet.se",
|
||||
"/hj.se$/": "hj.drive.sunet.se",
|
||||
"/hkr.se$/": "hkr.drive.sunet.se",
|
||||
"/hv.se$/": "hv.drive.sunet.se",
|
||||
"/irf.se$/": "irf.drive.sunet.se",
|
||||
"/kb.se$/": "kb.drive.sunet.se",
|
||||
"/ki.se$/": "ki.drive.sunet.se",
|
||||
"/kkh.se$/": "kkh.drive.sunet.se",
|
||||
"/kmh.se$/": "kmh.drive.sunet.se",
|
||||
"/konstfack.se$/": "konstfack.drive.sunet.se",
|
||||
"/kth.se$/": "kth.drive.sunet.se",
|
||||
"/kva.se$/": "kva.drive.sunet.se",
|
||||
"/liu.se$/": "liu.drive.sunet.se",
|
||||
"/lnu.se$/": "lnu.drive.sunet.se",
|
||||
"/ltu.se$/": "ltu.drive.sunet.se",
|
||||
"/lu.se$/": "lu.drive.sunet.se",
|
||||
"/mah.se$/": "mau.drive.sunet.se",
|
||||
"/mau.se$/": "mau.drive.sunet.se",
|
||||
"/mdh.se$/": "mdu.drive.sunet.se",
|
||||
"/mdu.se$/": "mdu.drive.sunet.se",
|
||||
"/miun.se$/": "miun.drive.sunet.se",
|
||||
"/nordunet.se$/": "nordunet.drive.sunet.se",
|
||||
"/nrm.se$/": "nrm.drive.sunet.se",
|
||||
"/oru.se$/": "oru.drive.sunet.se",
|
||||
"/rkh.se$/": "rkh.drive.sunet.se",
|
||||
"/sics.se$/": "sics.drive.sunet.se",
|
||||
"/slu.se$/": "slu.drive.sunet.se",
|
||||
"/smhi.se$/": "smhi.drive.sunet.se",
|
||||
"/sp.se$/": "sp.drive.sunet.se",
|
||||
"/su.se$/": "su.drive.sunet.se",
|
||||
"/sunet.se$/": "sunet.drive.sunet.se",
|
||||
"/suni.se$/": "suni.drive.sunet.se",
|
||||
"/swamid.se$/": "swamid.drive.sunet.se",
|
||||
"/ths.se$/": "ths.drive.sunet.se",
|
||||
"/uhr.se$/": "uhr.drive.sunet.se",
|
||||
"/umu.se$/": "umu.drive.sunet.se",
|
||||
"/uniarts.se$/": "uniarts.drive.sunet.se",
|
||||
"/uu.se$/": "uu.drive.sunet.se",
|
||||
"/vinnova.se$/": "vinnova.drive.sunet.se",
|
||||
"/vr.se$/": "vr.drive.sunet.se",
|
||||
"/(\\w+\\.)*antagning.se$/": "antagning.drive.sunet.se",
|
||||
"/(\\w+\\.)*bth.se$/": "bth.drive.sunet.se",
|
||||
"/(\\w+\\.)*chalmers.se$/": "chalmers.drive.sunet.se",
|
||||
"/(\\w+\\.)*du.se$/": "du.drive.sunet.se",
|
||||
"/(\\w+\\.)*eduid.se$/": "extern.drive.sunet.se",
|
||||
"/(\\w+\\.)*esh.se$/": "esh.drive.sunet.se",
|
||||
"/(\\w+\\.)*fhs.se$/": "fhs.drive.sunet.se",
|
||||
"/(\\w+\\.)*gih.se$/": "gih.drive.sunet.se",
|
||||
"/(\\w+\\.)*gu.se$/": "gu.drive.sunet.se",
|
||||
"/(\\w+\\.)*hb.se$/": "hb.drive.sunet.se",
|
||||
"/(\\w+\\.)*shh.se$/": "shh.drive.sunet.se",
|
||||
"/(\\w+\\.)*hh.se$/": "hh.drive.sunet.se",
|
||||
"/(\\w+\\.)*hhs.se$/": "hhs.drive.sunet.se",
|
||||
"/(\\w+\\.)*hig.se$/": "hig.drive.sunet.se",
|
||||
"/(\\w+\\.)*his.se$/": "his.drive.sunet.se",
|
||||
"/(\\w+\\.)*hj.se$/": "hj.drive.sunet.se",
|
||||
"/(\\w+\\.)*hkr.se$/": "hkr.drive.sunet.se",
|
||||
"/(\\w+\\.)*hv.se$/": "hv.drive.sunet.se",
|
||||
"/(\\w+\\.)*irf.se$/": "irf.drive.sunet.se",
|
||||
"/(\\w+\\.)*kb.se$/": "kb.drive.sunet.se",
|
||||
"/(\\w+\\.)*ki.se$/": "ki.drive.sunet.se",
|
||||
"/(\\w+\\.)*kkh.se$/": "kkh.drive.sunet.se",
|
||||
"/(\\w+\\.)*kmh.se$/": "kmh.drive.sunet.se",
|
||||
"/(\\w+\\.)*konstfack.se$/": "konstfack.drive.sunet.se",
|
||||
"/(\\w+\\.)*kth.se$/": "kth.drive.sunet.se",
|
||||
"/(\\w+\\.)*kva.se$/": "kva.drive.sunet.se",
|
||||
"/(\\w+\\.)*liu.se$/": "liu.drive.sunet.se",
|
||||
"/(\\w+\\.)*lnu.se$/": "lnu.drive.sunet.se",
|
||||
"/(\\w+\\.)*ltu.se$/": "ltu.drive.sunet.se",
|
||||
"/(\\w+\\.)*lu.se$/": "lu.drive.sunet.se",
|
||||
"/(\\w+\\.)*mah.se$/": "mau.drive.sunet.se",
|
||||
"/(\\w+\\.)*mau.se$/": "mau.drive.sunet.se",
|
||||
"/(\\w+\\.)*mdh.se$/": "mdu.drive.sunet.se",
|
||||
"/(\\w+\\.)*mdu.se$/": "mdu.drive.sunet.se",
|
||||
"/(\\w+\\.)*miun.se$/": "miun.drive.sunet.se",
|
||||
"/(\\w+\\.)*nordunet.se$/": "nordunet.drive.sunet.se",
|
||||
"/(\\w+\\.)*nordu.net$/": "nordunet.drive.sunet.se",
|
||||
"/(\\w+\\.)*nrm.se$/": "nrm.drive.sunet.se",
|
||||
"/(\\w+\\.)*oru.se$/": "oru.drive.sunet.se",
|
||||
"/(\\w+\\.)*rkh.se$/": "rkh.drive.sunet.se",
|
||||
"/(\\w+\\.)*sics.se$/": "sics.drive.sunet.se",
|
||||
"/(\\w+\\.)*slu.se$/": "slu.drive.sunet.se",
|
||||
"/(\\w+\\.)*smhi.se$/": "smhi.drive.sunet.se",
|
||||
"/(\\w+\\.)*sp.se$/": "sp.drive.sunet.se",
|
||||
"/(\\w+\\.)*su.se$/": "su.drive.sunet.se",
|
||||
"/(\\w+\\.)*sunet.se$/": "sunet.drive.sunet.se",
|
||||
"/(\\w+\\.)*suni.se$/": "suni.drive.sunet.se",
|
||||
"/(\\w+\\.)*swamid.se$/": "swamid.drive.sunet.se",
|
||||
"/(\\w+\\.)*ths.se$/": "ths.drive.sunet.se",
|
||||
"/(\\w+\\.)*uhr.se$/": "uhr.drive.sunet.se",
|
||||
"/(\\w+\\.)*umu.se$/": "umu.drive.sunet.se",
|
||||
"/(\\w+\\.)*uniarts.se$/": "uniarts.drive.sunet.se",
|
||||
"/(\\w+\\.)*uu.se$/": "uu.drive.sunet.se",
|
||||
"/(\\w+\\.)*vinnova.se$/": "vinnova.drive.sunet.se",
|
||||
"/(\\w+\\.)*vr.se$/": "vr.drive.sunet.se",
|
||||
"/$/": "extern.drive.sunet.se"
|
||||
}
|
||||
|
|
|
@ -1,57 +1,58 @@
|
|||
{
|
||||
"/antagning.se$/": "antagning.drive.test.sunet.se",
|
||||
"/bth.se$/": "bth.drive.test.sunet.se",
|
||||
"/chalmers.se$/": "chalmers.drive.test.sunet.se",
|
||||
"/du.se$/": "du.drive.test.sunet.se",
|
||||
"/eduid.se$/": "extern.drive.test.sunet.se",
|
||||
"/esh.se$/": "esh.drive.test.sunet.se",
|
||||
"/fhs.se$/": "fhs.drive.test.sunet.se",
|
||||
"/gih.se$/": "gih.drive.test.sunet.se",
|
||||
"/gu.se$/": "gu.drive.test.sunet.se",
|
||||
"/hb.se$/": "hb.drive.test.sunet.se",
|
||||
"/shh.se$/": "shh.drive.test.sunet.se",
|
||||
"/hh.se$/": "hh.drive.test.sunet.se",
|
||||
"/hhs.se$/": "hhs.drive.test.sunet.se",
|
||||
"/hig.se$/": "hig.drive.test.sunet.se",
|
||||
"/his.se$/": "his.drive.test.sunet.se",
|
||||
"/hj.se$/": "hj.drive.test.sunet.se",
|
||||
"/hkr.se$/": "hkr.drive.test.sunet.se",
|
||||
"/hv.se$/": "hv.drive.test.sunet.se",
|
||||
"/irf.se$/": "irf.drive.test.sunet.se",
|
||||
"/kb.se$/": "kb.drive.test.sunet.se",
|
||||
"/ki.se$/": "ki.drive.test.sunet.se",
|
||||
"/kkh.se$/": "kkh.drive.test.sunet.se",
|
||||
"/kmh.se$/": "kmh.drive.test.sunet.se",
|
||||
"/konstfack.se$/": "konstfack.drive.test.sunet.se",
|
||||
"/kth.se$/": "kth.drive.test.sunet.se",
|
||||
"/kva.se$/": "kva.drive.test.sunet.se",
|
||||
"/liu.se$/": "liu.drive.test.sunet.se",
|
||||
"/lnu.se$/": "lnu.drive.test.sunet.se",
|
||||
"/ltu.se$/": "ltu.drive.test.sunet.se",
|
||||
"/lu.se$/": "lu.drive.test.sunet.se",
|
||||
"/mah.se$/": "mau.drive.test.sunet.se",
|
||||
"/mau.se$/": "mau.drive.test.sunet.se",
|
||||
"/mdh.se$/": "mdu.drive.test.sunet.se",
|
||||
"/mdu.se$/": "mdu.drive.test.sunet.se",
|
||||
"/miun.se$/": "miun.drive.test.sunet.se",
|
||||
"/nordunet.se$/": "nordunet.drive.test.sunet.se",
|
||||
"/nrm.se$/": "nrm.drive.test.sunet.se",
|
||||
"/oru.se$/": "oru.drive.test.sunet.se",
|
||||
"/rkh.se$/": "rkh.drive.test.sunet.se",
|
||||
"/sics.se$/": "sics.drive.test.sunet.se",
|
||||
"/slu.se$/": "slu.drive.test.sunet.se",
|
||||
"/smhi.se$/": "smhi.drive.test.sunet.se",
|
||||
"/sp.se$/": "sp.drive.test.sunet.se",
|
||||
"/su.se$/": "su.drive.test.sunet.se",
|
||||
"/sunet.se$/": "sunet.drive.test.sunet.se",
|
||||
"/suni.se$/": "suni.drive.test.sunet.se",
|
||||
"/swamid.se$/": "swamid.drive.test.sunet.se",
|
||||
"/ths.se$/": "ths.drive.test.sunet.se",
|
||||
"/uhr.se$/": "uhr.drive.test.sunet.se",
|
||||
"/umu.se$/": "umu.drive.test.sunet.se",
|
||||
"/uniarts.se$/": "uniarts.drive.test.sunet.se",
|
||||
"/uu.se$/": "uu.drive.test.sunet.se",
|
||||
"/vinnova.se$/": "vinnova.drive.test.sunet.se",
|
||||
"/vr.se$/": "vr.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*antagning.se$/": "antagning.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*bth.se$/": "bth.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*chalmers.se$/": "chalmers.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*du.se$/": "du.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*eduid.se$/": "extern.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*esh.se$/": "esh.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*fhs.se$/": "fhs.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*gih.se$/": "gih.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*gu.se$/": "gu.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*hb.se$/": "hb.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*shh.se$/": "shh.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*hh.se$/": "hh.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*hhs.se$/": "hhs.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*hig.se$/": "hig.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*his.se$/": "his.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*hj.se$/": "hj.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*hkr.se$/": "hkr.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*hv.se$/": "hv.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*irf.se$/": "irf.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*kb.se$/": "kb.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*ki.se$/": "ki.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*kkh.se$/": "kkh.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*kmh.se$/": "kmh.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*konstfack.se$/": "konstfack.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*kth.se$/": "kth.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*kva.se$/": "kva.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*liu.se$/": "liu.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*lnu.se$/": "lnu.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*ltu.se$/": "ltu.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*lu.se$/": "lu.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*mah.se$/": "mau.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*mau.se$/": "mau.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*mdh.se$/": "mdu.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*mdu.se$/": "mdu.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*miun.se$/": "miun.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*nordunet.se$/": "nordunet.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*nordu.net$/": "nordunet.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*nrm.se$/": "nrm.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*oru.se$/": "oru.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*rkh.se$/": "rkh.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*sics.se$/": "sics.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*slu.se$/": "slu.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*smhi.se$/": "smhi.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*sp.se$/": "sp.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*su.se$/": "su.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*sunet.se$/": "sunet.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*suni.se$/": "suni.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*swamid.se$/": "swamid.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*ths.se$/": "ths.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*uhr.se$/": "uhr.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*umu.se$/": "umu.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*uniarts.se$/": "uniarts.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*uu.se$/": "uu.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*vinnova.se$/": "vinnova.drive.test.sunet.se",
|
||||
"/(\\w+\\.)*vr.se$/": "vr.drive.test.sunet.se",
|
||||
"/$/": "extern.drive.test.sunet.se"
|
||||
}
|
||||
|
|
21
templates/application/nocc.erb
Executable file
21
templates/application/nocc.erb
Executable file
|
@ -0,0 +1,21 @@
|
|||
#!/bin/bash
# Run the Nextcloud "occ" admin tool inside a container, forwarding any
# OC_* environment variables from the host into the container.
#
# Usage: nocc [<container>] <occ arguments...>
#   If the first argument starts with "nextcloud" it is taken as the
#   container name and consumed; otherwise the default container is used.

if [[ "${1}" =~ ^nextcloud ]]; then
  container="${1}"
  shift
else
  container="nextcloud_app_1"
fi

# Forward every OC_* variable as a "-e VAR=value" pair to docker exec.
# NOTE: values containing whitespace will not survive this word split;
# acceptable here because OC_* variables are simple tokens.
MY_VARS=""
oc_list=$(env | grep 'OC_')
if [[ -n "${oc_list}" ]]; then
  for row in ${oc_list}; do
    MY_VARS="${MY_VARS} -e ${row}"
  done
fi

# MY_VARS is intentionally unquoted: it must expand into separate
# "-e VAR=value" arguments.
# shellcheck disable=SC2086
docker exec -i ${MY_VARS} -u www-data "${container}" php --define apc.enable_cli=1 /var/www/html/occ "$@"
exit 0
|
||||
|
||||
|
|
@ -7,6 +7,7 @@ else
|
|||
container="nextcloud_app_1"
|
||||
fi
|
||||
|
||||
|
||||
oc_list=$(env| grep 'OC_')
|
||||
if [[ "x${oc_list}" != "x" ]]; then
|
||||
for row in $(echo "${oc_list}"); do
|
||||
|
@ -14,4 +15,19 @@ if [[ "x${oc_list}" != "x" ]]; then
|
|||
done
|
||||
fi
|
||||
|
||||
if [[ ${1} == 'config:editable' ]]; then
|
||||
echo "config:editable is deprecated"
|
||||
exit 0
|
||||
fi
|
||||
docker exec ${container} chmod u+w /var/www/html/config/
|
||||
docker exec -ti ${MY_VARS} -u www-data ${container} sh -c 'sed "s/config_is_read_only\(.\) => true,/config_is_read_only\1 => false,/" /var/www/html/config/config.php > /var/www/html/config/config.php.tmp'
|
||||
docker exec -ti ${MY_VARS} -u www-data ${container} cp /var/www/html/config/config.php.tmp /var/www/html/config/config.php
|
||||
docker exec -ti ${MY_VARS} -u www-data ${container} rm /var/www/html/config/config.php.tmp
|
||||
docker exec -ti ${MY_VARS} -u www-data ${container} php --define apc.enable_cli=1 /var/www/html/occ "$@"
|
||||
docker exec -ti ${MY_VARS} -u www-data ${container} sh -c 'sed "s/config_is_read_only\(.\) => false,/config_is_read_only\1 => true,/" /var/www/html/config/config.php > /var/www/html/config/config.php.tmp'
|
||||
docker exec -ti ${MY_VARS} -u www-data ${container} cp /var/www/html/config/config.php.tmp /var/www/html/config/config.php
|
||||
docker exec -ti ${MY_VARS} -u www-data ${container} rm /var/www/html/config/config.php.tmp
|
||||
docker exec ${container} chmod u-w /var/www/html/config/
|
||||
exit 0
|
||||
|
||||
|
||||
|
|
79
templates/application/remount_user_bucket_as_project.sh
Executable file
79
templates/application/remount_user_bucket_as_project.sh
Executable file
|
@ -0,0 +1,79 @@
|
|||
#!/bin/bash
# Remount a user's personal S3 bucket as a project bucket.
# Reads the existing external-mount configuration via occ, re-creates it as
# an admin-group project mount named "<user>-user-bucket", then deletes the
# original mount.
#
# Usage: remount_user_bucket_as_project.sh <mountid> <user> [<container>]

mountid="${1}"
user="${2}"
container="${3}"
if [[ -z ${mountid} ]] || [[ -z ${user} ]]; then
echo "We need a valid mount id and user to proceed"
echo "Usage: ${0} <mountid> <user> [<container>]"
# NOTE(review): bare "exit" returns 0 here, so callers cannot detect the
# usage error — consider "exit 1".
exit
fi

if [[ -z ${container} ]]; then
container="nextcloud_app_1"
fi

occ="/usr/local/bin/occ ${container}"
# get_config <key>: print one configuration value of the mount, with
# newlines/tabs/CRs stripped so it can be spliced into the JSON below.
function get_config {
${occ} files_external:config ${mountid} ${1} | tr -d '\n\t\r'
}

echo "Gathering information, hang tight."

# One occ round-trip per value; dots are progress feedback.
echo -n "."
bucket="$(get_config bucket)"
echo -n "."
hostname="$(get_config hostname)"
echo -n "."
key="$(get_config key)"
echo -n "."
region="$(get_config region)"
echo -n "."
secret="$(get_config secret)"
# NOTE(review): this file will contain the S3 secret; /tmp is world-readable
# and the name is predictable — consider mktemp plus a restrictive umask.
jsonfile="/tmp/${user}-user-bucket.json"
# Mount point name: user id with '@' and '.' replaced by '-'.
# NOTE(review): ${mount_point/./-} replaces only the FIRST '.' — user ids
# containing several dots keep the rest; confirm this is intended.
mount_point="${user/@/-}"
mount_point="${mount_point/./-}-user-bucket"

echo "This will remount the user bucket with mountid ${mountid} for ${user} as project bucket with mountpoint ${mount_point}."
read -r -p "Press enter to continue"

# Build the files_external:import payload. The quote dance ('…'${var}'…')
# splices shell variables into an otherwise literal JSON document.
echo '
[
    {
        "mount_point": "\/'${mount_point}'",
        "storage": "\\OCA\\Files_External\\Lib\\Storage\\AmazonS3",
        "authentication_type": "amazons3::accesskey",
        "configuration": {
            "bucket": "'${bucket}'",
            "hostname": "'${hostname}'",
            "key": "'${key}'",
            "legacy_auth": false,
            "port": "443",
            "region": "'${region}'",
            "secret": "'${secret}'",
            "storageClass": "",
            "useMultipartCopy": true,
            "use_path_style": true,
            "use_ssl": true
        },
        "options": {
            "encrypt": true,
            "previews": true,
            "enable_sharing": true,
            "filesystem_check_changes": 0,
            "encoding_compatibility": false,
            "readonly": false
        },
        "applicable_users": [
        ],
        "applicable_groups": ["admin"]
    }
]
' > "${jsonfile}"

# Import the new project mount inside the container, clean up the payload
# on both sides, then drop the old user mount.
docker cp ${jsonfile} ${container}:/${jsonfile}
${occ} files_external:import /${jsonfile}
docker exec ${container} rm /${jsonfile}
rm ${jsonfile}
${occ} files_external:delete ${mountid}
|
34
templates/application/scan_external_mounts.sh
Normal file
34
templates/application/scan_external_mounts.sh
Normal file
|
@ -0,0 +1,34 @@
|
|||
#!/bin/bash
# Scan all Nextcloud external mounts in every container on this host and
# report mounts whose files_external:scan output contains "Error".
# Exit status: 0 = no errors (or another instance was already running),
#              1 = at least one mount reported an error.

error_ids=""
# Only run if this is the only instance of this script running
# note: since this script forks to run pgrep, we need -eq 2 here
# shellcheck disable=SC2126
if [[ $(pgrep -a -f "${0}" | grep -v scriptherder | wc -l) -eq 2 ]]; then
  # We sleep a deterministic amount of time, which will be between 0 and
  # 128 minutes and always the same within a specific host, but will differ
  # between hosts (derived from the host MAC address; spreads fleet load).
  sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk '{print $1}') / 2))m
  errors=''
  for container in $(/usr/local/bin/get_containers); do
    error_ids="${error_ids} ${container}: "
    for id in $(/usr/local/bin/nocc "${container}" files_external:list --all --output json | jq '.[].mount_id'); do
      # grep both filters for errors and forwards the matching lines to our
      # stdout; its exit status drives the branch directly.
      if /usr/local/bin/nocc "${container}" files_external:scan "${id}" | grep Error; then
        errors="${errors} ${id}"
        error_ids="${error_ids} ${id}"
      fi
    done
  done
else
  echo "Another instance of this script is already running, exiting"
  pgrep -a -f "${0}" | grep -v scriptherder
  exit 0
fi

if [[ -n "${errors}" ]]; then
  echo "Errors found in the following mounts: ${error_ids}"
  exit 1
fi
echo "No errors found"
exit 0
|
69
templates/jupyter_site/docker-compose.erb.yaml
Normal file
69
templates/jupyter_site/docker-compose.erb.yaml
Normal file
|
@ -0,0 +1,69 @@
|
|||
# docker-compose for the public Jupyter site.
#  - nginx: nginx-proxy, terminates TLS and routes requests by VIRTUAL_HOST
#  - acme:  acme-companion, provisions Let's Encrypt certificates for nginx
#  - web:   the site itself, reachable only via the internal network
version: "3.7"

services:
  nginx:
    image: docker.io/nginxproxy/nginx-proxy:latest
    container_name: nginx
    networks:
      - internal_network
      - external_network
    dns:
      - 89.32.32.32
    ports:
      - "80:80"
      - "443:443"
    labels:
      # Marks this container as the proxy the acme companion should manage.
      com.github.jrcs.letsencrypt_nginx_proxy_companion.nginx_proxy: "true"
    volumes:
      - /opt/jupyter_site/nginx/certs:/etc/nginx/certs:ro
      - /opt/jupyter_site/nginx/conf:/etc/nginx/conf.d
      - /opt/jupyter_site/nginx/dhparam:/etc/nginx/dhparam
      - /opt/jupyter_site/nginx/html:/usr/share/nginx/html
      - /opt/jupyter_site/nginx/vhost:/etc/nginx/vhost.d
      # Read-only docker socket so nginx-proxy can discover containers.
      - /var/run/docker.sock:/tmp/docker.sock:ro
    environment:
      - ENABLE_IPV6=true
    restart: unless-stopped

  acme:
    image: docker.io/nginxproxy/acme-companion:latest
    container_name: acme
    networks:
      - external_network
    dns:
      - 89.32.32.32
    volumes:
      - /opt/jupyter_site/nginx/acme:/etc/acme.sh
      # Certificates are written here; nginx mounts the same path read-only.
      - /opt/jupyter_site/nginx/certs:/etc/nginx/certs:rw
      - /opt/jupyter_site/nginx/conf:/etc/nginx/conf.d
      - /opt/jupyter_site/nginx/dhparam:/etc/nginx/dhparam
      - /opt/jupyter_site/nginx/html:/usr/share/nginx/html
      - /opt/jupyter_site/nginx/vhost:/etc/nginx/vhost.d:rw
      - /var/run/docker.sock:/var/run/docker.sock:ro
    environment:
      - NGINX_PROXY_CONTAINER=nginx
      - DEFAULT_EMAIL=drive@sunet.se
    depends_on:
      - nginx
    restart: unless-stopped

  web:
    image: docker.sunet.se/drive/jupyter-site:<%= @site_version %>
    container_name: web
    restart: always
    networks:
      - internal_network
    ports:
      - "127.0.0.1:3000:3000"
    dns:
      - 89.32.32.32
    environment:
      # Consumed by nginx-proxy / acme-companion for routing and certificates.
      - VIRTUAL_HOST=<%= @domain %>
      - VIRTUAL_PATH=/
      - VIRTUAL_PORT=3000
      - LETSENCRYPT_HOST=<%= @domain %>

networks:
  external_network:
  internal_network:
    internal: true
|
|
@ -1,16 +0,0 @@
|
|||
|
||||
<?php
|
||||
|
||||
|
||||
$CONFIG = [
|
||||
'DB' => [
|
||||
'host' => "<%= @dbhost %>",
|
||||
'db' => "lookup" ,
|
||||
'user' => "lookup",
|
||||
'pass' => "<%= @mysql_user_password %>",
|
||||
],
|
||||
|
||||
'GLOBAL_SCALE' => true,
|
||||
|
||||
'AUTH_KEY' => "<%= @gss_jwt_key %>",
|
||||
];
|
|
@ -1,24 +0,0 @@
|
|||
version: '3.2'
|
||||
|
||||
services:
|
||||
|
||||
app:
|
||||
image: docker.sunet.se/drive/nextcloud-lookup:<%= @lookup_version %>
|
||||
restart: always
|
||||
volumes:
|
||||
- /opt/lookup/config.php:/var/www/html/config/config.php
|
||||
networks:
|
||||
- default
|
||||
- proxysql_proxysql
|
||||
dns:
|
||||
- 89.46.20.75
|
||||
- 89.46.21.29
|
||||
- 89.32.32.32
|
||||
ports:
|
||||
- 443:443
|
||||
command: apache2-foreground
|
||||
tty: true
|
||||
|
||||
networks:
|
||||
proxysql_proxysql:
|
||||
external: true
|
|
@ -1,62 +0,0 @@
|
|||
SET SQL_MODE = "NO_AUTO_VALUE_ON_ZERO";
|
||||
SET time_zone = "+00:00";
|
||||
|
||||
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
|
||||
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
|
||||
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
|
||||
/*!40101 SET NAMES utf8mb4 */;
|
||||
|
||||
CREATE DATABASE IF NOT EXISTS `lookup` DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
|
||||
USE `lookup`;
|
||||
|
||||
CREATE USER 'lookup'@'%' IDENTIFIED BY '<%= @mysql_user_password %>';
|
||||
GRANT ALL PRIVILEGES ON lookup.* TO 'lookup'@'%' IDENTIFIED BY '<%= @mysql_user_password %>';
|
||||
|
||||
|
||||
DROP TABLE IF EXISTS `emailValidation`;
|
||||
CREATE TABLE IF NOT EXISTS `emailValidation` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`storeId` int(11) NOT NULL,
|
||||
`token` varchar(16) COLLATE utf8mb4_unicode_ci NOT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `token` (`token`),
|
||||
KEY `storeId` (`storeId`)
|
||||
) ENGINE=InnoDB AUTO_INCREMENT=16 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
|
||||
|
||||
DROP TABLE IF EXISTS `store`;
|
||||
CREATE TABLE IF NOT EXISTS `store` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`userId` int(11) NOT NULL,
|
||||
`k` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
|
||||
`v` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
|
||||
`valid` tinyint(1) NOT NULL DEFAULT '0',
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `key` (`k`(191)),
|
||||
KEY `value` (`v`(191)),
|
||||
KEY `userId` (`userId`)
|
||||
) ENGINE=InnoDB AUTO_INCREMENT=51 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
|
||||
|
||||
DROP TABLE IF EXISTS `users`;
|
||||
CREATE TABLE IF NOT EXISTS `users` (
|
||||
`id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT,
|
||||
`federationId` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
|
||||
`timestamp` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `federationId` (`federationId`(191))
|
||||
) ENGINE=InnoDB AUTO_INCREMENT=15 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
|
||||
|
||||
DROP TABLE IF EXISTS `toVerify`;
|
||||
CREATE TABLE IF NOT EXISTS `toVerify` (
|
||||
`id` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`userId` int(11) NOT NULL,
|
||||
`storeId` int(11) NOT NULL,
|
||||
`property` varchar(512) COLLATE utf8mb4_unicode_ci NOT NULL,
|
||||
`location` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
|
||||
`tries` int(11) NOT NULL,
|
||||
PRIMARY KEY (`id`)
|
||||
) ENGINE=InnoDB AUTO_INCREMENT=16 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
|
||||
|
||||
|
||||
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
|
||||
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
|
||||
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
|
3
templates/mariadb/05-roundcube.sql.erb
Normal file
3
templates/mariadb/05-roundcube.sql.erb
Normal file
|
@ -0,0 +1,3 @@
|
|||
-- Bootstrap the Roundcube webmail database and its application user.
CREATE SCHEMA roundcubemail;
CREATE USER 'roundcube'@'%' IDENTIFIED BY '<%= @roundcube_password %>';
-- NOTE(review): "GRANT ... IDENTIFIED BY" is deprecated in newer MariaDB
-- releases — confirm the target server version accepts it.
GRANT ALL PRIVILEGES ON roundcubemail.* TO 'roundcube'@'%' IDENTIFIED BY '<%= @roundcube_password %>';
|
1
templates/mariadb/custconfig.json.erb
Normal file
1
templates/mariadb/custconfig.json.erb
Normal file
|
@ -0,0 +1 @@
|
|||
<%= @custdata.to_json %>
|
|
@ -4,6 +4,7 @@ services:
|
|||
|
||||
db:
|
||||
image: docker.sunet.se/drive/mariadb:<%= @mariadb_version %>
|
||||
container_name: mariadb_db_1
|
||||
restart: always
|
||||
volumes:
|
||||
- /etc/mariadb/backups:/backups
|
||||
|
@ -20,11 +21,6 @@ services:
|
|||
- MYSQL_ROOT_PASSWORD=<%= @mysql_root_password %>
|
||||
- BOOTSTRAP=<%= @bootstrap %>
|
||||
- FORCE_BOOTSTRAP=0
|
||||
ports:
|
||||
- 3306:3306
|
||||
- 4444:4444
|
||||
- 4567:4567
|
||||
- 4568:4568
|
||||
command: "--wsrep_cluster_address=gcomm://<%= @db_ip[0] %>,<%= @db_ip[1] %>,<%= @db_ip[2] %>"
|
||||
command: "--wsrep_cluster_address=gcomm://<%= @db_ip.join(',') %>"
|
||||
tty: true
|
||||
|
||||
|
|
32
templates/mariadb/genuserdeplists.sh.erb
Normal file
32
templates/mariadb/genuserdeplists.sh.erb
Normal file
|
@ -0,0 +1,32 @@
|
|||
#!/bin/bash
<%# Rendered from genuserdeplists.sh.erb.
    For every customer with "billdomains" configured for this environment,
    dump per-department user lists as JSON via listusersbydep.sh and upload
    them with rclone to the statistics bucket; finally upload the customer
    metadata file. Exits non-zero if any dump or upload failed. -%>

<% basedir="statistics:drive-server-coms" -%>
<% cupath="/opt/mariadb/statistics/users/" -%>
<% custdata="/opt/mariadb/statistics/custdata.json" -%>
status=0

<% @custdata.each do |cust,data| -%>
<%# NOTE(review): the comment line below is emitted for every customer,
    including those that DO have billing departments — confirm placement. -%>
#Customer <%= cust %> has no billing departments.
<% if defined?(data[@environment]["billdomains"]) && data[@environment]["billdomains"] -%>
mkdir -p /opt/mariadb/statistics/users/<%= cust %>
chmod '0700' /opt/mariadb/statistics/users/<%= cust %>
<% data[@environment]["billdomains"].each do |dom| -%>
/root/tasks/listusersbydep.sh <%= cust %> <%= dom %> > /opt/mariadb/statistics/users/<%= cust %>/users-<%= dom.gsub(/[.]/, '-') %>.json
# Only upload when the generated file is valid JSON.
if jq . <%= cupath + cust %>/users-<%= dom.gsub(/[.]/, '-') %>.json &>/dev/null
then
timeout 30s rclone copy -c --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies <%= cupath + cust %>/users-<%= dom.gsub(/[.]/, '-') %>.json <%= basedir%>/<%= cust %>-<%= @environment%>/
[[ $? -eq 0 ]] || { status=1 ; echo "Error: Upload of user data failed." ; }
else
echo "Error in json data"
status=1
fi
<% end -%>
<% end -%>
<% end -%>

# Upload the customer metadata file if present.
if [[ -f <%= custdata %> ]]
then
timeout 30s rclone copy -c --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies <%= custdata %> <%= basedir%>/
fi

exit ${status}
|
24
templates/mariadb/listusersdep.sh.erb
Normal file
24
templates/mariadb/listusersdep.sh.erb
Normal file
|
@ -0,0 +1,24 @@
|
|||
#!/bin/bash
# Print the users of a customer whose account e-mail belongs to a given
# department domain, as a pretty-printed JSON object of {uid: displayname}.

function usage () {
printf "Usage:\t%s <customer> <department email domain>\n" "${0##*/}"
printf "Example:\t%s uu int.uu.se\n" "${0##*/}"
}

# Both arguments are required and restricted to [a-zA-Z0-9.]{1,200} — this
# keeps them safe to interpolate into the SQL statement below.
[[ -z "${1}" || -z "${2}" || ! "${1}" =~ ^[a-zA-Z0-9.]{1,200}$ || ! "${2}" =~ ^[a-zA-Z0-9.]{1,200}$ ]] && usage && exit 1

depdom="${2}"
customer="${1}"

# UNION of global-scale users and SAML users whose account e-mail ends in
# "@<depdom>". The $'…' segments preserve the single quotes needed by the
# shell inside the container while splicing in ${customer}/${depdom} from
# this shell; the final sed unescapes newlines in JSON_PRETTY output.
docker exec mariadb_db_1 /bin/bash -c 'mysql -p${MYSQL_ROOT_PASSWORD} -NB -e '\
$'\'select JSON_PRETTY(JSON_OBJECTAGG(uid,displayname)) from ('\
$' select nextcloud_'"${customer}"$'.oc_accounts.uid,nextcloud_'"${customer}"$'.oc_global_scale_users.displayname'\
$' from nextcloud_'"${customer}"$'.oc_accounts,nextcloud_'"${customer}"$'.oc_global_scale_users'\
$' where nextcloud_'"${customer}"$'.oc_accounts.uid = nextcloud_'"${customer}"$'.oc_global_scale_users.uid'\
$' AND JSON_EXTRACT(nextcloud_'"${customer}"$'.oc_accounts.data, "$.email.value") like "%@'"${depdom}"$'\\\""'\
$' UNION'\
$' select nextcloud_'"${customer}"$'.oc_accounts.uid,nextcloud_'"${customer}"$'.oc_user_saml_users.displayname'\
$' from nextcloud_'"${customer}"$'.oc_accounts,nextcloud_'"${customer}"$'.oc_user_saml_users'\
$' where nextcloud_'"${customer}"$'.oc_accounts.uid = nextcloud_'"${customer}"$'.oc_user_saml_users.uid'\
$' AND JSON_EXTRACT(nextcloud_'"${customer}"$'.oc_accounts.data, "$.email.value") like "%@'"${depdom}"$'\\\"") as users\'' \
| sed 's/\\n/\n/g'
|
4
templates/mariadb/mysql.erb.sh
Normal file
4
templates/mariadb/mysql.erb.sh
Normal file
|
@ -0,0 +1,4 @@
|
|||
#!/bin/bash
# Open a MySQL root session inside the mariadb container.
# The root password is taken from the first "environment" entry of the
# compose file (expected form: MYSQL_ROOT_PASSWORD=<password>).
pw=$(yq -r '.services.db.environment[0]' /opt/mariadb/docker-compose.yml)
# Strip everything up to and including the first '=' — unlike
# awk -F '=' '{print $2}', this keeps passwords that themselves contain '='.
pw=${pw#*=}

docker exec -ti mariadb_db_1 mysql -u root -p"${pw}" "${@}"
|
3
templates/mariadb/purge-binlogs.erb.sh
Normal file
3
templates/mariadb/purge-binlogs.erb.sh
Normal file
|
@ -0,0 +1,3 @@
|
|||
#!/bin/bash
# Purge MariaDB binary logs older than six hours to bound disk usage.
six_hours_ago=$(date -d "6 hours ago" "+%Y-%m-%d %H:%M:%S")
docker exec mariadb_db_1 mysql -u root -p'<%= @mysql_root_password %>' -N -B -e "PURGE BINARY LOGS BEFORE '${six_hours_ago}'"
|
|
@ -1,6 +1,6 @@
|
|||
#!/bin/bash
|
||||
|
||||
result="$(docker exec mariadbbackup_mariadb_backup_1 mysql -p<%= @mysql_root_password %> -BN -e 'show status like "slave_running"')"
|
||||
result="$(docker exec -u root mariadb_backup_mariadb_backup_1 mysql -p<%= @mysql_root_password %> -BN -e 'show status like "slave_running"')"
|
||||
if [[ "${result}" == "Slave_running ON" ]]; then
|
||||
echo "OK: Replica running"
|
||||
exit 0
|
||||
|
|
|
@ -8,10 +8,12 @@ mkdir -p "${backup_dir}"
|
|||
if [[ -z ${customer} ]]; then
|
||||
buopts="--slave-info --safe-slave-backup"
|
||||
dumpopts="--dump-slave"
|
||||
mysql -p${MYSQL_ROOT_PASSWORD} -e "stop slave"
|
||||
mysql -p"${MYSQL_ROOT_PASSWORD}" -u root -e "stop slave"
|
||||
fi
|
||||
mariadb-backup --backup ${buopts} -u root -p${MYSQL_ROOT_PASSWORD} --stream=xbstream | gzip >"${backup_dir}/${stream_name}"
|
||||
mysqldump --all-databases --single-transaction ${dumpopts} -u root -p${MYSQL_ROOT_PASSWORD} | gzip >"${backup_dir}/${dump_name}"
|
||||
# shellcheck disable=SC2086
|
||||
mariadb-backup --backup ${buopts} -u root -p"${MYSQL_ROOT_PASSWORD}" --stream=xbstream | gzip >"${backup_dir}/${stream_name}"
|
||||
# shellcheck disable=SC2086
|
||||
mysqldump --all-databases --single-transaction ${dumpopts} -u root -p"${MYSQL_ROOT_PASSWORD}" | gzip >"${backup_dir}/${dump_name}"
|
||||
if [[ -z ${customer} ]]; then
|
||||
mysql -p${MYSQL_ROOT_PASSWORD} -e "start slave"
|
||||
mysql -p"${MYSQL_ROOT_PASSWORD}" -u root -e "start slave"
|
||||
fi
|
||||
|
|
|
@ -3,7 +3,8 @@ version: '3.2'
|
|||
services:
|
||||
|
||||
mariadb_backup:
|
||||
image: docker.sunet.se/drive/mariadb
|
||||
image: docker.sunet.se/drive/mariadb:<%= @mariadb_version %>
|
||||
container_name: mariadb_backup_mariadb_backup_1
|
||||
dns:
|
||||
- 89.46.20.75
|
||||
- 89.46.21.29
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
container=${1}
|
||||
customer=${2}
|
||||
if [[ -z ${container} ]]; then
|
||||
container='mariadbbackup_mariadb_backup_1'
|
||||
container='mariadb_backup_mariadb_backup_1'
|
||||
fi
|
||||
if [[ -z ${customer} ]]; then
|
||||
location='<%= @location %>'
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
container=${1}
|
||||
customer=${2}
|
||||
if [[ -z ${container} ]]; then
|
||||
container='mariadbbackup_mariadb_backup_1'
|
||||
container='mariadb_backup_mariadb_backup_1'
|
||||
fi
|
||||
if [[ -z ${customer} ]]; then
|
||||
location='<%= @location %>'
|
||||
|
@ -13,11 +13,18 @@ dexec="docker exec ${container}"
|
|||
|
||||
password=$(${dexec} env | grep MYSQL_ROOT_PASSWORD | awk -F '=' '{print $2}')
|
||||
|
||||
mysql="${dexec} mysql -p${password}"
|
||||
mysql="${dexec} mysql -p${password} -u root"
|
||||
|
||||
users="$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_global_scale_users')"
|
||||
users="${users}
|
||||
$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_users')"
|
||||
if [[ "$(${mysql} -NB -e 'select exists(select * from information_schema.TABLES where TABLE_SCHEMA = "nextcloud" and TABLE_NAME = "oc_global_scale_users")')" == "1" ]]
|
||||
then
|
||||
users="$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_global_scale_users')"
|
||||
fi
|
||||
users="${users}"$'\n'"$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_users')"
|
||||
if [[ "$(${mysql} -NB -e 'select exists(select * from information_schema.TABLES where TABLE_SCHEMA = "nextcloud" and TABLE_NAME = "oc_user_saml_users")')" == "1" ]]
|
||||
then
|
||||
users="${users}"$'\n'"$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_user_saml_users')"
|
||||
fi
|
||||
users="$(echo "${users}" | sort | uniq)"
|
||||
|
||||
project="statistics"
|
||||
bucket="drive-server-coms"
|
||||
|
@ -25,7 +32,7 @@ base_dir="${project}:${bucket}"
|
|||
mountpoint="/opt/statistics"
|
||||
customer_dir="${mountpoint}/${location}"
|
||||
mkdir -p "${customer_dir}"
|
||||
rclone mkdir "${base_dir}/${location}"
|
||||
rclone mkdir --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies "${base_dir}/${location}"
|
||||
|
||||
echo "${users}" | awk 'BEGIN{print "{"} {print t "\""$1"\": \""$2"\""} {t=","} END{print "}"}' | jq . >"${customer_dir}/users.json"
|
||||
status=0
|
||||
|
@ -34,7 +41,7 @@ if ! jq . "${customer_dir}/users.json" &>/dev/null; then
|
|||
fi
|
||||
if [[ ${status} -eq 0 ]]; then
|
||||
# something is wrong if we cant copy the file in 30 seconds, so we should note that
|
||||
if ! timeout 30s rclone copy --use-cookies "${customer_dir}/users.json" "${base_dir}/${location}/"; then
|
||||
if ! timeout 30s rclone copy -c --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies "${customer_dir}/users.json" "${base_dir}/${location}/"; then
|
||||
status=1
|
||||
fi
|
||||
fi
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
[statistics]
|
||||
type = webdav
|
||||
url = https://sunet.drive.sunet.se/remote.php/dav/files/_script/
|
||||
url = https://89.45.236.246/remote.php/dav/files/_script/
|
||||
vendor = nextcloud
|
||||
user = _script
|
||||
pass = <%= @statistics_secret %>
|
||||
|
|
|
@ -1,3 +1,3 @@
|
|||
#!/bin/bash
|
||||
|
||||
docker exec mariadbbackup_mariadb_backup_1 mysql -u root -p'<%= @mysql_root_password %>' -N -B -e "show status like 'Slave_running'"
|
||||
docker exec mariadb_backup_mariadb_backup_1 mysql -u root -p'<%= @mysql_root_password %>' -N -B -e "show status like 'Slave_running'"
|
||||
|
|
|
@ -15,7 +15,7 @@
|
|||
|
||||
#Modify these variables for your environment
|
||||
MY_NAEMON_HOSTNAME="monitor.drive.sunet.se"
|
||||
SLACK_URL="https://hooks.slack.com/services/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"
|
||||
SLACK_URL="<%= @slack_url %>"
|
||||
|
||||
#Set the message icon based on Nagios service state
|
||||
if [ "$SERVICESTATE" = "CRITICAL" ]; then
|
||||
|
|
8
templates/monitor/sunetdrive_extra_hostgroups.cfg.erb
Normal file
8
templates/monitor/sunetdrive_extra_hostgroups.cfg.erb
Normal file
|
@ -0,0 +1,8 @@
|
|||
<%# Render one Naemon hostgroup definition per entry in @extra_host_groups
    (hash of group name => comma-separated member host names). -%>
<% @extra_host_groups.each do |group, members| -%>
# <%= group %>
define hostgroup {
    hostgroup_name          <%= group %>
    alias                   <%= group %>
    members                 <%= members %>
}
<% end -%>
|
|
@ -131,39 +131,13 @@ define host {
|
|||
<% end -%>
|
||||
use monitor-site
|
||||
}
|
||||
<% if site.match('lookup') %>
|
||||
define service {
|
||||
notes_url https://<%= site %>
|
||||
action_url /grafana/dashboard/script/histou.js?host=$HOSTNAME$&service=$SERVICEDISPLAYNAME$&theme=light&annotations=true
|
||||
check_command check_https
|
||||
check_interval 5
|
||||
check_period 24x7
|
||||
<% if @environment == 'prod' %>
|
||||
contacts slack
|
||||
<% else -%>
|
||||
contact_groups naemon-admins
|
||||
<% end -%>
|
||||
host_name <%= site %>
|
||||
max_check_attempts 3
|
||||
notification_interval 60
|
||||
notification_period 24x7
|
||||
retry_interval 1
|
||||
service_description HTTPS
|
||||
<% if site.match('test') -%>
|
||||
servicegroups test-sites
|
||||
<% else -%>
|
||||
servicegroups prod-sites
|
||||
<% end -%>
|
||||
}
|
||||
<% end -%>
|
||||
<% unless site.match('lookup') %>
|
||||
define service {
|
||||
notes_url https://<%= site %>/status.php
|
||||
action_url /grafana/dashboard/script/histou.js?host=$HOSTNAME$&service=$SERVICEDISPLAYNAME$&theme=light&annotations=true
|
||||
check_command check_nextcloud
|
||||
check_interval 5
|
||||
check_period 24x7
|
||||
<% cur_cust = site.sub('/\.drive.*/','') %>
|
||||
<% cur_cust = site.gsub(/\.drive.*/,'') %>
|
||||
# 'check_nextcloud' command definition
|
||||
<% if @environment == 'prod' and not site.match('test') and @fullnodes.include?(cur_cust) %>
|
||||
contacts slack
|
||||
|
@ -186,10 +160,12 @@ define service {
|
|||
notes_url https://<%= site %>/status.php
|
||||
action_url /grafana/dashboard/script/histou.js?host=$HOSTNAME$&service=$SERVICEDISPLAYNAME$&theme=light&annotations=true
|
||||
<% if site.match('test') -%>
|
||||
check_command check_nextcloud_version!'<%= @nextcloud_version_test %>'
|
||||
<% version_variable = 'nextcloud_version_test_' << cur_cust -%>
|
||||
<% else -%>
|
||||
check_command check_nextcloud_version!'<%= @nextcloud_version_prod %>'
|
||||
<% version_variable = 'nextcloud_version_prod_' << cur_cust -%>
|
||||
<% end -%>
|
||||
<% version = String(scope.lookupvar(version_variable)).gsub(/-[0-9]+/,'') -%>
|
||||
check_command check_nextcloud_version!'<%= version %>'
|
||||
check_interval 5
|
||||
check_period 24x7
|
||||
contact_groups naemon-admins
|
||||
|
@ -206,4 +182,3 @@ define service {
|
|||
<% end -%>
|
||||
}
|
||||
<% end -%>
|
||||
<% end -%>
|
||||
|
|
|
@ -3,10 +3,12 @@
|
|||
config_php='/var/www/html/config/config.php'
|
||||
dbhost="<%= @dbhost %>"
|
||||
mysql_user_password="<%= @mysql_user_password %>"
|
||||
admin_password="<%= @admin_password %>"
|
||||
location="<%= @location %>"
|
||||
bucket="<%= @s3_bucket %>"
|
||||
customer="<%= @customer %>"
|
||||
echo "Setting temp admin password"
|
||||
apt update && apt install -y apg
|
||||
admin_password="$(apg -m 40 | head -1)"
|
||||
|
||||
/usr/bin/mysql -e "drop database nextcloud" -u nextcloud -p"${mysql_user_password}" -h "${dbhost}" >/dev/null 2>&1
|
||||
/usr/bin/mysql -e "create database nextcloud" -u nextcloud -p"${mysql_user_password}" -h "${dbhost}" >/dev/null 2>&1
|
||||
|
@ -35,6 +37,11 @@ EOF
|
|||
instanceid=$(grep -E "^ 'instanceid'" ${config_php} | awk -F "'" '{print $4}')
|
||||
secret=$(grep -E "^ 'secret'" ${config_php} | awk -F "'" '{print $4}')
|
||||
passwordsalt=$(grep -E "^ 'passwordsalt'" ${config_php} | awk -F "'" '{print $4}')
|
||||
echo "Now delete the admin user:"
|
||||
echo " occ <container> user:delete admin"
|
||||
echo "and then create a new admin user:"
|
||||
echo " /usr/local/bin/add_admin_user <username> <email address> <container>"
|
||||
echo ""
|
||||
echo "${customer}_instanceid: DEC::PKCS7[${instanceid}]!"
|
||||
echo "${customer}_secret: DEC::PKCS7[${secret}]!"
|
||||
echo "${customer}_passwordsalt: DEC::PKCS7[${passwordsalt}]!"
|
||||
|
|
|
@ -5,7 +5,7 @@ no_files=30 # Keep this many files as an archive, script is run once a week
|
|||
# a specific host, but will differ between hosts
|
||||
sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk '{print $1}') / 2))m
|
||||
|
||||
for logfile in $(ls /opt/multinode/*/{nextcloud.log,server/server.log}); do
|
||||
for logfile in $(ls /opt/multinode/*/{nextcloud.log,audit.log,server/server.log}); do
|
||||
if [[ -f ${logfile}.gz.${no_files} ]]; then
|
||||
rm ${logfile}.gz.${no_files}
|
||||
fi
|
||||
|
|
|
@ -11,11 +11,10 @@ services:
|
|||
volumes:
|
||||
- <%= @redis_conf_dir %>:/data
|
||||
command: redis-server /data/redis.conf --loglevel verbose
|
||||
restart: always
|
||||
networks:
|
||||
- mariadb<%= @customer %>_<%= @customer %>
|
||||
- proxysql_proxysql
|
||||
restart: always
|
||||
|
||||
networks:
|
||||
mariadb<%= @customer %>_<%= @customer %>:
|
||||
proxysql_proxysql:
|
||||
external: true
|
||||
|
||||
|
|
|
@ -13,19 +13,21 @@ services:
|
|||
- /opt/nextcloud/cli.php.ini:/etc/php/8.0/cli/php.ini
|
||||
- <%= @config_php_path %>:/var/www/html/config/config.php
|
||||
- <%= @nextcloud_log_path %>:/var/www/html/data/nextcloud.log
|
||||
- <%= @audit_log_path %>:/var/www/html/data/audit.log
|
||||
- <%= @rclone_conf_path %>:/rclone.conf
|
||||
|
||||
networks:
|
||||
- default
|
||||
- mariadb<%= @customer %>_<%= @customer %>
|
||||
- proxysql_proxysql
|
||||
dns:
|
||||
- 89.46.20.75
|
||||
- 89.46.21.29
|
||||
- 89.32.32.32
|
||||
ports:
|
||||
- <%= @https_port %>:443
|
||||
command: apachectl -D FOREGROUND
|
||||
command: sh -c 'tail -F /var/www/html/data/nextcloud.log /var/www/html/data/audit.log| tee -a /proc/1/fd/2 & apachectl -D FOREGROUND'
|
||||
tty: true
|
||||
|
||||
networks:
|
||||
mariadb<%= @customer %>_<%= @customer %>:
|
||||
proxysql_proxysql:
|
||||
external: true
|
||||
|
|
4
templates/multinode/get_non_paying_customers.erb.sh
Normal file
4
templates/multinode/get_non_paying_customers.erb.sh
Normal file
|
@ -0,0 +1,4 @@
|
|||
#!/bin/bash
# List the multinode customers mapped to this host that are NOT in the
# paying ("singlenodes") list in hiera.
me=$(hostname -s)
# Build an alternation regex "cust1|cust2|..." from the singlenodes list.
# NOTE: if the list is empty the regex is empty and grep -Ev matches
# everything, producing no output — same behavior as before.
paying_re=$(yq -r '.singlenodes[]' /etc/hiera/data/common.yaml | sed -e 's/^- //' -e 's/$/|/' | tr -d '\n' | sed 's/|$//')
# Customers mapped to this server, minus the paying ones.
yq -r '.multinode_mapping| to_entries |map({name: .key} + .value)| map(select(.server == "'"${me}"'")) |.[] |.name' /etc/hiera/data/common.yaml | \
  grep -Ev "${paying_re}"
|
5
templates/multinode/get_paying_customers.erb.sh
Normal file
5
templates/multinode/get_paying_customers.erb.sh
Normal file
|
@ -0,0 +1,5 @@
|
|||
#!/bin/bash
# List the multinode customers mapped to this host that ARE in the paying
# ("singlenodes") list in hiera.

me="$(hostname -s)"
# Build an alternation regex "cust1|cust2|..." from the singlenodes list.
# NOTE: if the list is empty the regex is empty and grep -E matches every
# line — same behavior as before.
paying_re=$(yq -r '.singlenodes[]' /etc/hiera/data/common.yaml | sed -e 's/^- //' -e 's/$/|/' | tr -d '\n' | sed 's/|$//')
# Customers mapped to this server, restricted to the paying ones.
yq -r '.multinode_mapping| to_entries |map({name: .key} + .value)| map(select(.server == "'"${me}"'")) |.[] |.name' /etc/hiera/data/common.yaml |
  grep -E "${paying_re}"
|
142
templates/multinode/proxysql.cnf.erb
Normal file
142
templates/multinode/proxysql.cnf.erb
Normal file
|
@ -0,0 +1,142 @@
|
|||
datadir="/var/lib/proxysql"
|
||||
|
||||
# ProxySQL admin configuration section
|
||||
admin_variables=
|
||||
{
|
||||
admin_credentials="admin:<%= @admin_password%>;cluster_admin:<%= @cluster_admin_password %>"
|
||||
mysql_ifaces="0.0.0.0:6032"
|
||||
refresh_interval=2000
|
||||
web_enabled=true
|
||||
web_port=6080
|
||||
stats_credentials="stats:<%= @admin_password %>"
|
||||
cluster_username="cluster_admin"
|
||||
cluster_password="<%= @cluster_admin_password %>"
|
||||
cluster_check_interval_ms=200
|
||||
cluster_check_status_frequency=100
|
||||
cluster_mysql_query_rules_save_to_disk=true
|
||||
cluster_mysql_servers_save_to_disk=true
|
||||
cluster_mysql_users_save_to_disk=true
|
||||
cluster_proxysql_servers_save_to_disk=true
|
||||
cluster_mysql_query_rules_diffs_before_sync=3
|
||||
cluster_mysql_servers_diffs_before_sync=3
|
||||
cluster_mysql_users_diffs_before_sync=3
|
||||
cluster_proxysql_servers_diffs_before_sync=3
|
||||
}
|
||||
|
||||
# MySQL/MariaDB related section
|
||||
mysql_variables=
|
||||
{
|
||||
threads=4
|
||||
max_connections=2048
|
||||
default_query_delay=0
|
||||
default_query_timeout=36000000
|
||||
have_compress=true
|
||||
poll_timeout=2000
|
||||
interfaces="0.0.0.0:3306;/tmp/proxysql.sock"
|
||||
default_schema="information_schema"
|
||||
stacksize=1048576
|
||||
server_version="10.5.5"
|
||||
connect_timeout_server=10000
|
||||
monitor_history=60000
|
||||
monitor_connect_interval=2000
|
||||
monitor_ping_interval=2000
|
||||
ping_interval_server_msec=10000
|
||||
ping_timeout_server=200
|
||||
commands_stats=true
|
||||
sessions_sort=true
|
||||
monitor_username="proxysql"
|
||||
monitor_password="<%= @monitor_password %>"
|
||||
monitor_galera_healthcheck_interval=2000
|
||||
monitor_galera_healthcheck_timeout=800
|
||||
}
|
||||
|
||||
|
||||
# Specify all ProxySQL hosts here
|
||||
proxysql_servers =
|
||||
(
|
||||
<%- index = 0 -%>
|
||||
<%- @nextcloud_ip.each do |appserver| -%>
|
||||
<%- index += 1 -%>
|
||||
{
|
||||
hostname="<%= appserver %>"
|
||||
port=6032
|
||||
comment="proxysql<%= index %>"
|
||||
},
|
||||
<%- end -%>
|
||||
)
|
||||
|
||||
# HG10 - single-writer
|
||||
# HF30 - multi-writer
|
||||
mysql_galera_hostgroups =
|
||||
(
|
||||
{
|
||||
|
||||
writer_hostgroup=10
|
||||
backup_writer_hostgroup=20
|
||||
reader_hostgroup=30
|
||||
offline_hostgroup=9999
|
||||
max_writers=1
|
||||
writer_is_also_reader=1
|
||||
max_transactions_behind=0
|
||||
active=1
|
||||
}
|
||||
)
|
||||
|
||||
# List all MariaDB Galera nodes here
|
||||
mysql_servers =
|
||||
(
|
||||
<%- @db_ip.each do |db| -%>
|
||||
{
|
||||
address="<%= db %>"
|
||||
port=3306
|
||||
hostgroup=10
|
||||
max_connections=100
|
||||
},
|
||||
<%- end -%>
|
||||
)
|
||||
|
||||
# Default query rules:
|
||||
# - All writes -> HG10 (single-writer)
|
||||
# - All reads -> HG30 (multi-writer)
|
||||
mysql_query_rules =
|
||||
(
|
||||
{
|
||||
rule_id=100
|
||||
active=1
|
||||
match_pattern="^SELECT .* FOR UPDATE"
|
||||
destination_hostgroup=10
|
||||
apply=1
|
||||
},
|
||||
{
|
||||
rule_id=200
|
||||
active=1
|
||||
match_pattern="^SELECT .*"
|
||||
destination_hostgroup=30
|
||||
apply=1
|
||||
},
|
||||
{
|
||||
rule_id=300
|
||||
active=1
|
||||
match_pattern=".*"
|
||||
destination_hostgroup=10
|
||||
apply=1
|
||||
}
|
||||
)
|
||||
|
||||
# All MySQL user that you want to pass through this instance
|
||||
# - The MySQL user must be created first in the DB server and grant it to access from this ProxySQL host
|
||||
mysql_users =
|
||||
(
|
||||
|
||||
<%- index = 0 -%>
|
||||
<%- @allnames.each do |customer| -%>
|
||||
{
|
||||
username="nextcloud_<%= customer %>"
|
||||
password="<%= @passwords[index] %>"
|
||||
default_hostgroup=10
|
||||
transaction_persistent=<%= @transaction_persistent %>
|
||||
active=1
|
||||
},
|
||||
<%- index += 1 -%>
|
||||
<%- end -%>
|
||||
)
|
21
templates/multinode/restart_and_prune.erb.sh
Normal file
21
templates/multinode/restart_and_prune.erb.sh
Normal file
|
@ -0,0 +1,21 @@
|
|||
#!/bin/bash
|
||||
|
||||
include_paying="${1}"
|
||||
customers="$(/usr/local/bin/get_non_paying_customers)"
|
||||
echo "Starting cleanup: $(date)"
|
||||
if [[ -n ${include_paying} ]]; then
|
||||
echo "Including paying customers: $(date)"
|
||||
customers="${customers}
|
||||
$(/usr/local/bin/get_paying_customers)"
|
||||
fi
|
||||
touch /etc/no-automatic-cosmos
|
||||
for customer in ${customers}; do
|
||||
echo "Stopping ${customer}: $(date)"
|
||||
systemctl stop sunet-{redis,nextcloud}-"${customer}"
|
||||
echo "Pruning docker: $(date)"
|
||||
docker system prune -af --volumes
|
||||
echo "Starting ${customer}: $(date)"
|
||||
systemctl start sunet-{redis,nextcloud}-"${customer}"
|
||||
done
|
||||
rm /etc/no-automatic-cosmos
|
||||
echo "Cleanup done: $(date)"
|
2
templates/portal/config.erb.yaml
Normal file
2
templates/portal/config.erb.yaml
Normal file
|
@ -0,0 +1,2 @@
|
|||
---
|
||||
domain: "<%= @domain %>"
|
71
templates/portal/docker-compose.erb.yaml
Normal file
71
templates/portal/docker-compose.erb.yaml
Normal file
|
@ -0,0 +1,71 @@
|
|||
version: "3.7"
|
||||
|
||||
services:
|
||||
nginx:
|
||||
image: docker.io/nginxproxy/nginx-proxy:latest
|
||||
container_name: nginx
|
||||
networks:
|
||||
- internal_network
|
||||
- external_network
|
||||
dns:
|
||||
- 89.32.32.32
|
||||
ports:
|
||||
- "80:80"
|
||||
- "443:443"
|
||||
labels:
|
||||
com.github.jrcs.letsencrypt_nginx_proxy_companion.nginx_proxy: "true"
|
||||
volumes:
|
||||
- /opt/portal/nginx/certs:/etc/nginx/certs:ro
|
||||
- /opt/portal/nginx/conf:/etc/nginx/conf.d
|
||||
- /opt/portal/nginx/dhparam:/etc/nginx/dhparam
|
||||
- /opt/portal/nginx/html:/usr/share/nginx/html
|
||||
- /opt/portal/nginx/vhost:/etc/nginx/vhost.d
|
||||
- /var/run/docker.sock:/tmp/docker.sock:ro
|
||||
environment:
|
||||
- ENABLE_IPV6=true
|
||||
restart: unless-stopped
|
||||
|
||||
acme:
|
||||
image: docker.io/nginxproxy/acme-companion:latest
|
||||
container_name: acme
|
||||
networks:
|
||||
- external_network
|
||||
dns:
|
||||
- 89.32.32.32
|
||||
volumes:
|
||||
- /opt/portal/nginx/acme:/etc/acme.sh
|
||||
- /opt/portal/nginx/certs:/etc/nginx/certs:rw
|
||||
- /opt/portal/nginx/conf:/etc/nginx/conf.d
|
||||
- /opt/portal/nginx/dhparam:/etc/nginx/dhparam
|
||||
- /opt/portal/nginx/html:/usr/share/nginx/html
|
||||
- /opt/portal/nginx/vhost:/etc/nginx/vhost.d:rw
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
environment:
|
||||
- NGINX_PROXY_CONTAINER=nginx
|
||||
- DEFAULT_EMAIL=noc@sunet.se
|
||||
depends_on:
|
||||
- nginx
|
||||
restart: unless-stopped
|
||||
|
||||
portal:
|
||||
image: docker.sunet.se/drive/portal:<%= @portal_version %>
|
||||
container_name: portal
|
||||
restart: always
|
||||
networks:
|
||||
- internal_network
|
||||
ports:
|
||||
- "127.0.0.1:8080:8080"
|
||||
dns:
|
||||
- 89.32.32.32
|
||||
volumes:
|
||||
- /opt/portal/config.yaml:/app/config.yaml
|
||||
environment:
|
||||
- VIRTUAL_HOST=portal.<%= @domain %>
|
||||
- VIRTUAL_PATH=/
|
||||
- VIRTUAL_PORT=8080
|
||||
- LETSENCRYPT_HOST=portal.<%= @domain %>
|
||||
|
||||
networks:
|
||||
external_network:
|
||||
internal_network:
|
||||
internal: true
|
|
@ -4,18 +4,25 @@ services:
|
|||
|
||||
proxysql:
|
||||
image: docker.sunet.se/drive/proxysql:<%= @proxysql_version %>
|
||||
container_name: proxysql_proxysql_1
|
||||
<%- if @hostnet -%>
|
||||
network_mode: host
|
||||
<%- else -%>
|
||||
ports:
|
||||
- 3306:3306
|
||||
- 6032:6032
|
||||
- 6080:6080
|
||||
<%- end -%>
|
||||
environment:
|
||||
INITIALIZE: 1
|
||||
dns:
|
||||
- 89.46.20.75
|
||||
- 89.46.21.29
|
||||
- 89.32.32.32
|
||||
<%- if !@hostnet -%>
|
||||
networks:
|
||||
- proxysql
|
||||
<%- end -%>
|
||||
restart: unless-stopped
|
||||
volumes:
|
||||
- proxysql:/var/lib/proxysql
|
||||
|
@ -25,6 +32,8 @@ services:
|
|||
volumes:
|
||||
proxysql:
|
||||
|
||||
<%- if !@hostnet -%>
|
||||
networks:
|
||||
proxysql:
|
||||
driver: bridge
|
||||
<%- end -%>
|
||||
|
|
|
@ -1,8 +1,14 @@
|
|||
#!/bin/bash
|
||||
PATH="${PATH}:/usr/local/bin"
|
||||
restarted="false"
|
||||
domain=$(hostname -d)
|
||||
prefix="intern-db"
|
||||
if [[ ${domain} =~ ^drive ]]; then
|
||||
prefix="multinode-db"
|
||||
fi
|
||||
|
||||
for index in 1 2 3; do
|
||||
db_ip=$(host "intern-db${index}.$(hostname -d)" | awk '/has address/ {print $NF}')
|
||||
db_ip=$(host "${prefix}${index}.${domain}" | awk '/has address/ {print $NF}')
|
||||
result=$(proxysql "select * from main.mysql_servers where hostname = '${db_ip}' and hostgroup_id = 10")
|
||||
if [[ -z ${result} ]]; then
|
||||
query="INSERT INTO main.mysql_servers (hostgroup_id, hostname, max_connections, comment) VALUES( 10, '${db_ip}', 100, 'Inserted by script at $(date)')"
|
||||
|
|
|
@ -1,58 +0,0 @@
|
|||
# ~/.bashrc: executed by bash(1) for non-login shells.
|
||||
# see /usr/share/doc/bash/examples/startup-files (in the package bash-doc)
|
||||
# for examples
|
||||
|
||||
# If not running interactively, don't do anything
|
||||
[ -z "$PS1" ] && return
|
||||
|
||||
# don't put duplicate lines in the history. See bash(1) for more options
|
||||
# ... or force ignoredups and ignorespace
|
||||
HISTCONTROL=ignoredups:ignorespace
|
||||
|
||||
# append to the history file, don't overwrite it
|
||||
shopt -s histappend
|
||||
|
||||
# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
|
||||
HISTSIZE=1000
|
||||
HISTFILESIZE=2000
|
||||
|
||||
# check the window size after each command and, if necessary,
|
||||
# update the values of LINES and COLUMNS.
|
||||
shopt -s checkwinsize
|
||||
|
||||
# make less more friendly for non-text input files, see lesspipe(1)
|
||||
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"
|
||||
|
||||
# set variable identifying the chroot you work in (used in the prompt below)
|
||||
if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
|
||||
debian_chroot=$(cat /etc/debian_chroot)
|
||||
fi
|
||||
|
||||
# set a fancy prompt (non-color, unless we know we "want" color)
|
||||
case "$TERM" in
|
||||
xterm-color) color_prompt=yes;;
|
||||
esac
|
||||
|
||||
# uncomment for a colored prompt, if the terminal has the capability; turned
|
||||
# off by default to not distract the user: the focus in a terminal window
|
||||
# should be on the output of commands, not on the prompt
|
||||
#force_color_prompt=yes
|
||||
|
||||
if [ -n "$force_color_prompt" ]; then
|
||||
if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then
|
||||
# We have color support; assume it's compliant with Ecma-48
|
||||
# (ISO/IEC-6429). (Lack of such support is extremely rare, and such
|
||||
# a case would tend to support setf rather than setaf.)
|
||||
color_prompt=yes
|
||||
else
|
||||
color_prompt=
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "$color_prompt" = yes ]; then
|
||||
PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
|
||||
else
|
||||
PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '
|
||||
fi
|
||||
unset color_prompt force_color_prompt
|
||||
alias redis-cli="redis-cli -a $(grep requirepass /opt/redis/node-0/server.conf | awk '{print $2}' | sed 's/"//g') --no-auth-warning"
|
|
@ -5,7 +5,7 @@ redis_password="<%= @redis_password %>"
|
|||
for index in 1 2 3; do
|
||||
cur_host="redis${index}.$(hostname -d)"
|
||||
if [[ "${my_host}" == "${cur_host}" ]]; then
|
||||
ip="$(hostname -I | awk '{print $1}')"
|
||||
ip="$(facter networking.ip)"
|
||||
else
|
||||
ip="$(host "${cur_host}" | grep "has address" | awk '{print $NF}')"
|
||||
fi
|
||||
|
|
|
@ -3,17 +3,20 @@ version: '3.2'
|
|||
services:
|
||||
|
||||
reva-server:
|
||||
# image: docker.sunet.se/drive/sciencemesh-reva:v2.12.0
|
||||
image: michielbdejong/reva:mentix-fixes
|
||||
image: docker.sunet.se/drive/sciencemesh-reva:<%= @reva_version %>
|
||||
network_mode: host
|
||||
security_opt:
|
||||
- seccomp:unconfined
|
||||
dns:
|
||||
- 89.46.20.75
|
||||
- 89.46.21.29
|
||||
- 89.32.32.32
|
||||
environment:
|
||||
- CGO_ENABLED=1
|
||||
volumes:
|
||||
- /opt/reva/revad.toml:/etc/revad/revad.toml
|
||||
- /opt/reva/data:/var/tmp/reva
|
||||
- /opt/reva/ocm-providers.json:/etc/revad/ocm-providers.json
|
||||
- /opt/reva/test/drive.test.sunet.se.crt:/etc/revad/tls/drive.test.sunet.se.crt
|
||||
- /opt/reva/test/drive.test.sunet.se.key:/etc/revad/tls/drive.test.sunet.se.key
|
||||
- /opt/reva/rclone.conf:/root/.rclone.conf
|
||||
- /opt/reva/<%= @environment %>/<%= @domain %>.crt:/etc/revad/tls/<%= @domain %>.crt
|
||||
- /opt/reva/<%= @environment %>/<%= @domain %>.key:/etc/revad/tls/<%= @domain %>.key
|
||||
restart: always
|
||||
|
|
|
@ -1,14 +0,0 @@
|
|||
[
|
||||
{ "domain": "mesh.pondersource.org", "services": [
|
||||
{ "endpoint": { "type": { "name": "OCM" }, "path": "https://mesh.pondersource.org/ocm/" }, "host": "https://mesh.pondersource.org" },
|
||||
{ "endpoint": { "type": { "name": "Webdav" }, "path": "https://cloud.pondersource.org/remote.php/webdav/" }, "host": "https://cloud.pondersource.org" }
|
||||
] },
|
||||
{ "domain": "cs3mesh-iop.apps.dcw1.paas.psnc.pl", "services": [
|
||||
{ "endpoint": { "type": { "name": "OCM" }, "path": "https://cs3mesh-iop.apps.dcw1.paas.psnc.pl/ocm/" }, "host": "https://cs3mesh-iop.apps.dcw1.paas.psnc.pl" },
|
||||
{ "endpoint": { "type": { "name": "Webdav" }, "path": "https://cs3mesh-drive.apps.dcw1.paas.psnc.pl/remote.php/webdav/" }, "host": "https://cs3mesh-drive.apps.dcw1.paas.psnc.pl" }
|
||||
] },
|
||||
{ "domain": "<%= @reva_domain %>", "services": [
|
||||
{ "endpoint": { "type": { "name": "OCM" }, "path": "https://<%= @reva_domain%>/ocm/" }, "host": "https://<%= @reva_domain %>" },
|
||||
{ "endpoint": { "type": { "name": "Webdav" }, "path": "https://<%= @domain %>/remote.php/webdav/" }, "host": "https://<%= @domain %>" }
|
||||
] }
|
||||
]
|
6
templates/reva/rclone.conf.erb
Normal file
6
templates/reva/rclone.conf.erb
Normal file
|
@ -0,0 +1,6 @@
|
|||
[statistics]
|
||||
type = webdav
|
||||
url = https://89.45.236.246/remote.php/dav/files/_script/
|
||||
vendor = nextcloud
|
||||
user = _script
|
||||
pass = <%= @statistics_secret %>
|
|
@ -1,48 +1,83 @@
|
|||
[vars]
|
||||
internal_gateway = "<%= @reva_domain %>"
|
||||
provider_domain = "<%= @reva_domain %>"
|
||||
external_reva_endpoint = "https://<%= @reva_domain %>" # append here any route if applicable
|
||||
efss_sciencemesh_endpoint = "https://<%= @customer %>.<%= @domain %>/index.php/apps/sciencemesh/"
|
||||
machine_api_key = "<%= @iopsecret %>"
|
||||
efss_shared_secret = "<%= @shared_secret %>"
|
||||
|
||||
[http]
|
||||
certfile = "/etc/revad/tls/<%= @domain %>.crt"
|
||||
keyfile = "/etc/revad/tls/<%= @domain %>.key"
|
||||
|
||||
[log]
|
||||
level = "debug"
|
||||
|
||||
[shared]
|
||||
gatewaysvc = "<%= @reva_domain %>:19000"
|
||||
# gatewaycertfile = "/etc/revad/tls/revanc1.crt"
|
||||
|
||||
# [registry]
|
||||
# driver = "static"
|
||||
#
|
||||
# [registry.static]
|
||||
# services = ["authprovider","userprovider"]
|
||||
#
|
||||
# [registry.static.authprovider]
|
||||
# bearer = ["localhost:0123"]
|
||||
# basic = ["localhost:1234"]
|
||||
# publiclink = ["localhost:9876"]
|
||||
|
||||
[grpc]
|
||||
address = "0.0.0.0:19000"
|
||||
# certfile = "/etc/revad/tls/revanc1.crt"
|
||||
# keyfile = "/etc/revad/tls/revanc1.key"
|
||||
gatewaysvc = "{{ vars.internal_gateway }}:19000"
|
||||
|
||||
[grpc.services.gateway]
|
||||
authregistrysvc = "<%= @reva_domain %>:19000"
|
||||
appprovidersvc = "<%= @reva_domain %>:19000"
|
||||
appregistry = "<%= @reva_domain %>:19000"
|
||||
storageregistrysvc = "<%= @reva_domain %>:19000"
|
||||
preferencessvc = "<%= @reva_domain %>:19000"
|
||||
userprovidersvc = "<%= @reva_domain %>:19000"
|
||||
usershareprovidersvc = "<%= @reva_domain %>:19000"
|
||||
publicshareprovidersvc = "<%= @reva_domain %>:19000"
|
||||
ocmcoresvc = "<%= @reva_domain %>:19000"
|
||||
ocmshareprovidersvc = "<%= @reva_domain %>:19000"
|
||||
ocminvitemanagersvc = "<%= @reva_domain %>:19000"
|
||||
ocmproviderauthorizersvc = "<%= @reva_domain %>:19000"
|
||||
commit_share_to_storage_grant = false
|
||||
datagateway = "https://<%= @reva_domain %>/data"
|
||||
transfer_expires = 6 # give it a moment
|
||||
address = ":19000"
|
||||
authregistrysvc = "{{ grpc.services.authregistry.address }}"
|
||||
appregistrysvc = "{{ grpc.services.appregistry.address }}"
|
||||
storageregistrysvc = "{{ grpc.services.storageregistry.address }}"
|
||||
preferencessvc = "{{ grpc.services.userprovider.address }}"
|
||||
userprovidersvc = "{{ grpc.services.userprovider.address }}"
|
||||
usershareprovidersvc = "{{ grpc.services.usershareprovider.address }}"
|
||||
ocmcoresvc = "{{ grpc.services.ocmcore.address }}"
|
||||
ocmshareprovidersvc = "{{ grpc.services.ocmshareprovider.address }}"
|
||||
ocminvitemanagersvc = "{{ grpc.services.ocminvitemanager.address }}"
|
||||
ocmproviderauthorizersvc = "{{ grpc.services.ocmproviderauthorizer.address }}"
|
||||
datagateway = "https://{{ http.services.datagateway.address }}/data"
|
||||
|
||||
transfer_expires = 6 # give it a moment
|
||||
commit_share_to_storage_grant = true
|
||||
commit_share_to_storage_ref = true
|
||||
|
||||
[grpc.services.appregistry]
|
||||
driver = "static"
|
||||
|
||||
[grpc.services.appregistry.drivers.static]
|
||||
mime_types = [
|
||||
{"mime_type" = "text/plain", "extension" = "txt", "name" = "Text file", "description" = "Text file", "allow_creation" = true},
|
||||
{"mime_type" = "text/markdown", "extension" = "md", "name" = "Markdown file", "description" = "Markdown file", "allow_creation" = true},
|
||||
{"mime_type" = "application/vnd.oasis.opendocument.text", "extension" = "odt", "name" = "OpenDocument", "description" = "OpenDocument text document", "default_app" = "Collabora", "allow_creation" = true},
|
||||
{"mime_type" = "application/vnd.oasis.opendocument.spreadsheet", "extension" = "ods", "name" = "OpenSpreadsheet", "description" = "OpenDocument spreadsheet document", "default_app" = "Collabora", "allow_creation" = true},
|
||||
{"mime_type" = "application/vnd.oasis.opendocument.presentation", "extension" = "odp", "name" = "OpenPresentation", "description" = "OpenDocument presentation document", "default_app" = "Collabora", "allow_creation" = true},
|
||||
{"mime_type" = "application/vnd.jupyter", "extension" = "ipynb", "name" = "Jupyter Notebook", "description" = "Jupyter Notebook"}
|
||||
]
|
||||
|
||||
|
||||
### AUTH PROVIDERS ###
|
||||
|
||||
[grpc.services.authregistry]
|
||||
driver = "static"
|
||||
|
||||
[grpc.services.authregistry.drivers.static.rules]
|
||||
basic = "<%= @reva_domain %>:19000"
|
||||
basic = "{{ grpc.services.authprovider[0].address }}"
|
||||
machine = "{{ grpc.services.authprovider[1].address }}"
|
||||
ocmshares = "{{ grpc.services.authprovider[2].address }}"
|
||||
|
||||
[[grpc.services.authprovider]]
|
||||
auth_manager = "nextcloud"
|
||||
|
||||
[grpc.services.authprovider.auth_managers.nextcloud]
|
||||
endpoint = "{{ vars.efss_sciencemesh_endpoint }}"
|
||||
shared_secret = "{{ vars.efss_shared_secret }}"
|
||||
mock_http = false
|
||||
|
||||
[[grpc.services.authprovider]]
|
||||
auth_manager = "machine"
|
||||
|
||||
[grpc.services.authprovider.auth_managers.machine]
|
||||
api_key = "{{ vars.machine_api_key }}"
|
||||
gateway_addr = "{{ vars.internal_gateway }}:19000"
|
||||
|
||||
[[grpc.services.authprovider]]
|
||||
auth_manager = "ocmshares"
|
||||
|
||||
|
||||
### STORAGE PROVIDERS ###
|
||||
|
||||
[grpc.services.storageregistry]
|
||||
driver = "static"
|
||||
|
@ -51,8 +86,36 @@ driver = "static"
|
|||
home_provider = "/home"
|
||||
|
||||
[grpc.services.storageregistry.drivers.static.rules]
|
||||
"/home" = {"address" = "<%= @reva_domain %>:19000"}
|
||||
"123e4567-e89b-12d3-a456-426655440000" = {"address" = "<%= @reva_domain %>:19000"}
|
||||
"/home" = {"address" = "{{ grpc.services.storageprovider[0].address }}"}
|
||||
"nextcloud" = {"address" = "{{ grpc.services.storageprovider[0].address }}"}
|
||||
"/ocm" = {"address" = "{{ grpc.services.storageprovider[1].address }}"}
|
||||
"ocm" = {"address" = "{{ grpc.services.storageprovider[1].address }}"}
|
||||
|
||||
[[grpc.services.storageprovider]]
|
||||
driver = "nextcloud"
|
||||
mount_id = "nextcloud"
|
||||
expose_data_server = true
|
||||
enable_home_creation = false
|
||||
data_server_url = "https://localhost:{{ http.services.dataprovider[0].address.port }}/data"
|
||||
|
||||
[grpc.services.storageprovider.drivers.nextcloud]
|
||||
endpoint = "{{ vars.efss_sciencemesh_endpoint }}"
|
||||
shared_secret = "{{ vars.efss_shared_secret }}"
|
||||
mock_http = false
|
||||
|
||||
[[grpc.services.storageprovider]]
|
||||
driver = "ocmoutcoming"
|
||||
mount_id = "ocm"
|
||||
mount_path = "/ocm"
|
||||
expose_data_server = true
|
||||
enable_home_creation = false
|
||||
data_server_url = "{{ vars.external_reva_endpoint }}/data"
|
||||
|
||||
[grpc.services.storageprovider.drivers.ocmoutcoming]
|
||||
machine_secret = "{{ vars.machine_api_key }}"
|
||||
|
||||
|
||||
### OTHER PROVIDERS ###
|
||||
|
||||
[grpc.services.usershareprovider]
|
||||
driver = "memory"
|
||||
|
@ -61,121 +124,148 @@ driver = "memory"
|
|||
driver = "nextcloud"
|
||||
|
||||
[grpc.services.ocmcore.drivers.nextcloud]
|
||||
webdav_host = "https://<%= @domain %>/"
|
||||
endpoint = "https://<%= @domain %>/index.php/apps/sciencemesh/"
|
||||
shared_secret = "<%= @shared_secret %>"
|
||||
host = "{{ vars.external_reva_endpoint }}"
|
||||
endpoint = "{{ vars.efss_sciencemesh_endpoint }}"
|
||||
shared_secret = "{{ vars.efss_shared_secret }}"
|
||||
mock_http = false
|
||||
|
||||
[grpc.services.ocminvitemanager]
|
||||
# TODO the driver should be "nextcloud" once it is implemented
|
||||
driver = "json"
|
||||
provider_domain = "{{ vars.provider_domain }}"
|
||||
|
||||
[grpc.services.ocmshareprovider]
|
||||
driver = "nextcloud"
|
||||
provider_domain = "{{ vars.provider_domain }}"
|
||||
webdav_endpoint = "{{ vars.external_reva_endpoint }}"
|
||||
webdav_prefix = "{{ vars.external_reva_endpoint }}/remote.php/dav/files"
|
||||
# TODO the following should become {{ vars.external_reva_endpoint }}/external/{{.Token}}/...
|
||||
webapp_template = "https://your.revad.org/external/sciencemesh/{{.Token}}/{relative-path-to-shared-resource}"
|
||||
|
||||
[grpc.services.ocmshareprovider.drivers.nextcloud]
|
||||
webdav_host = "https://<%= @domain %>/"
|
||||
endpoint = "https://<%= @domain %>/index.php/apps/sciencemesh/"
|
||||
shared_secret = "<%= @shared_secret %>"
|
||||
webdav_host = "{{ vars.external_reva_endpoint }}"
|
||||
endpoint = "{{ vars.efss_sciencemesh_endpoint }}"
|
||||
shared_secret = "{{ vars.efss_shared_secret }}"
|
||||
mock_http = false
|
||||
mount_id = "nextcloud"
|
||||
|
||||
[grpc.services.ocmproviderauthorizer]
|
||||
#driver = "mentix"
|
||||
driver = "open"
|
||||
driver = "mentix"
|
||||
|
||||
[grpc.services.ocmproviderauthorizer.drivers.mentix]
|
||||
url = "https://iop.sciencemesh.uni-muenster.de/iop/mentix/cs3"
|
||||
verify_request_hostname = false
|
||||
verify_request_hostname = true
|
||||
insecure = false
|
||||
timeout = 10
|
||||
refresh = 900
|
||||
|
||||
[grpc.services.publicshareprovider]
|
||||
driver = "memory"
|
||||
|
||||
[grpc.services.appprovider]
|
||||
driver = "demo"
|
||||
iopsecret = "<%= @iopsecret %>"
|
||||
wopiurl = "http://0.0.0.0:8880/"
|
||||
wopibridgeurl = "http://localhost:8000/wopib"
|
||||
|
||||
[grpc.services.appregistry]
|
||||
driver = "static"
|
||||
|
||||
[grpc.services.appregistry.static.rules]
|
||||
"text/plain" = "<%= @reva_domain %>:19000"
|
||||
"text/markdown" = "<%= @reva_domain %>:19000"
|
||||
"application/compressed-markdown" = "<%= @reva_domain %>:19000"
|
||||
"application/vnd.oasis.opendocument.text" = "<%= @reva_domain %>:19000"
|
||||
"application/vnd.oasis.opendocument.spreadsheet" = "<%= @reva_domain %>:19000"
|
||||
"application/vnd.oasis.opendocument.presentation" = "<%= @reva_domain %>:19000"
|
||||
|
||||
[grpc.services.storageprovider]
|
||||
driver = "nextcloud"
|
||||
expose_data_server = true
|
||||
data_server_url = "https://<%= @reva_domain %>/data"
|
||||
enable_home_creation = true
|
||||
|
||||
[grpc.services.storageprovider.drivers.nextcloud]
|
||||
endpoint = "https://<%= @domain %>/index.php/apps/sciencemesh/"
|
||||
shared_secret = "<%= @shared_secret %>"
|
||||
mock_http = false
|
||||
|
||||
[grpc.services.authprovider]
|
||||
auth_manager = "nextcloud"
|
||||
|
||||
[grpc.services.authprovider.auth_managers.nextcloud]
|
||||
endpoint = "https://<%= @domain %>/index.php/apps/sciencemesh/"
|
||||
shared_secret = "<%= @shared_secret %>"
|
||||
mock_http = false
|
||||
[grpc.services.ocmproviderauthorizer.drivers.json]
|
||||
# this is used by the docker-based test deployment, not in production
|
||||
providers = "providers.testnet.json"
|
||||
verify_request_hostname = true
|
||||
|
||||
[grpc.services.userprovider]
|
||||
driver = "nextcloud"
|
||||
|
||||
[grpc.services.userprovider.drivers.nextcloud]
|
||||
endpoint = "https://<%= @domain %>/index.php/apps/sciencemesh/"
|
||||
shared_secret = "<%= @shared_secret %>"
|
||||
endpoint = "{{ vars.efss_sciencemesh_endpoint }}"
|
||||
shared_secret = "{{ vars.efss_shared_secret }}"
|
||||
mock_http = false
|
||||
|
||||
[http]
|
||||
enabled_services = ["ocmd"]
|
||||
enabled_middlewares = ["providerauthorizer", "cors"]
|
||||
address = "0.0.0.0:443"
|
||||
certfile = "/etc/revad/tls/drive.test.sunet.se.crt"
|
||||
keyfile = "/etc/revad/tls/drive.test.sunet.se.key"
|
||||
[grpc.services.datatx]
|
||||
txdriver = "rclone"
|
||||
storagedriver = "json"
|
||||
remove_transfer_on_cancel = true
|
||||
|
||||
[http.services.dataprovider]
|
||||
[grpc.services.datatx.txdrivers.rclone]
|
||||
# rclone endpoint
|
||||
endpoint = "http://rclone.docker"
|
||||
# basic auth is used
|
||||
auth_user = "rcloneuser"
|
||||
auth_pass = "eilohtho9oTahsuongeeTh7reedahPo1Ohwi3aek"
|
||||
auth_header = "x-access-token"
|
||||
job_status_check_interval = 2000
|
||||
job_timeout = 120000
|
||||
storagedriver = "json"
|
||||
remove_transfer_job_on_cancel = true
|
||||
|
||||
[grpc.services.datatx.storagedrivers.json]
|
||||
file = ""
|
||||
|
||||
[grpc.services.datatx.txdrivers.rclone.storagedrivers.json]
|
||||
file = ""
|
||||
|
||||
|
||||
### HTTP ENDPOINTS ###
|
||||
|
||||
[http.services.appprovider]
|
||||
address = ":443"
|
||||
insecure = true
|
||||
|
||||
[http.services.datagateway]
|
||||
address = ":443"
|
||||
|
||||
[[http.services.dataprovider]]
|
||||
driver = "nextcloud"
|
||||
|
||||
[http.services.prometheus]
|
||||
[http.services.sysinfo]
|
||||
|
||||
[http.services.dataprovider.drivers.nextcloud]
|
||||
endpoint = "https://<%= @domain %>/index.php/apps/sciencemesh/"
|
||||
shared_secret = "<%= @shared_secret %>"
|
||||
endpoint = "{{ vars.efss_sciencemesh_endpoint }}"
|
||||
shared_secret = "{{ vars.efss_shared_secret }}"
|
||||
mock_http = false
|
||||
|
||||
[[http.services.dataprovider]]
|
||||
address = ":443"
|
||||
driver = "ocmoutcoming"
|
||||
|
||||
[http.services.dataprovider.drivers.ocmoutcoming]
|
||||
machine_secret = "{{ vars.machine_api_key }}"
|
||||
|
||||
[http.services.sciencemesh]
|
||||
address = ":443"
|
||||
provider_domain = "{{ vars.provider_domain }}"
|
||||
mesh_directory_url = "https://sciencemesh.cesnet.cz/iop/meshdir"
|
||||
ocm_mount_point = "/sciencemesh"
|
||||
|
||||
[http.services.sciencemesh.smtp_credentials]
|
||||
disable_auth = false
|
||||
sender_mail = "noreply@<%= @domain %>"
|
||||
sender_login = "noreply@<%= @domain %>"
|
||||
sender_password = "<%= @smtp_credentials %>"
|
||||
smtp_server = "smtp.sunet.se"
|
||||
smtp_port = 587
|
||||
|
||||
[http.services.ocmprovider]
|
||||
address = ":443"
|
||||
ocm_prefix = "ocm"
|
||||
provider = "Reva for ownCloud/Nextcloud"
|
||||
endpoint = "{{ vars.external_reva_endpoint }}"
|
||||
enable_webapp = true
|
||||
enable_datatx = true
|
||||
|
||||
[http.services.ocmd]
|
||||
address = ":443"
|
||||
prefix = "ocm"
|
||||
|
||||
[http.services.ocmd.config]
|
||||
host = "<%= @reva_domain %>"
|
||||
provider = "test-revanc1"
|
||||
|
||||
[http.middlewares.providerauthorizer]
|
||||
#driver = "mentix"
|
||||
driver = "open"
|
||||
|
||||
[http.middlewares.providerauthorizer.drivers.mentix]
|
||||
url = "https://iop.sciencemesh.uni-muenster.de/iop/mentix/cs3"
|
||||
verify_request_hostname = false
|
||||
insecure = false
|
||||
timeout = 10
|
||||
refresh = 900
|
||||
host = "{{ vars.provider_domain }}"
|
||||
|
||||
[http.services.ocs]
|
||||
address = ":443"
|
||||
prefix = "ocs"
|
||||
|
||||
[http.services.ocdav]
|
||||
prefix = "ocdav"
|
||||
address = ":443"
|
||||
|
||||
[http.services.prometheus]
|
||||
address = ":443"
|
||||
|
||||
[http.services.metrics]
|
||||
address = ":443"
|
||||
metrics_data_driver_type = "json"
|
||||
metrics_data_location = "/etc/revad/metrics.json"
|
||||
metrics_record_interval = 5000
|
||||
|
||||
[http.services.sysinfo]
|
||||
|
||||
[http.middlewares.cors]
|
||||
[http.middlewares.log]
|
||||
|
|
23
templates/satosa/docker-compose.yml.erb
Normal file
23
templates/satosa/docker-compose.yml.erb
Normal file
|
@ -0,0 +1,23 @@
|
|||
services:
|
||||
satosa:
|
||||
environment:
|
||||
- "METADATA_DIR=/etc/satosa/metadata"
|
||||
- "WORKER_TIMEOUT=120"
|
||||
dns:
|
||||
- "89.32.32.32"
|
||||
image: "<%= @image %><% if @tag %>:<%= @tag %><% end %>"
|
||||
pull_policy: "always"
|
||||
ports:
|
||||
- "443:8000"
|
||||
volumes:
|
||||
- "/etc/satosa:/etc/satosa"
|
||||
- "/etc/dehydrated:/etc/dehydrated"
|
||||
alwayshttps:
|
||||
environment:
|
||||
- "ACME_URL=http://acme-c.sunet.se"
|
||||
dns:
|
||||
- "89.32.32.32"
|
||||
image: "docker.sunet.se/always-https"
|
||||
pull_policy: "always"
|
||||
ports:
|
||||
- "80:80"
|
|
@ -27,21 +27,21 @@ Customer;Total GB;Users;Product"
|
|||
for customer in $(${yq} -r '.fullnodes | .[]' ${commonyaml}); do
|
||||
product=1 # Prisplan 1
|
||||
csv="${csv}
|
||||
$(rclone cat "${billingbucket}/${customer}-usage/${customer}-latest.csv" |
|
||||
$(rclone cat --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies "${billingbucket}/${customer}-usage/${customer}-latest.csv" |
|
||||
grep -E -v '^DATE|^Customer' |
|
||||
sed 's/$/;1/')"
|
||||
done
|
||||
for customer in $(${yq} -r '.singlenodes | .[]' ${commonyaml}); do
|
||||
product=2 # Prisplan 2
|
||||
csv="${csv}
|
||||
$(rclone cat "${billingbucket}/${customer}-usage/${customer}-latest.csv" |
|
||||
$(rclone cat --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies "${billingbucket}/${customer}-usage/${customer}-latest.csv" |
|
||||
grep -E -v '^DATE|^Customer' |
|
||||
sed 's/$/;'${product}'/')"
|
||||
done
|
||||
echo "${csv}" >"${aggregatefile}"
|
||||
|
||||
rclone copy "${aggregatefile}" "${aggregatedir}/"
|
||||
rclone copy -c --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies "${aggregatefile}" "${aggregatedir}/"
|
||||
mv "${aggregatefile}" "latest.csv"
|
||||
rclone move "latest.csv" "${latestdir}/"
|
||||
rclone move -c --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies "latest.csv" "${latestdir}/"
|
||||
cd "${olddir}" || (echo "Could not switch back to old dir" && exit 1)
|
||||
rmdir "${tempdir}"
|
||||
|
|
75
templates/script/announce.erb.sh
Executable file
75
templates/script/announce.erb.sh
Executable file
|
@ -0,0 +1,75 @@
|
|||
#!/bin/bash
|
||||
|
||||
VALID_ARGS=$(getopt -o cdghi:m:s: --long create,delete,get,help,id:,message:,subject: -- "$@")
|
||||
# shellcheck disable=SC2181
|
||||
if [[ ${?} -ne 0 ]]; then
|
||||
exit 1;
|
||||
fi
|
||||
|
||||
usage () {
|
||||
echo "${0}: -c|--create -m|--message <'Your announcement goes here'> -s|--subject <Your subject goes here>"
|
||||
echo "${0}: -d|--delete -i|--id <announcement_id>"
|
||||
echo "${0}: -g|--get"
|
||||
exit 1
|
||||
}
|
||||
|
||||
eval set -- "${VALID_ARGS}"
|
||||
# shellcheck disable=SC2078
|
||||
while [ : ]; do
|
||||
case "$1" in
|
||||
-c | --create)
|
||||
method='POST'
|
||||
shift
|
||||
;;
|
||||
-d | --delete)
|
||||
method='DELETE'
|
||||
shift
|
||||
;;
|
||||
-g | --get)
|
||||
method='GET'
|
||||
shift
|
||||
;;
|
||||
-h | --help)
|
||||
usage
|
||||
;;
|
||||
-i | --id)
|
||||
argument="${2}"
|
||||
shift 2
|
||||
;;
|
||||
-m | --message)
|
||||
message="${2}"
|
||||
shift 2
|
||||
;;
|
||||
-s | --subject)
|
||||
subject="${2}"
|
||||
shift 2
|
||||
;;
|
||||
*)
|
||||
break
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [[ ${method} == 'DELETE' ]] && [[ -z ${argument} ]]; then
|
||||
usage
|
||||
fi
|
||||
if [[ ${method} == 'POST' ]]; then
|
||||
if [[ -z ${message} ]] || [[ -z ${subject} ]]; then
|
||||
usage
|
||||
fi
|
||||
argument='{"subject":"'${subject}'","message":"'${message}'", "plainMessage":"'${message}'", "groups": [], "userId": "admin", "activities": false, "notifications": true, "emails": false, "comments": false }'
|
||||
fi
|
||||
|
||||
curl_cmd(){
|
||||
local method="${1}"
|
||||
if [[ ${method} == 'POST' ]] && [[ -n ${2} ]]; then
|
||||
local payload=(-d "${2}" -H "Content-Type: application/json")
|
||||
elif [[ ${method} == 'DELETE' ]] && [[ -n ${2} ]]; then
|
||||
local id="/${2}"
|
||||
fi
|
||||
local admin_app_password="<%= @admin_app_password %>"
|
||||
domain="$(hostname -d)"
|
||||
curl -s -X "${method}" -u "admin:${admin_app_password}" "${payload[@]}" -H 'OCS-APIRequest: true' "https://${domain}/ocs/v2.php/apps/announcementcenter/api/v1/announcements${id}"
|
||||
}
|
||||
curl_cmd "${method}" "${argument}"
|
||||
|
|
@ -6,63 +6,78 @@ sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk
|
|||
number_of_full_to_keep='<%= @full_backup_retention %>'
|
||||
fork_limit=30 #in GB, if bigger than this number, we fork the backup to it's own process
|
||||
customer="<%= @customer %>"
|
||||
environment="<%= @environment %>"
|
||||
declare -A extra_backup_jobs
|
||||
|
||||
#<% if @extra_backup_jobs.any? %>
|
||||
#<% @extra_backup_jobs.each do |client, job| %>
|
||||
extra_backup_jobs["<%= client %>"]="<%= job %>"
|
||||
#<% end %>
|
||||
#<% end %>
|
||||
#<% if @location.start_with?('common') %>
|
||||
declare -a sixmonths=('mau')
|
||||
if [[ " ${sixmonths[*]} " =~ " ${customer} " ]]; then
|
||||
number_of_full_to_keep=6
|
||||
number_of_full_to_keep=6
|
||||
fi
|
||||
declare -a projects
|
||||
#<% @singlenodes.each do |singlenode| %>
|
||||
projects+=("<%= @full_project_mapping[singlenode][@environment]['primary_project'] %> <%= @full_project_mapping[singlenode][@environment]['mirror_project'] %>")
|
||||
projects+=("<%= @full_project_mapping[singlenode][@environment]['primary_project'] %> <%= @full_project_mapping[singlenode][@environment]['mirror_project'] %> <%= singlenode %>")
|
||||
#<% @full_project_mapping[singlenode][@environment]['assigned'].each do |project| %>
|
||||
projects+=("<%= project['project'] %> <%= project['mirror_project'] %>")
|
||||
projects+=("<%= project['project'] %> <%= project['mirror_project'] %> <%= singlenode %>")
|
||||
#<% end %>
|
||||
#<% end %>
|
||||
#<% else %>
|
||||
declare -a projects=("<%= @primary_project %> <%= @mirror_project %>")
|
||||
declare -a projects=("<%= @primary_project %> <%= @mirror_project %> ${customer}")
|
||||
#<% @assigned_projects.each do |project| %>
|
||||
projects+=("<%= project['project'] %> <%= project['mirror_project'] %>")
|
||||
projects+=("<%= project['project'] %> <%= project['mirror_project'] %> ${customer}")
|
||||
#<% end %>
|
||||
#<% end %>
|
||||
|
||||
if [[ ${customer} == 'common' ]]; then
|
||||
projects+=("<%= @location %> <%= @location %>-mirror")
|
||||
projects+=("<%= @location %> <%= @location %>-mirror ${customer}")
|
||||
fi
|
||||
|
||||
|
||||
function do_backup {
|
||||
local project="${1}"
|
||||
local mirror="${2}"
|
||||
local bucket="${3}"
|
||||
local mirrorbucket="${bucket}-mirror"
|
||||
local mountpoint="/opt/backupmounts/${bucket}"
|
||||
mkdir -p ${mountpoint}
|
||||
rclone mount ${project}:${bucket} ${mountpoint}/ --daemon --allow-other --dir-cache-time 24h
|
||||
rclone mkdir ${mirror}:${mirrorbucket}
|
||||
duplicity --full-if-older-than 1M --asynchronous-upload --tempdir /mnt --archive-dir /mnt \
|
||||
--no-encryption ${mountpoint} rclone://${mirror}:/${mirrorbucket}
|
||||
umount ${mountpoint}
|
||||
rmdir ${mountpoint}
|
||||
# Clean up
|
||||
duplicity remove-all-but-n-full ${number_of_full_to_keep} --tempdir /mnt --archive-dir /mnt \
|
||||
--force rclone://${mirror}:/${mirrorbucket}
|
||||
local project="${1}"
|
||||
local mirror="${2}"
|
||||
local bucket="${3}"
|
||||
local customer="${4}"
|
||||
local mirrorbucket="${bucket}-mirror"
|
||||
local mountpoint="/opt/backupmounts/${bucket}"
|
||||
ps aux | grep duplicity | grep "[^a-zA-Z]${bucket}" > /dev/null
|
||||
local oktorun=$? # 1 == this bucket has no other bakup process in progress
|
||||
if [[ ${oktorun} -ne 0 ]]; then
|
||||
mkdir -p ${mountpoint}
|
||||
rclone mount ${project}:${bucket} ${mountpoint}/ --daemon --allow-other --dir-cache-time 24h
|
||||
rclone mkdir ${mirror}:${mirrorbucket}
|
||||
duplicity --full-if-older-than 1M --asynchronous-upload --tempdir /mnt --archive-dir /mnt --no-encryption ${mountpoint} rclone://${mirror}:/${mirrorbucket}
|
||||
if [[ "${extra_backup_jobs[${customer}]:+found}" == "found" ]] && [[ -f "${extra_backup_jobs[${customer}]}" ]]; then
|
||||
${extra_backup_jobs[${customer}]} ${project} ${mirror} ${bucket} ${customer} ${environment}
|
||||
fi
|
||||
umount ${mountpoint}
|
||||
rmdir ${mountpoint}
|
||||
# Clean up
|
||||
duplicity remove-all-but-n-full ${number_of_full_to_keep} --tempdir /mnt --archive-dir /mnt --force rclone://${mirror}:/${mirrorbucket}
|
||||
fi
|
||||
}
|
||||
|
||||
for entry in "${projects[@]}"; do
|
||||
project=$(echo ${entry} | awk '{print $1}')
|
||||
mirror=$(echo ${entry} | awk '{print $2}')
|
||||
for bucket in $(rclone lsd ${project}:/ | awk '{print $5}'); do
|
||||
maybesize=$(timeout 30s rclone size --json ${project}:${bucket})
|
||||
project=$(echo ${entry} | awk '{print $1}')
|
||||
mirror=$(echo ${entry} | awk '{print $2}')
|
||||
customer=$(echo ${entry} | awk '{print $3}')
|
||||
for bucket in $(rclone lsd ${project}:/ | awk '{print $5}'); do
|
||||
maybesize=$(timeout 30s rclone size --json ${project}:${bucket})
|
||||
if [[ ${?} -eq 124 ]]; then
|
||||
size=$((${fork_limit} * 1000000001))
|
||||
else
|
||||
size=$(echo ${maybesize} | jq -r '.bytes' )
|
||||
fi
|
||||
# If bucket is above 50 GB we fork
|
||||
if [[ ${size} -gt $((${fork_limit} * 1000000000)) ]]; then
|
||||
do_backup ${project} ${mirror} ${bucket} &
|
||||
else
|
||||
do_backup ${project} ${mirror} ${bucket}
|
||||
fi
|
||||
done
|
||||
# If bucket is above 50 GB we fork
|
||||
if [[ ${size} -gt $((${fork_limit} * 1000000000)) ]]; then
|
||||
do_backup ${project} ${mirror} ${bucket} ${customer} &
|
||||
else
|
||||
do_backup ${project} ${mirror} ${bucket} ${customer}
|
||||
fi
|
||||
done
|
||||
done
|
||||
|
|
|
@ -1,86 +0,0 @@
|
|||
#!/bin/bash
|
||||
# Backup all buckets
|
||||
# We sleep a deterministic amount of time, which will be between 0 an 128 m and allways the same within
|
||||
# a specific host, but will differ between hosts
|
||||
sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk '{print $1}') / 2))m
|
||||
number_of_full_to_keep="<%= @full_backup_retention %>"
|
||||
fork_limit=30 #in GB, if bigger than this number, we fork the backup to it's own process
|
||||
split_limit=1000 #in GB, if bigger than this number, we fork backup of each directory to it's own process
|
||||
|
||||
declare -a projects=("<%= @primary_project %> <%= @mirror_project %>")
|
||||
#<% @assigned_projects.each do |project| %>
|
||||
projects+=("<%= project['project'] %> <%= project['mirror_project'] %>")
|
||||
#<% end %>
|
||||
|
||||
function do_huge_backup {
|
||||
local project="${1}"
|
||||
local mirror="${2}"
|
||||
local bucket="${3}"
|
||||
declare -a directories
|
||||
declare -a empty
|
||||
for dir in $(rclone lsd ${project}:${bucket} | awk '{print $NF}'); do
|
||||
directories+=("${dir}")
|
||||
mountpoint="/opt/backupmounts/${bucket}-${dir}"
|
||||
do_backup ${project} ${mirror} ${bucket} ${mountpoint} ${dir} ${empty} &
|
||||
done
|
||||
mountpoint="/opt/backupmounts/${bucket}"
|
||||
do_backup ${project} ${mirror} ${bucket} ${mountpoint} none ${directories[@]} &
|
||||
|
||||
}
|
||||
|
||||
function do_backup {
|
||||
local project="${1}"
|
||||
shift
|
||||
local mirror="${1}"
|
||||
shift
|
||||
local bucket="${1}"
|
||||
shift
|
||||
local mountpoint="${1}"
|
||||
shift
|
||||
local dire="${1}"
|
||||
shift
|
||||
declare -a exclude
|
||||
exclude=( "${@}" )
|
||||
suffix=""
|
||||
opts=""
|
||||
if [[ "${dire}" != "none" ]]; then
|
||||
suffix="/${dire}"
|
||||
fi
|
||||
if ((${#exclude[@]})); then
|
||||
for dir in "${exclude[@]}"; do
|
||||
opts="${opts} --exclude /${dir}"
|
||||
done
|
||||
fi
|
||||
local mirrorbucket="${bucket}-mirror"
|
||||
mkdir -p ${mountpoint}
|
||||
rclone mount ${project}:${bucket}${suffix} ${mountpoint}/ --daemon --allow-other
|
||||
rclone mkdir ${mirror}:${mirrorbucket}${suffix}
|
||||
duplicity --full-if-older-than 1M --asynchronous-upload --tempdir /mnt --archive-dir /mnt ${opts} \
|
||||
--no-encryption ${mountpoint} rclone://${mirror}:/${mirrorbucket}${suffix}
|
||||
umount ${mountpoint}
|
||||
rmdir ${mountpoint}
|
||||
# Clean up
|
||||
duplicity remove-all-but-n-full ${number_of_full_to_keep} --tempdir /mnt --archive-dir /mnt \
|
||||
--force rclone://${mirror}:/${mirrorbucket}${suffix}
|
||||
}
|
||||
|
||||
for entry in "${projects[@]}"; do
|
||||
project=$(echo ${entry} | awk '{print $1}')
|
||||
mirror=$(echo ${entry} | awk '{print $2}')
|
||||
declare -a empty
|
||||
for bucket in $(rclone lsd ${project}:/ | awk '{print $5}'); do
|
||||
size=$(rclone size --json ${project}:${bucket} | jq -r '.bytes')
|
||||
mirrorbucket="${bucket}-mirror"
|
||||
mountpoint="/opt/backupmounts/${bucket}"
|
||||
# If bucket is above ${split_limit} we fork and do backup per directory
|
||||
if [[ ${size} -gt $((${split_limit} * 1000000000)) ]]; then
|
||||
do_huge_backup ${project} ${mirror} ${bucket} &
|
||||
# If bucket is above ${fork_limit} we fork and do backup for bucket
|
||||
elif [[ ${size} -gt $((${fork_limit} * 1000000000)) ]]; then
|
||||
do_backup ${project} ${mirror} ${bucket} ${mountpoint} none ${empty} &
|
||||
else
|
||||
# If bucket is below ${fork_limit} we do not fork and do backup for bucket
|
||||
do_backup ${project} ${mirror} ${bucket} ${mountpoint} none ${empty}
|
||||
fi
|
||||
done
|
||||
done
|
8
templates/script/backup-hb.erb.sh
Executable file
8
templates/script/backup-hb.erb.sh
Executable file
|
@ -0,0 +1,8 @@
|
|||
#!/bin/bash
|
||||
project="${1}"
|
||||
mirror="${2}"
|
||||
bucket="${3}"
|
||||
customer="${4}"
|
||||
environment="${5}"
|
||||
|
||||
rsync -e "ssh -i ${HOME}/.ssh/id_script" -avz "/opt/backupmounts/${bucket}" "sd-${environment}@sd-${environment}-backup.hb.se:~/sd-${environment}/${bucket}"
|
|
@ -6,11 +6,14 @@ sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk
|
|||
number_of_full_to_keep="<%= @full_backup_retention %>"
|
||||
|
||||
backup="${1}"
|
||||
if [[ -z ${backup} ]]; then
|
||||
backup="backup1.$(hostname -d)"
|
||||
fi
|
||||
if ! [[ ${backup} =~ backup1.*sunet.se$ ]]; then
|
||||
echo "Usage: ${0} <fqdn of backup server>"
|
||||
echo "Example: ${0} backup1.sunet.drive.sunet.se"
|
||||
fi
|
||||
backup_dir="/opt/backups"
|
||||
backup_dir="/opt/backups/backup-files"
|
||||
bucket="db-backups"
|
||||
mirror="<%= @customer %>-<%= @environment %>-mirror"
|
||||
if [[ ${mirror} =~ common-(test|prod)-mirror ]]; then
|
||||
|
@ -18,16 +21,15 @@ if [[ ${mirror} =~ common-(test|prod)-mirror ]]; then
|
|||
bucket="${bucket}-${suffix}"
|
||||
backup_dir="${backup_dir}-${suffix}"
|
||||
fi
|
||||
mkdir -p ${backup_dir}
|
||||
echo "Backing up database for ${backup}"
|
||||
ssh ${backup} "sudo /home/script/bin/backup_db.sh"
|
||||
echo "Cleaning up old backups for ${backup}"
|
||||
ssh ${backup} "sudo /home/script/bin/purge_backups.sh /opt/mariadb_backup/backups/"
|
||||
echo "Copying backups here"
|
||||
mkdir -p ${backup_dir}
|
||||
scp script@${backup}:/opt/mariadb_backup/backups/$(date +%Y/%m/%d)/*.gz ${backup_dir}
|
||||
echo "Copying backups to remote bucket"
|
||||
rclone mkdir ${mirror}:${bucket}
|
||||
duplicity --full-if-older-than 1M --tempdir /mnt --archive-dir /mnt --no-encryption ${backup_dir} rclone://${mirror}:/${bucket}
|
||||
duplicity --allow-source-mismatch --full-if-older-than 1M --tempdir /mnt --archive-dir /mnt --no-encryption ${backup_dir} rclone://${mirror}:/${bucket}
|
||||
duplicity remove-all-but-n-full ${number_of_full_to_keep} --tempdir /mnt --archive-dir /mnt --force rclone://${mirror}:/${bucket}
|
||||
echo "cleaning up"
|
||||
rm -r ${backup_dir}
|
||||
rm -rf "${backup_dir}"
|
||||
|
|
22
templates/script/backupmultinodedb.erb.sh
Normal file
22
templates/script/backupmultinodedb.erb.sh
Normal file
|
@ -0,0 +1,22 @@
|
|||
#!/bin/bash
|
||||
# Backup all databases
|
||||
# We sleep a deterministic amount of time, which will be between 0 an 128 m and allways the same within
|
||||
# a specific host, but will differ between hosts
|
||||
sleep $((16#$(ip a | grep "link/ether" | head -1 | awk -F ':' '{print $6}' | awk '{print $1}') / 2))m
|
||||
number_of_full_to_keep=7
|
||||
backup="multinode-db1.$(hostname -d)"
|
||||
remote_backup_dir="/etc/mariadb/backups"
|
||||
backup_dir="/opt/backups/multinode"
|
||||
bucket="db-backups-multinode"
|
||||
mirror="common-<%= @environment %>-mirror"
|
||||
echo "Backing up all databases for for multinode customer"
|
||||
ssh "${backup}" "sudo /home/script/bin/backup_multinode_db.sh"
|
||||
echo "Copying backups here"
|
||||
mkdir -p ${backup_dir}
|
||||
scp "script@${backup}:${remote_backup_dir}/mariadb-dump*.sql.gz" "${backup_dir}"
|
||||
echo "Copying backups to remote bucket"
|
||||
rclone mkdir "${mirror}:${bucket}"
|
||||
duplicity --allow-source-mismatch --full-if-older-than 1M --tempdir /mnt --archive-dir /mnt --no-encryption "${backup_dir}" "rclone://${mirror}:/${bucket}"
|
||||
duplicity remove-all-but-n-full "${number_of_full_to_keep}" --tempdir /mnt --archive-dir /mnt --force "rclone://${mirror}:/${bucket}"
|
||||
echo "cleaning up"
|
||||
rm -r "${backup_dir}"
|
|
@ -25,7 +25,7 @@ if [[ " ${sixmonths[*]} " =~ " ${customer} " ]]; then
|
|||
else
|
||||
number_of_full_to_keep=1
|
||||
fi
|
||||
container="mariadb${customer}_db_1"
|
||||
container="mariadb-${customer}_db_1"
|
||||
backup_dir="/opt/backups"
|
||||
bucket="db-backups"
|
||||
mirror="${customer}-<%= @environment %>-mirror"
|
||||
|
|
|
@ -2,9 +2,7 @@
|
|||
|
||||
project="${1}"
|
||||
bucket="${2}"
|
||||
number_of_full_to_keep='<%= @full_backup_retention %>'
|
||||
max_num_inc=$((32 * number_of_full_to_keep))
|
||||
max_num_full=$((2 * number_of_full_to_keep))
|
||||
declare -a sixmonths=('multinode')
|
||||
output_status="OK"
|
||||
exit_status=0
|
||||
problems=""
|
||||
|
@ -12,6 +10,19 @@ num_problems=0
|
|||
data_dir='/opt/backups/data'
|
||||
for project in $(ls ${data_dir}); do
|
||||
for bucket in $(ls ${data_dir}/${project}/ | sed 's/\.dat$//'); do
|
||||
issixmonths="false"
|
||||
for customer in "${sixmonths[@]}"; do
|
||||
if [[ "${bucket}" =~ ${customer} ]]; then
|
||||
issixmonths="true"
|
||||
fi
|
||||
done
|
||||
number_of_full_to_keep='<%= @full_backup_retention %>'
|
||||
if [[ "${issixmonths}" == "true" ]]; then
|
||||
number_of_full_to_keep=6
|
||||
fi
|
||||
max_num_inc=$((50 * number_of_full_to_keep))
|
||||
max_num_full=$((2 * number_of_full_to_keep))
|
||||
|
||||
tabular_data=$(cat "${data_dir}/${project}/${bucket}.dat")
|
||||
# We warn if there are too many old backups
|
||||
num_full=$(echo "${tabular_data}" | grep -c full)
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
project="${1}"
|
||||
bucket="${2}"
|
||||
data_dir='/opt/backups/data'
|
||||
for project in $(rclone listremotes | grep -v 'mirror'); do
|
||||
for project in $(rclone listremotes | grep -v 'mirror' | grep -v 'statistics'); do
|
||||
for bucket in $(rclone lsd "${project}" | awk '{print $NF}' | grep -E '\-mirror|db-backups'); do
|
||||
mkdir -p "${data_dir}/${project}"
|
||||
duplicity collection-status --log-file /dev/stdout --no-encryption "rclone://${project}${bucket}" | grep -E '^ inc|^ full' > "${data_dir}/${project}/${bucket}.dat"
|
||||
|
|
60
templates/script/create_folders_in_fullnode_buckets.erb.sh
Normal file
60
templates/script/create_folders_in_fullnode_buckets.erb.sh
Normal file
|
@ -0,0 +1,60 @@
|
|||
#!/bin/bash
|
||||
|
||||
customer="<%= @customer %>"
|
||||
environment="<%= @environment %>"
|
||||
eppn_suffix="<%= @eppn_suffix %>"
|
||||
include_userbuckets="<%= @include_userbuckets %>"
|
||||
container="nextcloud_app_1"
|
||||
yq="/usr/local/bin/yq"
|
||||
if ! [[ -x ${yq} ]]; then
|
||||
pip install yq
|
||||
fi
|
||||
|
||||
declare -a directories
|
||||
if [[ -n ${1} ]]; then
|
||||
directories=("${@}")
|
||||
else
|
||||
directories+=("Arbetsmaterial")
|
||||
directories+=("Bevarande")
|
||||
directories+=("Gallringsbart")
|
||||
fi
|
||||
|
||||
olddir="${PWD}"
|
||||
tempdir=$(mktemp -d)
|
||||
dirty=0
|
||||
primary=''
|
||||
declare -a users=( 'admin' )
|
||||
cd "${tempdir}" || echo "Could not cd to tempdir"
|
||||
declare -a projects=( $("${yq}" -r '.project_mapping.'"${customer}"'.'"${environment}"'.assigned | "\(.[].project)"' /etc/hiera/data/common.yaml) )
|
||||
if [[ "${include_userbuckets}" == "true" ]]; then
|
||||
primary=$("${yq}" -r '.project_mapping.'"${customer}"'.'"${environment}"'.primary_project' /etc/hiera/data/common.yaml)
|
||||
projects+=( "${primary}" )
|
||||
fi
|
||||
for project in "${projects[@]}"; do
|
||||
for bucket in $(rclone lsd "${project}:" | awk '{print $NF}' | grep -E -v '^primary'); do
|
||||
count=$(rclone size --json "${project}:${bucket}" | jq -r .count)
|
||||
if [[ ${count} -gt 0 ]]; then
|
||||
echo "Skipping ${project}:${bucket} because it has stuff in it already"
|
||||
continue
|
||||
fi
|
||||
for directory in "${directories[@]}"; do
|
||||
dirty=1
|
||||
if [[ -n ${primary} ]] && [[ ${project} == "${primary}" ]] ; then
|
||||
user=$(echo "${bucket}" | awk -F '-' '{print $1}')
|
||||
users+=( "${user}@${eppn_suffix}" )
|
||||
fi
|
||||
echo "Creating ${project}:${bucket}/${directory} because it looks nice and empty"
|
||||
temp="README.md"
|
||||
echo "**${directory}**" >"${temp}"
|
||||
echo "Var god lämna kvar denna fil/Please leave this file" >>"${temp}"
|
||||
rclone --no-traverse move "${temp}" "${project}:${bucket}/${directory}"
|
||||
done
|
||||
done
|
||||
done
|
||||
cd "${olddir}" || echo "could not cd to home dir"
|
||||
rmdir "${tempdir}"
|
||||
if [[ ${dirty} -gt 0 ]]; then
|
||||
for user in "${users[@]}"; do
|
||||
ssh -t "node3.$(hostname -d)" -l script -i .ssh/id_script "sudo /usr/local/bin/occ ${container} files:scan ${user}"
|
||||
done
|
||||
fi
|
|
@ -1,44 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
customer="<%= @customer %>"
|
||||
environment="<%= @environment %>"
|
||||
container="nextcloud_app_1"
|
||||
yq="/usr/local/bin/yq"
|
||||
if ! [[ -x ${yq} ]]; then
|
||||
pip install yq
|
||||
fi
|
||||
|
||||
declare -a directories
|
||||
if [[ -n ${1} ]]; then
|
||||
directories=("${@}")
|
||||
else
|
||||
directories+=("Arbetsmaterial")
|
||||
directories+=("Bevarande")
|
||||
directories+=("Gallringsbart")
|
||||
fi
|
||||
olddir="${PWD}"
|
||||
tempdir=$(mktemp -d)
|
||||
dirty=0
|
||||
cd "${tempdir}" || echo "Could not cd to tempdir"
|
||||
for project in $(${yq} -r '.project_mapping.'"${customer}"'.'"${environment}"'.assigned | "\(.[].project)"' /etc/hiera/data/common.yaml); do
|
||||
for bucket in $(rclone lsd "${project}:" | awk '{print $NF}'); do
|
||||
count=$(rclone size --json "${project}:${bucket}" | jq -r .count)
|
||||
if [[ ${count} -gt 0 ]]; then
|
||||
echo "Skipping ${project}:${bucket} because it has stuff in it already"
|
||||
continue
|
||||
fi
|
||||
for directory in "${directories[@]}"; do
|
||||
dirty=1
|
||||
echo "Creating ${project}:${bucket}/${directory} because it looks nice and empty"
|
||||
temp="README.md"
|
||||
echo "**${directory}**" >"${temp}"
|
||||
echo "Var god lämna kvar denna fil/Please leave this file" >>"${temp}"
|
||||
rclone --no-traverse move "${temp}" "${project}:${bucket}/${directory}"
|
||||
done
|
||||
done
|
||||
done
|
||||
cd "${olddir}" || echo "could not cd to home dir"
|
||||
rmdir "${tempdir}"
|
||||
if [[ ${dirty} -gt 0 ]]; then
|
||||
ssh -t "node3.$(hostname -d)" -l script -i .ssh/id_script "sudo /usr/local/bin/occ ${container} files:scan admin"
|
||||
fi
|
|
@ -5,7 +5,7 @@ shift
|
|||
include_userbuckets="${1}"
|
||||
shift
|
||||
environment="<%= @environment %>"
|
||||
container="nextcloud${customer}_app_1"
|
||||
container="nextcloud-${customer}-app-1"
|
||||
|
||||
yq="/usr/local/bin/yq"
|
||||
if ! [[ -x ${yq} ]]; then
|
||||
|
|
35
templates/script/delete_announcement_with_subject.erb.sh
Executable file
35
templates/script/delete_announcement_with_subject.erb.sh
Executable file
|
@ -0,0 +1,35 @@
|
|||
#!/bin/bash
|
||||
|
||||
VALID_ARGS=$(getopt -o s: --long subject: -- "$@")
|
||||
# shellcheck disable=SC2181
|
||||
if [[ ${?} -ne 0 ]]; then
|
||||
exit 1;
|
||||
fi
|
||||
|
||||
usage () {
|
||||
echo "${0}: -s|--subject <subject>"
|
||||
exit 1
|
||||
}
|
||||
|
||||
eval set -- "${VALID_ARGS}"
|
||||
# shellcheck disable=SC2078
|
||||
while [ : ]; do
|
||||
case "$1" in
|
||||
-s | --subject)
|
||||
subject="${2}"
|
||||
shift 2
|
||||
;;
|
||||
*)
|
||||
break
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [[ -z ${subject} ]]; then
|
||||
usage
|
||||
fi
|
||||
|
||||
id=$(/root/tasks/announce.sh --get | xmlstarlet sel -t -i '//subject="'"${subject}"'"' -m "/ocs/data/element/id" -v .)
|
||||
if [[ -n ${id} ]]; then
|
||||
/root/tasks/announce.sh --delete --id "${id}"
|
||||
fi
|
|
@ -4,19 +4,19 @@ customer="${1}"
|
|||
multinode="${2}"
|
||||
environment="<%= @environment %>"
|
||||
location="${customer}-${environment}"
|
||||
userjson=$(ssh "script@${multinode}" "sudo /home/script/bin/list_users.sh nextcloud${customer}_app_1")
|
||||
userjson=$(ssh -o StrictHostKeyChecking=no "script@${multinode}" "sudo /home/script/bin/list_users.sh nextcloud-${customer}-app-1")
|
||||
project="statistics"
|
||||
bucket="drive-server-coms"
|
||||
base_dir="${project}:${bucket}"
|
||||
stat_dir="/opt/statistics"
|
||||
customer_dir="${stat_dir}/${location}"
|
||||
mkdir -p "${customer_dir}"
|
||||
rclone mkdir "${base_dir}/${location}"
|
||||
rclone mkdir --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies "${base_dir}/${location}"
|
||||
echo "${userjson}" | jq . >"${customer_dir}/users.json"
|
||||
status=${?}
|
||||
if [[ ${status} -eq 0 ]]; then
|
||||
# something is wrong if we cant copy the file in 30 seconds, so we should note that
|
||||
if ! timeout 30s rclone copy --use-cookies "${customer_dir}/users.json" "${base_dir}/${location}/"; then
|
||||
if ! timeout 30s rclone copy -c --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies "${customer_dir}/users.json" "${base_dir}/${location}/"; then
|
||||
status=1
|
||||
fi
|
||||
fi
|
||||
|
|
|
@ -17,7 +17,7 @@ if [[ "${ENVIRONMENT}" == "prod" ]]; then
|
|||
fi
|
||||
if [[ "${CUSTOMER}" == "common" ]]; then
|
||||
customer=""
|
||||
types="multinode gss"
|
||||
types="multinode"
|
||||
fi
|
||||
|
||||
domain="${customer}drive.${env}sunet.se"
|
||||
|
|
|
@ -38,12 +38,16 @@ rclone="rclone --config /root/.rclone.conf"
|
|||
|
||||
# These are dynamic
|
||||
buckets="$(${rclone} lsd "${rcp}:" | awk '{print $NF}')"
|
||||
users=$(${rclone} cat "statistics:drive-server-coms/${rcp}/users.json" | jq '. | with_entries( select(.key | match("@") ) )')
|
||||
users=$(${rclone} cat --no-check-certificate --webdav-headers "Host,sunet.drive.sunet.se" --use-cookies "statistics:drive-server-coms/${rcp}/users.json" | jq '. | with_entries( select(.key | match("@") ) )')
|
||||
for eppn in $(echo "${users}" | jq -r keys[]); do
|
||||
user=${eppn%@*}
|
||||
username=${eppn%@*}
|
||||
# Remove underscore from username
|
||||
user=${username//_/-}
|
||||
# convert user to lower case for bucket naming rules
|
||||
user_lower=${user,,}
|
||||
|
||||
echo "$(date) - Check bucket status for ${eppn}"
|
||||
bucketname="${user}-${site_name//./-}"
|
||||
bucketname="${user_lower}-${site_name//./-}"
|
||||
if ! echo "${buckets}" | grep "${bucketname}" &> /dev/null; then
|
||||
echo "$(date) - ${eppn} has no mounts configured, adding bucket and mounts..."
|
||||
${rclone} mkdir "${rcp}:${bucketname}"
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Reference in a new issue