Import manifests from ops-repo
parent f08b71acc8
commit f4e8786f0b
17 functions/get_customer.pp Normal file
@@ -0,0 +1,17 @@
# Let's determine who the customer is by looking at the hostname
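# (Illustrative examples inferred from the logic below:
#  'gss.drive.sunet.se' -> 'gss', 'node1.sunet.drive.sunet.se' -> 'sunet')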
function sunetdrive::get_customer() >> String {
  $hostnameparts = split($facts['fqdn'], '\.')
  if $hostnameparts[1] == 'drive' {
    if $hostnameparts[0] =~ /^gss/ {
      return 'gss'
    } elsif $hostnameparts[0] =~ /^lookup/ {
      return 'lookup'
    } else {
      return 'common'
    }
  } elsif $hostnameparts[0] =~ /idp-proxy/ {
    return 'common'
  }

  return $hostnameparts[1]
}
13 functions/get_environment.pp Normal file
@@ -0,0 +1,13 @@
# Let's determine where we are by looking at the hostname
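# (Mapping implemented below: *.pilot.drive.sunet.se -> 'pilot',
#  *.drive.sunet.se -> 'prod', anything else -> 'test')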
function sunetdrive::get_environment() >> String {
  $hostname = $facts['fqdn']
  if $hostname =~ /^.*\.drive\.sunet\.se$/ {
    if $hostname =~ /^.*\.pilot\.drive\.sunet\.se$/ {
      return 'pilot'
    }
    else {
      return 'prod'
    }
  }
  'test'
}
4 functions/get_node_number.pp Normal file
@@ -0,0 +1,4 @@
# Let's determine the node number by looking at the hostname
function sunetdrive::get_node_number() >> Integer {
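  # Captures the first digit after the alphabetic prefix of the FQDN,
  # e.g. 'node3.drive.sunet.se' -> 3 (assumes single-digit node numbers)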
  Integer(regsubst($::fqdn, /^[a-zA-Z\-]+(\d).*$/, '\\1'))
}
349 manifests/app_type.pp Normal file
@@ -0,0 +1,349 @@
# Resource type for SUNET-Drive-Application
define sunetdrive::app_type (
  $bootstrap = undef,
  $location = undef,
  $override_config = undef,
  $override_compose = undef
) {
  # Config from group.yaml and customer-specific conf
  $environment = sunetdrive::get_environment()
  $customer = sunetdrive::get_customer()
  $nodenumber = sunetdrive::get_node_number()
  $is_multinode = (($override_config != undef) and ($override_compose != undef))
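  # Both overrides are set only when this define is instantiated per customer
  # from sunetdrive::multinode; full nodes fall through to the hiera lookups below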
  if $is_multinode {
    # The config used
    $config = $override_config
    # Other settings
    $admin_password = $config['admin_password']
    $dbhost = $config['dbhost']
    $instanceid = $config['instanceid']
    $mysql_user_password = $config['mysql_user_password']
    $passwordsalt = $config['passwordsalt']
    $redis_host_password = $config['redis_host_password']
    $s3_key = $config['s3_key']
    $s3_secret = $config['s3_secret']
    $secret = $config['secret']
  } else {
    # The config used
    $config = hiera_hash($environment)
    $skeletondirectory = $config['skeletondirectory']
    # Other settings
    $admin_password = safe_hiera('admin_password')
    $dbhost = 'proxysql_proxysql_1'
    $instanceid = safe_hiera('instanceid')
    $mysql_user_password = safe_hiera('mysql_user_password')
    $passwordsalt = safe_hiera('passwordsalt')
    $redis_host_password = safe_hiera('redis_host_password')
    $redis_cluster_password = safe_hiera('redis_cluster_password')
    $s3_key = safe_hiera('s3_key')
    $s3_secret = safe_hiera('s3_secret')
    $secret = safe_hiera('secret')
  }
  $twofactor_enforced_groups = hiera_array('twofactor_enforced_groups')
  $twofactor_enforced_excluded_groups = hiera_array('twofactor_enforced_excluded_groups')
  $nextcloud_version = hiera("nextcloud_version_${environment}")
  $nextcloud_version_string = split($nextcloud_version, '[-]')[0]
  # Common settings for multinode and full nodes
  $nextcloud_ip = $config['app']
  $redis_host = $config['redis_host']
  $s3_bucket = $config['s3_bucket']
  $s3_host = $config['s3_host']
  $site_name = $config['site_name']
  $trusted_domains = $config['trusted_domains']
  $trusted_proxies = $config['trusted_proxies']

  # These are encrypted values from local.eyaml
  $gss_jwt_key = safe_hiera('gss_jwt_key')
  $smtppassword = safe_hiera('smtp_password')

  # These are global values from common.yaml
  $gs_enabled = hiera('gs_enabled')
  $gs_federation = hiera('gs_federation')
  $gss_master_admin = hiera_array('gss_master_admin')
  $gss_master_url = hiera("gss_master_url_${environment}")
  $lookup_server = hiera("lookup_server_${environment}")
  $mail_domain = hiera("mail_domain_${environment}")
  $mail_smtphost = hiera("mail_smtphost_${environment}")
  $mail_from_address = hiera("mail_from_address_${environment}")
  $s3_usepath = hiera('s3_usepath')
  $smtpuser = hiera("smtp_user_${environment}")
  $tug_office = hiera_array('tug_office')

  # This is a global value from common.yaml but overridden in the gss-servers' local.yaml
  $gss_mode = hiera('gss_mode')

  # These are global values from common.yaml but can be overridden in group.yaml
  $drive_email_template_text_left = $config['drive_email_template_text_left']
  $drive_email_template_plain_text_left = $config['drive_email_template_plain_text_left']
  $drive_email_template_url_left = $config['drive_email_template_url_left']
  $lb_servers = hiera_hash($environment)['lb_servers']
  $document_servers = hiera_hash($environment)['document_servers']
  # Set up cronjob on node3
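  # $::fqdn[0,5] is the first five characters of the FQDN, so this only
  # matches hosts named node3*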
  if $::fqdn[0,5] == 'node3' {
    file { '/opt/nextcloud/cron.sh':
      ensure  => file,
      owner   => 'root',
      group   => 'root',
      mode    => '0700',
      content => template('sunetdrive/application/cron.erb.sh'),
    }
    cron { 'cron.sh':
      command => '/opt/nextcloud/cron.sh',
      user    => 'root',
      minute  => '*/5',
    }
    if $location =~ /^extern/ {
      file { '/opt/nextcloud/user-sync.sh':
        ensure  => file,
        owner   => 'root',
        group   => 'root',
        mode    => '0700',
        content => template('sunetdrive/application/user-sync.erb.sh'),
      }
      -> cron { 'gss_user_sync':
        command => '/opt/nextcloud/user-sync.sh',
        user    => 'root',
        minute  => '*/5',
      }
    }
  }
  # Create users
  unless $is_multinode {
    user { 'www-data': ensure => present, system => true }
    package { 'aufs-tools': ensure => latest, provider => 'apt' }

    file { '/usr/local/bin/occ':
      ensure  => present,
      force   => true,
      owner   => 'root',
      group   => 'root',
      content => template('sunetdrive/application/occ.erb'),
      mode    => '0740',
    }
    file { '/etc/sudoers.d/99-occ':
      ensure  => file,
      content => "script ALL=(root) NOPASSWD: /usr/local/bin/occ\n",
      mode    => '0440',
      owner   => 'root',
      group   => 'root',
    }
    file { '/usr/local/bin/upgrade23-25.sh':
      ensure  => present,
      force   => true,
      owner   => 'root',
      group   => 'root',
      content => template('sunetdrive/application/upgrade23-25.erb.sh'),
      mode    => '0744',
    }
    file { '/opt/rotate/conf.d/nextcloud.conf':
      ensure  => file,
      force   => true,
      owner   => 'root',
      group   => 'root',
      content => "#This file is managed by puppet\n#filename:retention days:maxsize mb\n/opt/nextcloud/nextcloud.log:180:256\n",
      mode    => '0644',
    }
    file { '/opt/rotate/conf.d/redis.conf':
      ensure  => file,
      force   => true,
      owner   => 'root',
      group   => 'root',
      content => "#This file is managed by puppet\n#filename:retention days:maxsize mb\n/opt/redis/server/server.log:180:256\n/opt/redis/sentinel/sentinel.log:180:256\n",
      mode    => '0644',
    }
    file { '/opt/nextcloud/000-default.conf':
      ensure  => file,
      force   => true,
      owner   => 'www-data',
      group   => 'root',
      content => template('sunetdrive/application/000-default.conf.erb'),
      mode    => '0644',
    }
    file { '/opt/nextcloud/mpm_prefork.conf':
      ensure  => file,
      force   => true,
      owner   => 'www-data',
      group   => 'root',
      content => template('sunetdrive/application/mpm_prefork.conf.erb'),
      mode    => '0644',
    }
    file { '/opt/nextcloud/404.html':
      ensure  => file,
      force   => true,
      owner   => 'www-data',
      group   => 'root',
      content => template('sunetdrive/application/404.html.erb'),
      mode    => '0644',
    }
    file { '/opt/nextcloud/config.php':
      ensure  => file,
      force   => true,
      owner   => 'www-data',
      group   => 'root',
      content => template('sunetdrive/application/config.php.erb'),
      mode    => '0644',
    }
    file { '/opt/nextcloud/nextcloud.log':
      ensure => file,
      force  => true,
      owner  => 'www-data',
      group  => 'root',
      mode   => '0644',
    }
    file { '/opt/nextcloud/rclone.conf':
      ensure  => file,
      owner   => 'www-data',
      group   => 'root',
      content => template('sunetdrive/application/rclone.conf.erb'),
      mode    => '0644',
    }
    file { '/opt/nextcloud/apache.php.ini':
      ensure  => file,
      force   => true,
      owner   => 'www-data',
      group   => 'root',
      content => template('sunetdrive/application/apache.php.ini.erb'),
      mode    => '0644',
    }

    file { '/opt/nextcloud/apcu.ini':
      ensure  => file,
      force   => true,
      owner   => 'www-data',
      group   => 'root',
      content => template('sunetdrive/application/apcu.ini.erb'),
      mode    => '0644',
    }

    file { '/opt/nextcloud/cli.php.ini':
      ensure  => file,
      force   => true,
      owner   => 'www-data',
      group   => 'root',
      content => template('sunetdrive/application/cli.php.ini.erb'),
      mode    => '0644',
    }
    file { '/usr/local/bin/migrate_external_mounts':
      ensure  => file,
      force   => true,
      owner   => 'root',
      group   => 'root',
      content => template('sunetdrive/application/migrate_external_mounts.erb'),
      mode    => '0744',
    }
    file { '/opt/nextcloud/complete_reinstall.sh':
      ensure  => file,
      force   => true,
      owner   => 'root',
      group   => 'root',
      content => template('sunetdrive/application/complete_reinstall.erb.sh'),
      mode    => '0744',
    }
    file { '/etc/sudoers.d/99-run-cosmos':
      ensure  => file,
      content => "script ALL=(root) NOPASSWD: /usr/local/bin/run-cosmos\n",
      mode    => '0440',
      owner   => 'root',
      group   => 'root',
    }
    file { '/usr/local/bin/redis-cli':
      ensure  => present,
      force   => true,
      owner   => 'root',
      group   => 'root',
      content => template('sunetdrive/application/redis-cli.erb'),
      mode    => '0740',
    }
    file { '/etc/sudoers.d/99-redis-cli':
      ensure  => file,
      content => "script ALL=(root) NOPASSWD: /usr/local/bin/redis-cli\n",
      mode    => '0440',
      owner   => 'root',
      group   => 'root',
    }
    file { '/usr/local/bin/add_admin_user':
      ensure  => present,
      force   => true,
      owner   => 'root',
      group   => 'root',
      content => template('sunetdrive/application/add_admin_user.erb'),
      mode    => '0744',
    }
    file { '/etc/sudoers.d/99-no_mysql_servers':
      ensure  => file,
      content => "script ALL=(root) NOPASSWD: /home/script/bin/get_no_mysql_servers.sh\n",
      mode    => '0440',
      owner   => 'root',
      group   => 'root',
    }
    file { '/home/script/bin/get_no_mysql_servers.sh':
      ensure  => present,
      force   => true,
      owner   => 'script',
      group   => 'script',
      content => template('sunetdrive/application/get_no_mysql_servers.erb.sh'),
      mode    => '0744',
    }
  }
  if $location =~ /^gss-test/ {
    file { '/opt/nextcloud/mappingfile.json':
      ensure  => present,
      owner   => 'www-data',
      group   => 'root',
      content => template('sunetdrive/application/mappingfile-test.json.erb'),
      mode    => '0644',
    }
  } elsif $location =~ /^gss/ {
    file { '/opt/nextcloud/mappingfile.json':
      ensure  => present,
      owner   => 'www-data',
      group   => 'root',
      content => template('sunetdrive/application/mappingfile-prod.json.erb'),
      mode    => '0644',
    }
  } elsif $location =~ /^kau/ {
    file { '/mnt':
      ensure => directory,
      owner  => 'www-data',
      group  => 'www-data',
      mode   => '0755',
    }
  }
  if $skeletondirectory {
    file { '/opt/nextcloud/skeleton':
      ensure => directory,
      owner  => 'www-data',
      group  => 'www-data',
      mode   => '0755',
    }
  }
  if $customer == 'mdu' {
    file { '/opt/nextcloud/skeleton/README.md':
      ensure  => present,
      require => File['/opt/nextcloud/skeleton'],
      owner   => 'www-data',
      group   => 'www-data',
      content => template('sunetdrive/application/MDU-README.md.erb'),
      mode    => '0644',
    }
  }
  if $is_multinode {
    $compose = $override_compose
  } else {
    $compose = sunet::docker_compose { 'drive_application_docker_compose':
      content          => template('sunetdrive/application/docker-compose_nextcloud.yml.erb'),
      service_name     => 'nextcloud',
      compose_dir      => '/opt/',
      compose_filename => 'docker-compose.yml',
      description      => 'Nextcloud application',
    }
    sunet::misc::ufw_allow { 'https':
      from => '0.0.0.0/0',
      port => 443,
    }
  }
}
13 manifests/application.pp Normal file
@@ -0,0 +1,13 @@
# A class using the app resource
class sunetdrive::application (
  $bootstrap = undef,
  $location = undef,
  $override_config = undef,
  $override_compose = undef
) {

  $app = sunetdrive::app_type { 'base_app':
    bootstrap => $bootstrap,
    location  => $location,
  }
}
13 manifests/cache.pp Normal file
@@ -0,0 +1,13 @@
# Class for SUNET-Drive-Cache
class sunetdrive::cache (
  $bootstrap = undef,
  $location = undef,
  $override_config = undef,
  $override_compose = undef,
  $override_redis_conf = undef
) {
  $cache = sunetdrive::cache_type { 'base_cache':
    bootstrap => $bootstrap,
    location  => $location,
  }
}
74 manifests/cache_type.pp Normal file
@@ -0,0 +1,74 @@
# Resource type for SUNET-Drive-Cache
define sunetdrive::cache_type (
  $bootstrap = undef,
  $location = undef,
  $override_config = undef,
  $override_compose = undef,
  $override_redis_conf = undef
) {
  $environment = sunetdrive::get_environment()
  $is_multinode = (($override_config != undef) and ($override_compose != undef) and ($override_redis_conf != undef))
  # Now we get the corresponding config from group.yaml
  if $is_multinode {
    $config = $override_config
  } else {
    $config = hiera_hash($environment)
    $nextcloud_ip = $config['app']

    # Pick out the first host to be redis leader
    $leader_address = $nextcloud_ip[0]
  }
  # Static variable defined here
  $leader_name = 'cache1'

  if $is_multinode {
    $redis_config = $override_redis_conf
  } else {
    $redis_host_password = safe_hiera('redis_host_password')
    $replica_of = hiera('replica_of')
    $announce_address = hiera('announce_address')

    file { '/opt/redis/server':
      ensure  => directory,
      recurse => true,
    }
    $redis_config = file { '/opt/redis/server/server.conf':
      ensure  => present,
      content => template('sunetdrive/cache/server.conf.erb'),
      mode    => '0666',
      require => File['/opt/redis/server'],
    }
    file { '/opt/redis/sentinel':
      ensure  => directory,
      recurse => true,
    }
    file { '/opt/redis/sentinel/sentinel.conf':
      ensure  => present,
      content => template('sunetdrive/cache/sentinel.conf.erb'),
      mode    => '0666',
      require => File['/opt/redis/sentinel'],
    }
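    # 6379 and 26379 are the default Redis server and Sentinel ports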
    sunet::misc::ufw_allow { 'redis_server_port':
      from => '0.0.0.0/0',
      port => 6379,
    }
    sunet::misc::ufw_allow { 'redis_sentinel_port':
      from => '0.0.0.0/0',
      port => 26379,
    }
  }
  if $is_multinode {
    $compose = $override_compose
  } else {
    $compose = sunet::docker_compose { 'drive_redis_docker_compose':
      content          => template('sunetdrive/cache/docker-compose_cache.yml.erb'),
      service_name     => 'redis',
      compose_dir      => '/opt/',
      compose_filename => 'docker-compose.yml',
      description      => 'Redis cache cluster',
    }
  }

}
50 manifests/cloudimage.pp Normal file
@@ -0,0 +1,50 @@
# Wrapper with common settings for sunet::cloudimage
define sunetdrive::cloudimage(
  String $mac,
  String $cpus = '1',
  String $memory = '1024',
  Optional[String] $description = undef,
  Optional[String] $ip = undef,
  Optional[String] $netmask = undef,
  Optional[String] $gateway = undef,
  Optional[String] $ip6 = undef,
  Optional[String] $netmask6 = '64',
  Optional[String] $gateway6 = undef,
  Array[String] $search = ['komreg.net'],
  String $bridge = 'br0',
  String $size = '40G',
  String $local_size = '0',
  String $image_url = 'https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img',
) {
  # This is a hack; use SSH keys from the KVM host instead?
  $_ssh_key = hiera('ssh_authorized_keys')['berra+96E0A9D4']
  $cloudimage_ssh_keys = [sprintf('%s %s %s', $_ssh_key['type'], $_ssh_key['key'], $_ssh_key['name'])]

  sunet::cloudimage { $name:
    image_url   => $image_url,
    ssh_keys    => $cloudimage_ssh_keys,
    apt_dir     => '/etc/cosmos/apt',
    disable_ec2 => true,
    #
    bridge      => $bridge,
    dhcp        => false,
    mac         => $mac,
    ip          => $ip,
    netmask     => $netmask,
    gateway     => $gateway,
    ip6         => $ip6,
    netmask6    => $netmask6,
    gateway6    => $gateway6,
    resolver    => ['130.242.80.14', '130.242.80.99'],
    search      => $search,
    #
    repo        => $::cosmos_repo_origin_url,
    tagpattern  => $::cosmos_tag_pattern,
    #
    cpus        => $cpus,
    memory      => $memory,
    description => $description,
    size        => $size,
    local_size  => $local_size,
  }
}
122 manifests/db_type.pp Normal file
@@ -0,0 +1,122 @@
# MariaDB cluster resource type for SUNET Drive
define sunetdrive::db_type(
  $tag_mariadb = undef,
  $bootstrap = undef,
  $location = undef,
  $override_config = undef,
  $override_compose = undef
) {

  # Config from group.yaml
  $environment = sunetdrive::get_environment()
  $mariadb_version = hiera("mariadb_version_${environment}")
  $is_multinode = (($override_config != undef) and ($override_compose != undef))
  if $is_multinode {
    $config = $override_config
    $mysql_root_password = $config['mysql_root_password']
    $mysql_user_password = $config['mysql_user_password']
    $backup_password = $config['backup_password']
    $mariadb_dir = $config['mariadb_dir']
    $mycnf_path = $config['mycnf_path']
    $server_id = '1000'
  } else {
    $config = hiera_hash($environment)
    $mysql_root_password = safe_hiera('mysql_root_password')
    $backup_password = safe_hiera('backup_password')
    $proxysql_password = safe_hiera('proxysql_password')
    $mysql_user_password = safe_hiera('mysql_user_password')
    $mariadb_dir = '/etc/mariadb'
    $mycnf_path = 'sunetdrive/mariadb/my.cnf.erb'
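    # Derive a unique replication server_id from the last digit of the
    # hostname, e.g. db3 -> 1003 (assumes single-digit node numbers)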
    $server_id = 1000 + Integer($facts['hostname'][-1])
    ensure_resource('file', $mariadb_dir, { ensure => directory, recurse => true } )
    $dirs = ['datadir', 'init', 'conf', 'backups', 'scripts' ]
    $dirs.each |$dir| {
      ensure_resource('file',"${mariadb_dir}/${dir}", { ensure => directory, recurse => true } )
    }
  }

  $nextcloud_ip = $config['app']

  unless $is_multinode {
    $db_ip = $config['db']
    $db_ipv6 = $config['db_v6']
    $backup_ip = $config['backup']
    $backup_ipv6 = $config['backup_v6']
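    # Standard Galera cluster ports: 3306 (MySQL), 4444 (SST),
    # 4567 (replication), 4568 (IST)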
    $ports = [3306, 4444, 4567, 4568]

    sunet::misc::ufw_allow { 'mariadb_ports':
      from => $db_ip + $nextcloud_ip + $backup_ip + $backup_ipv6 + $db_ipv6,
      port => $ports,
    }
    sunet::system_user {'mysql': username => 'mysql', group => 'mysql' }
  }

  if $location =~ /^lookup/ {
    $sql_files = ['02-backup_user.sql', '03-proxysql.sql', '05-lookup.sql']
  } else {
    $sql_files = ['02-backup_user.sql', '03-proxysql.sql', '04-nextcloud.sql']
  }
  $sql_files.each |$sql_file| {
    file { "${mariadb_dir}/init/${sql_file}":
      ensure  => present,
      content => template("sunetdrive/mariadb/${sql_file}.erb"),
      mode    => '0744',
    }
  }
  file { "${mariadb_dir}/conf/credentials.cnf":
    ensure  => present,
    content => template('sunetdrive/mariadb/credentials.cnf.erb'),
    mode    => '0744',
  }
  file { "${mariadb_dir}/conf/my.cnf":
    ensure  => present,
    content => template($mycnf_path),
    mode    => '0744',
  }
  file { "${mariadb_dir}/scripts/run_manual_backup_dump.sh":
    ensure  => present,
    content => template('sunetdrive/mariadb/run_manual_backup_dump.erb.sh'),
    mode    => '0744',
  }
  file { "${mariadb_dir}/scripts/rename-docker.sh":
    ensure  => present,
    content => template('sunetdrive/mariadb/rename-docker.sh'),
    mode    => '0744',
  }
  if $is_multinode {
    $docker_compose = $override_compose
  } else {
    file { '/usr/local/bin/size-test':
      ensure  => present,
      content => template('sunetdrive/mariadb/size-test.erb'),
      mode    => '0744',
    }
    file { '/usr/local/bin/status-test':
      ensure  => present,
      content => template('sunetdrive/mariadb/status-test.erb'),
      mode    => '0744',
    }
    file { '/etc/sudoers.d/99-size-test':
      ensure  => file,
      content => "script ALL=(root) NOPASSWD: /usr/local/bin/size-test\n",
      mode    => '0440',
      owner   => 'root',
      group   => 'root',
    }
    file { '/etc/sudoers.d/99-status-test':
      ensure  => file,
      content => "script ALL=(root) NOPASSWD: /usr/local/bin/status-test\n",
      mode    => '0440',
      owner   => 'root',
      group   => 'root',
    }
    $docker_compose = sunet::docker_compose { 'drive_mariadb_docker_compose':
      content          => template('sunetdrive/mariadb/docker-compose_mariadb.yml.erb'),
      service_name     => 'mariadb',
      compose_dir      => '/opt/',
      compose_filename => 'docker-compose.yml',
      description      => 'Mariadb server',
    }
  }
}
36 manifests/dockerhost.pp Normal file
@@ -0,0 +1,36 @@
include apt
# Wrapper for sunet::dockerhost to do Drive-specific things
class sunetdrive::dockerhost(
  String $version = safe_hiera('docker_version'),
  String $package_name = hiera('docker_package_name', 'docker-ce'),
  Enum['stable', 'edge', 'test'] $docker_repo = hiera('docker_repo', 'stable'),
  String $compose_version = safe_hiera('docker_compose_version'),
  String $docker_args = '',
  Optional[String] $docker_dns = undef,
  String $storage_driver = 'aufs',
) {
  if $version == 'NOT_SET_IN_HIERA' {
    fail('Docker version not set in Hiera')
  }
  if $compose_version == 'NOT_SET_IN_HIERA' {
    fail('Docker-compose version not set in Hiera')
  }
  class { 'sunet::dockerhost':
    docker_version            => $version,
    docker_package_name       => $package_name,
    docker_repo               => $docker_repo,
    run_docker_cleanup        => true,
    manage_dockerhost_unbound => true,
    docker_extra_parameters   => $docker_args,
    docker_dns                => $docker_dns,
    storage_driver            => $storage_driver,
    docker_network            => true, # let docker choose a network for the 'docker' bridge
    compose_version           => $compose_version,
  }
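  # Drop the old apt pin for containerd.io and hold the package at its
  # current version instead, so apt will not upgrade it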
  file { '/etc/apt/preferences.d/containerd.io.pref':
    ensure => absent,
  }
  -> package { 'containerd.io':
    ensure => 'held',
  }
}
89 manifests/infra_script.pp Normal file
@@ -0,0 +1,89 @@
# Class for SUNET-Drive-Script
class sunetdrive::infra_script (
  $bootstrap = undef,
  $location = undef
) {
  $environment = sunetdrive::get_environment()
  $customer = 'common'
  $config = hiera_hash($environment)
  $gss_backup_server = $config['gss_backup_server']
  $lookup_backup_server = $config['lookup_backup_server']
  $ssh_config = "Host *.sunet.se
      User script
      IdentityFile /root/.ssh/id_script"
  $site_name = $config['site_name']
  package { 'python3-pip':
    ensure   => installed,
    provider => apt,
  }
  package { 'drive-utils':
    ensure   => installed,
    provider => pip3,
    source   => 'https://pypi.sunet.se/packages/drive-utils-0.1.3.tar.gz',
    require  => Package['python3-pip'],
  }
  file { '/root/.ssh/':
    ensure => directory,
    mode   => '0700',
  }
  file { '/root/tasks/':
    ensure => directory,
    mode   => '0700',
  }
  file { '/root/.ssh/id_script':
    ensure  => file,
    content => safe_hiera('ssh_priv_key'),
    owner   => 'root',
    group   => 'root',
    mode    => '0600',
  }
  file { '/root/.ssh/config':
    ensure  => file,
    content => $ssh_config,
    owner   => 'root',
    group   => 'root',
    mode    => '0600',
  }
  file { '/root/tasks/backupdb.sh':
    ensure  => file,
    content => template('sunetdrive/script/backupdb.erb.sh'),
    owner   => 'root',
    group   => 'root',
    mode    => '0700',
  }
  file { '/root/tasks/maintenance.sh':
    ensure  => file,
    content => template('sunetdrive/script/maintenance.erb.sh'),
    owner   => 'root',
    group   => 'root',
    mode    => '0700',
  }
  sunet::scriptherder::cronjob { 'backupgssdb':
    cmd           => "/root/tasks/backupdb.sh ${gss_backup_server}",
    hour          => '2',
    minute        => '0',
    ok_criteria   => ['exit_status=0','max_age=2d'],
    warn_criteria => ['exit_status=1','max_age=3d'],
  }
  sunet::scriptherder::cronjob { 'backuplookupdb':
    cmd           => "/root/tasks/backupdb.sh ${lookup_backup_server}",
    hour          => '2',
    minute        => '0',
    ok_criteria   => ['exit_status=0','max_age=2d'],
    warn_criteria => ['exit_status=1','max_age=3d'],
  }
  # sunet::scriptherder::cronjob { 'scriptherder_daily':
  #   cmd           => '/bin/true',
  #   special       => 'daily',
  #   ok_criteria   => ['exit_status=0','max_age=4d'],
  #   warn_criteria => ['exit_status=1','max_age=8d'],
  # }
  # cron { 'example_job':
  #   ensure  => 'present',
  #   command => '/bin/true',
  #   hour    => ['0'],
  #   target  => 'root',
  #   user    => 'root',
  # }
}
47 manifests/lb.pp Normal file
@@ -0,0 +1,47 @@
class sunetdrive::lb($location=undef) {
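  # $::fqdn[2,1] is the character at index 2 of the FQDN, i.e. the digit in
  # 'lbN' (assumes single-digit load balancer numbers)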
  $nodenumber = $::fqdn[2,1]

  sunet::nagios::nrpe_command {'check_exabgp_announce':
    command_line => '/usr/lib/nagios/plugins/check_exabgp_announce -w 1 -c 10',
    require      => File['/usr/lib/nagios/plugins/check_exabgp_announce'],
  }
  sunet::nagios::nrpe_command {'check_sarimner':
    command_line => '/usr/lib/nagios/plugins/check_sarimner',
    require      => File['/usr/lib/nagios/plugins/check_sarimner'],
  }

  file { '/etc/sudoers.d/99-docker-logs':
    ensure  => file,
    content => "nagios ALL=(root) NOPASSWD: /usr/bin/docker logs*\n",
    mode    => '0440',
    owner   => 'root',
    group   => 'root',
  }
  file { '/usr/lib/nagios/plugins/check_exabgp_announce':
    ensure  => 'file',
    mode    => '0755',
    owner   => 'root',
    group   => 'root',
    content => template('sunetdrive/lb/check_exabgp_announce.erb'),
  }
  file { '/usr/lib/nagios/plugins/check_sarimner':
    ensure  => 'file',
    mode    => '0755',
    owner   => 'root',
    group   => 'root',
    content => template('sunetdrive/lb/check_sarimner.erb'),
  }
  file { '/opt/frontend/errorfiles':
    ensure => 'directory',
    mode   => '0755',
    owner  => 'root',
    group  => 'root',
  }
  -> file { '/opt/frontend/errorfiles/503.http':
    ensure  => 'file',
    mode    => '0644',
    owner   => 'root',
    group   => 'root',
    content => template('sunetdrive/lb/503.http.erb'),
  }
}
42 manifests/lookup.pp Normal file
@@ -0,0 +1,42 @@
# Class for SUNET-Drive-Lookup-Server
class sunetdrive::lookup (
  $bootstrap = undef,
  $location = undef
) {

  $environment = sunetdrive::get_environment()

  # Firewall settings
  $nextcloud_ip = hiera_array("${location}_app", [])
  $tug_office = hiera_array('tug_office')

  $dbhost = 'proxysql_proxysql_1'
  $gss_jwt_key = safe_hiera('gss_jwt_key')
  $mysql_user_password = safe_hiera('mysql_user_password')
  $lookup_version = hiera("lookup_version_${environment}")

  # Create users
  user { 'www-data': ensure => present, system => true }

  file { '/opt/lookup/config.php':
    ensure  => present,
    owner   => 'www-data',
    group   => 'root',
    content => template('sunetdrive/lookup/config.php.erb'),
    mode    => '0644',
  }

  sunet::docker_compose { 'drive_lookup_docker_compose':
    content          => template('sunetdrive/lookup/docker-compose_lookup.yml.erb'),
    service_name     => 'lookup',
    compose_dir      => '/opt/',
    compose_filename => 'docker-compose.yml',
    description      => 'Lookup server',
  }

  sunet::misc::ufw_allow { 'https':
    from => '0.0.0.0/0',
    port => 443,
  }
}
27 manifests/mariadb.pp Normal file
@@ -0,0 +1,27 @@
# A class using the db resource
class sunetdrive::mariadb (
  $bootstrap = undef,
  $location = undef,
  $tag_mariadb = undef,
  $override_config = undef,
  $override_compose = undef
) {

  $quorum_id = $::fqdn
  $quorum_password = safe_hiera('quorum_password')
  $db = sunetdrive::db_type { 'base_db':
    bootstrap   => $bootstrap,
    tag_mariadb => $tag_mariadb,
    location    => $location,
  }
  file { '/etc/quorum.conf':
    ensure  => file,
    mode    => '0644',
    content => template('sunetdrive/mariadb/quorum.conf.erb'),
  }
  file { '/usr/local/bin/quorum':
    ensure  => file,
    mode    => '0700',
    content => template('sunetdrive/mariadb/quorum.erb.sh'),
  }
}
126 manifests/mariadb_backup.pp Normal file
@@ -0,0 +1,126 @@
# This is an asynchronous replica of the MariaDB cluster for SUNET Drive
class sunetdrive::mariadb_backup($tag_mariadb=undef, $location=undef) {
  $dirs = [ 'datadir', 'init', 'conf', 'backups' ]
  $dirs.each | $dir | {
    ensure_resource('file',"/opt/mariadb_backup/${dir}", { ensure => directory, recurse => true } )
  }
  # Config from group.yaml
  $environment = sunetdrive::get_environment()
  $config = hiera_hash($environment)
  $first_db = $config['first_db']

  # Secrets from local.eyaml
  $mysql_root_password = safe_hiera('mysql_root_password')
  $backup_password = safe_hiera('backup_password')
  $mysql_user_password = safe_hiera('mysql_user_password')
  $statistics_secret = safe_hiera('statistics_secret')

  sunet::system_user {'mysql': username => 'mysql', group => 'mysql' }

  $sql_files = ['02-backup_user.sql']
  $sql_files.each |$sql_file| {
    file { "/opt/mariadb_backup/init/${sql_file}":
      ensure  => present,
      content => template("sunetdrive/mariadb_backup/${sql_file}.erb"),
      mode    => '0744',
    }
  }
  $conf_files = ['credentials.cnf', 'my.cnf']
  $conf_files.each |$conf_file| {
    file { "/opt/mariadb_backup/conf/${conf_file}":
      ensure  => present,
      content => template("sunetdrive/mariadb_backup/${conf_file}.erb"),
      mode    => '0744',
    }
  }
  file { '/opt/mariadb_backup/start_replica_from_init.sh':
    ensure  => present,
    content => template('sunetdrive/mariadb_backup/start_replica_from_init.erb.sh'),
    mode    => '0744',
  }
  file { '/opt/mariadb_backup/do_backup.sh':
    ensure  => present,
    content => template('sunetdrive/mariadb_backup/do_backup.erb.sh'),
    mode    => '0744',
  }
  file { '/opt/mariadb_backup/check_replication.sh':
    ensure => absent,
  }
  file { '/etc/sudoers.d/99-check_replication':
    ensure => absent,
  }
  file { '/usr/local/bin/check_replication':
    ensure  => present,
    content => template('sunetdrive/mariadb_backup/check_replication.erb'),
    mode    => '0744',
  }
  file { '/usr/local/bin/status-test':
    ensure  => present,
    content => template('sunetdrive/mariadb_backup/status-test.erb'),
    mode    => '0744',
  }
  file { '/etc/sudoers.d/99-status-test':
    ensure  => file,
    content => "script ALL=(root) NOPASSWD: /usr/local/bin/status-test\n",
    mode    => '0440',
    owner   => 'root',
    group   => 'root',
  }
  sunet::docker_compose { 'mariadb_backup':
    content          => template('sunetdrive/mariadb_backup/docker-compose_mariadb_backup.yml.erb'),
    service_name     => 'mariadb_backup',
    compose_dir      => '/opt/',
    compose_filename => 'docker-compose.yml',
    description      => 'Mariadb replica',
  }

  # Rclone stuff
  $rclone_url = 'https://downloads.rclone.org/rclone-current-linux-amd64.deb'
  $local_path = '/tmp/rclone-current-linux-amd64.deb'
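  # Fetch the .deb once ('creates' keeps the exec idempotent) and install it
  # with dpkg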
  exec { 'rclone_deb':
    command => "/usr/bin/wget -q ${rclone_url} -O ${local_path}",
    creates => $local_path,
  }
  package { 'rclone':
    ensure   => installed,
    provider => dpkg,
    source   => $local_path,
    require  => Exec['rclone_deb'],
  }

  file { '/root/.rclone.conf':
    ensure  => file,
    content => template('sunetdrive/mariadb_backup/rclone.conf.erb'),
    owner   => 'root',
    group   => 'root',
    mode    => '0600',
  }
  file { '/opt/mariadb_backup/listusers.sh':
    ensure  => file,
    content => template('sunetdrive/mariadb_backup/listusers.erb.sh'),
    owner   => 'root',
    group   => 'root',
    mode    => '0700',
  }
  file { '/opt/mariadb_backup/find_disabled_sharing.sh':
    ensure  => file,
    content => template('sunetdrive/mariadb_backup/find_disabled_sharing.erb.sh'),
    owner   => 'root',
    group   => 'root',
    mode    => '0700',
  }
  sunet::scriptherder::cronjob { 'listusers':
    cmd           => '/opt/mariadb_backup/listusers.sh',
    minute        => '*/5',
    ok_criteria   => ['exit_status=0','max_age=30m'],
    warn_criteria => ['exit_status=1', 'max_age=60m'],
  }
  sunet::scriptherder::cronjob { 'disabledsharing':
    cmd           => '/opt/mariadb_backup/find_disabled_sharing.sh',
    minute        => '5',
    hour          => '3',
    ok_criteria   => ['exit_status=0','max_age=2d'],
    warn_criteria => ['exit_status=1','max_age=3d'],
  }

}
364 manifests/multinode.pp Normal file
@@ -0,0 +1,364 @@
# This class uses all the other classes to create a multinode server
class sunetdrive::multinode (
  $bootstrap = undef,
  $location = undef
) {
  $myname = $facts['hostname']
  $is_multinode = true
  $environment = sunetdrive::get_environment()
  $lb_servers = hiera_hash($environment)['lb_servers']
  $document_servers = hiera_hash($environment)['document_servers']

  $twofactor_enforced_groups = []
  $twofactor_enforced_excluded_groups = []
  $allcustomers = hiera_hash('multinode_mapping')
  $allnames = $allcustomers.keys
  $tempcustomers = $allnames.map | $index, $potential | {
    if $myname =~ $allcustomers[$potential]['server'] {
      $potential
    }
    else {
      nil
    }
  }
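  # $::fqdn[9,1] is the character at index 9 of the FQDN, i.e. the digit in
  # 'multinodeN' (an assumption based on the naming scheme); subtracting the
  # bareword nil drops the entries whose server regex did not match this host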
  $nodenumber = $::fqdn[9,1]
  $customers = $tempcustomers - nil
  user { 'www-data': ensure => present, system => true }
  sunet::system_user {'mysql': username => 'mysql', group => 'mysql' }
  ensure_resource('file', '/opt/nextcloud', { ensure => directory, recurse => true } )
  file { '/usr/local/bin/occ':
    ensure  => present,
    force   => true,
    owner   => 'root',
    group   => 'root',
    content => template('sunetdrive/application/occ.erb'),
    mode    => '0740',
  }
  file { '/etc/sudoers.d/99-occ':
    ensure  => file,
    content => "script ALL=(root) NOPASSWD: /usr/local/bin/occ\n",
    mode    => '0440',
    owner   => 'root',
    group   => 'root',
  }
  file { '/usr/local/bin/upgrade23-25.sh':
    ensure  => present,
    force   => true,
    owner   => 'root',
    group   => 'root',
    content => template('sunetdrive/multinode/upgrade23-25.erb.sh'),
    mode    => '0744',
  }
  file { '/usr/local/bin/get_containers':
    ensure  => present,
    force   => true,
    owner   => 'root',
    group   => 'root',
    content => template('sunetdrive/multinode/get_containers'),
    mode    => '0744',
  }
  file { '/usr/local/bin/add_admin_user':
    ensure  => present,
    force   => true,
    owner   => 'root',
    group   => 'root',
    content => template('sunetdrive/application/add_admin_user.erb'),
    mode    => '0744',
  }
  file { '/opt/nextcloud/prune.sh':
    ensure  => file,
    force   => true,
    owner   => 'root',
    group   => 'root',
    content => template('sunetdrive/multinode/prune.erb.sh'),
    mode    => '0744',
  }
  cron { 'multinode_prune':
    command => '/opt/nextcloud/prune.sh',
    require => File['/opt/nextcloud/prune.sh'],
    user    => 'root',
    minute  => '25',
    hour    => '3',
  }
  file { '/opt/nextcloud/apache.php.ini':
    ensure  => file,
    force   => true,
    owner   => 'www-data',
    group   => 'root',
    content => template('sunetdrive/application/apache.php.ini.erb'),
    mode    => '0644',
  }

  file { '/opt/nextcloud/apcu.ini':
    ensure  => file,
    force   => true,
    owner   => 'www-data',
    group   => 'root',
    content => template('sunetdrive/application/apcu.ini.erb'),
    mode    => '0644',
  }

  file { '/opt/nextcloud/cli.php.ini':
    ensure  => file,
    force   => true,
    owner   => 'www-data',
    group   => 'root',
    content => template('sunetdrive/application/cli.php.ini.erb'),
    mode    => '0644',
  }

  file { '/opt/nextcloud/cron.sh':
    ensure  => file,
    owner   => 'root',
    group   => 'root',
    mode    => '0700',
    content => template('sunetdrive/application/cron.erb.sh'),
  }

  file { '/opt/nextcloud/000-default.conf':
    ensure  => file,
    force   => true,
    owner   => 'www-data',
    group   => 'root',
    content => template('sunetdrive/application/000-default.conf.erb'),
    mode    => '0644',
  }

  file { '/opt/nextcloud/404.html':
    ensure  => file,
    force   => true,
    owner   => 'www-data',
    group   => 'root',
    content => template('sunetdrive/application/404.html.erb'),
    mode    => '0644',
  }
  $link_content = '[Match]
Driver=bridge veth

[Link]
MACAddressPolicy=none'
  file { '/etc/systemd/network/98-default.link':
    ensure  => file,
    force   => true,
    owner   => 'root',
    group   => 'root',
    content => $link_content,
    mode    => '0744',
  }
  file { '/opt/nextcloud/compress-logs.sh':
    ensure  => file,
    force   => true,
    owner   => 'root',
    group   => 'root',
    content => template('sunetdrive/multinode/compress-logs.erb.sh'),
    mode    => '0744',
  }
  cron { 'multinode_compress_logs':
    command => '/opt/nextcloud/compress-logs.sh',
    require => File['/opt/nextcloud/compress-logs.sh'],
    user    => 'root',
    minute  => '10',
    hour    => '0',
    weekday => '0',
  }
  cron { 'multinode_cron':
    command => '/opt/nextcloud/cron.sh',
    require => File['/opt/nextcloud/cron.sh'],
    user    => 'root',
    minute  => '*/10',
  }
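  # Per-customer resources: every customer in multinode_mapping gets its own
  # Nextcloud, Redis and MariaDB stack under /opt/multinode/<customer>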
  $customers.each | $index, $customer | {
    if $environment == 'prod' {
      $s3_bucket = "primary-${customer}-drive.sunet.se"
      $site_name = "${customer}.drive.sunet.se"
      $trusted_proxies = ['lb1.drive.sunet.se','lb2.drive.sunet.se', 'lb3.drive.sunet.se', 'lb4.drive.sunet.se']
    } else {
      $s3_bucket = "primary-${customer}-${environment}.sunet.se"
      $site_name = "${customer}.drive.${environment}.sunet.se"
      $trusted_proxies = ["lb1.drive.${environment}.sunet.se","lb2.drive.${environment}.sunet.se",
        "lb3.drive.${environment}.sunet.se","lb4.drive.${environment}.sunet.se"]
    }
    $apache_default_path = "/opt/multinode/${customer}/000-default.conf"
    $apache_error_path = "/opt/multinode/${customer}/404.html"
    $config_php_path = "/opt/multinode/${customer}/config.php"
    $cron_log_path = "/opt/multinode/${customer}/cron.log"
    $customer_config_full = hiera_hash($customer)
    $customer_config = $customer_config_full[$environment]
    $dbhost = "mariadb${customer}_db_1"

    $gs_enabled = hiera('gs_enabled')
    $gs_federation = hiera('gs_federation')
    $gss_master_admin = hiera_array('gss_master_admin')
    $gss_master_url = hiera("gss_master_url_${environment}")
    $https_port = hiera_hash('multinode_mapping')[$customer]['port']
    $lookup_server = hiera("lookup_server_${environment}")
    $mail_domain = hiera("mail_domain_${environment}")
    $mail_from_address = hiera("mail_from_address_${environment}")
    $mail_smtphost = hiera("mail_smtphost_${environment}")
    $nextcloud_log_path = "/opt/multinode/${customer}/nextcloud.log"
    $nextcloud_version = hiera("nextcloud_version_${environment}")
    $nextcloud_version_string = split($nextcloud_version, '[-]')[0]
    $rclone_conf_path = "/opt/multinode/${customer}/rclone.conf"
    $redis_conf_dir = "/opt/multinode/${customer}/server"
    $redis_conf_path = "${redis_conf_dir}/redis.conf"
    $redis_host = "redis${customer}_redis-server_1"
    $s3_host = $customer_config['s3_host']
    $s3_usepath = hiera('s3_usepath')
    $smtpuser = hiera("smtp_user_${environment}")
    $trusted_domains = [$site_name, $facts['fqdn'], 'localhost']
    $tug_office = hiera_array('tug_office')

    # Secrets from local.eyaml
    $admin_password = safe_hiera("${customer}_admin_password")
    $instanceid = safe_hiera("${customer}_instanceid")
    $mysql_root_password = safe_hiera("${customer}_mysql_root_password")
    $backup_password = safe_hiera("${customer}_backup_password")
    $mysql_user_password = safe_hiera("${customer}_mysql_user_password")
    $s3_key = safe_hiera("${customer}_s3_key")
    $s3_secret = safe_hiera("${customer}_s3_secret")
    $secret = safe_hiera("${customer}_secret")
    $passwordsalt = safe_hiera("${customer}_passwordsalt")
    $redis_host_password = safe_hiera("${customer}_redis_host_password")
    $gss_jwt_key = safe_hiera('gss_jwt_key')
    $smtppassword = safe_hiera('smtp_password')

    $extra_config = {
      admin_password                       => $admin_password,
      backup_password                      => $backup_password,
      dbhost                               => $dbhost,
      drive_email_template_plain_text_left => hiera($environment)['drive_email_template_plain_text_left'],
      drive_email_template_text_left       => hiera($environment)['drive_email_template_text_left'],
      drive_email_template_url_left        => hiera($environment)['drive_email_template_url_left'],
      mariadb_dir                          => "/opt/multinode/${customer}/mariadb-${customer}",
      mycnf_path                           => 'sunetdrive/multinode/my.cnf.erb',
      mysql_root_password                  => $mysql_root_password,
      mysql_user_password                  => $mysql_user_password,
      trusted_domains                      => $trusted_domains,
      trusted_proxies                      => $trusted_proxies,
    }
    $config = deep_merge($customer_config, $extra_config)
    ensure_resource('file', "/opt/multinode/${customer}", { ensure => directory, recurse => true } )
    $dirs = ['datadir', 'init', 'conf', 'scripts' ]
    $dirs.each |$dir| {
      ensure_resource('file',"${config['mariadb_dir']}/${dir}", { ensure => directory, recurse => true } )
    }

    ensure_resource('file',"${config['mariadb_dir']}/backups", {
      ensure  => directory,
      owner   => 'root',
      group   => 'script',
      mode    => '0750',
      recurse => true
    } )
    # Use the other sunetdrive classes with overridden config
    $db_ip = ['127.0.0.1']
    $app_compose = sunet::docker_compose { "drive_${customer}_app_docker_compose":
      content          => template('sunetdrive/multinode/docker-compose_nextcloud.yml.erb'),
      service_name     => "nextcloud-${customer}",
      compose_dir      => "/opt/multinode/${customer}",
      compose_filename => 'docker-compose.yml',
      description      => "Nextcloud application for ${customer}",
      require          => File[$config_php_path,
        '/opt/nextcloud/apache.php.ini',
        '/opt/nextcloud/cli.php.ini',
        "/opt/multinode/${customer}/complete_reinstall.sh",
      ],
    }
    $cache_compose = sunet::docker_compose { "drive_${customer}_redis_docker_compose":
      content          => template('sunetdrive/multinode/docker-compose_cache.yml.erb'),
      service_name     => "redis-${customer}",
      compose_dir      => "/opt/multinode/${customer}",
      compose_filename => 'docker-compose.yml',
      description      => "Redis cache server for ${customer}",
      require          => File[$redis_conf_path],
    }
    $mariadb_compose = sunet::docker_compose { "drive_mariadb_${customer}_compose":
      content          => template('sunetdrive/multinode/docker-compose_mariadb.yml.erb'),
      service_name     => "mariadb-${customer}",
      compose_dir      => "/opt/multinode/${customer}",
      compose_filename => 'docker-compose.yml',
      description      => "Mariadb server for ${customer}",
      owner            => 'root',
      group            => 'script',
      mode             => '0750',
    }

    file { "/opt/multinode/${customer}/mariadb-${customer}/do_backup.sh":
      ensure  => present,
      content => template('sunetdrive/mariadb_backup/do_backup.erb.sh'),
      mode    => '0744',
    }
    sunetdrive::app_type { "app_${customer}":
      location         => $location,
      override_config  => $config,
      override_compose => $app_compose,
    }

    sunetdrive::db_type { "db_${customer}":
      location         => $location,
      override_config  => $config,
      override_compose => $mariadb_compose,
    }
    file { $redis_conf_dir:
      ensure  => directory,
      recurse => true,
    }
    $redis_config = file { $redis_conf_path:
      ensure  => present,
      content => template('sunetdrive/multinode/redis.conf.erb'),
      mode    => '0666',
      require => [ File[$redis_conf_dir]]
    }
    sunetdrive::cache_type { "cache_${customer}":
      location            => $location,
      override_config     => $config,
      override_compose    => $cache_compose,
      override_redis_conf => $redis_config,
      require             => File[$redis_conf_path],
    }
    file { $config_php_path:
      ensure  => present,
      owner   => 'www-data',
      group   => 'root',
      content => template('sunetdrive/application/config.php.erb'),
      mode    => '0644',
    }
    file { $cron_log_path:
      ensure => file,
      force  => true,
      owner  => 'www-data',
      group  => 'root',
      mode   => '0644',
    }
    file { $nextcloud_log_path:
      ensure => file,
      force  => true,
      owner  => 'www-data',
      group  => 'root',
      mode   => '0644',
    }
    file { $rclone_conf_path:
      ensure  => present,
      owner   => 'www-data',
      group   => 'root',
      content => template('sunetdrive/multinode/rclone.conf.erb'),
      mode    => '0644',
    }
    file { "/opt/multinode/${customer}/complete_reinstall.sh":
      ensure  => file,
      force   => true,
      owner   => 'root',
      group   => 'root',
      content => template('sunetdrive/multinode/complete_reinstall.erb.sh'),
      mode    => '0744',
    }
    # Open ports
    sunet::misc::ufw_allow { "https_port_${customer}":
      from => '0.0.0.0',
      port => $https_port,
    }
  }
}
22 manifests/nagios.pp Normal file
@@ -0,0 +1,22 @@
# Nagios health check
define sunetdrive::nagios::check_mdq_health ($url=undef) {
  $mdq_url = $url ? {
    undef   => $title,
    default => $url
  }
  ensure_resource('file', '/usr/lib/nagios/plugins/check_mdq_health', {
    ensure  => 'file',
    mode    => '0555',
    group   => 'nagios',
    require => Package['nagios-nrpe-server'],
    content => template('thiss/mdq/check_mdq_health.erb'),
  })
  ensure_resource('nagioscfg::command','check_mdq_health', {
    command_line => "/usr/lib/nagios/plugins/check_mdq_health '\$HOSTNAME\'"
  })
  nagioscfg::service {"check_mdq_health_${name}":
    host_name     => [$name],
    check_command => 'check_mdq_health',
    description   => "Check MDQ health at ${name}"
  }
}
74
manifests/ni.pp
Normal file
74
manifests/ni.pp
Normal file
|
@ -0,0 +1,74 @@
|
||||||
|
# This is NI for SUNET Drive
|
||||||
|
class sunetdrive::ni() {
|
||||||
|
if $environment == 'prod' {
|
||||||
|
$domain = 'ni.drive.sunet.se'
|
||||||
|
} else {
|
||||||
|
$domain = 'ni.drive.test.sunet.se'
|
||||||
|
}
|
||||||
|
|
||||||
|
file { '/opt/sri/postgresql':
|
||||||
|
ensure => directory,
|
||||||
|
}
|
||||||
|
-> file { '/opt/sri/postgresql/data':
|
||||||
|
ensure => directory,
|
||||||
|
}
|
||||||
|
-> file { '/opt/sri/neo4j':
|
||||||
|
ensure => directory,
|
||||||
|
}
|
||||||
|
-> file { '/opt/sri/neo4j/data':
|
||||||
|
ensure => directory,
|
||||||
|
}
|
||||||
|
-> file { '/opt/sri/ni':
|
||||||
|
ensure => directory,
|
||||||
|
}
|
||||||
|
-> file { '/opt/sri/ni/etc':
|
||||||
|
ensure => directory,
|
||||||
|
}
|
||||||
|
-> file { '/opt/sri/ni/log':
|
||||||
|
ensure => directory,
|
||||||
|
}
|
||||||
|
-> file { '/opt/sri/backup':
|
||||||
|
ensure => directory,
|
||||||
|
}
|
||||||
|
-> file { '/opt/sri/backup/neo4j':
|
||||||
|
ensure => directory,
|
||||||
|
}
|
||||||
|
-> file { '/opt/sri/staticfiles':
|
||||||
|
ensure => directory,
|
||||||
|
}
|
||||||
|
-> file { '/opt/sri/srifrontfiles':
|
||||||
|
ensure => directory,
|
||||||
|
}
|
||||||
|
-> file { '/opt/sri/nginx':
|
||||||
|
ensure => directory,
|
||||||
|
}
|
||||||
|
-> file { '/opt/sri/nginx/etc':
|
||||||
|
ensure => directory,
|
||||||
|
}
|
||||||
|
-> file { '/opt/sri/nginx/log':
|
||||||
|
ensure => directory,
|
||||||
|
}
|
||||||
|
-> file { '/opt/sri/nginx/etc/ni.http':
|
||||||
|
ensure => present,
|
||||||
|
content => '';
|
||||||
|
}
|
||||||
|
-> file { '/opt/sri/nginx/etc/dhparams.pem':
|
||||||
|
ensure => present,
|
||||||
|
content => '';
|
||||||
|
}
|
||||||
|
-> file { '/opt/sri/ni/etc/dotenv':
|
||||||
|
ensure => present,
|
||||||
|
content => '';
|
||||||
|
}
|
||||||
|
-> file { '/opt/sri/postgresql/init/init-noclook-db.sh':
|
||||||
|
ensure => present,
|
||||||
|
content => '';
|
||||||
|
}
|
||||||
|
sunet::docker_compose { 'drive_ni_compose':
|
||||||
|
content => template('sunetdrive/ni/docker-compose.yml.erb'),
|
||||||
|
service_name => 'sri',
|
||||||
|
compose_dir => '/opt/',
|
||||||
|
compose_filename => 'docker-compose.yml',
|
||||||
|
description => 'NI',
|
||||||
|
}
|
||||||
|
}

23
manifests/onlyoffice.pp
Normal file
@@ -0,0 +1,23 @@
# Class for SUNET-Drive-OnlyOffice
class sunetdrive::onlyoffice () {
  $environment = sunetdrive::get_environment()
  $extra_hosts = hiera_hash($environment)['extra_hosts']
  $docker_tag = hiera_hash($environment)['collabora_tag']
  $customers = hiera('fullnodes')
  $multinode_customers = keys(hiera_hash('multinode_mapping'))
  if $environment == 'prod' {
    $domain = 'drive.sunet.se'
  } else {
    $domain = 'drive.test.sunet.se'
  }
  sunet::collabora::docs { 'sunet-onlyoffice':
    dns           => ['89.32.32.32'],
    extra_hosts   => $extra_hosts,
    extra_volumes => ['/opt/collabora/coolwsd.xml:/etc/coolwsd/coolwsd.xml'],
    docker_tag    => $docker_tag,
  }
  file { '/opt/collabora/coolwsd.xml':
    ensure  => present,
    content => template('sunetdrive/document/coolwsd.xml.erb'),
  }
}

99
manifests/proxysql.pp
Normal file
@@ -0,0 +1,99 @@
# Class for SUNET-Drive-Proxysql
class sunetdrive::proxysql (
  $bootstrap               = undef,
  $location                = undef,
  $proxysql_container_name = 'proxysql_proxysql_1',
) {

  # Config from group.yaml
  $environment = sunetdrive::get_environment()
  $config = hiera_hash($environment)
  $db_ip = $config['db']
  $nextcloud_ip = $config['app']
  $proxysql_ok_num = length($nextcloud_ip)
  $proxysql_warn_num = $proxysql_ok_num - 1

  # Global config from common.yaml
  $proxysql_version = hiera('proxysql_version')
  $tug_office = hiera_array('tug_office')

  # Config from local.yaml and local.eyaml
  $admin_password = safe_hiera('admin_password')
  $cluster_admin_password = safe_hiera('cluster_admin_password')
  $monitor_password = safe_hiera('proxysql_password')
  $mysql_user_password = safe_hiera('mysql_user_password')
  $mysql_user = safe_hiera('mysql_user')

  $transaction_persistent = 1

  file { '/usr/local/bin/proxysql':
    ensure  => file,
    owner   => 'root',
    group   => 'root',
    content => template('sunetdrive/proxysql/proxysql.erb.sh'),
    mode    => '0755',
  }
  file { '/opt/proxysql/insert_server_in_proxysql.sh':
    ensure  => file,
    owner   => 'root',
    group   => 'root',
    content => template('sunetdrive/proxysql/insert_server_in_proxysql.erb.sh'),
    mode    => '0755',
  }
  file { '/usr/lib/nagios/plugins/check_proxysql_server':
    ensure  => 'file',
    mode    => '0755',
    group   => 'nagios',
    require => Package['nagios-nrpe-server'],
    content => template('sunetdrive/proxysql/check_proxysql_server.erb'),
  }
  file { '/usr/lib/nagios/plugins/check_mysql_server_status':
    ensure  => 'file',
    mode    => '0755',
    group   => 'nagios',
    require => Package['nagios-nrpe-server'],
    content => template('sunetdrive/proxysql/check_mysql_server_status.erb'),
  }
  file { '/opt/proxysql/proxysql.cnf':
    ensure  => present,
    content => template('sunetdrive/proxysql/proxysql.cnf.erb'),
    mode    => '0644',
  }
  file { '/opt/proxysql/my.cnf':
    ensure  => present,
    content => template('sunetdrive/proxysql/my.cnf.erb'),
    mode    => '0644',
  }
  sunet::misc::ufw_allow { 'stats_ports':
    from => $tug_office,
    port => 6080,
  }

  sunet::docker_compose { 'drive_proxysql_docker_compose':
    content          => template('sunetdrive/proxysql/docker-compose_proxysql.yml.erb'),
    service_name     => 'proxysql',
    compose_dir      => '/opt/',
    compose_filename => 'docker-compose.yml',
    description      => 'Proxysql',
  }
  if $::fqdn[0,5] == 'node1' {
    sunet::scriptherder::cronjob { 'insert_server_in_proxysql':
      cmd           => '/opt/proxysql/insert_server_in_proxysql.sh',
      hour          => '*',
      minute        => '*/5',
      ok_criteria   => ['exit_status=0','max_age=1h'],
      warn_criteria => ['exit_status=1','max_age=3h'],
    }
  } else {
    sunet::scriptherder::cronjob { 'insert_server_in_proxysql':
      ensure        => 'absent',
      cmd           => '/opt/proxysql/insert_server_in_proxysql.sh',
      purge_results => true,
    }
  }
}
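
A minimal sketch of how this class might be declared; the location value is a hypothetical placeholder, and everything else is looked up from hiera/eyaml inside the class:

# Hypothetical node-level declaration
class { 'sunetdrive::proxysql':
  location => 'sto4',  # placeholder site name
}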

26
manifests/redis_cluster.pp
Normal file
@@ -0,0 +1,26 @@
# Class for the SUNET Drive Redis cluster (cache backend)
class sunetdrive::redis_cluster (
  $location = undef,
) {
  $customer = sunetdrive::get_customer()
  $redis_password = safe_hiera('redis_password')
  package { 'redis-tools': ensure => latest, provider => 'apt' }

  file { '/usr/local/bin/bootstrap_cluster':
    ensure  => present,
    content => template('sunetdrive/redis_cluster/bootstrap_cluster.erb.sh'),
    mode    => '0700',
  }
  file { '/usr/local/bin/reset_cluster':
    ensure  => present,
    content => template('sunetdrive/redis_cluster/reset_cluster.erb.sh'),
    mode    => '0700',
  }
  file { '/root/.bashrc':
    ensure  => present,
    content => template('sunetdrive/redis_cluster/bashrc.erb'),
    mode    => '0644',
  }
}

40
manifests/resolve.pp
Normal file
@@ -0,0 +1,40 @@
include stdlib
# SUNET Drive resolver
class sunetdrive::resolve($location=undef) {
  $unbound_conf = '# This file is managed by puppet
server:
    interface: 0.0.0.0
    interface: ::0
    access-control: 37.156.195.0/24 allow
    access-control: 89.45.237.0/24 allow
    access-control: 89.45.20.0/24 allow
    access-control: 89.45.21.0/24 allow
    access-control: 2001:6b0:1c::/64 allow
    access-control: 2001:6b0:6c::/64 allow'

  file { 'sunetdrive_unbound_conf':
    ensure  => 'file',
    name    => '/etc/unbound/unbound.conf.d/sunetdrive.conf',
    mode    => '0644',
    content => $unbound_conf,
  }
  file_line { 'disable_systemd_stubresolver':
    line => 'DNSStubListener=no',
    path => '/etc/systemd/resolved.conf',
  }
  -> exec { 'disable_systemd_resolved':
    command => 'systemctl disable --now systemd-resolved.service',
    onlyif  => 'systemctl is-enabled systemd-resolved.service',
  }
  sunet::misc::ufw_allow { 'dns_port_ufw_udp':
    from  => 'any',
    port  => 53,
    proto => 'udp',
  }
  sunet::misc::ufw_allow { 'dns_port_ufw_tcp':
    from  => 'any',
    port  => 53,
    proto => 'tcp',
  }
}

46
manifests/reva.pp
Normal file
@@ -0,0 +1,46 @@
# Class for the SUNET Drive Sciencemesh Reva server
class sunetdrive::reva (
  String $domain      = '',
  String $reva_domain = ''
) {

  $environment = sunetdrive::get_environment()
  $shared_secret = safe_hiera('shared_secret')
  $iopsecret = safe_hiera('iopsecret')

  # Create users
  user { 'www-data': ensure => present, system => true }

  file { '/opt/reva/revad.toml':
    ensure  => present,
    owner   => 'www-data',
    group   => 'root',
    content => template('sunetdrive/reva/revad.toml.erb'),
    mode    => '0644',
  }
  file { '/opt/reva/data':
    ensure => directory,
    owner  => 'www-data',
  }
  file { '/opt/reva/ocm-providers.json':
    ensure  => present,
    owner   => 'www-data',
    group   => 'root',
    content => template('sunetdrive/reva/ocm-providers.json.erb'),
    mode    => '0644',
  }

  sunet::docker_compose { 'drive_reva_docker_compose':
    content          => template('sunetdrive/reva/docker-compose.yml.erb'),
    service_name     => 'reva',
    compose_dir      => '/opt/',
    compose_filename => 'docker-compose.yml',
    description      => 'Sciencemesh reva server',
  }

  # Firewall settings
  sunet::misc::ufw_allow { 'https_reva':
    from => '0.0.0.0/0',
    port => 443,
  }
}
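
A minimal usage sketch, assuming the test-environment domain; the reva_domain hostname is hypothetical, and shared_secret/iopsecret come from eyaml:

# Hypothetical declaration for a Sciencemesh node
class { 'sunetdrive::reva':
  domain      => 'drive.test.sunet.se',
  reva_domain => 'reva.drive.test.sunet.se',  # hypothetical hostname
}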

99
manifests/satosa.pp
Normal file
@@ -0,0 +1,99 @@
# IdP proxy used in SUNET Drive
class sunetdrive::satosa($dehydrated_name=undef, $image='docker.sunet.se/satosa', $tag=undef) {

  $proxy_conf = hiera('satosa_proxy_conf')
  $default_conf = {
    'STATE_ENCRYPTION_KEY'       => hiera('satosa_state_encryption_key'),
    'USER_ID_HASH_SALT'          => hiera('satosa_user_id_hash_salt'),
    'CUSTOM_PLUGIN_MODULE_PATHS' => ['plugins'],
    'COOKIE_STATE_NAME'          => 'SATOSA_STATE',
  }
  $merged_conf = merge($proxy_conf, $default_conf)

  ensure_resource('file', '/etc', { ensure => directory })
  ensure_resource('file', '/etc/satosa', { ensure => directory })
  ensure_resource('file', '/etc/satosa/run', { ensure => directory })
  ensure_resource('file', '/etc/satosa/plugins', { ensure => directory })
  ensure_resource('file', '/etc/satosa/metadata', { ensure => directory })

  ['backend','frontend','metadata'].each |$id| {
    if hiera("satosa_${id}_key", undef) != undef {
      sunet::snippets::secret_file { "/etc/satosa/${id}.key": hiera_key => "satosa_${id}_key" }
      # assume cert is in cosmos repo
    } else {
      # make key pair
      sunet::snippets::keygen { "satosa_${id}":
        key_file  => "/etc/satosa/${id}.key",
        cert_file => "/etc/satosa/${id}.crt",
      }
    }
  }
  sunet::docker_run { 'satosa':
    image    => $image,
    imagetag => $tag,
    volumes  => ['/etc/satosa:/etc/satosa','/etc/dehydrated:/etc/dehydrated'],
    ports    => ['443:8000'],
    env      => ['METADATA_DIR=/etc/satosa/metadata', 'WORKER_TIMEOUT=120'],
  }
  file { '/etc/satosa/proxy_conf.yaml':
    content => inline_template("<%= @merged_conf.to_yaml %>\n"),
    notify  => Sunet::Docker_run['satosa'],
  }
  $plugins = hiera('satosa_config')
  sort(keys($plugins)).each |$n| {
    $conf = hiera($n)
    $fn = $plugins[$n]
    file { $fn:
      content => inline_template("<%= @conf.to_yaml %>\n"),
      notify  => Sunet::Docker_run['satosa'],
    }
  }
  ufw::allow { 'satosa-allow-https':
    ip   => 'any',
    port => '443',
  }
  $dehydrated_status = $dehydrated_name ? {
    undef   => 'absent',
    default => 'present',
  }
  sunet::docker_run { 'alwayshttps':
    ensure => $dehydrated_status,
    image  => 'docker.sunet.se/always-https',
    ports  => ['80:80'],
    env    => ['ACME_URL=http://acme-c.sunet.se'],
  }
  ufw::allow { 'satosa-allow-http':
    ensure => $dehydrated_status,
    ip     => 'any',
    port   => '80',
  }
  if ($dehydrated_name) {
    file { '/etc/satosa/https.key': ensure => link, target => "/etc/dehydrated/certs/${dehydrated_name}.key" }
    file { '/etc/satosa/https.crt': ensure => link, target => "/etc/dehydrated/certs/${dehydrated_name}/fullchain.pem" }
  } else {
    sunet::snippets::keygen { 'satosa_https':
      key_file  => '/etc/satosa/https.key',
      cert_file => '/etc/satosa/https.crt',
    }
  }
  file { '/opt/satosa':
    ensure => directory,
    owner  => 'root',
    group  => 'root',
    mode   => '0755',
  }
  -> file { '/opt/satosa/restart.sh':
    ensure  => file,
    owner   => 'root',
    group   => 'root',
    mode    => '0700',
    content => template('sunetdrive/satosa/restart.erb.sh'),
  }
  -> cron { 'restart_satosa':
    command => '/opt/satosa/restart.sh',
    user    => 'root',
    minute  => '15',
    hour    => '*/8',
  }
}
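
A usage sketch under the assumption that a dehydrated certificate exists for the proxy host; both values below are hypothetical placeholders. Leaving dehydrated_name unset instead generates a self-signed pair and marks the port-80 redirector absent:

# Hypothetical declaration
class { 'sunetdrive::satosa':
  dehydrated_name => 'idp-proxy.drive.test.sunet.se',  # hypothetical hostname
  tag             => 'stable',                          # hypothetical image tag
}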

414
manifests/script.pp
Normal file
@@ -0,0 +1,414 @@
# Class for SUNET-Drive-Script
class sunetdrive::script (
  $bootstrap = undef,
  $location  = undef
) {
  $environment = sunetdrive::get_environment()
  $customer = sunetdrive::get_customer()
  $apikey_test = safe_hiera('monitor_apikey_test')
  $apikey_prod = safe_hiera('monitor_apikey_prod')
  $full_project_mapping = hiera_hash('project_mapping')
  $project_mapping = $full_project_mapping[$customer][$environment]
  $primary_project = $project_mapping['primary_project']
  $mirror_project = $project_mapping['mirror_project']
  $assigned_projects = $project_mapping['assigned']
  $full_backup_retention = hiera('full_backup_retention')
  $config = hiera_hash($environment)
  $backup_server = $config['backup_server']
  $rclone_url = 'https://downloads.rclone.org/rclone-current-linux-amd64.deb'
  $local_path = '/tmp/rclone-current-linux-amd64.deb'
  $singlenodes = hiera('singlenodes')

  $ssh_config = "Host *.sunet.se
    User script
    IdentityFile /root/.ssh/id_script"

  $s3_key = safe_hiera('s3_key')
  $s3_secret = safe_hiera('s3_secret')
  $statistics_secret = safe_hiera('statistics_secret')
  $s3_key_pilot = hiera('s3_key_pilot', false)
  $s3_secret_pilot = hiera('s3_secret_pilot', false)
  # FIXME: This will not work if we start to mess around with the location of multinode customer data
  $s3_host = $config['s3_host']
  if $s3_host == 's3.sto4.safedc.net' {
    $s3_host_mirror = 's3.sto3.safedc.net'
    $s3_key_mirror = safe_hiera('s3_key_sto3')
    $s3_secret_mirror = safe_hiera('s3_secret_sto3')
  } else {
    $s3_host_mirror = 's3.sto4.safedc.net'
    $s3_key_mirror = safe_hiera('s3_key_sto4')
    $s3_secret_mirror = safe_hiera('s3_secret_sto4')
  }
  $site_name = $config['site_name']
  $user_bucket_name = $config['user_bucket_name']

  # It is a start that will get us user buckets and primary buckets
  $backup_projects = $location
  exec { 'rclone_deb':
    command => "/usr/bin/wget -q ${rclone_url} -O ${local_path}",
    creates => $local_path,
  }
  package { 'rclone':
    ensure   => installed,
    provider => dpkg,
    source   => $local_path,
    require  => Exec['rclone_deb'],
  }
  package { 'python3.9':
    ensure   => installed,
    provider => apt,
  }
  -> package { 'python3-pip':
    ensure   => installed,
    provider => apt,
  }
  package { 'duplicity':
    ensure   => installed,
    provider => apt,
  }
  $drive_version = '0.3.1'
  exec { 'drive-utils':
    command => "python3.9 -m pip install https://pypi.sunet.se/packages/drive-utils-${drive_version}.tar.gz",
    unless  => "python3.9 -m pip list | grep drive-utils | grep ${drive_version}",
    require => Package['python3.9'],
  }
  file { '/root/.ssh/':
    ensure => directory,
    mode   => '0700',
  }
  file { '/root/tasks/':
    ensure => directory,
    mode   => '0700',
  }
  file { '/root/scripts/':
    ensure => directory,
    mode   => '0700',
  }
  file { '/root/.ssh/id_script':
    ensure  => file,
    content => safe_hiera('ssh_priv_key'),
    owner   => 'root',
    group   => 'root',
    mode    => '0600',
  }
  file { '/root/.ssh/config':
    ensure  => file,
    content => $ssh_config,
    owner   => 'root',
    group   => 'root',
    mode    => '0600',
  }
  file { '/root/.rclone.conf':
    ensure  => file,
    content => template('sunetdrive/script/rclone.conf.erb'),
    owner   => 'root',
    group   => 'root',
    mode    => '0600',
  }
  if $s3_key_pilot and $s3_secret_pilot {
    file { '/root/scripts/migratebuckets.sh':
      ensure  => file,
      content => template('sunetdrive/script/migratebuckets.erb.sh'),
      owner   => 'root',
      group   => 'root',
      mode    => '0700',
    }
  }

  file { '/root/tasks/backupsingleproject.sh':
    ensure  => file,
    content => template('sunetdrive/script/backupsingleproject.erb.sh'),
    owner   => 'root',
    group   => 'root',
    mode    => '0700',
  }
  file { '/root/tasks/backupbuckets.sh':
    ensure  => file,
    content => template('sunetdrive/script/backup-all-buckets.erb.sh'),
    owner   => 'root',
    group   => 'root',
    mode    => '0700',
  }
  file { '/root/tasks/backup-projectbuckets.sh':
    ensure => absent,
  }
  file { '/root/tasks/backupdb.sh':
    ensure  => file,
    content => template('sunetdrive/script/backupdb.erb.sh'),
    owner   => 'root',
    group   => 'root',
    mode    => '0700',
  }
  file { '/root/tasks/switch_redis_master_to.sh':
    ensure  => file,
    content => template('sunetdrive/script/switch_redis_master_to.erb.sh'),
    owner   => 'root',
    group   => 'root',
    mode    => '0700',
  }
  file { '/root/tasks/restart-nextcloud-farm':
    ensure  => file,
    content => template('sunetdrive/script/restart-nextcloud-farm.erb'),
    owner   => 'root',
    group   => 'root',
    mode    => '0700',
  }
  file { '/root/tasks/restart-db-cluster':
    ensure  => file,
    content => template('sunetdrive/script/restart-db-cluster.erb'),
    owner   => 'root',
    group   => 'root',
    mode    => '0700',
  }
  file { '/root/tasks/restart-proxysql.sh':
    ensure  => file,
    content => template('sunetdrive/script/restart-proxysql.erb.sh'),
    owner   => 'root',
    group   => 'root',
    mode    => '0700',
  }
  file { '/root/tasks/usage.sh':
    ensure  => file,
    content => template('sunetdrive/script/usage.erb.sh'),
    owner   => 'root',
    group   => 'root',
    mode    => '0700',
  }
  file { '/root/tasks/maintenance.sh':
    ensure  => file,
    content => template('sunetdrive/script/maintenance.erb.sh'),
    owner   => 'root',
    group   => 'root',
    mode    => '0700',
  }
  file { '/root/tasks/reboot-customer.sh':
    ensure  => file,
    content => template('sunetdrive/script/reboot-customer.erb.sh'),
    owner   => 'root',
    group   => 'root',
    mode    => '0700',
  }
  file { '/usr/local/bin/check_backups':
    ensure  => file,
    content => template('sunetdrive/script/check_backup.erb.sh'),
    owner   => 'root',
    group   => 'root',
    mode    => '0700',
  }
  file { '/root/tasks/collect_backup_data.sh':
    ensure  => file,
    content => template('sunetdrive/script/collect_backup_data.erb.sh'),
    owner   => 'root',
    group   => 'root',
    mode    => '0700',
  }
  file { '/root/tasks/makebuckets.sh':
    ensure  => file,
    content => template('sunetdrive/script/makebuckets.erb.sh'),
    owner   => 'root',
    group   => 'root',
    mode    => '0700',
  }
  file { '/root/tasks/makemanualuserbucket.sh':
    ensure  => file,
    content => template('sunetdrive/script/makemanualuserbucket.erb.sh'),
    owner   => 'root',
    group   => 'root',
    mode    => '0700',
  }
  if $environment == 'test' {
    sunet::scriptherder::cronjob { 'reboot-customer':
      cmd           => '/root/tasks/reboot-customer.sh',
      hour          => '2',
      minute        => '10',
      ok_criteria   => ['exit_status=0','max_age=21d'],
      warn_criteria => ['exit_status=1','max_age=31d'],
    }
  }
  # Opt out of userbuckets
  unless $customer in ['extern', 'gih', 'suni', 'common'] {
    sunet::scriptherder::cronjob { 'makebuckets':
      cmd           => '/root/tasks/makebuckets.sh',
      minute        => '*/5',
      ok_criteria   => ['exit_status=0','max_age=15m'],
      warn_criteria => ['exit_status=1','max_age=30m'],
    }
  }
  # Opt in to folder structure for multinode customers
  if $customer in ['common'] {

    file { '/root/tasks/listusers.sh':
      ensure  => file,
      content => template('sunetdrive/script/listusers.erb.sh'),
      owner   => 'root',
      group   => 'root',
      mode    => '0700',
    }
    file { '/root/tasks/create_folders_in_singlenode_buckets.sh':
      ensure  => file,
      content => template('sunetdrive/script/create_folders_in_singlenode_buckets.erb.sh'),
      owner   => 'root',
      group   => 'root',
      mode    => '0700',
    }
    sunet::scriptherder::cronjob { 'create_folders_in_singlenode_buckets_for_kmh':
      cmd           => '/root/tasks/create_folders_in_singlenode_buckets.sh kmh true',
      minute        => '*/30',
      ok_criteria   => ['exit_status=0','max_age=1h'],
      warn_criteria => ['exit_status=1','max_age=2h'],
    }
  }
  # Opt in to folder structure in project buckets
  if $customer in ['gih', 'mdu'] {
    file { '/root/tasks/create_folders_in_project_buckets.sh':
      ensure  => file,
      content => template('sunetdrive/script/create_folders_in_project_buckets.erb.sh'),
      owner   => 'root',
      group   => 'root',
      mode    => '0700',
    }
  }
  if $customer in ['gih'] {
    sunet::scriptherder::cronjob { 'create_folders_in_project_buckets':
      cmd           => '/root/tasks/create_folders_in_project_buckets.sh',
      minute        => '*/30',
      ok_criteria   => ['exit_status=0','max_age=1h'],
      warn_criteria => ['exit_status=1','max_age=2h'],
    }
  }
  if $customer in ['mdu'] {
    sunet::scriptherder::cronjob { 'create_folders_in_project_buckets':
      cmd           => '/root/tasks/create_folders_in_project_buckets.sh "Arbetsmaterial (work material)" "Bevarande (retention)" "Gallringsbart (disposal)"',
      minute        => '*/30',
      ok_criteria   => ['exit_status=0','max_age=1h'],
      warn_criteria => ['exit_status=1','max_age=2h'],
    }
  }
  if $customer == 'common' {
    if $environment == 'prod' {
      file { '/root/tasks/aggregate.sh':
        ensure  => file,
        content => template('sunetdrive/script/aggregate.sh'),
        owner   => 'root',
        group   => 'root',
        mode    => '0700',
      }
      sunet::scriptherder::cronjob { 'aggregate_billing':
        cmd           => '/root/tasks/aggregate.sh',
        hour          => '4',
        minute        => '10',
        ok_criteria   => ['exit_status=0','max_age=2d'],
        warn_criteria => ['exit_status=1','max_age=3d'],
      }
    }
    file { '/root/tasks/backupsinglenodedb.sh':
      ensure  => file,
      content => template('sunetdrive/script/backupsinglenodedb.erb.sh'),
      owner   => 'root',
      group   => 'root',
      mode    => '0700',
    }
    $singlenodes.each |$singlenode| {
      $multinode = hiera_hash('multinode_mapping')[$singlenode]['server']
      $multinodeserver = "${multinode}.${site_name}"
      $nccontainer = "nextcloud${singlenode}_app_1"

      sunet::scriptherder::cronjob { "listusers_${singlenode}":
        cmd           => "/root/tasks/listusers.sh ${singlenode} ${multinodeserver}",
        minute        => '*/5',
        ok_criteria   => ['exit_status=0','max_age=30m'],
        warn_criteria => ['exit_status=1', 'max_age=60m'],
      }
      sunet::scriptherder::cronjob { "backup${singlenode}db":
        cmd           => "/root/tasks/backupsinglenodedb.sh ${multinodeserver} ${singlenode}",
        hour          => '2',
        minute        => '0',
        ok_criteria   => ['exit_status=0','max_age=2d'],
        warn_criteria => ['exit_status=1','max_age=3d'],
      }
      if $environment == 'prod' {
        sunet::scriptherder::cronjob { "statistics${singlenode}":
          cmd           => "/root/tasks/usage.sh ${singlenode} ${multinodeserver}",
          hour          => '2',
          minute        => '0',
          ok_criteria   => ['exit_status=0','max_age=2d'],
          warn_criteria => ['exit_status=1','max_age=3d'],
        }
      }
      unless $singlenode in ['mau'] {
        sunet::scriptherder::cronjob { "make${singlenode}buckets":
          cmd           => "/root/tasks/makebuckets.sh ${multinodeserver} ${nccontainer} ${singlenode}-${environment}",
          minute        => '*',
          ok_criteria   => ['exit_status=0','max_age=15m'],
          warn_criteria => ['exit_status=1','max_age=30m'],
        }
      }
    }
    $gss_backup_server = $config['gss_backup_server']
    $lookup_backup_server = $config['lookup_backup_server']
    sunet::scriptherder::cronjob { 'backupgssdb':
      cmd           => "/root/tasks/backupdb.sh ${gss_backup_server}",
      hour          => '2',
      minute        => '0',
      ok_criteria   => ['exit_status=0','max_age=2d'],
      warn_criteria => ['exit_status=1','max_age=3d'],
    }
    sunet::scriptherder::cronjob { 'backuplookupdb':
      cmd           => "/root/tasks/backupdb.sh ${lookup_backup_server}",
      hour          => '2',
      minute        => '0',
      ok_criteria   => ['exit_status=0','max_age=2d'],
      warn_criteria => ['exit_status=1','max_age=3d'],
    }
  } else {
    sunet::scriptherder::cronjob { 'backupdb':
      cmd           => "/root/tasks/backupdb.sh ${backup_server}",
      hour          => '2',
      minute        => '0',
      ok_criteria   => ['exit_status=0','max_age=2d'],
      warn_criteria => ['exit_status=1','max_age=3d'],
    }
    sunet::scriptherder::cronjob { 'restart_proxysql':
      ensure        => 'absent',
      cmd           => '/bin/true',
      purge_results => true,
    }
    if $environment == 'prod' {
      sunet::scriptherder::cronjob { 'statistics':
        cmd           => '/root/tasks/usage.sh',
        hour          => '2',
        minute        => '0',
        ok_criteria   => ['exit_status=0','max_age=2d'],
        warn_criteria => ['exit_status=1','max_age=3d'],
      }
    }
  }
  sunet::scriptherder::cronjob { 'collect_backup_data':
    cmd           => '/root/tasks/collect_backup_data.sh',
    hour          => '*',
    minute        => '3',
    ok_criteria   => ['exit_status=0','max_age=2d'],
    warn_criteria => ['exit_status=1','max_age=3d'],
  }
  sunet::scriptherder::cronjob { 'backupbuckets':
    cmd           => '/root/tasks/backupbuckets.sh',
    hour          => '2',
    minute        => '0',
    ok_criteria   => ['exit_status=0','max_age=2d'],
    warn_criteria => ['exit_status=1','max_age=3d'],
  }
  # sunet::scriptherder::cronjob { 'scriptherder_daily':
  #   cmd           => '/bin/true',
  #   special       => 'daily',
  #   ok_criteria   => ['exit_status=0','max_age=4d'],
  #   warn_criteria => ['exit_status=1','max_age=8d'],
  # }
  # cron { 'example_job':
  #   ensure  => 'present',
  #   command => '/bin/true',
  #   hour    => ['0'],
  #   target  => 'root',
  #   user    => 'root',
  # }
}

110
manifests/scriptreceiver.pp
Normal file
@@ -0,0 +1,110 @@
# Class for SUNET-Drive-Script-receiver
class sunetdrive::scriptreceiver() {
  sunet::system_user { 'script':
    username   => 'script',
    group      => 'script',
    managehome => true,
    shell      => '/bin/bash',
  }

  # These tasks correspond to a ${task}.erb.sh template
  $tasks = ['list_users', 'list_files_for_user', 'create_bucket', 'backup_db',
            'purge_backups', 'maintenancemode', 'restart_sunet_service',
            'start_sentinel', 'stop_sentinel']

  $environment = sunetdrive::get_environment()
  $config = hiera_hash($environment)
  $script_server = $config['script_server']
  $script_ipv4 = $config['script']
  $script_ipv6 = $config['script_v6']
  $script_pub_key = $config['script_pub_key']
  file { '/etc/sudoers.d/99-script-user':
    ensure => absent,
  }

  file { '/home/script/bin':
    ensure => directory,
    mode   => '0750',
    owner  => 'script',
    group  => 'script',
  }

  $kano_shell = ['89.46.21.246','2001:6b0:6c::1bc']
  sunet::misc::ufw_allow { 'script_port':
    from => $script_ipv4 + $script_ipv6 + $kano_shell,
    port => 22,
  }

  ssh_authorized_key { "script@${script_server}":
    ensure => present,
    user   => 'script',
    type   => 'ssh-ed25519',
    key    => $script_pub_key,
  }

  file { '/opt/rotate':
    ensure => directory,
    mode   => '0750',
    owner  => 'root',
    group  => 'root',
  }
  -> file { '/opt/rotate/conf.d':
    ensure => directory,
    mode   => '0750',
    owner  => 'root',
    group  => 'root',
  }
  file { '/usr/local/bin/safer_reboot':
    ensure  => file,
    content => template('sunetdrive/scriptreceiver/safer_reboot.erb'),
    mode    => '0740',
    owner   => 'root',
    group   => 'root',
  }
  file { '/etc/sudoers.d/99-safer_reboot':
    ensure  => file,
    content => "script ALL=(root) NOPASSWD: /usr/local/bin/safer_reboot\n",
    mode    => '0440',
    owner   => 'root',
    group   => 'root',
  }
  file { '/usr/local/bin/rotatefiles':
    ensure  => file,
    content => template('sunetdrive/scriptreceiver/rotatefiles.erb'),
    mode    => '0740',
    owner   => 'root',
    group   => 'root',
  }
  file { '/usr/local/bin/ini2json':
    ensure  => file,
    content => template('sunetdrive/scriptreceiver/ini2json.py'),
    mode    => '0740',
    owner   => 'root',
    group   => 'root',
  }
  -> file { '/etc/scriptherder/check/rotatefiles.ini':
    ensure  => file,
    content => "[check]\nok = exit_status=0, max_age=35m\nwarning = exit_status=0, max_age=1h\n",
    mode    => '0644',
    owner   => 'root',
    group   => 'root',
  }
  cron { 'rotate_logs':
    command => '/usr/local/bin/scriptherder --mode wrap --syslog --name rotatefiles -- /usr/local/bin/rotatefiles',
    require => File['/usr/local/bin/rotatefiles'],
    user    => 'root',
    minute  => '*',
    hour    => '*',
  }
  $tasks.each |String $task| {
    file { "/home/script/bin/${task}.sh":
      ensure  => file,
      content => template("sunetdrive/scriptreceiver/${task}.erb.sh"),
      mode    => '0740',
      owner   => 'script',
      group   => 'script',
    }
    file { "/etc/sudoers.d/99-${task}":
      ensure  => file,
      content => "script ALL=(root) NOPASSWD: /home/script/bin/${task}.sh\n",
      mode    => '0440',
      owner   => 'root',
      group   => 'root',
    }
  }
}
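
For illustration, the $tasks.each loop above expands to one script/sudoers pair per task; for the 'backup_db' entry it is equivalent to this sketch:

# Equivalent expansion of the loop for task 'backup_db'
file { '/home/script/bin/backup_db.sh':
  ensure  => file,
  content => template('sunetdrive/scriptreceiver/backup_db.erb.sh'),
  mode    => '0740',
  owner   => 'script',
  group   => 'script',
}
file { '/etc/sudoers.d/99-backup_db':
  ensure  => file,
  content => "script ALL=(root) NOPASSWD: /home/script/bin/backup_db.sh\n",
  mode    => '0440',
  owner   => 'root',
  group   => 'root',
}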

27
manifests/sitemonitor.pp
Normal file
@@ -0,0 +1,27 @@
# Class for site monitor
class sunetdrive::sitemonitor() {

  $sites = hiera_array('sites')
  $tls_servers = flatten($sites, hiera_array('tls_servers'))
  $tls_servers_with_port = hiera_array('tls_servers_with_port')
  $nextcloud_version_prod = split(hiera('nextcloud_version_prod'),'[-]')[0]
  $nextcloud_version_test = split(hiera('nextcloud_version_test'),'[-]')[0]

  file { '/etc/nagios4/conf.d/sunetdrive_sites.cfg':
    ensure  => present,
    content => template('sunetdrive/monitor/sunetdrive_sites.cfg.erb'),
    mode    => '0644',
  }
  file { '/etc/nagios4/conf.d/sunetdrive_ssl_checks.cfg':
    ensure  => present,
    content => template('sunetdrive/monitor/sunetdrive_ssl_checks.cfg.erb'),
    mode    => '0644',
  }
  cron { 'restart_socket':
    command => 'test -S /var/cache/thruk/live.sock || systemctl restart nagios4',
    user    => 'root',
    minute  => '*/5',
  }
}

100
manifests/sitemonitornaemon.pp
Normal file
@@ -0,0 +1,100 @@
# Class for site monitor
class sunetdrive::sitemonitornaemon() {

  $sites = hiera_array('sites')
  $fullnodes = hiera_array('fullnodes')
  $tls_servers = flatten($sites, hiera_array('tls_servers'))
  $tls_servers_with_port = hiera_array('tls_servers_with_port')
  $nextcloud_version_prod = split(hiera('nextcloud_version_prod'),'[-]')[0]
  $nextcloud_version_test = split(hiera('nextcloud_version_test'),'[-]')[0]
  $monitorhost = $::fqdn
  $environment = sunetdrive::get_environment()
  $influx_passwd = safe_hiera('influx_passwd')
  $slack_url = safe_hiera('slack_url')

  file { '/usr/local/bin/slack_nagios.sh':
    ensure  => present,
    content => template('sunetdrive/monitor/notify_slack.erb.sh'),
    mode    => '0755',
  }
  file { '/etc/nagios-plugins/config/ping.cfg':
    ensure  => present,
    owner   => 'root',
    group   => 'root',
    content => template('sunetdrive/monitor/ping.cfg.erb'),
    mode    => '0644',
  }
  # Definition for check_nrpe_1arg
  file { '/etc/nagios-plugins/config/check_nrpe.cfg':
    ensure  => file,
    mode    => '0644',
    content => template('sunetdrive/monitor/check_nrpe.cfg.erb'),
  }
  file { '/etc/naemon/conf.d/sunetdrive_sites.cfg':
    ensure  => present,
    content => template('sunetdrive/monitor/sunetdrive_sites.cfg.erb'),
    mode    => '0644',
  }
  file { '/etc/naemon/conf.d/sunetdrive_ssl_checks.cfg':
    ensure  => present,
    content => template('sunetdrive/monitor/sunetdrive_ssl_checks.cfg.erb'),
    mode    => '0644',
  }
  file { '/etc/naemon/conf.d/sunetdrive_thruk_templates.conf':
    ensure  => present,
    owner   => 'naemon',
    group   => 'naemon',
    content => template('sunetdrive/monitor/sunetdrive_thruk_templates.conf.erb'),
    mode    => '0644',
  }
  nagioscfg::service { 'check_galera_cluster':
    hostgroup_name => ['galera_monitor'],
    check_command  => 'check_nrpe_1arg!check_galera_cluster',
    description    => 'Galera Cluster Health',
    contact_groups => ['alerts'],
  }
  nagioscfg::service { 'check_async_replication':
    hostgroup_name => ['sunetdrive::mariadb_backup'],
    check_command  => 'check_nrpe_1arg!check_async_replication',
    description    => 'MySQL Replication Health',
    contact_groups => ['alerts'],
  }
  nagioscfg::service { 'check_backups':
    action_url     => '/grafana/dashboard/script/histou.js?host=$HOSTNAME$&service=$SERVICEDISPLAYNAME$&theme=light&annotations=true',
    hostgroup_name => ['sunetdrive::script'],
    check_command  => 'check_nrpe_1arg_to600!check_backups',
    check_interval => '720',
    retry_interval => '180',
    description    => 'Backup Status',
    contact_groups => ['alerts'],
  }
  nagioscfg::service { 'check_proxysql_server':
    hostgroup_name => ['sunetdrive::proxysql'],
    check_command  => 'check_nrpe_1arg!check_proxysql_server',
    description    => 'Number of ProxySQL servers available',
    contact_groups => ['alerts'],
  }
  nagioscfg::service { 'check_mysql_server_status':
    action_url     => '/grafana/dashboard/script/histou.js?host=$HOSTNAME$&service=$SERVICEDISPLAYNAME$&theme=light&annotations=true',
    hostgroup_name => ['sunetdrive::proxysql'],
    check_command  => 'check_nrpe_1arg!check_mysql_server_status',
    description    => 'Status of mysql servers',
    contact_groups => ['alerts'],
  }
  nagioscfg::service { 'check_exabgp_announce':
    action_url     => '/grafana/dashboard/script/histou.js?host=$HOSTNAME$&service=$SERVICEDISPLAYNAME$&theme=light&annotations=true',
    hostgroup_name => ['sunetdrive::lb'],
    check_command  => 'check_nrpe_1arg!check_exabgp_announce',
    description    => 'Status of exabgp routes',
    contact_groups => ['alerts'],
  }
  nagioscfg::service { 'check_sarimner':
    action_url     => '/grafana/dashboard/script/histou.js?host=$HOSTNAME$&service=$SERVICEDISPLAYNAME$&theme=light&annotations=true',
    hostgroup_name => ['sunetdrive::lb'],
    check_command  => 'check_nrpe_1arg_to300!check_sarimner',
    description    => 'Status of sarimner interface',
    contact_groups => ['alerts'],
  }
}

81
manifests/thruk.pp
Normal file
@@ -0,0 +1,81 @@
include stdlib
class sunetdrive::thruk($location=undef) {

  $thruk_local_config = '# File managed by puppet
<Component Thruk::Backend>
    <peer>
        name = Core
        type = livestatus
        <options>
            peer          = /var/cache/thruk/live.sock
            resource_file = /etc/nagios4/resource.cfg
        </options>
        <configtool>
            core_conf      = /etc/nagios4/nagios.cfg
            obj_check_cmd  = /usr/sbin/nagios4 -v /etc/nagios4/nagios.cfg
            obj_reload_cmd = systemctl reload nagios4.service
        </configtool>
    </peer>
</Component>
cookie_auth_restricted_url = https://monitor.drive.sunet.se/thruk/cgi-bin/restricted.cgi
'

  file_line { 'nagios_livestatus_conf':
    line => 'broker_module=/usr/local/lib/mk-livestatus/livestatus.o /var/cache/thruk/live.sock',
    path => '/etc/nagios4/nagios.cfg',
  }
  file_line { 'nagiosadmin_cgi_conf':
    line    => 'authorized_for_admin=nagiosadmin',
    match   => '^authorized_for_admin=thrukadmin',
    path    => '/etc/thruk/cgi.cfg',
    require => Package['thruk'],
  }
  exec { 'mk-livestatus-src':
    command => 'curl -s https://download.checkmk.com/checkmk/1.5.0p24/mk-livestatus-1.5.0p24.tar.gz --output /opt/mk-livestatus-1.5.0p24.tar.gz',
    unless  => 'ls /usr/local/lib/mk-livestatus/livestatus.o',
  }
  exec { 'mk-livestatus-tar':
    command => 'cd /opt && tar xfv mk-livestatus-1.5.0p24.tar.gz',
    require => Exec['mk-livestatus-src'],
    unless  => 'ls /usr/local/lib/mk-livestatus/livestatus.o',
  }
  exec { 'mk-livestatus-build':
    command => 'apt update && apt install -y make libboost-system1.71.0 clang librrd-dev libboost-dev libasio-dev libboost-system-dev && cd /opt/mk-livestatus-1.5.0p24 && ./configure --with-nagios4 && make && make install && apt -y remove clang librrd-dev libboost-dev libasio-dev libboost-system-dev make && apt autoremove -y',
    require => [Exec['mk-livestatus-tar'], File_line['nagios_livestatus_conf'], Exec['www-data_in_nagios_group']],
    unless  => 'ls /usr/local/lib/mk-livestatus/livestatus.o',
  }
  exec { 'www-data_in_nagios_group':
    command => 'usermod -a -G nagios www-data && usermod -a -G www-data nagios',
    unless  => 'id www-data | grep nagios',
  }
  package { 'thruk':
    ensure  => 'installed',
    require => Exec['mk-livestatus-build'],
  }
  package { 'thruk-plugin-reporting':
    ensure  => 'installed',
    require => Package['thruk'],
  }
  file { 'thruk_repo':
    ensure  => 'file',
    name    => '/etc/apt/sources.list.d/labs-consol-stable.list',
    mode    => '0644',
    content => 'deb http://labs.consol.de/repo/stable/ubuntu focal main',
    require => Exec['thruk_gpg_key'],
  }
  file { 'thruk_conf':
    ensure  => 'file',
    name    => '/etc/thruk/thruk_local.conf',
    mode    => '0640',
    owner   => 'www-data',
    group   => 'www-data',
    content => $thruk_local_config,
    require => Package['thruk'],
  }
  exec { 'thruk_gpg_key':
    command => 'curl -s "https://labs.consol.de/repo/stable/RPM-GPG-KEY" | sudo apt-key add -',
    unless  => 'apt-key list 2> /dev/null | grep "F2F9 7737 B59A CCC9 2C23 F8C7 F8C1 CA08 A57B 9ED7"',
  }
}

51
templates/application/000-default.conf.erb
Normal file
@@ -0,0 +1,51 @@
# This file is managed by puppet
LoadModule remoteip_module /usr/lib/apache2/modules/mod_remoteip.so
LoadModule headers_module /usr/lib/apache2/modules/mod_headers.so
<VirtualHost *:80>
    ServerAdmin webmaster@localhost
    DocumentRoot /var/www/html

    ErrorLog ${APACHE_LOG_DIR}/error.log
    CustomLog ${APACHE_LOG_DIR}/access.log combined
    <Directory /var/www/nextcloud/>
        Require all granted
        AllowOverride All
        Options FollowSymLinks MultiViews

        <IfModule mod_dav.c>
            Dav off
        </IfModule>
    </Directory>
</VirtualHost>

<VirtualHost *:443>
    ServerAdmin webmaster@localhost
    DocumentRoot /var/www/html
    # Log format config
    LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" common
    SetEnvIf X-Forwarded-For "^.*\..*\..*\..*" forwarded

    # Header config
    RemoteIPHeader X-Forwarded-For
    <%- @lb_servers.each do |ip| -%>
    RemoteIPInternalProxy <%= ip %>
    <%- end -%>
    ErrorDocument 404 /404.html
    ErrorLog ${APACHE_LOG_DIR}/error.log
    CustomLog ${APACHE_LOG_DIR}/access.log combined env=forwarded
    SSLEngine On
    SSLCertificateFile /etc/ssl/certs/ssl-cert-snakeoil.pem
    SSLCertificateKeyFile /etc/ssl/private/ssl-cert-snakeoil.key
    <Directory /var/www/html/>
        LimitRequestBody 0
        Require all granted
        AllowOverride All
        Options FollowSymLinks MultiViews

        <IfModule mod_dav.c>
            Dav off
        </IfModule>
    </Directory>
</VirtualHost>
# vim: syntax=apache ts=4 sw=4 sts=4 sr noet

54
templates/application/404.html.erb
Normal file
File diff suppressed because one or more lines are too long
13
templates/application/MDU-README.md.erb
Normal file
@@ -0,0 +1,13 @@
Hello and welcome to your personal space for research data storage.

It is important that research data is managed in a secure and careful manner, so that it is protected from damage and destruction. Mälardalen University offers its researchers and doctoral students a secure and stable IT service that makes it possible to store research data. Furthermore, it enables research collaboration internally and externally, since researchers can easily share their data within a project or with other individual researchers at MDU in a secure way. This technical infrastructure provides a good collaboration space for researchers, secure handling of data, and the possibility for researchers to store data in such a way that data management according to the FAIR principles can be complied with at MDU.

To meet the general need for research data storage, MDU has purchased Sunet Drive's S3 research data storage service. Each researcher will be offered a space of 200 GB per person. To simplify administration, all spaces will be provisioned in advance in the same way, regardless of the research domain or total amount of research data.

200 GB will not cover all of MDU researchers' data storage needs; therefore, researchers will have the possibility to purchase additional data space via a digital form. The additional purchase is handled by internal billing, and the purchased space is then visible at the next login in the researcher's personal space.

If there is a need for a specific project space for research data storage and/or sharing, this is also possible. Such a space is created by an administrator after ordering project space through the same form as when purchasing additional storage space. The project space will then be visible to project members in the personal storage space at the next login.

Can an MDU student be included in a research project? It is possible to give a student time-limited access to, or sharing of, specific folders in either the project space or the researcher's personal space. Contact the Data Access Unit (DAU) to enable such a feature.

If you have any questions about the storage space, please contact dau@mdu.se.
|
20
templates/application/add_admin_user.erb
Normal file
20
templates/application/add_admin_user.erb
Normal file
|
@ -0,0 +1,20 @@
|
||||||
|
#!/bin/bash
|
||||||
|
user=${1}
|
||||||
|
email=${2}
|
||||||
|
container=${3}
|
||||||
|
|
||||||
|
|
||||||
|
usage() {
|
||||||
|
echo "Usage: ${0} <username> <email address> [container]"
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
|
||||||
|
if [[ "x${user}" == "x" || "x${email}" == "x" ]]; then
|
||||||
|
usage
|
||||||
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
pass=$(< /dev/urandom tr -dc _A-Z-a-z-0-9 | head -c40;echo -n;)
|
||||||
|
OC_PASS="${pass}" /usr/local/bin/occ ${container} user:add --password-from-env --group admin "${user}"
|
||||||
|
unset OC_PASS
|
||||||
|
/usr/local/bin/occ ${container} user:setting ${user} settings email "${email}"

160
templates/application/apache.php.ini.erb
Normal file
@@ -0,0 +1,160 @@
[PHP]
allow_url_fopen = On
allow_url_include = Off
auto_append_file =
auto_globals_jit = On
auto_prepend_file =
default_charset = "UTF-8"
default_mimetype = "text/html"
default_socket_timeout = 60
disable_classes =
disable_functions = pcntl_alarm,pcntl_fork,pcntl_waitpid,pcntl_wait,pcntl_wifexited,pcntl_wifstopped,pcntl_wifsignaled,pcntl_wifcontinued,pcntl_wexitstatus,pcntl_wtermsig,pcntl_wstopsig,pcntl_signal,pcntl_signal_get_handler,pcntl_signal_dispatch,pcntl_get_last_error,pcntl_strerror,pcntl_sigprocmask,pcntl_sigwaitinfo,pcntl_sigtimedwait,pcntl_exec,pcntl_getpriority,pcntl_setpriority,pcntl_async_signals,pcntl_unshare,
display_errors = Off
display_startup_errors = Off
doc_root =
enable_dl = Off
engine = On
error_reporting = E_ALL & ~E_DEPRECATED & ~E_STRICT
expose_php = Off
file_uploads = On
ignore_repeated_errors = Off
ignore_repeated_source = Off
implicit_flush = Off
log_errors = On
log_errors_max_len = 1024
max_execution_time = 86400
max_file_uploads = 20
max_input_time = 86400
memory_limit = 512M
output_buffering = Off
post_max_size = 30G
precision = 14
register_argc_argv = Off
report_memleaks = On
request_order = "GP"
serialize_precision = -1
short_open_tag = Off
unserialize_callback_func =
upload_max_filesize = 30G
user_dir =
variables_order = "GPCS"
zend.enable_gc = On
zend.exception_ignore_args = On
zlib.output_compression = Off
<% if @customer == "kau" -%>
upload_tmp_dir = /opt/tmp/
<% end %>

[CLI Server]
cli_server.color = On
[Date]
; Nothing here
[filter]
; Nothing here
[iconv]
; Nothing here
[imap]
; Nothing here
[intl]
; Nothing here
[sqlite3]
; Nothing here
[Pcre]
; Nothing here
[Pdo]
; Nothing here
[Pdo_mysql]
pdo_mysql.default_socket=
[Phar]
; Nothing here
[mail function]
SMTP = localhost
smtp_port = 25
mail.add_x_header = Off
[ODBC]
odbc.allow_persistent = On
odbc.check_persistent = On
odbc.max_persistent = -1
odbc.max_links = -1
odbc.defaultlrl = 4096
odbc.defaultbinmode = 1
[MySQLi]
mysqli.max_persistent = -1
mysqli.allow_persistent = On
mysqli.max_links = -1
mysqli.default_port = 3306
mysqli.default_socket =
mysqli.default_host =
mysqli.default_user =
mysqli.default_pw =
mysqli.reconnect = Off
[mysqlnd]
mysqlnd.collect_statistics = On
mysqlnd.collect_memory_statistics = Off
[OCI8]
; Nothing here
[PostgreSQL]
pgsql.allow_persistent = On
pgsql.auto_reset_persistent = Off
pgsql.max_persistent = -1
pgsql.max_links = -1
pgsql.ignore_notice = 0
pgsql.log_notice = 0
[bcmath]
bcmath.scale = 0
[browscap]
; Nothing here
[Session]
session.save_handler = files
session.use_strict_mode = 0
session.use_cookies = 1
session.use_only_cookies = 1
session.name = PHPSESSID
session.auto_start = 0
session.cookie_lifetime = 0
session.cookie_path = /
session.cookie_domain =
session.cookie_httponly =
session.cookie_samesite =
session.serialize_handler = php
session.gc_probability = 0
session.gc_divisor = 1000
session.gc_maxlifetime = 1440
session.referer_check =
session.cache_limiter = nocache
session.cache_expire = 180
session.use_trans_sid = 0
session.sid_length = 26
session.trans_sid_tags = "a=href,area=href,frame=src,form="
session.sid_bits_per_character = 5
[Assertion]
zend.assertions = -1
[COM]
; Nothing here
[mbstring]
; Nothing here
[gd]
; Nothing here
[exif]
; Nothing here
[Tidy]
tidy.clean_output = Off
[soap]
soap.wsdl_cache_enabled=1
soap.wsdl_cache_dir="/tmp"
soap.wsdl_cache_ttl=86400
soap.wsdl_cache_limit = 5
[sysvshm]
; Nothing here
[ldap]
ldap.max_links = -1
[dba]
; Nothing here
[opcache]
; Nothing here
[curl]
; Nothing here
[openssl]
; Nothing here
[ffi]
; Nothing here

2
templates/application/apcu.ini.erb
Normal file
@@ -0,0 +1,2 @@
extension=apcu.so
apc.enable_cli=1
|

156
templates/application/cli.php.ini.erb
Normal file
@@ -0,0 +1,156 @@
[PHP]
allow_url_fopen = On
allow_url_include = Off
auto_append_file =
auto_globals_jit = On
auto_prepend_file =
default_charset = "UTF-8"
default_mimetype = "text/html"
default_socket_timeout = 60
disable_classes =
disable_functions =
display_errors = Off
display_startup_errors = Off
doc_root =
enable_dl = Off
engine = On
error_reporting = E_ALL & ~E_DEPRECATED & ~E_STRICT
expose_php = On
file_uploads = On
ignore_repeated_errors = Off
ignore_repeated_source = Off
implicit_flush = Off
log_errors = On
log_errors_max_len = 1024
max_execution_time = 86400
max_file_uploads = 20
max_input_time = 86400
memory_limit = -1
output_buffering = Off
post_max_size = 16G
precision = 14
register_argc_argv = Off
report_memleaks = On
request_order = "GP"
serialize_precision = -1
short_open_tag = Off
unserialize_callback_func =
upload_max_filesize = 16G
user_dir =
variables_order = "GPCS"
zend.enable_gc = On
zend.exception_ignore_args = On
zlib.output_compression = Off
[CLI Server]
cli_server.color = On
[Date]
; Nothing here
[filter]
; Nothing here
[iconv]
; Nothing here
[imap]
; Nothing here
[intl]
; Nothing here
[sqlite3]
; Nothing here
[Pcre]
; Nothing here
[Pdo]
; Nothing here
[Pdo_mysql]
pdo_mysql.default_socket=
[Phar]
; Nothing here
[mail function]
SMTP = localhost
smtp_port = 25
mail.add_x_header = Off
[ODBC]
odbc.allow_persistent = On
odbc.check_persistent = On
odbc.max_persistent = -1
odbc.max_links = -1
odbc.defaultlrl = 4096
odbc.defaultbinmode = 1
[MySQLi]
mysqli.max_persistent = -1
mysqli.allow_persistent = On
mysqli.max_links = -1
mysqli.default_port = 3306
mysqli.default_socket =
mysqli.default_host =
mysqli.default_user =
mysqli.default_pw =
mysqli.reconnect = Off
[mysqlnd]
mysqlnd.collect_statistics = On
mysqlnd.collect_memory_statistics = Off
[OCI8]
; Nothing here
[PostgreSQL]
pgsql.allow_persistent = On
pgsql.auto_reset_persistent = Off
pgsql.max_persistent = -1
pgsql.max_links = -1
pgsql.ignore_notice = 0
pgsql.log_notice = 0
[bcmath]
bcmath.scale = 0
[browscap]
; Nothing here
[Session]
session.save_handler = files
session.use_strict_mode = 0
session.use_cookies = 1
session.use_only_cookies = 1
session.name = PHPSESSID
session.auto_start = 0
session.cookie_lifetime = 0
session.cookie_path = /
session.cookie_domain =
session.cookie_httponly =
session.cookie_samesite =
session.serialize_handler = php
session.gc_probability = 0
session.gc_divisor = 1000
session.gc_maxlifetime = 1440
session.referer_check =
session.cache_limiter = nocache
session.cache_expire = 180
session.use_trans_sid = 0
session.sid_length = 26
session.trans_sid_tags = "a=href,area=href,frame=src,form="
session.sid_bits_per_character = 5
[Assertion]
zend.assertions = -1
[COM]
; Nothing here
[mbstring]
; Nothing here
[gd]
; Nothing here
[exif]
; Nothing here
[Tidy]
tidy.clean_output = Off
[soap]
soap.wsdl_cache_enabled=1
soap.wsdl_cache_dir="/tmp"
soap.wsdl_cache_ttl=86400
soap.wsdl_cache_limit = 5
[sysvshm]
; Nothing here
[ldap]
ldap.max_links = -1
[dba]
; Nothing here
[opcache]
; Nothing here
[curl]
; Nothing here
[openssl]
; Nothing here
[ffi]
; Nothing here

61
templates/application/complete_reinstall.erb.sh
Normal file
@@ -0,0 +1,61 @@
#!/usr/bin/env bash

config_php='/var/www/html/config/config.php'
dbhost="<%= @dbhost %>"
mysql_user_password="<%= @mysql_user_password %>"
admin_password="<%= @admin_password %>"
location="<%= @location %>"
bucket="<%= @s3_bucket %>"

echo "Input 'IKnowWhatIAmDoing' if you are sure you want to delete everything and reinstall Nextcloud: "
read -r user_input

if [[ "${user_input}" == "IKnowWhatIAmDoing" ]]; then
    echo "WARNING: This will delete everything in the database and reinstall Nextcloud."
    echo "You have 10 seconds to abort by hitting CTRL/C"
    sleep 10s
    echo "Ok, proceeding."
    echo "Dropping database in 3 seconds"
    sleep 3s
    /usr/bin/mysql -e "drop database nextcloud" -u nextcloud -p"${mysql_user_password}" -h "${dbhost}"
    /usr/bin/mysql -e "create database nextcloud" -u nextcloud -p"${mysql_user_password}" -h "${dbhost}"
    if [[ "x${location}" != "x" || "x${bucket}" != "x" ]]; then
        bucket_content=$(/usr/bin/rclone ls "${location}":"${bucket}" --config /rclone.conf)
        if [[ "x${bucket_content}" != "x" ]]; then
            echo "Deleting all files in ${location}:${bucket} in 3 seconds"
            sleep 3s
            /usr/bin/rclone purge "${location}:${bucket}" --config /rclone.conf
            /usr/bin/rclone mkdir "${location}:${bucket}" --config /rclone.conf
        fi
    fi
    : >${config_php}
    echo "Running maintenance:install"
    su - www-data -s /bin/bash <<EOF
cd /var/www/html && php --define apc.enable_cli=1 ./occ maintenance:install \
    --database "mysql" --database-name "nextcloud" --database-user "nextcloud" \
    --database-pass "${mysql_user_password}" --database-host "${dbhost}" \
    --admin-user "admin" --admin-pass "${admin_password}" --data-dir /var/www/html/data \
    --no-interaction && php --define apc.enable_cli=1 ./occ files:recommendations:recommend admin && \
php --define apc.enable_cli=1 ./occ app:list && \
php --define apc.enable_cli=1 ./occ app:enable globalsiteselector && \
php --define apc.enable_cli=1 ./occ app:list && \
php --define apc.enable_cli=1 ./occ app:enable files_external && \
php --define apc.enable_cli=1 ./occ app:enable twofactor_totp && \
php --define apc.enable_cli=1 ./occ app:enable twofactor_u2f && \
php --define apc.enable_cli=1 ./occ app:enable admin_audit
EOF
    /usr/bin/wget --no-check-certificate -q https://localhost/index.php -O /dev/null
    instanceid=$(grep -E "^ 'instanceid'" ${config_php} | awk -F "'" '{print $4}')
    secret=$(grep -E "^ 'secret'" ${config_php} | awk -F "'" '{print $4}')
    passwordsalt=$(grep -E "^ 'passwordsalt'" ${config_php} | awk -F "'" '{print $4}')
    echo "Please use edit-secrets to add these variables to all Nextcloud servers:"
    echo "instanceid: DEC::PKCS7[${instanceid}]!"
    echo "secret: DEC::PKCS7[${secret}]!"
    echo "passwordsalt: DEC::PKCS7[${passwordsalt}]!"

    echo "All done, please enjoy your new Nextcloud setup"
else
    echo "You did not input 'IKnowWhatIAmDoing', I am bailing out."
fi

exit 0
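
The compose file below mounts this script into the app container at /complete_reinstall.sh, so a minimal way to invoke it would be the following sketch (the container name nextcloud_app_1 is what the other scripts in this commit assume; verify it on the host first):

    docker exec -ti nextcloud_app_1 /complete_reinstall.sh

It reads the confirmation string from stdin, hence the -ti.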

200
templates/application/config.php.erb
Normal file
@@ -0,0 +1,200 @@
<?php
$CONFIG = array (
  'memcache.local' => '\\OC\\Memcache\\APCu',
  'appstoreenabled' => false,
  'apps_paths' =>
  array (
    0 =>
    array (
      'path' => '/var/www/html/apps',
      'url' => '/apps',
      'writable' => false,
    ),
    1 =>
    array (
      'path' => '/var/www/html/custom_apps',
      'url' => '/custom_apps',
      'writable' => true,
    ),
  ),
  'memcache.distributed' => '\\OC\\Memcache\\Redis',
  'memcache.locking' => '\\OC\\Memcache\\Redis',
<% if @location == 'sunet-test' -%>
  'redis.cluster' => [
    'seeds' => [
      'redis1.drive.test.sunet.se:6379',
      'redis2.drive.test.sunet.se:6379',
      'redis3.drive.test.sunet.se:6379',
      'redis1.drive.test.sunet.se:6380',
      'redis2.drive.test.sunet.se:6380',
      'redis3.drive.test.sunet.se:6380',
      'redis1.drive.test.sunet.se:6381',
      'redis2.drive.test.sunet.se:6381',
      'redis3.drive.test.sunet.se:6381'
    ],
    'timeout' => 1.1,
    'read_timeout' => 0.0,
    'password' => '<%= @redis_cluster_password %>',
    'failover_mode' => \RedisCluster::FAILOVER_ERROR
  ],
<% elsif @environment == 'test' && ! @is_multinode && @location != 'gss-test' -%>
  'redis.cluster' => [
    'seeds' => [
      'redis1.<%= @customer %>.drive.test.sunet.se:6379',
      'redis2.<%= @customer %>.drive.test.sunet.se:6379',
      'redis3.<%= @customer %>.drive.test.sunet.se:6379',
      'redis1.<%= @customer %>.drive.test.sunet.se:6380',
      'redis2.<%= @customer %>.drive.test.sunet.se:6380',
      'redis3.<%= @customer %>.drive.test.sunet.se:6380',
      'redis1.<%= @customer %>.drive.test.sunet.se:6381',
      'redis2.<%= @customer %>.drive.test.sunet.se:6381',
      'redis3.<%= @customer %>.drive.test.sunet.se:6381'
    ],
    'timeout' => 1.1,
    'read_timeout' => 0.0,
    'password' => '<%= @redis_cluster_password %>',
    'failover_mode' => \RedisCluster::FAILOVER_ERROR
  ],
<% elsif @environment == 'prod' && ! @is_multinode && @location != 'gss-prod' -%>
  'redis.cluster' => [
    'seeds' => [
      'redis1.<%= @customer %>.drive.sunet.se:6379',
      'redis2.<%= @customer %>.drive.sunet.se:6379',
      'redis3.<%= @customer %>.drive.sunet.se:6379',
      'redis1.<%= @customer %>.drive.sunet.se:6380',
      'redis2.<%= @customer %>.drive.sunet.se:6380',
      'redis3.<%= @customer %>.drive.sunet.se:6380',
      'redis1.<%= @customer %>.drive.sunet.se:6381',
      'redis2.<%= @customer %>.drive.sunet.se:6381',
      'redis3.<%= @customer %>.drive.sunet.se:6381'
    ],
    'timeout' => 1.1,
    'read_timeout' => 0.0,
    'password' => '<%= @redis_cluster_password %>',
    'failover_mode' => \RedisCluster::FAILOVER_ERROR
  ],
<% else -%>
  'redis' =>
  array (
    'host' => '<%= @redis_host %>',
    'password' => '<%= @redis_host_password %>',
    'port' => 6379,
  ),
<% end -%>
  'forcessl' => true,
  'overwriteprotocol' => 'https',
  'objectstore' =>
  array (
    'class' => '\\OC\\Files\\ObjectStore\\S3',
    'arguments' =>
    array (
      'bucket' => '<%= @s3_bucket %>',
      'key' => '<%= @s3_key %>',
      'secret' => '<%= @s3_secret %>',
      'region' => 'us-east-1',
      'hostname' => '<%= @s3_host %>',
      'port' => '',
      'objectPrefix' => 'urn:oid:',
      'autocreate' => false,
      'use_ssl' => true,
      'use_path_style' => true,
      'legacy_auth' => false,
    ),
  ),
  'csrf.disabled' => true,
  'passwordsalt' => '<%= @passwordsalt %>',
  'secret' => '<%= @secret %>',
  'trusted_domains' =>
  array (
<%- index = 0 -%>
<%- @trusted_domains.each do |item| -%>
    <%= index %> => '<%= item %>',<% index += 1 %>
<%- end -%>
  ),
<%- unless @trusted_proxies.empty? -%>
  'trusted_proxies' =>
  array (
<%- index = 0 -%>
<%- @trusted_proxies.each do |item| -%>
    <%= index %> => '<%= item %>',
<%- index += 1 -%>
<%- end -%>
  ),
<%- end -%>
  'config_is_read_only' => true,
  'datadirectory' => '/var/www/html/data',
  'dbhost' => '<%= @dbhost %>',
  'dbname' => 'nextcloud',
  'dbpassword' => '<%= @mysql_user_password %>',
  'dbport' => '3306',
  'dbtableprefix' => 'oc_',
  'dbtype' => 'mysql',
  'dbuser' => 'nextcloud',
  'drive_email_template_text_left' => '<%= @drive_email_template_text_left %>',
  'drive_email_template_plain_text_left' => '<%= @drive_email_template_plain_text_left %>',
  'drive_email_template_url_left' => '<%= @drive_email_template_url_left %>',
  'gs.enabled' => '<%= @gs_enabled %>',
  'gs.federation' => '<%= @gs_federation %>',
  'gs.trustedHosts' => ['*.sunet.se'],
  'gss.discovery.manual.mapping.file' => '/var/www/html/mappingfile.json',
  'gss.discovery.manual.mapping.parameter' => 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6',
  'gss.discovery.manual.mapping.regex' => true,
  'gss.jwt.key' => '<%= @gss_jwt_key %>',
  'gss.master.admin' =>
  array (
<%- index = 0 -%>
<%- @gss_master_admin.each do |item| -%>
    <%= index %> => '<%= item %>',
<%- index += 1 -%>
<%- end -%>
  ),
  'gss.master.url' => '<%= @gss_master_url %>',
  'gss.mode' => '<%= @gss_mode %>',
  'gss.user.discovery.module' => '\\OCA\\GlobalSiteSelector\\UserDiscoveryModules\\ManualUserMapping',
  'installed' => true,
  'instanceid' => '<%= @instanceid %>',
  'log_type' => 'file',
  'loglevel' => 0,
  'lookup_server' => '<%= @lookup_server %>',
  'mail_domain' => '<%= @mail_domain %>',
  'mail_from_address' => '<%= @mail_from_address %>',
  'mail_sendmailmode' => 'smtp',
  'mail_smtpauth' => 1,
  'mail_smtpauthtype' => 'LOGIN',
  'mail_smtphost' => '<%= @mail_smtphost %>',
  'mail_smtpmode' => 'smtp',
  'mail_smtpname' => '<%= @smtpuser %>',
  'mail_smtppassword' => '<%= @smtppassword %>',
  'mail_smtpport' => '587',
  'mail_smtpsecure' => 'tls',
  'mail_template_class' => 'OCA\DriveEmailTemplate\EMailTemplate',
  'mysql.utf8mb4' => true,
  'overwritehost' => '<%= @site_name %>',
  'overwrite.cli.url' => 'https://<%= @site_name %>',
  'templatedirectory' => '',
  'skeletondirectory' => '<%= @skeletondirectory %>',
  'twofactor_enforced' => 'true',
  'twofactor_enforced_groups' =>
  array (
    0 => 'admin',
<%- index = 1 -%>
<%- @twofactor_enforced_groups.each do |item| -%>
    <%= index %> => '<%= item %>',
<%- index += 1 -%>
<%- end -%>
  ),
  'twofactor_enforced_excluded_groups' =>
  array (
<%- index = 0 -%>
<%- @twofactor_enforced_excluded_groups.each do |item| -%>
    <%= index %> => '<%= item %>',
<%- index += 1 -%>
<%- end -%>
  ),
  'version' => '<%= @nextcloud_version_string %>',
  'app_install_overwrite' =>
  array (
    0 => 'globalsiteselector',
  ),

);
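
With 'config_is_read_only' => true, changes have to come through this template rather than occ config:system:set, so it is worth sanity-checking a rendered file after deploy. A sketch, run inside the app container where php-cli is present:

    docker exec -u www-data nextcloud_app_1 php -l /var/www/html/config/config.php
    docker exec -u www-data nextcloud_app_1 php --define apc.enable_cli=1 /var/www/html/occ config:list system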

22
templates/application/cron.erb.sh
Executable file
@@ -0,0 +1,22 @@
#!/bin/bash
echo "$(date) - Start executing cron.sh"

# These are configurable with positional args
container=${1}
if [[ -z ${container} ]]; then
    container=nextcloud_app_1
fi

lock="/tmp/cron-${container}.lock"
if [[ -f ${lock} ]]; then
    echo "Lockfile exists, another instance of ${0} is running"
    exit 0
else
    touch ${lock}
fi

/usr/bin/docker exec -u www-data ${container} php --define apc.enable_cli=1 /var/www/html/cron.php

echo "$(date) - Done executing cron.sh"
rm ${lock}
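
The lock file makes overlapping runs exit early, so the script can be scheduled tightly. A typical crontab line would look like the following sketch (the installed path /opt/nextcloud/cron.sh is an assumption; this commit only ships the template):

    */5 * * * * root /opt/nextcloud/cron.sh nextcloud_app_1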

42
templates/application/docker-compose_nextcloud.yml.erb
Normal file
@@ -0,0 +1,42 @@
version: '3.2'

services:

  app:
    image: docker.sunet.se/drive/nextcloud-custom:<%= @nextcloud_version %>
    restart: always
    volumes:
      - /opt/nextcloud/000-default.conf:/etc/apache2/sites-enabled/000-default.conf
      - /opt/nextcloud/mpm_prefork.conf:/etc/apache2/mods-available/mpm_prefork.conf
      - /opt/nextcloud/404.html:/var/www/html/404.html
      - /opt/nextcloud/apache.php.ini:/etc/php/8.0/apache2/php.ini
      - /opt/nextcloud/apcu.ini:/etc/php/8.0/mods-available/apcu.ini
      - /opt/nextcloud/cli.php.ini:/etc/php/8.0/cli/php.ini
      - /opt/nextcloud/complete_reinstall.sh:/complete_reinstall.sh
      - /opt/nextcloud/config.php:/var/www/html/config/config.php
      - /opt/nextcloud/nextcloud.log:/var/www/html/data/nextcloud.log
      - /opt/nextcloud/rclone.conf:/rclone.conf
<%- if @skeletondirectory -%>
      - /opt/nextcloud/skeleton:<%= @skeletondirectory %>
<%- end -%>
<%- if @location =~ /^gss/ -%>
      - /opt/nextcloud/mappingfile.json:/var/www/html/mappingfile.json
<%- end -%>
<% if @location =~ /^kau/ -%>
      - /mnt:/opt/tmp/
<%- end -%>
    networks:
      - default
      - proxysql_proxysql
    dns:
      - 89.46.20.75
      - 89.46.21.29
      - 89.32.32.32
    ports:
      - 443:443
    command: apachectl -D FOREGROUND
    tty: true

networks:
  proxysql_proxysql:
    external: true
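
The upgrade script later in this commit expects the rendered file at /opt/nextcloud/docker-compose.yml, so bringing the stack up by hand would look roughly like:

    docker-compose -f /opt/nextcloud/docker-compose.yml up -d
    docker logs -f nextcloud_app_1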

2
templates/application/get_no_mysql_servers.erb.sh
Normal file
@@ -0,0 +1,2 @@
#!/bin/bash
docker exec -ti proxysql_proxysql_1 mysql -NB -e "select count(*) FROM main.runtime_mysql_servers where hostgroup_id = 10" | tr -d '\r'
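
The query returns how many backends ProxySQL currently has in hostgroup 10, which makes a cheap health gate. A hedged example (the installed path /usr/local/bin/get_no_mysql_servers.sh is an assumption):

    if [[ "$(/usr/local/bin/get_no_mysql_servers.sh)" -lt 1 ]]; then
        echo "ProxySQL has no MySQL servers in hostgroup 10"
    fi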

57
templates/application/mappingfile-prod.json.erb
Normal file
@@ -0,0 +1,57 @@
{
    "/antagning.se$/": "antagning.drive.sunet.se",
    "/bth.se$/": "bth.drive.sunet.se",
    "/chalmers.se$/": "chalmers.drive.sunet.se",
    "/du.se$/": "du.drive.sunet.se",
    "/eduid.se$/": "extern.drive.sunet.se",
    "/esh.se$/": "esh.drive.sunet.se",
    "/fhs.se$/": "fhs.drive.sunet.se",
    "/gih.se$/": "gih.drive.sunet.se",
    "/gu.se$/": "gu.drive.sunet.se",
    "/hb.se$/": "hb.drive.sunet.se",
    "/shh.se$/": "shh.drive.sunet.se",
    "/hh.se$/": "hh.drive.sunet.se",
    "/hhs.se$/": "hhs.drive.sunet.se",
    "/hig.se$/": "hig.drive.sunet.se",
    "/his.se$/": "his.drive.sunet.se",
    "/hj.se$/": "hj.drive.sunet.se",
    "/hkr.se$/": "hkr.drive.sunet.se",
    "/hv.se$/": "hv.drive.sunet.se",
    "/irf.se$/": "irf.drive.sunet.se",
    "/kb.se$/": "kb.drive.sunet.se",
    "/ki.se$/": "ki.drive.sunet.se",
    "/kkh.se$/": "kkh.drive.sunet.se",
    "/kmh.se$/": "kmh.drive.sunet.se",
    "/konstfack.se$/": "konstfack.drive.sunet.se",
    "/kth.se$/": "kth.drive.sunet.se",
    "/kva.se$/": "kva.drive.sunet.se",
    "/liu.se$/": "liu.drive.sunet.se",
    "/lnu.se$/": "lnu.drive.sunet.se",
    "/ltu.se$/": "ltu.drive.sunet.se",
    "/lu.se$/": "lu.drive.sunet.se",
    "/mah.se$/": "mau.drive.sunet.se",
    "/mau.se$/": "mau.drive.sunet.se",
    "/mdh.se$/": "mdu.drive.sunet.se",
    "/mdu.se$/": "mdu.drive.sunet.se",
    "/miun.se$/": "miun.drive.sunet.se",
    "/nordunet.se$/": "nordunet.drive.sunet.se",
    "/nrm.se$/": "nrm.drive.sunet.se",
    "/oru.se$/": "oru.drive.sunet.se",
    "/rkh.se$/": "rkh.drive.sunet.se",
    "/sics.se$/": "sics.drive.sunet.se",
    "/slu.se$/": "slu.drive.sunet.se",
    "/smhi.se$/": "smhi.drive.sunet.se",
    "/sp.se$/": "sp.drive.sunet.se",
    "/su.se$/": "su.drive.sunet.se",
    "/sunet.se$/": "sunet.drive.sunet.se",
    "/suni.se$/": "suni.drive.sunet.se",
    "/swamid.se$/": "swamid.drive.sunet.se",
    "/ths.se$/": "ths.drive.sunet.se",
    "/uhr.se$/": "uhr.drive.sunet.se",
    "/umu.se$/": "umu.drive.sunet.se",
    "/uniarts.se$/": "uniarts.drive.sunet.se",
    "/uu.se$/": "uu.drive.sunet.se",
    "/vinnova.se$/": "vinnova.drive.sunet.se",
    "/vr.se$/": "vr.drive.sunet.se",
    "/$/": "extern.drive.sunet.se"
}
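
Each key is a regex (the surrounding slashes are part of the string) matched against the user's scoped identifier, with the catch-all "/$/" last. A way to preview which instance an address would map to, a sketch using jq and assuming first match wins as the catch-all ordering suggests:

    jq -r 'to_entries[] | select("user@kth.se" | test(.key[1:-1])) | .value' mappingfile.json | head -1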

57
templates/application/mappingfile-test.json.erb
Normal file
@@ -0,0 +1,57 @@
{
    "/antagning.se$/": "antagning.drive.test.sunet.se",
    "/bth.se$/": "bth.drive.test.sunet.se",
    "/chalmers.se$/": "chalmers.drive.test.sunet.se",
    "/du.se$/": "du.drive.test.sunet.se",
    "/eduid.se$/": "extern.drive.test.sunet.se",
    "/esh.se$/": "esh.drive.test.sunet.se",
    "/fhs.se$/": "fhs.drive.test.sunet.se",
    "/gih.se$/": "gih.drive.test.sunet.se",
    "/gu.se$/": "gu.drive.test.sunet.se",
    "/hb.se$/": "hb.drive.test.sunet.se",
    "/shh.se$/": "shh.drive.test.sunet.se",
    "/hh.se$/": "hh.drive.test.sunet.se",
    "/hhs.se$/": "hhs.drive.test.sunet.se",
    "/hig.se$/": "hig.drive.test.sunet.se",
    "/his.se$/": "his.drive.test.sunet.se",
    "/hj.se$/": "hj.drive.test.sunet.se",
    "/hkr.se$/": "hkr.drive.test.sunet.se",
    "/hv.se$/": "hv.drive.test.sunet.se",
    "/irf.se$/": "irf.drive.test.sunet.se",
    "/kb.se$/": "kb.drive.test.sunet.se",
    "/ki.se$/": "ki.drive.test.sunet.se",
    "/kkh.se$/": "kkh.drive.test.sunet.se",
    "/kmh.se$/": "kmh.drive.test.sunet.se",
    "/konstfack.se$/": "konstfack.drive.test.sunet.se",
    "/kth.se$/": "kth.drive.test.sunet.se",
    "/kva.se$/": "kva.drive.test.sunet.se",
    "/liu.se$/": "liu.drive.test.sunet.se",
    "/lnu.se$/": "lnu.drive.test.sunet.se",
    "/ltu.se$/": "ltu.drive.test.sunet.se",
    "/lu.se$/": "lu.drive.test.sunet.se",
    "/mah.se$/": "mau.drive.test.sunet.se",
    "/mau.se$/": "mau.drive.test.sunet.se",
    "/mdh.se$/": "mdu.drive.test.sunet.se",
    "/mdu.se$/": "mdu.drive.test.sunet.se",
    "/miun.se$/": "miun.drive.test.sunet.se",
    "/nordunet.se$/": "nordunet.drive.test.sunet.se",
    "/nrm.se$/": "nrm.drive.test.sunet.se",
    "/oru.se$/": "oru.drive.test.sunet.se",
    "/rkh.se$/": "rkh.drive.test.sunet.se",
    "/sics.se$/": "sics.drive.test.sunet.se",
    "/slu.se$/": "slu.drive.test.sunet.se",
    "/smhi.se$/": "smhi.drive.test.sunet.se",
    "/sp.se$/": "sp.drive.test.sunet.se",
    "/su.se$/": "su.drive.test.sunet.se",
    "/sunet.se$/": "sunet.drive.test.sunet.se",
    "/suni.se$/": "suni.drive.test.sunet.se",
    "/swamid.se$/": "swamid.drive.test.sunet.se",
    "/ths.se$/": "ths.drive.test.sunet.se",
    "/uhr.se$/": "uhr.drive.test.sunet.se",
    "/umu.se$/": "umu.drive.test.sunet.se",
    "/uniarts.se$/": "uniarts.drive.test.sunet.se",
    "/uu.se$/": "uu.drive.test.sunet.se",
    "/vinnova.se$/": "vinnova.drive.test.sunet.se",
    "/vr.se$/": "vr.drive.test.sunet.se",
    "/$/": "extern.drive.test.sunet.se"
}

34
templates/application/migrate_external_mounts.erb
Normal file
@@ -0,0 +1,34 @@
#!/bin/bash

keymapping=${1}

if ! [[ -f ${keymapping} ]]; then
    echo "We need a valid keymapping file to proceed"
    exit 1
fi

function get_secrets {
    # Expects a space separated file with oldkey newkey newsecret
    grep "${1}" "${keymapping}" | awk '{print $2, $3}'
}

for line in $(docker exec -u www-data nextcloud_app_1 /var/www/html/occ files_external:list --all --output json | jq -r '.[] | "\(.mount_id);\(.configuration.key)"'); do
    id=$(echo ${line} | awk -F ';' '{print $1}')
    key=$(echo ${line} | awk -F ';' '{print $2}')
    if [[ "x${key}" == "x" ]] || [[ "x${id}" == "x" ]]; then
        echo "Old key or mount id is empty, bailing out."
        exit 1
    fi
    secrets="$(get_secrets ${key})"
    newkey="$(echo ${secrets} | awk '{print $1}')"
    secret="$(echo ${secrets} | awk '{print $2}')"
    if [[ "x${newkey}" == "x" ]] || [[ "x${secret}" == "x" ]]; then
        echo "New key or secret is empty, skipping mount id ${id}."
        continue
    fi

    docker exec -u www-data nextcloud_app_1 /var/www/html/occ files_external:config ${id} region us-east-1
    docker exec -u www-data nextcloud_app_1 /var/www/html/occ files_external:config ${id} hostname s3.sto4.safedc.net
    docker exec -u www-data nextcloud_app_1 /var/www/html/occ files_external:config ${id} key ${newkey}
    docker exec -u www-data nextcloud_app_1 /var/www/html/occ files_external:config ${id} secret ${secret}
done
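
The keymapping file named in get_secrets is three space-separated columns per credential, oldkey newkey newsecret, for example (placeholder values):

    OLDACCESSKEY NEWACCESSKEY NEWSECRETKEY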

17
templates/application/mpm_prefork.conf.erb
Normal file
@@ -0,0 +1,17 @@
# prefork MPM
# StartServers: number of server processes to start
# MinSpareServers: minimum number of server processes which are kept spare
# MaxSpareServers: maximum number of server processes which are kept spare
# MaxRequestWorkers: maximum number of server processes allowed to start
# MaxConnectionsPerChild: maximum number of requests a server process serves

<IfModule mpm_prefork_module>
    StartServers            5
    MinSpareServers         5
    MaxSpareServers         10
    MaxRequestWorkers       2048
    MaxConnectionsPerChild  0
    ServerLimit             2048
</IfModule>

# vim: syntax=apache ts=4 sw=4 sts=4 sr noet

17
templates/application/occ.erb
Executable file
@@ -0,0 +1,17 @@
#!/bin/bash

if [[ "${1}" =~ ^nextcloud ]]; then
    container=${1}
    shift
else
    container="nextcloud_app_1"
fi

oc_list=$(env | grep 'OC_')
if [[ "x${oc_list}" != "x" ]]; then
    for row in ${oc_list}; do
        MY_VARS="${MY_VARS} -e ${row}"
    done
fi

docker exec -ti ${MY_VARS} -u www-data ${container} php --define apc.enable_cli=1 /var/www/html/occ "$@"
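
Because the wrapper forwards every OC_* variable into the container, the password-from-env pattern works as usual. A sketch (user:add reading OC_PASS is standard occ behaviour; the username is a placeholder):

    occ status
    OC_PASS='correct-horse' occ user:add --password-from-env newuser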

8
templates/application/rclone.conf.erb
Normal file
@@ -0,0 +1,8 @@
[<%= @location %>]
type = s3
provider = Ceph
access_key_id = <%= @s3_key %>
secret_access_key = <%= @s3_secret %>
endpoint = <%= @s3_host %>
acl = private

3
templates/application/redis-cli.erb
Normal file
@@ -0,0 +1,3 @@
#!/bin/bash

docker exec -ti redis_redis-server_1 redis-cli -a <%= @redis_host_password %> "$@"
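
Arguments pass straight through to redis-cli inside the container, for example:

    redis-cli ping
    redis-cli info replication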

26
templates/application/upgrade23-25.erb.sh
Normal file
@@ -0,0 +1,26 @@
#!/bin/bash
touch /etc/no-automatic-cosmos
for version in 24.0.9.3-1 25.0.3.3-4; do
    docker pull docker.sunet.se/drive/nextcloud-custom:${version}
done

function block_for_container {
    while ! [[ "$( docker container inspect -f '{{.State.Running}}' "${1}" )" == "true" ]]; do
        echo "Waiting for ${1}"
        sleep 1s
    done
}
container="nextcloud_app_1"
sed -i "s/ 'version' => '.*',/ 'version' => '24.0.9.3',/" "/opt/nextcloud/config.php"
sed -i "s/ 'config_is_read_only' => true,/ 'config_is_read_only' => false,/" "/opt/nextcloud/config.php"
sed -i 's#docker.sunet.se/drive/nextcloud-custom:.*#docker.sunet.se/drive/nextcloud-custom:24.0.9.3-1#' "/opt/nextcloud/docker-compose.yml"
systemctl restart "sunet-nextcloud"
sleep 10s
block_for_container ${container}
occ upgrade && occ db:add-missing-columns && occ db:add-missing-indices && occ db:add-missing-primary-keys
sed -i 's#docker.sunet.se/drive/nextcloud-custom:.*#docker.sunet.se/drive/nextcloud-custom:25.0.3.3-4#' "/opt/nextcloud/docker-compose.yml"
systemctl restart "sunet-nextcloud"
sleep 10s
block_for_container ${container}
occ upgrade && occ db:add-missing-columns && occ db:add-missing-indices && occ db:add-missing-primary-keys && occ maintenance:repair
rm /etc/no-automatic-cosmos
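
Each hop ends with occ upgrade plus the db:add-missing-* repairs; a quick way to confirm a hop landed before moving on (a sketch using the occ wrapper above) is:

    occ status | grep -i version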

3
templates/application/user-sync.erb.sh
Normal file
@@ -0,0 +1,3 @@
#!/bin/bash
/usr/local/bin/occ globalsiteselector:users:update
/usr/local/bin/occ federation:sync-addressbooks

29
templates/cache/docker-compose_cache.yml.erb
vendored
Normal file
@@ -0,0 +1,29 @@
version: '3.2'

services:

  redis-server:
    image: redis:alpine
    dns:
      - 89.46.20.75
      - 89.46.21.29
      - 89.32.32.32
    ports:
      - "6379:6379"
    volumes:
      - /opt/redis/server:/data
    command: redis-server /data/server.conf --loglevel verbose
    restart: always

  redis-sentinel:
    image: redis:alpine
    dns:
      - 89.46.20.75
      - 89.46.21.29
      - 89.32.32.32
    ports:
      - "26379:26379"
    volumes:
      - /opt/redis/sentinel:/data
    command: redis-server /data/sentinel.conf --sentinel
    restart: always

20
templates/cache/sentinel.conf.erb
vendored
Normal file
@@ -0,0 +1,20 @@
port 26379
daemonize no
logfile sentinel.log
sentinel deny-scripts-reconfig yes
sentinel monitor <%= @leader_name %> <%= @leader_address %> 6379 2
<% if @location == 'sunet-prod' %>
sentinel down-after-milliseconds <%= @leader_name %> 10000
sentinel failover-timeout <%= @leader_name %> 60000
<% else %>
sentinel down-after-milliseconds <%= @leader_name %> 1000
sentinel failover-timeout <%= @leader_name %> 1000
<% end %>
sentinel auth-pass <%= @leader_name %> <%= @redis_host_password %>
sentinel config-epoch <%= @leader_name %> 1
sentinel leader-epoch <%= @leader_name %> 1
protected-mode yes
requirepass "<%= @redis_host_password %>"
sentinel current-epoch 1
sentinel announce-ip <%= @announce_address %>
sentinel announce-port 26379
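
With quorum 2 across the three sentinels, the current master can be queried from the sentinel container. A sketch; the container name redis_redis-sentinel_1 and the rendered value of <%= @leader_name %> are assumptions:

    docker exec -ti redis_redis-sentinel_1 redis-cli -p 26379 -a '<redis_host_password>' sentinel get-master-addr-by-name <leader_name>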

65
templates/cache/server.conf.erb
vendored
Normal file
@@ -0,0 +1,65 @@
protected-mode yes
port 6379
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize no
logfile server.log
databases 16
always-show-logo yes
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
<% if @location == 'sunet-prod' %>
rdbcompression no
appendonly yes
<% else %>
rdbcompression yes
appendonly no
<% end %>
rdbchecksum yes
dbfilename "dump.rdb"
masterauth "<%= @redis_host_password %>"
replica-serve-stale-data yes
replica-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
replica-priority 100
requirepass "<%= @redis_host_password %>"
lazyfree-lazy-eviction no
lazyfree-lazy-expire no
lazyfree-lazy-server-del no
replica-lazy-flush no
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
aof-use-rdb-preamble yes
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
stream-node-max-bytes 4096
stream-node-max-entries 100
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit replica 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
dynamic-hz yes
aof-rewrite-incremental-fsync yes
rdb-save-incremental-fsync yes
<%= @replica_of %>
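
The trailing <%= @replica_of %> is presumably empty on the leader and a replicaof directive on the replicas, along the lines of the following (an assumption about the Puppet side, which this commit does not show; the hostname is a placeholder):

    replicaof <leader_address> 6379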

253
templates/document/coolwsd.xml.erb
Normal file
@@ -0,0 +1,253 @@
<?xml version="1.0"?>
<!-- -*- nxml-child-indent: 4; tab-width: 4; indent-tabs-mode: nil -*- -->
<config>

    <!-- For more detailed documentation on typical configuration options please see:
         https://sdk.collaboraonline.com/docs/installation/Configuration.html -->

    <!-- Note: 'default' attributes are used to document a setting's default value as well as to use as fallback. -->
    <!-- Note: When adding a new entry, a default must be set in WSD in case the entry is missing upon deployment. -->

    <allowed_languages desc="List of supported languages of Writing Aids (spell checker, grammar checker, thesaurus, hyphenation) on this instance. Allowing too many has negative effect on startup performance." default="de_DE en_GB en_US es_ES fr_FR it nl pt_BR pt_PT ru">sv_SE de_DE en_GB en_US es_ES fr_FR it nl pt_BR pt_PT ru</allowed_languages>

    <sys_template_path desc="Path to a template tree with shared libraries etc to be used as source for chroot jails for child processes." type="path" relative="true" default="systemplate"/>
    <child_root_path desc="Path to the directory under which the chroot jails for the child processes will be created. Should be on the same file system as systemplate and lotemplate. Must be an empty directory." type="path" relative="true" default="jails"/>
    <mount_jail_tree desc="Controls whether the systemplate and lotemplate contents are mounted or not, which is much faster than the default of linking/copying each file." type="bool" default="true"/>

    <server_name desc="External hostname:port of the server running coolwsd. If empty, it's derived from the request (please set it if this doesn't work). May be specified when behind a reverse-proxy or when the hostname is not reachable directly." type="string" default=""/>
    <file_server_root_path desc="Path to the directory that should be considered root for the file server. This should be the directory containing cool." type="path" relative="true" default="browser/../"/>
    <hexify_embedded_urls desc="Enable to protect encoded URLs from getting decoded by intermediate hops. Particularly useful on Azure deployments" type="bool" default="false"/>
    <experimental_features desc="Enable/Disable experimental features" type="bool" default="false">false</experimental_features>
    <memproportion desc="The maximum percentage of system memory consumed by all of the Collabora Online Development Edition, after which we start cleaning up idle documents" type="double" default="80.0"/>
    <num_prespawn_children desc="Number of child processes to keep started in advance and waiting for new clients." type="uint" default="1">10</num_prespawn_children>
    <per_document desc="Document-specific settings, including LO Core settings.">
        <max_concurrency desc="The maximum number of threads to use while processing a document." type="uint" default="4">4</max_concurrency>
        <batch_priority desc="A (lower) priority for use by batch eg. convert-to processes to avoid starving interactive ones" type="uint" default="5">5</batch_priority>
        <document_signing_url desc="The endpoint URL of signing server, if empty the document signing is disabled" type="string" default=""/>
        <redlining_as_comments desc="If true show red-lines as comments" type="bool" default="false">false</redlining_as_comments>
        <pdf_resolution_dpi desc="The resolution, in DPI, used to render PDF documents as image. Memory consumption grows proportionally. Must be a positive value less than 385. Defaults to 96." type="uint" default="96">96</pdf_resolution_dpi>
        <idle_timeout_secs desc="The maximum number of seconds before unloading an idle document. Defaults to 1 hour." type="uint" default="3600">3600</idle_timeout_secs>
        <!-- Idle save and auto save are checked every 30 seconds -->
        <!-- They are disabled when the value is zero or negative. -->
        <idlesave_duration_secs desc="The number of idle seconds after which document, if modified, should be saved. Defaults to 30 seconds." type="int" default="30">30</idlesave_duration_secs>
        <autosave_duration_secs desc="The number of seconds after which document, if modified, should be saved. Defaults to 5 minutes." type="int" default="300">300</autosave_duration_secs>
        <always_save_on_exit desc="On exiting the last editor, always perform the save, even if the document is not modified." type="bool" default="false">false</always_save_on_exit>
        <limit_virt_mem_mb desc="The maximum virtual memory allowed to each document process. 0 for unlimited." type="uint">0</limit_virt_mem_mb>
        <limit_stack_mem_kb desc="The maximum stack size allowed to each document process. 0 for unlimited." type="uint">8000</limit_stack_mem_kb>
        <limit_file_size_mb desc="The maximum file size allowed to each document process to write. 0 for unlimited." type="uint">0</limit_file_size_mb>
        <limit_num_open_files desc="The maximum number of files allowed to each document process to open. 0 for unlimited." type="uint">0</limit_num_open_files>
        <limit_load_secs desc="Maximum number of seconds to wait for a document load to succeed. 0 for unlimited." type="uint" default="100">100</limit_load_secs>
        <limit_store_failures desc="Maximum number of consecutive save-and-upload to storage failures when unloading the document. 0 for unlimited (not recommended)." type="uint" default="5">5</limit_store_failures>
        <limit_convert_secs desc="Maximum number of seconds to wait for a document conversion to succeed. 0 for unlimited." type="uint" default="100">100</limit_convert_secs>
        <cleanup desc="Checks for resource consuming (bad) documents and kills associated kit process. A document is considered resource consuming (bad) if is in idle state for idle_time_secs period and memory usage passed limit_dirty_mem_mb or CPU usage passed limit_cpu_per" enable="true">
            <cleanup_interval_ms desc="Interval between two checks" type="uint" default="10000">10000</cleanup_interval_ms>
            <bad_behavior_period_secs desc="Minimum time period for a document to be in bad state before associated kit process is killed. If in this period the condition for bad document is not met once then this period is reset" type="uint" default="60">60</bad_behavior_period_secs>
            <idle_time_secs desc="Minimum idle time for a document to be candidate for bad state" type="uint" default="300">300</idle_time_secs>
            <limit_dirty_mem_mb desc="Minimum memory usage for a document to be candidate for bad state" type="uint" default="3072">3072</limit_dirty_mem_mb>
            <limit_cpu_per desc="Minimum CPU usage for a document to be candidate for bad state" type="uint" default="85">85</limit_cpu_per>
            <lost_kit_grace_period_secs desc="The minimum grace period for a lost kit process (not referenced by coolwsd) to resolve its lost status before it is terminated. To disable the cleanup of lost kits use value 0" default="120">120</lost_kit_grace_period_secs>
        </cleanup>
    </per_document>

    <per_view desc="View-specific settings.">
        <group_download_as desc="If set to true, groups download as icons into a dropdown for the notebookbar view." type="bool" default="false">false</group_download_as>
        <out_of_focus_timeout_secs desc="The maximum number of seconds before dimming and stopping updates when the browser tab is no longer in focus. Defaults to 120 seconds." type="uint" default="120">120</out_of_focus_timeout_secs>
        <idle_timeout_secs desc="The maximum number of seconds before dimming and stopping updates when the user is no longer active (even if the browser is in focus). Defaults to 15 minutes." type="uint" default="900">900</idle_timeout_secs>
    </per_view>

    <ver_suffix desc="Appended to etags to allow easy refresh of changed files during development" type="string" default=""/>

    <logging>
        <color type="bool">true</color>
        <!--
        Note to developers: When you do "make run", the logging.level will be set on the
        coolwsd command line, so if you want to change it for your testing, do it in
        Makefile.am, not here.
        -->
        <level type="string" desc="Can be 0-8 (with the lowest numbers being the least verbose), or none (turns off logging), fatal, critical, error, warning, notice, information, debug, trace" default="warning">warning</level>
        <most_verbose_level_settable_from_client type="string" desc="A loggingleveloverride message from the client can not set a more verbose log level than this" default="notice">notice</most_verbose_level_settable_from_client>
        <least_verbose_level_settable_from_client type="string" desc="A loggingleveloverride message from a client can not set a less verbose log level than this" default="fatal">fatal</least_verbose_level_settable_from_client>
        <protocol type="bool" desc="Enable minimal client-site JS protocol logging from the start">false</protocol>
        <!-- lokit_sal_log example: Log WebDAV-related messages, that is interesting for debugging Insert - Image operation: "+TIMESTAMP+INFO.ucb.ucp.webdav+WARN.ucb.ucp.webdav"
             See also: https://docs.libreoffice.org/sal/html/sal_log.html -->
        <lokit_sal_log type="string" desc="Fine tune log messages from LOKit. Default is to suppress log messages from LOKit." default="-INFO-WARN">-INFO-WARN</lokit_sal_log>
        <file enable="false">
            <!-- If you use other path than /var/log and you run coolwsd from systemd, make sure that you enable that path in coolwsd.service (ReadWritePaths). -->
            <property name="path" desc="Log file path.">/var/log/coolwsd.log</property>
            <property name="rotation" desc="Log file rotation strategy. See Poco FileChannel.">never</property>
            <property name="archive" desc="Append either timestamp or number to the archived log filename.">timestamp</property>
            <property name="compress" desc="Enable/disable log file compression.">true</property>
            <property name="purgeAge" desc="The maximum age of log files to preserve. See Poco FileChannel.">10 days</property>
            <property name="purgeCount" desc="The maximum number of log archives to preserve. Use 'none' to disable purging. See Poco FileChannel.">10</property>
            <property name="rotateOnOpen" desc="Enable/disable log file rotation on opening.">true</property>
            <property name="flush" desc="Enable/disable flushing after logging each line. May harm performance. Note that without flushing after each line, the log lines from the different processes will not appear in chronological order.">false</property>
        </file>
        <anonymize>
            <anonymize_user_data type="bool" desc="Enable to anonymize/obfuscate of user-data in logs. If default is true, it was forced at compile-time and cannot be disabled." default="false">false</anonymize_user_data>
            <anonymization_salt type="uint" desc="The salt used to anonymize/obfuscate user-data in logs. Use a secret 64-bit random number." default="82589933">82589933</anonymization_salt>
        </anonymize>
        <docstats type="bool" desc="Enable to see document handling information in logs." default="false">false</docstats>
    </logging>

    <!--
    Note to developers: When you do "make run", the trace_event[@enable] will be set on the
    coolwsd command line, so if you want to change it for your testing, do it in Makefile.am,
    not here.
    -->
    <trace_event desc="The possibility to turn on generation of a Chrome Trace Event file" enable="false">
        <path desc="Output path for the Trace Event file, to which they will be written if turned on at run-time" type="string" default="/var/log/coolwsd.trace.json">/var/log/coolwsd.trace.json</path>
    </trace_event>

    <browser_logging desc="Logging in the browser console" default="false">false</browser_logging>

    <trace desc="Dump commands and notifications for replay. When 'snapshot' is true, the source file is copied to the path first." enable="false">
        <path desc="Output path to hold trace file and docs. Use '%' for timestamp to avoid overwriting. For example: /some/path/to/cooltrace-%.gz" compress="true" snapshot="false"/>
        <filter>
            <message desc="Regex pattern of messages to exclude"/>
        </filter>
        <outgoing>
            <record desc="Whether or not to record outgoing messages" default="false">false</record>
        </outgoing>
    </trace>

    <net desc="Network settings">
        <!-- On systems where localhost resolves to IPv6 [::1] address first, when net.proto is all and net.listen is loopback, coolwsd unexpectedly listens on [::1] only.
             You need to change net.proto to IPv4, if you want to use 127.0.0.1. -->
        <proto type="string" default="all" desc="Protocol to use IPv4, IPv6 or all for both">all</proto>
        <listen type="string" default="any" desc="Listen address that coolwsd binds to. Can be 'any' or 'loopback'.">any</listen>
        <!-- this allows you to shift all of our URLs into a sub-path from
             https://my.com/browser/a123... to https://my.com/my/sub/path/browser/a123... -->
        <service_root type="path" default="" desc="Prefix all the pages, websockets, etc. with this path."/>
        <post_allow desc="Allow/deny client IP address for POST(REST)." allow="true">
            <host desc="The IPv4 private 192.168 block as plain IPv4 dotted decimal addresses.">192\.168\.[0-9]{1,3}\.[0-9]{1,3}</host>
            <host desc="Ditto, but as IPv4-mapped IPv6 addresses">::ffff:192\.168\.[0-9]{1,3}\.[0-9]{1,3}</host>
            <host desc="The IPv4 loopback (localhost) address.">127\.0\.0\.1</host>
            <host desc="Ditto, but as IPv4-mapped IPv6 address">::ffff:127\.0\.0\.1</host>
            <host desc="The IPv6 loopback (localhost) address.">::1</host>
            <host desc="The IPv4 private 172.16.0.0/12 subnet part 1.">172\.1[6789]\.[0-9]{1,3}\.[0-9]{1,3}</host>
            <host desc="Ditto, but as IPv4-mapped IPv6 addresses">::ffff:172\.1[6789]\.[0-9]{1,3}\.[0-9]{1,3}</host>
            <host desc="The IPv4 private 172.16.0.0/12 subnet part 2.">172\.2[0-9]\.[0-9]{1,3}\.[0-9]{1,3}</host>
            <host desc="Ditto, but as IPv4-mapped IPv6 addresses">::ffff:172\.2[0-9]\.[0-9]{1,3}\.[0-9]{1,3}</host>
            <host desc="The IPv4 private 172.16.0.0/12 subnet part 3.">172\.3[01]\.[0-9]{1,3}\.[0-9]{1,3}</host>
            <host desc="Ditto, but as IPv4-mapped IPv6 addresses">::ffff:172\.3[01]\.[0-9]{1,3}\.[0-9]{1,3}</host>
            <host desc="The IPv4 private 10.0.0.0/8 subnet (Podman).">10\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}</host>
            <host desc="Ditto, but as IPv4-mapped IPv6 addresses">::ffff:10\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}</host>
        </post_allow>
        <frame_ancestors desc="Specify who is allowed to embed the Collabora Online iframe (coolwsd and WOPI host are always allowed). Separate multiple hosts by space.">https://*.sunet.se</frame_ancestors>
        <connection_timeout_secs desc="Specifies the connection, send, recv timeout in seconds for connections initiated by coolwsd (such as WOPI connections)." type="int" default="30"/>

        <!-- this setting radically changes how online works, it should not be used in a production environment -->
        <proxy_prefix type="bool" default="false" desc="Enable a ProxyPrefix to be passed in through which to redirect requests"/>
    </net>

    <ssl desc="SSL settings">
        <!-- switches from https:// + wss:// to http:// + ws:// -->
        <enable type="bool" desc="Controls whether SSL encryption between coolwsd and the network is enabled (do not disable for production deployment). If default is false, must first be compiled with SSL support to enable." default="true">true</enable>
        <!-- SSL off-load can be done in a proxy, if so disable SSL, and enable termination below in production -->
        <termination desc="Connection via proxy where coolwsd acts as working via https, but actually uses http." type="bool" default="true">false</termination>
        <cert_file_path desc="Path to the cert file" relative="false">/etc/coolwsd/cert.pem</cert_file_path>
        <key_file_path desc="Path to the key file" relative="false">/etc/coolwsd/key.pem</key_file_path>
        <ca_file_path desc="Path to the ca file" relative="false">/etc/coolwsd/ca-chain.cert.pem</ca_file_path>
        <cipher_list desc="List of OpenSSL ciphers to accept" default="ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH"/>
        <hpkp desc="Enable HTTP Public key pinning" enable="false" report_only="false">
            <max_age desc="HPKP's max-age directive - time in seconds browser should remember the pins" enable="true">1000</max_age>
            <report_uri desc="HPKP's report-uri directive - pin validation failure are reported at this URL" enable="false"/>
            <pins desc="Base64 encoded SPKI fingerprints of keys to be pinned">
                <pin/>
            </pins>
        </hpkp>
        <sts desc="Strict-Transport-Security settings, per rfc6797. Subdomains are always included.">
            <enabled desc="Whether or not Strict-Transport-Security is enabled. Enable only when ready for production. Cannot be disabled without resetting the browsers." type="bool" default="false">false</enabled>
            <max_age desc="Strict-Transport-Security max-age directive, in seconds. 0 is allowed; please see rfc6797 for details. Defaults to 1 year." type="int" default="31536000">31536000</max_age>
        </sts>
    </ssl>

    <security desc="Altering these defaults potentially opens you to significant risk">
        <seccomp desc="Should we use the seccomp system call filtering." type="bool" default="true">true</seccomp>
        <capabilities desc="Should we require capabilities to isolate processes into chroot jails" type="bool" default="true">true</capabilities>
        <jwt_expiry_secs desc="Time in seconds before the Admin Console's JWT token expires" type="int" default="1800">1800</jwt_expiry_secs>
        <enable_macros_execution desc="Specifies whether the macro execution is enabled in general. This will enable Basic, Beanshell, Javascript and Python scripts. If it is set to false, the macro_security_level is ignored. If it is set to true, the mentioned entry specified the level of macro security." type="bool" default="false">false</enable_macros_execution>
        <macro_security_level desc="Level of Macro security. 1 (Medium) Confirmation required before executing macros from untrusted sources. 0 (Low, not recommended) All macros will be executed without confirmation." type="int" default="1">1</macro_security_level>
        <enable_metrics_unauthenticated desc="When enabled, the /cool/getMetrics endpoint will not require authentication." type="bool" default="false">false</enable_metrics_unauthenticated>
    </security>

    <certificates>
        <database_path type="string" desc="Path to the NSS certificates that are used for signing documents" default=""/>
    </certificates>

    <watermark>
        <opacity desc="Opacity of on-screen watermark from 0.0 to 1.0" type="double" default="0.2"/>
        <text desc="Watermark text to be displayed on the document if entered" type="string"/>
    </watermark>
    <welcome enable="false">false</welcome>

    <user_interface>
        <mode type="string" desc="Controls the user interface style. The 'default' means: Take the value from ui_defaults, or decide for one of classic or notebookbar (default|classic|notebookbar)" default="default">default</mode>
        <use_integration_theme desc="Use theme from the integrator" type="bool" default="true">true</use_integration_theme>
    </user_interface>

    <storage desc="Backend storage">
        <filesystem allow="false"/>
        <wopi desc="Allow/deny wopi storage." allow="true">
            <max_file_size desc="Maximum document size in bytes to load. 0 for unlimited." type="uint">0</max_file_size>
            <locking desc="Locking settings">
                <refresh desc="How frequently we should re-acquire a lock with the storage server, in seconds (default 15 mins) or 0 for no refresh" type="int" default="900">900</refresh>
            </locking>

            <alias_groups desc="default mode is 'first' it allows only the first host when groups are not defined. set mode to 'groups' and define group to allow multiple host and its aliases" mode="groups">
                <!-- If you need to use multiple wopi hosts, please change the mode to "groups" and
                     add the hosts below. If one host is accessible under multiple ip addresses
                     or names, add them as aliases. -->
                <% @customers.each do |customer| -%>
                <group>
                    <host desc="hostname to allow or deny." allow="true">https://<%= customer %>.<%= @domain %>:443</host>
                    <alias desc="regex pattern of aliasname">https://node[1-3].<%= customer %>.<%= @domain %>:443</alias>
                </group>
                <% end -%>
                <% @multinode_customers.each do |customer| -%>
                <group>
                    <host desc="hostname to allow or deny." allow="true">https://<%= customer %>.<%= @domain %>:443</host>
                    <alias desc="regex pattern of aliasname">https://multinode[1-3].<%= @domain %>:443</alias>
                </group>
                <% end -%>
            </alias_groups>

        </wopi>
        <ssl desc="SSL settings">
            <as_scheme type="bool" default="true" desc="When set we exclusively use the WOPI URI's scheme to enable SSL for storage">true</as_scheme>
            <enable type="bool" desc="If as_scheme is false or not set, this can be set to force SSL encryption between storage and coolwsd. When empty this defaults to following the ssl.enable setting"/>
            <cert_file_path desc="Path to the cert file" relative="false"/>
            <key_file_path desc="Path to the key file" relative="false"/>
            <ca_file_path desc="Path to the ca file. If this is not empty, then SSL verification will be strict, otherwise cert of storage (WOPI-like host) will not be verified." relative="false"/>
            <cipher_list desc="List of OpenSSL ciphers to accept. If empty the defaults are used. These can be overridden only if absolutely needed."/>
        </ssl>
    </storage>

    <tile_cache_persistent desc="Should the tiles persist between two editing sessions of the given document?" type="bool" default="true">true</tile_cache_persistent>

    <admin_console desc="Web admin console settings.">
        <enable desc="Enable the admin console functionality" type="bool" default="true">true</enable>
        <enable_pam desc="Enable admin user authentication with PAM" type="bool" default="false">false</enable_pam>
        <username desc="The username of the admin console. Ignored if PAM is enabled.">admin</username>
        <password desc="The password of the admin console. Deprecated on most platforms. Instead, use PAM or coolconfig to set up a secure password."><%= @collabora_admin_password %></password>
    </admin_console>

    <monitors desc="Addresses of servers we connect to on start for monitoring">
    </monitors>

    <quarantine_files desc="Files are stored here to be examined later in cases of crashes or similar situation." default="false" enable="false">
        <limit_dir_size_mb desc="Maximum directory size. On exceeding the specified limit, older files will be deleted." default="250" type="uint"/>
|
||||||
|
<max_versions_to_maintain desc="How many versions of the same file to keep." default="2" type="uint"/>
|
||||||
|
<path desc="Path to directory under which quarantined files will be stored" type="path" relative="true" default="quarantine"/>
|
||||||
|
<expiry_min desc="Time in mins after quarantined files will be deleted." type="int" default="30"/>
|
||||||
|
</quarantine_files>
|
||||||
|
|
||||||
|
<remote_config>
|
||||||
|
<remote_url desc="remote server to which you will send resquest to get remote config in response" type="string" default=""/>
|
||||||
|
</remote_config>
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
</config>
|
59
templates/lb/503.http.erb
Normal file
File diff suppressed because one or more lines are too long
59
templates/lb/check_exabgp_announce.erb
Executable file
@@ -0,0 +1,59 @@
#!/bin/bash

OK=0
WARNING=1
CRITICAL=2
UNKNOWN=3


function show_help {
    echo "Usage: ${0} -w <warn level as % of total> -c <crit level as % of total>"
    echo "Example: ${0} -w 1 -c 10"
}

warning=1
critical=2
output="OK: "

OPTIND=1
while getopts "w:c:" opt; do
    case "$opt" in
        w) warning=${OPTARG}
            ;;
        c) critical=${OPTARG}
            ;;
        *)
            show_help
            exit ${UNKNOWN}
            ;;
    esac
done

status=${OK}
all="$(cat /opt/frontend/monitor/*/announce)"
withdraw=$(echo "${all}" | grep withdraw)
num_withdraw=$(echo "${all}" | grep -c withdraw)
num_total=$(echo "${all}" | wc -l)
# Integer percentage of withdrawn routes; plain shell arithmetic avoids the
# empty string that the earlier bc/sed pipeline produced for small fractions.
percent=$(( num_withdraw * 100 / num_total ))

if [[ ${percent} -gt ${warning} ]]; then
    status=${WARNING}
    output="WARNING: "
fi
if [[ ${percent} -gt ${critical} ]]; then
    status=${CRITICAL}
    output="CRITICAL: "
fi

output="${output}Total routes: ${num_total}"

if [[ ${status} != 0 ]]; then
    output="${output}, number of withdrawn: ${num_withdraw}. There is a problem with the following routes "
    for ip in $(echo "${withdraw}" | awk '{print $3}'); do
        output="${output} ${ip}"
    done
fi

echo "${output} | withdraw=${percent}%;${warning};${critical};"

exit ${status}
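A hedged usage sketch for the plugin above; the thresholds are the percentage of withdrawn routes, and the sample output assumes twelve announced routes with none withdrawn:

    ./check_exabgp_announce -w 1 -c 10
    OK: Total routes: 12 | withdraw=0%;1;10;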
21
templates/lb/check_sarimner.erb
Normal file
@@ -0,0 +1,21 @@
#!/bin/bash

problems=""
num=0
status=0
output="OK: all systems nominal"
for i in $(ls /etc/systemd/system/frontend*.service | sed -e 's_.*/frontend-__' -e 's/.service//'); do
    if sudo docker logs "${i}_haproxy_1" | grep 'Device "sarimner0" does not exist.' > /dev/null; then
        num=$(( num + 1 ))
        # Append rather than overwrite, so every affected frontend is reported.
        problems="${problems}${i} "
    fi
done

if [[ ${num} -ne 0 ]]; then
    output="CRITICAL: problems with: ${problems}"
    status=2
fi

echo "${output}|problems=${num};1;1"
exit ${status}
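A hedged sample of the plugin's output when one frontend container (a hypothetical customer "foo") logs the missing sarimner0 device:

    CRITICAL: problems with: foo |problems=1;1;1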
16
templates/lookup/config.php.erb
Normal file
@@ -0,0 +1,16 @@

<?php


$CONFIG = [
    'DB' => [
        'host' => "<%= @dbhost %>",
        'db' => "lookup",
        'user' => "lookup",
        'pass' => "<%= @mysql_user_password %>",
    ],

    'GLOBAL_SCALE' => true,

    'AUTH_KEY' => "<%= @gss_jwt_key %>",
];
24
templates/lookup/docker-compose_lookup.yml.erb
Normal file
@@ -0,0 +1,24 @@
version: '3.2'

services:

  app:
    image: docker.sunet.se/drive/nextcloud-lookup:<%= @lookup_version %>
    restart: always
    volumes:
      - /opt/lookup/config.php:/var/www/html/config/config.php
    networks:
      - default
      - proxysql_proxysql
    dns:
      - 89.46.20.75
      - 89.46.21.29
      - 89.32.32.32
    ports:
      - 443:443
    command: apache2-foreground
    tty: true

networks:
  proxysql_proxysql:
    external: true
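A hedged deployment sketch; the rendered compose file's host path is an assumption (the config.php mount above suggests /opt/lookup):

    docker-compose -f /opt/lookup/docker-compose_lookup.yml up -d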
2
templates/mariadb/02-backup_user.sql.erb
Normal file
@@ -0,0 +1,2 @@
CREATE USER 'backup'@'%' IDENTIFIED BY '<%= @backup_password %>';
GRANT SELECT, INSERT, CREATE, RELOAD, PROCESS, SUPER, LOCK TABLES, REPLICATION SLAVE, REPLICATION CLIENT, SHOW VIEW, EVENT, CREATE TABLESPACE ON *.* TO 'backup'@'%';
2
templates/mariadb/03-proxysql.sql.erb
Normal file
@@ -0,0 +1,2 @@
CREATE USER 'proxysql'@'%' IDENTIFIED BY '<%= @proxysql_password %>';
GRANT USAGE ON *.* TO 'proxysql'@'%';
3
templates/mariadb/04-nextcloud.sql.erb
Normal file
@@ -0,0 +1,3 @@
CREATE SCHEMA nextcloud;
CREATE USER 'nextcloud'@'%' IDENTIFIED BY '<%= @mysql_user_password %>';
GRANT ALL PRIVILEGES ON nextcloud.* TO 'nextcloud'@'%' IDENTIFIED BY '<%= @mysql_user_password %>';
62
templates/mariadb/05-lookup.sql.erb
Normal file
@@ -0,0 +1,62 @@
SET SQL_MODE = "NO_AUTO_VALUE_ON_ZERO";
SET time_zone = "+00:00";

/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
/*!40101 SET NAMES utf8mb4 */;

CREATE DATABASE IF NOT EXISTS `lookup` DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
USE `lookup`;

CREATE USER 'lookup'@'%' IDENTIFIED BY '<%= @mysql_user_password %>';
GRANT ALL PRIVILEGES ON lookup.* TO 'lookup'@'%' IDENTIFIED BY '<%= @mysql_user_password %>';


DROP TABLE IF EXISTS `emailValidation`;
CREATE TABLE IF NOT EXISTS `emailValidation` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `storeId` int(11) NOT NULL,
  `token` varchar(16) COLLATE utf8mb4_unicode_ci NOT NULL,
  PRIMARY KEY (`id`),
  KEY `token` (`token`),
  KEY `storeId` (`storeId`)
) ENGINE=InnoDB AUTO_INCREMENT=16 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;

DROP TABLE IF EXISTS `store`;
CREATE TABLE IF NOT EXISTS `store` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `userId` int(11) NOT NULL,
  `k` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
  `v` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
  `valid` tinyint(1) NOT NULL DEFAULT '0',
  PRIMARY KEY (`id`),
  KEY `key` (`k`(191)),
  KEY `value` (`v`(191)),
  KEY `userId` (`userId`)
) ENGINE=InnoDB AUTO_INCREMENT=51 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;

DROP TABLE IF EXISTS `users`;
CREATE TABLE IF NOT EXISTS `users` (
  `id` int(10) UNSIGNED NOT NULL AUTO_INCREMENT,
  `federationId` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
  `timestamp` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
  PRIMARY KEY (`id`),
  KEY `federationId` (`federationId`(191))
) ENGINE=InnoDB AUTO_INCREMENT=15 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;

DROP TABLE IF EXISTS `toVerify`;
CREATE TABLE IF NOT EXISTS `toVerify` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `userId` int(11) NOT NULL,
  `storeId` int(11) NOT NULL,
  `property` varchar(512) COLLATE utf8mb4_unicode_ci NOT NULL,
  `location` varchar(255) COLLATE utf8mb4_unicode_ci NOT NULL,
  `tries` int(11) NOT NULL,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=16 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;


/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
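These numbered SQL templates render into the init directory that the MariaDB compose file further down mounts at /docker-entrypoint-initdb.d, so the image executes them in lexical order the first time an empty datadir is initialized. A hedged re-initialization sketch (host paths taken from that compose file):

    # Only on a disposable node: an empty datadir makes the entrypoint re-run the init scripts.
    docker-compose down && sudo rm -rf /etc/mariadb/datadir/* && docker-compose up -d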
303
templates/mariadb/check_galera_cluster.erb
Normal file
@@ -0,0 +1,303 @@
#!/bin/bash

# It is a changed version of the original https://github.com/fridim/nagios-plugin-check_galera_cluster/blob/master/check_galera_cluster


PROGNAME=`basename $0`
VERSION="Version 1.1.5"
AUTHOR="Guillaume Coré <fridim@onfi.re>, Ales Nosek <ales.nosek@gmail.com>, Staf Wagemakers <staf@wagemakers.be>, Claudio Kuenzler <claudiokuenzler.com>"

ST_OK=0
ST_WR=1
ST_CR=2
ST_UK=3

warnAlerts=0
critAlerts=0
unknAlerts=0

warnText=""
critText=""

print_version() {
    echo "$VERSION $AUTHOR"
}

print_help() {
    print_version $PROGNAME $VERSION
    echo ""
    echo "$PROGNAME is a monitoring plugin to monitor Galera cluster status."
    echo ""
    echo "$PROGNAME [-u USER] [-p PASSWORD] [-H HOST] [-P PORT] [-m file] [-w SIZE] [-c SIZE] [-s statefile] [-0]"
    echo ""
    echo "Options:"
    echo "  u)"
    echo "    MySQL user."
    echo "  p)"
    echo "    MySQL password."
    echo "  H)"
    echo "    MySQL host."
    echo "  P)"
    echo "    MySQL port."
    echo "  m)"
    echo "    MySQL extra my.cnf configuration file."
    echo "  w)"
    echo "    Sets minimum number of nodes in the cluster when WARNING is raised. (default is same as critical)."
    echo "  c)"
    echo "    Sets minimum number of nodes in the cluster when CRITICAL is raised. (default is 2)."
    #echo "  f)"
    #echo "    Sets critical value of wsrep_flow_control_paused (default is 0.1)."
    echo "  0)"
    echo "    Raise CRITICAL if the node is not primary"
    echo "  s)"
    echo "    Create state file, detect disconnected nodes"
    exit $ST_UK
}

# default values
crit=2
#fcp=0.1

check_executable() {
    if [ -z "$1" ]; then
        echo "check_executable: no parameter given!"
        exit $ST_UK
    fi

    if ! command -v "$1" &>/dev/null; then
        echo "UNKNOWN: Cannot find $1"
        exit $ST_UK
    fi
}

check_executable docker
check_executable bc

while getopts "hvu:p:H:P:w:c:m:s:0" OPTION; do
    case $OPTION in
        h)
            print_help
            exit $ST_UK
            ;;
        v)
            print_version $PROGNAME $VERSION
            exit $ST_UK
            ;;
        u)
            mysqluser=$OPTARG
            ;;
        p)
            password=$OPTARG
            ;;
        H)
            mysqlhost=$OPTARG
            ;;
        P)
            port=$OPTARG
            ;;
        m)
            myconfig=$OPTARG
            ;;
        w)
            warn=$OPTARG
            ;;
        c)
            crit=$OPTARG
            ;;
        0)
            primary='TRUE'
            ;;
        s)
            stateFile=$OPTARG
            ;;
        ?)
            echo "Unknown argument: $1"
            print_help
            exit $ST_UK
            ;;
    esac
done

if [ -z "$warn" ]; then
    warn=$crit
fi

create_param() {
    if [ -n "$2" ]; then
        echo $1$2
    fi
}

param_mysqlhost=$(create_param -h "$mysqlhost")
param_port=$(create_param -P "$port")
param_mysqluser=$(create_param -u "$mysqluser")
param_password=$(create_param -p "$password")
param_configfile=$(create_param --defaults-extra-file= "$myconfig")
export MYSQL_PWD=$password

param_mysql="$param_mysqlhost $param_port $param_mysqluser $param_password $param_configfile"

#
# verify the database connection
#

/usr/bin/docker exec mariadb_db_1 mysql --defaults-group-suffix=_backup -B -N -e '\s;' >/dev/null 2>&1 || {
    echo "CRITICAL: mysql connection check failed"
    exit $ST_CR
}

#
# retrieve the mysql status
#

#rMysqlStatus=$(/usr/bin/docker exec mariadb_db_1 mysql --defaults-group-suffix=_backup -B -N -e "show status like 'wsrep_%';")

#
# verify that the node is part of a cluster
#

rClusterStateUuid=$(/usr/bin/docker exec mariadb_db_1 mysql --defaults-group-suffix=_backup -B -N -e "select variable_value from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_cluster_state_uuid'")

if [ -z "$rClusterStateUuid" ]; then
    echo "CRITICAL: node is not part of a cluster."
    exit $ST_CR
fi

#rFlowControl=$(echo "$rMysqlStatus" | awk '/wsrep_flow_control_paused\t/ {print $2}') # < 0.1
#rFlowControl=$(printf "%.14f" $rFlowControl) # issue #4

rClusterSize=$(/usr/bin/docker exec mariadb_db_1 mysql --defaults-group-suffix=_backup -B -N -e "select variable_value from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_cluster_size'")

rClusterStatus=$(/usr/bin/docker exec mariadb_db_1 mysql --defaults-group-suffix=_backup -B -N -e "select variable_value from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_cluster_status'") # Primary

rReady=$(/usr/bin/docker exec mariadb_db_1 mysql --defaults-group-suffix=_backup -B -N -e "select variable_value from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_ready'") # ON

rConnected=$(/usr/bin/docker exec mariadb_db_1 mysql --defaults-group-suffix=_backup -B -N -e "select variable_value from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_connected'") # ON

rLocalStateComment=$(/usr/bin/docker exec mariadb_db_1 mysql --defaults-group-suffix=_backup -B -N -e "select variable_value from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_local_state_comment'") # Synced

# needed by the state-file logic below, so keep this query enabled
rIncommingAddresses=$(/usr/bin/docker exec mariadb_db_1 mysql --defaults-group-suffix=_backup -B -N -e "select variable_value from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_incoming_addresses'")

rSSTMethod=$(/usr/bin/docker exec mariadb_db_1 mysql --defaults-group-suffix=_backup -B -N -e "select variable_value from information_schema.GLOBAL_VARIABLES where VARIABLE_NAME = 'wsrep_sst_method'") # mariabackup

#if [ -z "$rFlowControl" ]; then
#    echo "UNKNOWN: wsrep_flow_control_paused is empty"
#    unknAlerts=$(($unknAlerts+1))
#fi

#if [ $(echo "$rFlowControl > $fcp" | bc) = 1 ]; then
#    echo "CRITICAL: wsrep_flow_control_paused is > $fcp"
#    critAlerts=$(($critAlerts+1))
#fi

if [ "$primary" = 'TRUE' ]; then
    if [ "$rClusterStatus" != 'Primary' ]; then
        critText+="CRITICAL: node is not primary (wsrep_cluster_status). "
        critAlerts=$(($critAlerts+1))
    fi
fi

if [ "$rReady" != 'ON' ]; then
    critText+="CRITICAL: node is not ready (wsrep_ready). "
    critAlerts=$(($critAlerts+1))
fi

if [ "$rConnected" != 'ON' ]; then
    critText+="CRITICAL: node is not connected (wsrep_connected). "
    critAlerts=$(($critAlerts+1))
fi

if [ "$rLocalStateComment" != 'Synced' ]; then
    critText+="CRITICAL: node is not synced - actual state is: $rLocalStateComment (wsrep_local_state_comment). "
    critAlerts=$(($critAlerts+1))
fi

if [ "$rSSTMethod" != 'mariabackup' ]; then
    critText+="CRITICAL: node is not backed up - actual state is: $rSSTMethod (wsrep_sst_method). "
    critAlerts=$(($critAlerts+1))
fi

if [ $rClusterSize -gt $warn ]; then
    # only display the ok message if the state check is not enabled
    if [ -z "$stateFile" ]; then
        echo "OK: number of NODES = $rClusterSize"
    fi
elif [ $rClusterSize -le $crit ]; then
    critText+="CRITICAL: number of NODES = $rClusterSize. "
    critAlerts=$(($critAlerts+1))
elif [ $rClusterSize -le $warn ]; then
    warnText+="WARNING: number of NODES = $rClusterSize."
    warnAlerts=$(($warnAlerts+1))
else
    exit $ST_UK
fi

#
# detect if the connection is lost automatically
#

if [ ! -z "$stateFile" ]; then

    touch $stateFile

    if [ $? != "0" ]; then

        echo "UNKNOWN: stateFile \"$stateFile\" is not writeable"
        unknAlerts=$(($unknAlerts+1))

    else

        if [ "$rConnected" = "ON" ]; then
            # get the currently connected nodes
            currentNodes=$(echo "${rIncommingAddresses}" | tr "," "\n" | sort -u)
            if [ -f "$stateFile" ]; then
                # get the nodes added to the cluster
                newNodes=$(echo $currentNodes | tr " " "\n" | comm -2 -3 - $stateFile)
                # get the nodes that were removed from the cluster
                missingNodes=$(echo $currentNodes | tr " " "\n" | comm -1 -3 - $stateFile)
                if [ ! -z "$newNodes" ]; then
                    # add the new cluster nodes to the state file
                    echo $newNodes | tr " " "\n" >> $stateFile
                fi
            else
                # there is no state file yet, creating a new one.
                echo $currentNodes | tr " " "\n" > $stateFile
            fi # -f stateFile
            # get the number of nodes that were part of the cluster before
            maxClusterSize=$(cat $stateFile | wc -l)

            if [ $maxClusterSize -eq $rClusterSize ]; then
                if [ $maxClusterSize -eq 1 ]; then
                    if [ $crit -eq 0 -a $warn -eq 0 ]; then
                        echo "OK: running single-node database cluster"
                    fi
                else
                    echo "OK: running redundant $rClusterSize online / $maxClusterSize total"
                fi
            else
                echo "WARNING: redundant $rClusterSize online / $maxClusterSize total, missing peers: $missingNodes"
                warnAlerts=$(($warnAlerts+1))
            fi

        fi # rConnected

    fi # -w stateFile

fi # -z stateFile

#
# exit
#

[ "$critAlerts" -gt "0" ] && echo $critText && exit $ST_CR
[ "$unknAlerts" -gt "0" ] && exit $ST_UK
[ "$warnAlerts" -gt "0" ] && echo $warnText && exit $ST_WR

exit 0
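A hedged invocation sketch for the Galera check above (thresholds are node counts, -0 makes a non-primary node critical, and the state file is a hypothetical path):

    ./check_galera_cluster -w 3 -c 2 -0 -s /var/tmp/galera_nodes.state
    OK: running redundant 3 online / 3 total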
14
templates/mariadb/credentials.cnf.erb
Normal file
@@ -0,0 +1,14 @@
[mysqld]
wsrep_sst_auth = "backup:<%= @backup_password %>"

[mariabackup]
user = backup
password = '<%= @backup_password %>'

[mysqldump]
user = backup
password = '<%= @backup_password %>'

[mysql_backup]
user = backup
password = '<%= @backup_password %>'
30
templates/mariadb/docker-compose_mariadb.yml.erb
Normal file
@@ -0,0 +1,30 @@
version: '3'

services:

  db:
    image: docker.sunet.se/drive/mariadb:<%= @mariadb_version %>
    restart: always
    volumes:
      - /etc/mariadb/backups:/backups
      - /etc/mariadb/conf:/etc/mysql/mariadb.conf.d
      - /etc/mariadb/datadir:/var/lib/mysql
      - /etc/mariadb/init:/docker-entrypoint-initdb.d
      - /etc/mariadb/scripts:/scripts
    network_mode: host
    dns:
      - 89.46.20.75
      - 89.46.21.29
      - 89.32.32.32
    environment:
      - MYSQL_ROOT_PASSWORD=<%= @mysql_root_password %>
      - BOOTSTRAP=<%= @bootstrap %>
      - FORCE_BOOTSTRAP=0
    ports:
      - 3306:3306
      - 4444:4444
      - 4567:4567
      - 4568:4568
    command: "--wsrep_cluster_address=gcomm://<%= @db_ip[0] %>,<%= @db_ip[1] %>,<%= @db_ip[2] %>"
    tty: true
47
templates/mariadb/my.cnf.erb
Normal file
@@ -0,0 +1,47 @@
[mysqld]

# General
binlog_format            = ROW
character_set_server     = utf8mb4
collation_server         = utf8mb4_general_ci
default_storage_engine   = InnoDB
local_infile             = 0
max_allowed_packet       = 256M
max_connections          = 750
skip_external_locking    = ON
skip_name_resolve        = ON

# Binary logging and replication
expire_logs_days         = 7
gtid_domain_id           = 1000
gtid_ignore_duplicates   = ON
gtid_strict_mode         = ON
log_bin                  = binlog
log_slave_updates        = ON
server_id                = <%= @server_id %>

# Innodb
innodb_autoinc_lock_mode       = 2
innodb_buffer_pool_size        = 4G # 60-75% of total RAM
innodb_file_per_table          = 1
innodb_flush_log_at_trx_commit = 0
innodb_flush_method            = O_DIRECT
innodb_io_capacity             = 2000 # IOPS dependent
innodb_io_capacity_max         = 4000 # IOPS dependent
innodb_log_file_size           = 512M
innodb_read_io_threads         = 4 # CPU dependent
innodb_rollback_on_timeout     = 1
innodb_write_io_threads        = 4 # CPU dependent
transaction_isolation          = 'READ-COMMITTED'

# Galera
wsrep_cluster_name       = "Nextcloud_MariaDB_Cluster"
wsrep_gtid_domain_id     = 1000 # same on all Galera nodes in the same segment
wsrep_gtid_mode          = ON
wsrep_log_conflicts      = ON
wsrep_on                 = ON
wsrep_provider           = /usr/lib/galera/libgalera_smm.so
wsrep_provider_options   = "gcache.size=2G;gmcast.segment=0" # gmcast.segment same on all Galera nodes in the same segment
wsrep_slave_threads      = 4 # CPU dependent
wsrep_sst_method         = mariabackup
wsrep_sync_wait          = 1
templates/mariadb/quorum.conf.erb
Normal file
10
templates/mariadb/quorum.conf.erb
Normal file
|
@ -0,0 +1,10 @@
|
||||||
|
quorum_alive_command='[[ "Primary" == $(/usr/bin/mysql -p<%= @quorum_password %> -NB -e \'show status like "wsrep_cluster_status"\' | awk \'{print $2}\') ]]'
|
||||||
|
quorum_db='quorum'
|
||||||
|
quorum_db_command='/usr/bin/mysql'
|
||||||
|
quorum_host='localhost'
|
||||||
|
quorum_id=<%= @quorum_id %>
|
||||||
|
quorum_interval='20'
|
||||||
|
quorum_notify_command='/usr/bin/echo alive status:${QUORUM_ALIVE_STATUS}, leader: ${QUORUM_LEADER}, my leader status: ${QUORUM_LEADER_STATUS}'
|
||||||
|
quorum_password='<%= @quorum_password %>'
|
||||||
|
quorum_table='service_election'
|
||||||
|
quorum_user='quorum'
|
99
templates/mariadb/quorum.erb.sh
Executable file
@@ -0,0 +1,99 @@
#!/usr/bin/env bash

# A bash implementation of http://code.openark.org/blog/mysql/leader-election-using-mysql

# Defaults
quorum_alive_command='/bin/true'
quorum_config='/etc/quorum.conf'
quorum_db='quorum'
quorum_db_command='/usr/bin/mysql'
quorum_host='localhost'
quorum_id=$(hostname -f)
quorum_interval='20'
quorum_notify_command='/usr/bin/echo alive status:${QUORUM_ALIVE_STATUS}, leader: ${QUORUM_LEADER}, my leader status: ${QUORUM_LEADER_STATUS}'
quorum_password='quorum'
quorum_table='service_election'
quorum_user='quorum'

# Override default config path from env
if [[ "x${QUORUM_CONFIG}" != "x" ]]; then
    quorum_config="${QUORUM_CONFIG}"
fi

# Override default config with settings from config file
if [[ -f "${quorum_config}" ]]; then
    . "${quorum_config}"
fi

# Override with env
if [[ "x${QUORUM_ALIVE_COMMAND}" != "x" ]]; then
    quorum_alive_command=${QUORUM_ALIVE_COMMAND}
fi
if [[ "x${QUORUM_DB}" != "x" ]]; then
    quorum_db=${QUORUM_DB}
fi
if [[ "x${QUORUM_DB_COMMAND}" != "x" ]]; then
    quorum_db_command=${QUORUM_DB_COMMAND}
fi
if [[ "x${QUORUM_HOST}" != "x" ]]; then
    quorum_host=${QUORUM_HOST}
fi
if [[ "x${QUORUM_ID}" != "x" ]]; then
    quorum_id=${QUORUM_ID}
fi
if [[ "x${QUORUM_INTERVAL}" != "x" ]]; then
    quorum_interval=${QUORUM_INTERVAL}
fi
if [[ "x${QUORUM_NOTIFY_COMMAND}" != "x" ]]; then
    quorum_notify_command=${QUORUM_NOTIFY_COMMAND}
fi
if [[ "x${QUORUM_PASSWORD}" != "x" ]]; then
    quorum_password=${QUORUM_PASSWORD}
fi
if [[ "x${QUORUM_TABLE}" != "x" ]]; then
    quorum_table=${QUORUM_TABLE}
fi
if [[ "x${QUORUM_USER}" != "x" ]]; then
    quorum_user=${QUORUM_USER}
fi

# MySQL command; --password must use '=' or mysql prompts instead of reading the value
quorum_db_command="${quorum_db_command} --database=${quorum_db} --user=${quorum_user} --password=${quorum_password}"

# Queries (double quotes so ${quorum_table} actually expands)
create_table_query="CREATE TABLE ${quorum_table} (
    anchor tinyint(3) unsigned NOT NULL,
    service_id varchar(128) NOT NULL,
    last_seen_active timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
    PRIMARY KEY (anchor)
) ENGINE=InnoDB"

table_exists_query="SELECT *
    FROM information_schema.tables
    WHERE table_schema = '${quorum_db}'
    AND table_name = '${quorum_table}'
    LIMIT 1"

election_query="insert ignore into ${quorum_table} ( anchor, service_id, last_seen_active ) values ( 1, '${quorum_id}', now() ) on duplicate key update service_id = if(last_seen_active < now() - interval ${quorum_interval} second, values(service_id), service_id), last_seen_active = if(service_id = values(service_id), values(last_seen_active), last_seen_active)"

is_leader_query="select count(*) as is_leader from ${quorum_table} where anchor=1 and service_id='${quorum_id}'"

who_is_leader_query="select max(service_id) as leader from ${quorum_table} where anchor=1"

# Set up table if it does not exist; mysql exits 0 even when the SELECT
# returns no rows, so test the output instead of the exit code
table_exists=$(${quorum_db_command} -NB -e "${table_exists_query}" 2>/dev/null)
if [[ -z "${table_exists}" ]]; then
    ${quorum_db_command} -e "${create_table_query}" >/dev/null 2>&1
fi

# Run the algorithm
${quorum_alive_command} >/dev/null 2>&1
alive_status=${?}
if [[ ${alive_status} -eq 0 ]]; then
    ${quorum_db_command} -e "${election_query}" >/dev/null 2>&1
fi
leader_status=$(${quorum_db_command} -e "${is_leader_query}")
leader=$(${quorum_db_command} -e "${who_is_leader_query}")

QUORUM_ALIVE_STATUS=${alive_status} QUORUM_LEADER=${leader} QUORUM_LEADER_STATUS=${leader_status} eval ${quorum_notify_command}
exit ${alive_status}
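A hedged invocation sketch for the election script above (the hostname is illustrative; note the default notify command echoes raw mysql output, column headers included):

    QUORUM_CONFIG=/etc/quorum.conf ./quorum.sh
    alive status:0, leader: leader db1.example.com, my leader status: is_leader 1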
8
templates/mariadb/rename-docker.sh
Normal file
@@ -0,0 +1,8 @@
#!/bin/bash
touch /etc/no-automatic-cosmos
for i in $(systemctl list-unit-files | grep sunet-docker | grep enabled | awk '{print $1}'); do systemctl disable --now ${i}; done
for i in $(systemctl list-unit-files | grep mariadb | grep disabled | awk '{print $1}'); do rm /etc/systemd/system/${i}; done
rm -r /opt/docker*mariadb
run-cosmos -v
docker ps
rm /etc/no-automatic-cosmos
9
templates/mariadb/run_manual_backup_dump.erb.sh
Normal file
@@ -0,0 +1,9 @@
#!/bin/bash

dump_name="mariadb-dump-$(date +%Y-%m-%dT%H.%M.%S).sql.gz"
dump_args="--all-databases --single-transaction --master-data=2 -u root -p${MYSQL_ROOT_PASSWORD}"

# Replication slave priv was not in backup user creation script previously
mysql -u root -p${MYSQL_ROOT_PASSWORD} -e "GRANT REPLICATION SLAVE ON *.* TO 'backup'@'%'"
echo "Running backup as root user"
mysqldump ${dump_args} | gzip >"/backups/${dump_name}"
3
templates/mariadb/size-test.erb
Normal file
@@ -0,0 +1,3 @@
#!/bin/bash

docker exec mariadb_db_1 mysql -u root -p'<%= @mysql_root_password %>' -N -B -e "show status like 'wsrep_cluster_size'"
3
templates/mariadb/status-test.erb
Normal file
@@ -0,0 +1,3 @@
#!/bin/bash

docker exec mariadb_db_1 mysql -u root -p'<%= @mysql_root_password %>' -N -B -e "show status like 'wsrep_cluster_status'"
2
templates/mariadb_backup/02-backup_user.sql.erb
Normal file
@@ -0,0 +1,2 @@
CREATE USER 'backup'@'localhost' IDENTIFIED BY '<%= @backup_password %>';
GRANT SELECT, INSERT, CREATE, RELOAD, PROCESS, SUPER, LOCK TABLES, REPLICATION SLAVE, REPLICATION CLIENT, SHOW VIEW, EVENT, CREATE TABLESPACE ON *.* TO 'backup'@'localhost';
10
templates/mariadb_backup/check_replication.erb
Executable file
@@ -0,0 +1,10 @@
#!/bin/bash

result="$(docker exec mariadbbackup_mariadb_backup_1 mysql -p<%= @mysql_root_password %> -BN -e 'show status like "slave_running"')"
# mysql -B separates columns with a tab, so match with a glob instead of a literal space
if [[ "${result}" == "Slave_running"*"ON" ]]; then
    echo "OK: Replica running"
    exit 0
else
    echo "CRITICAL: Replica not running"
    exit 2
fi
14
templates/mariadb_backup/credentials.cnf.erb
Normal file
@@ -0,0 +1,14 @@
[mysqld]
wsrep_sst_auth = "backup:<%= @backup_password %>"

[mariabackup]
user = backup
password = '<%= @backup_password %>'

[mysqldump]
user = backup
password = '<%= @backup_password %>'

[mysql_backup]
user = backup
password = '<%= @backup_password %>'
17
templates/mariadb_backup/do_backup.erb.sh
Normal file
@@ -0,0 +1,17 @@
#!/bin/bash
customer=${1}
stream_name="${customer}mariadb-stream-$(date +%Y-%m-%dT%H.%M.%S).gz"
dump_name="${customer}mariadb-dump-$(date +%Y-%m-%dT%H.%M.%S).sql.gz"
backup_dir="/backups/$(date +%Y/%m/%d)"
mkdir -p "${backup_dir}"

if [[ -z ${customer} ]]; then
    buopts="--slave-info --safe-slave-backup"
    dumpopts="--dump-slave"
    mysql -p${MYSQL_ROOT_PASSWORD} -e "stop slave"
fi
mariadb-backup --backup ${buopts} -u root -p${MYSQL_ROOT_PASSWORD} --stream=xbstream | gzip >"${backup_dir}/${stream_name}"
mysqldump --all-databases --single-transaction ${dumpopts} -u root -p${MYSQL_ROOT_PASSWORD} | gzip >"${backup_dir}/${dump_name}"
if [[ -z ${customer} ]]; then
    mysql -p${MYSQL_ROOT_PASSWORD} -e "start slave"
fi
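A hedged usage sketch for the backup script above: with no argument it takes a replica-aware backup (replication is stopped around the dump and --dump-slave records the primary's coordinates); with an argument the value is only used as a file-name prefix, here a hypothetical customer label:

    /do_backup.sh            # replica backup, written under /backups/YYYY/MM/DD
    /do_backup.sh sunet-     # plain dump, file names prefixed 'sunet-'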
@@ -0,0 +1,20 @@
version: '3.2'

services:

  mariadb_backup:
    image: docker.sunet.se/drive/mariadb
    dns:
      - 89.46.20.75
      - 89.46.21.29
      - 89.32.32.32
    environment:
      - MYSQL_ROOT_PASSWORD=<%= @mysql_root_password %>
    volumes:
      - /opt/mariadb_backup/backups:/backups
      - /opt/mariadb_backup/conf:/etc/mysql/mariadb.conf.d
      - /opt/mariadb_backup/datadir:/var/lib/mysql
      - /opt/mariadb_backup/init:/docker-entrypoint-initdb.d
      - /opt/mariadb_backup/start_replica_from_init.sh:/start_replica_from_init.sh
      - /opt/mariadb_backup/do_backup.sh:/do_backup.sh
    restart: always
33
templates/mariadb_backup/find_disabled_sharing.erb.sh
Normal file
@@ -0,0 +1,33 @@
#!/bin/bash

container=${1}
customer=${2}
if [[ -z ${container} ]]; then
    container='mariadbbackup_mariadb_backup_1'
fi
if [[ -z ${customer} ]]; then
    location='<%= @location %>'
else
    # fall back to the customer argument when one is given
    location=${customer}
fi

dexec="docker exec ${container}"

password=$(${dexec} env | grep MYSQL_ROOT_PASSWORD | awk -F '=' '{print $2}')

mysql="${dexec} mysql -p${password}"

project="statistics"
bucket="drive-server-coms"
base_dir="${project}:${bucket}"
mountpoint="/opt/statistics"
customer_dir="${mountpoint}/${location}"
mkdir -p "${customer_dir}"
rclone mkdir "${base_dir}/${location}"

${mysql} -NB -e \
    "select JSON_OBJECT('option_id',oc_external_options.option_id,'mount_id',oc_external_options.mount_id,'sharing_enabled',oc_external_options.value,'mount_point',oc_external_mounts.mount_point,'storage_backend',oc_external_mounts.storage_backend,'auth_backend',oc_external_mounts.auth_backend,'priority',oc_external_mounts.priority,'type',oc_external_mounts.type,'bucket',oc_external_config.value) as data from oc_external_options join oc_external_mounts on oc_external_options.mount_id=oc_external_mounts.mount_id and oc_external_options.key = 'enable_sharing' and oc_external_options.value = 'false' join oc_external_config on oc_external_config.mount_id=oc_external_mounts.mount_id where oc_external_config.key='bucket'" \
    nextcloud | jq -s . >"${customer_dir}/sharing_disabled.json"
status=${?}
if [[ ${status} == 0 ]]; then
    rclone move "${customer_dir}/sharing_disabled.json" "${base_dir}/${location}/"
fi
exit ${status}
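A hedged sketch of one element of the sharing_disabled.json array the query above produces; every field value here is illustrative, only the shape follows from the JSON_OBJECT call and `jq -s .`:

    [ { "option_id": 3, "mount_id": 2, "sharing_enabled": "false",
        "mount_point": "/project", "storage_backend": "amazons3",
        "auth_backend": "amazons3::accesskey", "priority": 100,
        "type": 1, "bucket": "some-bucket" } ]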
41
templates/mariadb_backup/listusers.erb.sh
Normal file
@@ -0,0 +1,41 @@
#!/bin/bash

container=${1}
customer=${2}
if [[ -z ${container} ]]; then
    container='mariadbbackup_mariadb_backup_1'
fi
if [[ -z ${customer} ]]; then
    location='<%= @location %>'
else
    # fall back to the customer argument when one is given
    location=${customer}
fi

dexec="docker exec ${container}"

password=$(${dexec} env | grep MYSQL_ROOT_PASSWORD | awk -F '=' '{print $2}')

mysql="${dexec} mysql -p${password}"

users="$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_global_scale_users')"
users="${users}
$(${mysql} -NB -e 'select uid,displayname from nextcloud.oc_users')"

project="statistics"
bucket="drive-server-coms"
base_dir="${project}:${bucket}"
mountpoint="/opt/statistics"
customer_dir="${mountpoint}/${location}"
mkdir -p "${customer_dir}"
rclone mkdir "${base_dir}/${location}"

echo "${users}" | awk 'BEGIN{print "{"} {print t "\""$1"\": \""$2"\""} {t=","} END{print "}"}' | jq . >"${customer_dir}/users.json"
status=0
if ! jq . "${customer_dir}/users.json" &>/dev/null; then
    status=1
fi
if [[ ${status} -eq 0 ]]; then
    # something is wrong if we can't copy the file in 30 seconds, so we should note that
    if ! timeout 30s rclone copy --use-cookies "${customer_dir}/users.json" "${base_dir}/${location}/"; then
        status=1
    fi
fi
exit ${status}
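A hedged sketch of the users.json document the awk pipeline above builds (uids and names are made up). Note that awk splits on any whitespace, so only the first word of a multi-word display name survives:

    { "alice": "Alice", "bob": "Bob" }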
37
templates/mariadb_backup/my.cnf.erb
Normal file
@@ -0,0 +1,37 @@
[mysqld]

# General
binlog_format            = ROW
character_set_server     = utf8mb4
collation_server         = utf8mb4_general_ci
default_storage_engine   = InnoDB
local_infile             = 0
max_allowed_packet       = 256M
max_connections          = 750
skip_external_locking    = ON
skip_name_resolve        = ON
skip_slave_start         = OFF

# Binary logging and replication
expire_logs_days         = 7
gtid_domain_id           = 1000
gtid_ignore_duplicates   = ON
gtid_strict_mode         = OFF
log_bin                  = binlog
log_slave_updates        = ON
relay_log                = 'relay-log'
server_id                = 2000

# Innodb
innodb_autoinc_lock_mode       = 2
innodb_buffer_pool_size        = 4G # 60-75% of total RAM
innodb_file_per_table          = 1
innodb_flush_log_at_trx_commit = 0
innodb_flush_method            = O_DIRECT
innodb_io_capacity             = 2000 # IOPS dependent
innodb_io_capacity_max         = 4000 # IOPS dependent
innodb_log_file_size           = 512M
innodb_read_io_threads         = 4 # CPU dependent
innodb_rollback_on_timeout     = 1
innodb_write_io_threads        = 4 # CPU dependent
transaction_isolation          = 'READ-COMMITTED'
6
templates/mariadb_backup/rclone.conf.erb
Normal file
@@ -0,0 +1,6 @@
[statistics]
type = webdav
url = https://sunet.drive.sunet.se/remote.php/dav/files/_script/
vendor = nextcloud
user = _script
pass = <%= @statistics_secret %>
17
templates/mariadb_backup/start_replica_from_init.erb.sh
Normal file
@@ -0,0 +1,17 @@
#!/usr/bin/env bash
mysql="mysql -u root -p${MYSQL_ROOT_PASSWORD}"
init_file='/backups/init.sql.gz'
if [[ -f ${init_file} ]]; then
    ${mysql} -e "STOP SLAVE;RESET SLAVE;"
    master_command=$(zgrep 'CHANGE MASTER TO MASTER_LOG_FILE' ${init_file} | sed -e 's/^-- //' -e 's/;$//')
    master_command="${master_command}, MASTER_HOST='<%= @first_db %>', MASTER_USER='backup'"
    master_command="${master_command}, MASTER_PASSWORD='<%= @backup_password %>', MASTER_SSL=1"
    master_command="${master_command}, MASTER_CONNECT_RETRY=20"
    zcat ${init_file} | ${mysql}
    ${mysql} -e "${master_command}"
    ${mysql} -e "START SLAVE"
    sleep 3s
    ${mysql} -e "SHOW SLAVE STATUS\G"
fi

exit 0
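A hedged note on the flow above: a dump taken with --master-data=2 (or --dump-slave) embeds a commented-out CHANGE MASTER TO line; zgrep recovers it, the script appends host and credentials, replays the dump, and only then starts replication. Bootstrapping a fresh replica might look like this (paths follow the compose file above):

    cp nightly-dump.sql.gz /opt/mariadb_backup/backups/init.sql.gz
    docker exec mariadbbackup_mariadb_backup_1 /start_replica_from_init.sh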
3
templates/mariadb_backup/status-test.erb
Normal file
@@ -0,0 +1,3 @@
#!/bin/bash

docker exec mariadbbackup_mariadb_backup_1 mysql -u root -p'<%= @mysql_root_password %>' -N -B -e "show status like 'Slave_running'"
24
templates/monitor/000-default.conf.erb
Normal file
@@ -0,0 +1,24 @@
<VirtualHost *:80>
    RewriteEngine On
    RewriteCond %{HTTPS} off
    RewriteCond %{REQUEST_URI} !^\/\.well-known\/.*$
    RewriteRule ^/?(.*) https://%{SERVER_NAME}/$1 [R,L]
    ProxyPass /.well-known/acme-challenge http://acme-c.sunet.se/.well-known/acme-challenge/
    ProxyPassReverse /.well-known/acme-challenge http://acme-c.sunet.se/.well-known/acme-challenge/
    <IfModule mod_headers.c>
        Header always set Strict-Transport-Security "max-age=15768000; includeSubDomains; preload"
        Header always set X-Frame-Options "SAMEORIGIN"
        Header always set X-XSS-Protection "1; mode=block"
        Header always set X-Content-Type-Options "nosniff"
        Header always set Content-Security-Policy "default-src https://<%= @certname %>"
    </IfModule>

    ServerAdmin webmaster@localhost
    DocumentRoot /var/www/html

    ErrorLog ${APACHE_LOG_DIR}/error.log
    CustomLog ${APACHE_LOG_DIR}/access.log combined

</VirtualHost>

# vim: syntax=apache ts=4 sw=4 sts=4 sr noet
22
templates/monitor/check_nrpe.cfg.erb
Normal file
@@ -0,0 +1,22 @@
# this file is managed with puppet.

# this command runs a program $ARG1$ with no arguments
define command {
    command_name  check_nrpe_1arg
    command_line  /usr/lib/nagios/plugins/check_nrpe -H $HOSTADDRESS$ -c $ARG1$
}

# this command runs a program $ARG1$ with no arguments, with a 30 second timeout
define command {
    command_name  check_nrpe_1arg_to30
    command_line  /usr/lib/nagios/plugins/check_nrpe -t 30 -H $HOSTADDRESS$ -c $ARG1$
}
# this command runs a program $ARG1$ with no arguments, with a 300 second timeout
define command {
    command_name  check_nrpe_1arg_to300
    command_line  /usr/lib/nagios/plugins/check_nrpe -t 300 -H $HOSTADDRESS$ -c $ARG1$
}
define command {
    command_name  check_nrpe_1arg_to600
    command_line  /usr/lib/nagios/plugins/check_nrpe -t 600 -H $HOSTADDRESS$ -c $ARG1$
}
229
templates/monitor/commands.cfg.erb
Normal file
|
@ -0,0 +1,229 @@
|
||||||
|
###############################################################################
|
||||||
|
# COMMANDS.CFG - SAMPLE COMMAND DEFINITIONS FOR NAEMON 1.3.0
|
||||||
|
#
|
||||||
|
#
|
||||||
|
# NOTES: This config file provides you with some example command definitions
|
||||||
|
# that you can reference in host, service, and contact definitions.
|
||||||
|
#
|
||||||
|
# You don't need to keep commands in a separate file from your other
|
||||||
|
# object definitions. This has been done just to make things easier to
|
||||||
|
# understand.
|
||||||
|
#
|
||||||
|
###############################################################################
|
||||||
|
###############################
|
||||||
|
# Slack notifications
|
||||||
|
###############################
|
||||||
|
|
||||||
|
# 'notify-service-by-slack' command definition
|
||||||
|
define command {
|
||||||
|
command_name notify-service-by-slack
|
||||||
|
command_line SERVICESTATE="$SERVICESTATE$" \
|
||||||
|
HOSTNAME="$HOSTNAME$" \
|
||||||
|
SERVICEDISPLAYNAME="$SERVICEDISPLAYNAME$" \
|
||||||
|
SERVICEOUTPUT="$SERVICEOUTPUT$" \
|
||||||
|
HOSTNAME="$HOSTNAME$" \
|
||||||
|
/usr/local/bin/slack_nagios.sh > /tmp/slack.log 2>&1
|
||||||
|
}
|
||||||
|
|
||||||
|
# 'notify-host-by-slack' command definition
|
||||||
|
define command {
|
||||||
|
command_name notify-host-by-slack
|
||||||
|
command_line SERVICESTATE="$SERVICESTATE$" \
|
||||||
|
HOSTNAME="$HOSTNAME$" \
|
||||||
|
SERVICEDISPLAYNAME="$SERVICEDISPLAYNAME$" \
|
||||||
|
SERVICEOUTPUT="$SERVICEOUTPUT$" \
|
||||||
|
HOSTNAME="$HOSTNAME$" \
|
||||||
|
/usr/local/bin/slack_nagios.sh > /tmp/slack.log 2>&1
|
||||||
|
}
|
||||||
|
################################################################################
|
||||||
|
#
|
||||||
|
# SAMPLE NOTIFICATION COMMANDS
|
||||||
|
#
|
||||||
|
# These are some example notification commands. They may or may not work on
|
||||||
|
# your system without modification. As an example, some systems will require
|
||||||
|
# you to use "/usr/bin/mailx" instead of "/usr/bin/mail" in the commands below.
|
||||||
|
#
|
||||||
|
################################################################################
|
||||||
|
# 'notify-host-by-email' command definition
|
||||||
|
define command {
|
||||||
|
command_name notify-host-by-email
|
||||||
|
command_line /usr/bin/printf "%b" "***** Naemon *****\n\nNotification Type: $NOTIFICATIONTYPE$\nHost: $HOSTNAME$\nState: $HOSTSTATE$\nAddress: $HOSTADDRESS$\nInfo: $HOSTOUTPUT$\n\nDate/Time: $LONGDATETIME$\n" \
|
||||||
|
| /usr/bin/mail \
|
||||||
|
-s "** $NOTIFICATIONTYPE$ Host Alert: $HOSTNAME$ is $HOSTSTATE$ **" $CONTACTEMAIL$
|
||||||
|
}
|
||||||
|
|
||||||
|
# 'notify-service-by-email' command definition
|
||||||
|
define command {
|
||||||
|
command_name notify-service-by-email
|
||||||
|
command_line /usr/bin/printf "%b" "***** Naemon *****\n\nNotification Type: $NOTIFICATIONTYPE$\n\nService: $SERVICEDESC$\nHost: $HOSTALIAS$\nAddress: $HOSTADDRESS$\nState: $SERVICESTATE$\n\nDate/Time: $LONGDATETIME$\n\nAdditional Info:\n\n$SERVICEOUTPUT$\n" \
|
||||||
|
| /usr/bin/mail \
|
||||||
|
-s "** $NOTIFICATIONTYPE$ Service Alert: $HOSTALIAS$/$SERVICEDESC$ is $SERVICESTATE$ **" $CONTACTEMAIL$
|
||||||
|
}
|
||||||
|
|
||||||
|
################################################################################
|
||||||
|
#
|
||||||
|
# SAMPLE HOST CHECK COMMANDS
|
||||||
|
#
|
||||||
|
################################################################################
|
||||||
|
# This command checks to see if a host is "alive" by pinging it
|
||||||
|
# The check must result in a 100% packet loss or 5 second (5000ms) round trip
|
||||||
|
# average time to produce a critical error.
|
||||||
|
# Note: Five ICMP echo packets are sent (determined by the '-p 5' argument)
|
||||||
|
# 'check-host-alive' command definition
|
||||||
|
#define command {
|
||||||
|
# command_name check-host-alive
|
||||||
|
# command_line $USER1$/check_ping -H $HOSTADDRESS$ -w 3000.0,80% -c 5000.0,100% -p 5
|
||||||
|
#}
|
||||||
|
|
||||||
|
################################################################################
|
||||||
|
#
|
||||||
|
# SAMPLE SERVICE CHECK COMMANDS
|
||||||
|
#
|
||||||
|
# These are some example service check commands. They may or may not work on
|
||||||
|
# your system, as they must be modified for your plugins. See the HTML
|
||||||
|
# documentation on the plugins for examples of how to configure command definitions.
|
||||||
|
#
|
||||||
|
# NOTE: The following 'check_local_...' functions are designed to monitor
|
||||||
|
# various metrics on the host that Naemon is running on (i.e. this one).
|
||||||
|
################################################################################
|
||||||
|
# 'check_local_disk' command definition
|
||||||
|
define command {
|
||||||
|
command_name check_local_disk
|
||||||
|
command_line $USER1$/check_disk -w $ARG1$ -c $ARG2$ -p $ARG3$
|
||||||
|
}
|
||||||
|
|
||||||
|
# 'check_local_load' command definition
|
||||||
|
define command {
|
||||||
|
command_name check_local_load
|
||||||
|
command_line $USER1$/check_load -w $ARG1$ -c $ARG2$
|
||||||
|
}
|
||||||
|
|
||||||
|
# 'check_local_procs' command definition
|
||||||
|
define command {
|
||||||
|
command_name check_local_procs
|
||||||
|
command_line $USER1$/check_procs -w $ARG1$ -c $ARG2$ -s $ARG3$
|
||||||
|
}
|
||||||
|
|
||||||
|
# 'check_local_users' command definition
|
||||||
|
define command {
|
||||||
|
command_name check_local_users
|
||||||
|
command_line $USER1$/check_users -w $ARG1$ -c $ARG2$
|
||||||
|
}
|
||||||
|
|
||||||
|
# 'check_local_swap' command definition
|
||||||
|
define command {
|
||||||
|
command_name check_local_swap
|
||||||
|
command_line $USER1$/check_swap -w $ARG1$ -c $ARG2$
|
}

# 'check_local_mrtgtraf' command definition
define command {
    command_name    check_local_mrtgtraf
    command_line    $USER1$/check_mrtgtraf -F $ARG1$ -a $ARG2$ -w $ARG3$ -c $ARG4$ -e $ARG5$
}

#
#################################################################################
## NOTE: The following 'check_...' commands are used to monitor services on
##       both local and remote hosts.
#################################################################################

## 'check_ftp' command definition
#define command {
#    command_name    check_ftp
#    command_line    $USER1$/check_ftp -H $HOSTADDRESS$ $ARG1$
#}
#
## 'check_hpjd' command definition
#define command {
#    command_name    check_hpjd
#    command_line    $USER1$/check_hpjd -H $HOSTADDRESS$ $ARG1$
#}
#
# 'check_snmp' command definition
define command {
    command_name    check_snmp
    command_line    $USER1$/check_snmp -H $HOSTADDRESS$ $ARG1$
}
#
## 'check_http' command definition
#define command {
#    command_name    check_http
#    command_line    $USER1$/check_http -I $HOSTADDRESS$ $ARG1$
#}
#
## 'check_ssh' command definition
#define command {
#    command_name    check_ssh
#    command_line    $USER1$/check_ssh $ARG1$ $HOSTADDRESS$
#}
#
## 'check_dhcp' command definition
#define command {
#    command_name    check_dhcp
#    command_line    $USER1$/check_dhcp $ARG1$
#}
#
## 'check_ping' command definition
#define command {
#    command_name    check_ping
#    command_line    $USER1$/check_ping -H $HOSTADDRESS$ -w $ARG1$ -c $ARG2$ -p 5
#}
#
## 'check_pop' command definition
#define command {
#    command_name    check_pop
#    command_line    $USER1$/check_pop -H $HOSTADDRESS$ $ARG1$
#}
#
## 'check_imap' command definition
#define command {
#    command_name    check_imap
#    command_line    $USER1$/check_imap -H $HOSTADDRESS$ $ARG1$
#}
#
## 'check_smtp' command definition
#define command {
#    command_name    check_smtp
#    command_line    $USER1$/check_smtp -H $HOSTADDRESS$ $ARG1$
#}
#
## 'check_tcp' command definition
#define command {
#    command_name    check_tcp
#    command_line    $USER1$/check_tcp -H $HOSTADDRESS$ -p $ARG1$ $ARG2$
#}
#
## 'check_udp' command definition
#define command {
#    command_name    check_udp
#    command_line    $USER1$/check_udp -H $HOSTADDRESS$ -p $ARG1$ $ARG2$
#}
#
## 'check_nt' command definition
#define command {
#    command_name    check_nt
#    command_line    $USER1$/check_nt -H $HOSTADDRESS$ -p 12489 -v $ARG1$ $ARG2$
#}

################################################################################
#
# SAMPLE PERFORMANCE DATA COMMANDS
#
# These are sample performance data commands that can be used to send performance
# data output to two text files (one for hosts, another for services). If you
# plan on simply writing performance data out to a file, consider using the
# host_perfdata_file and service_perfdata_file options in the main config file.
#
################################################################################

# 'process-host-perfdata' command definition
define command {
    command_name    process-host-perfdata
    command_line    /usr/bin/printf "%b" "$LASTHOSTCHECK$\t$HOSTNAME$\t$HOSTSTATE$\t$HOSTATTEMPT$\t$HOSTSTATETYPE$\t$HOSTEXECUTIONTIME$\t$HOSTOUTPUT$\t$HOSTPERFDATA$\n" \
        >> /var/lib/naemon/host-perfdata.out
}

# 'process-service-perfdata' command definition
define command {
    command_name    process-service-perfdata
    command_line    /usr/bin/printf "%b" "$LASTSERVICECHECK$\t$HOSTNAME$\t$SERVICEDESC$\t$SERVICESTATE$\t$SERVICEATTEMPT$\t$SERVICESTATETYPE$\t$SERVICEEXECUTIONTIME$\t$SERVICELATENCY$\t$SERVICEOUTPUT$\t$SERVICEPERFDATA$\n" \
        >> /var/lib/naemon/service-perfdata.out
}
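
Both process-*-perfdata commands stay inert until performance-data processing is switched on in the Naemon core configuration. A minimal sketch of that wiring, using the standard core options (the naemon.cfg path is an assumption, not part of this commit):

    # /etc/naemon/naemon.cfg (sketch)
    process_performance_data=1
    host_perfdata_command=process-host-perfdata
    service_perfdata_command=process-service-perfdata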
54
templates/monitor/contacts.cfg.erb
Normal file
@ -0,0 +1,54 @@
###############################################################################
# CONTACTS.CFG - SAMPLE CONTACT/CONTACTGROUP DEFINITIONS
#
#
# NOTES: This config file provides you with some example contact and contact
#        group definitions that you can reference in host and service
#        definitions.
#
#        You don't need to keep these definitions in a separate file from your
#        other object definitions. This has been done just to make things
#        easier to understand.
#
###############################################################################

###############################################################################
###############################################################################
#
# CONTACTS
#
###############################################################################
###############################################################################

# Just one contact defined by default - the Naemon admin (that's you)
# This contact definition inherits a lot of default values from the 'generic-contact'
# template which is defined elsewhere.
define contact {
    contact_name    nagiosadmin         ; Short name of user
    alias           Naemon Admin        ; Full name of user
    use             generic-contact     ; Inherit default values from the generic-contact template (defined elsewhere)
    email           naemon@localhost    ; <<***** CHANGE THIS TO YOUR EMAIL ADDRESS ******
}

###############################################################################
###############################################################################
#
# CONTACT GROUPS
#
###############################################################################
###############################################################################

# Only the nagiosadmin contact needs to be a member of this group; the slack
# contact below is wired to its own notification commands and needs no group.
define contactgroup {
    contactgroup_name    admins
    alias                Naemon Administrators
    members              nagiosadmin
}

define contact {
    contact_name                    slack
    alias                           Slack
    service_notification_period    24x7
    host_notification_period       24x7
    service_notification_options   w,u,c,r
    host_notification_options      d,r
    service_notification_commands  notify-service-by-slack
    host_notification_commands     notify-host-by-slack
}
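
The slack contact points at notify-service-by-slack and notify-host-by-slack, which this template does not define. For orientation only, a hedged sketch of what such a command could look like (the webhook URL and message format are placeholders, not taken from this repository):

    define command {
        command_name    notify-service-by-slack
        command_line    /usr/bin/curl -s -X POST --data-urlencode 'payload={"text": "$NOTIFICATIONTYPE$: $HOSTALIAS$/$SERVICEDESC$ is $SERVICESTATE$"}' https://hooks.slack.com/services/REPLACE/ME
    }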
14
templates/monitor/grafana.conf.erb
Normal file
@ -0,0 +1,14 @@
<Location /grafana>
    ProxyPass http://127.0.0.1:3000 retry=0 disablereuse=On
    ProxyPassReverse http://127.0.0.1:3000/grafana
    RewriteEngine On
    RewriteRule .* - [E=PROXY_USER:%{LA-U:REMOTE_USER},NS]
    SetEnvIf Request_Protocol ^HTTPS.* IS_HTTPS=1
    SetEnvIf Authorization "^.+$" IS_BASIC_AUTH=1
    # without thruk cookie auth, use the proxy user from the rewrite rule above
    RequestHeader set X-WEBAUTH-USER "%{PROXY_USER}s" env=IS_HTTPS
    RequestHeader set X-WEBAUTH-USER "%{PROXY_USER}e" env=!IS_HTTPS
    # when thruk cookie auth is used, fall back to the remote user directly
    RequestHeader set X-WEBAUTH-USER "%{REMOTE_USER}e" env=!IS_BASIC_AUTH
    RequestHeader unset Authorization
</Location>
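
This block relies on mod_proxy_http, mod_rewrite, mod_headers and mod_setenvif being loaded; the manifest presumably handles that, but on a Debian-family host the manual equivalent is roughly (a sketch, not part of this commit):

    a2enmod proxy_http rewrite headers setenvif
    systemctl reload apache2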
42
templates/monitor/grafana.ini.erb
Normal file
@ -0,0 +1,42 @@
# This file is managed by puppet
[paths]
logs = /var/log/grafana

[log]
mode = file

[log.file]
level = Info
daily_rotate = true

[server]
root_url = %(protocol)s://%(domain)s:%(http_port)s/grafana/
serve_from_sub_path = true

[snapshots]
external_enabled = false

[security]
disable_gravatar = true

[users]
allow_sign_up = false
default_theme = light

[auth.basic]
enabled = false

[auth.proxy]
enabled = true
header_name = X-WEBAUTH-USER
header_property = username
auto_sign_up = true
sync_ttl = 60
whitelist = #89.46.21.220, 001:6b0:6c::2de
headers = Email:X-User-Email, Name:X-User-Name
headers_encoded = false
enable_login_token = false

[alerting]
enabled = true
execute_alerts = true
22
templates/monitor/histou.cfg.erb
Normal file
22
templates/monitor/histou.cfg.erb
Normal file
|
@ -0,0 +1,22 @@
|
||||||
|
define command {
|
||||||
|
command_name process-host-perfdata-file-nagflux
|
||||||
|
command_line /bin/mv /var/naemon/host-perfdata /var/nagflux/perfdata/$TIMET$.perfdata.host
|
||||||
|
}
|
||||||
|
|
||||||
|
define command {
|
||||||
|
command_name process-service-perfdata-file-nagflux
|
||||||
|
command_line /bin/mv /var/naemon/service-perfdata /var/nagflux/perfdata/$TIMET$.perfdata.service
|
||||||
|
}
|
||||||
|
|
||||||
|
define host {
|
||||||
|
name host-grafana
|
||||||
|
action_url https:<%= @monitorhost %>/grafana/dashboard/script/histou.js?host=$HOSTNAME$&theme=light&annotations=true
|
||||||
|
notes_url https:<%= @monitorhost %>/dokuwiki/doku.php?id=inventory:$HOSTNAME$
|
||||||
|
register 0
|
||||||
|
}
|
||||||
|
|
||||||
|
define service {
|
||||||
|
name service-grafana
|
||||||
|
action_url https:<%= @monitorhost %>/grafana/dashboard/script/histou.js?host=$HOSTNAME$&service=$SERVICEDESC$&theme=light&annotations=true
|
||||||
|
register 0
|
||||||
|
}
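
host-grafana and service-grafana are pure templates (register 0); concrete objects pick them up via use. A sketch of a host that would get the Grafana action link (the name and address are invented for illustration):

    define host {
        host_name    drive-test1
        alias        drive-test1
        address      192.0.2.10
        use          monitor-host,host-grafana
    }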
24
templates/monitor/histou.ini.erb
Normal file
@ -0,0 +1,24 @@
[general]
socketTimeout = 10
phpCommand = "php"
; leave tmpFolder empty to use the default php folder
tmpFolder = ""
specialChar = "&"
; set the backend: influxdb, elasticsearch
databaseType = "influxdb"
forecastDatasourceName = "nagflux_forecast"
; disable the panel title globally
disablePanelTitle = false

[folder]
defaultTemplateFolder = "templates/default/"
customTemplateFolder = "templates/custom/"
forecastTemplateFolder = "forecasts/"

[influxdb]
url = "http://127.0.0.1:8086/query?db=nagflux"
hostcheckAlias = "hostcheck"

[elasticsearch]
url = "http://127.0.0.1:8086/nagflux"
hostcheckAlias = "hostcheck"
223
templates/monitor/histou.js.erb
Normal file
@ -0,0 +1,223 @@
/* global _ */

// accessible variables in this scope
var window, document, ARGS, $, jQuery, moment, kbn;

// parse arguments
parseArgs();

return function (callback) {
    if (window.location.href.search('/dashboard-solo/') != -1) {
        document.documentElement.style.background = '#FFF';
    }

    var url = 'https://<%= @monitorhost%>/histou/';
    var configUrl = url + 'index.php?host=' + host + '&service=' + service + '&height=' + height + '&legend=' + legend + debug + disablePanelTitle + disablePerfdataLookup + specificTemplate + '&annotations=' + annotations;

    var flotAddons = url + 'flotAddons.js';
    $.getScript(flotAddons, function () {});
    if (!_.isUndefined(ARGS.customCSSFile)) {
        $('head').append('<link rel="stylesheet" href="' + ARGS.customCSSFile + '" type="text/css" />');
    }
    var cssLoaded = false;
    jQuery('body').on('DOMNodeInserted', 'DIV.drop-popover', function (e) {
        var cssUrl = url + 'lightbox/css/light.css';
        if (!cssLoaded) {
            $('head').append('<link rel="stylesheet" href="' + cssUrl + '" type="text/css" />');
            $.getScript(url + 'lightbox/js/light.js', function () {});
            cssLoaded = true;
        }

        var box = $(e.currentTarget).find("DIV.sakuli-popup");
        if (box.length > 0) {
            $(box[0]).attr('class', 'sakuli-image');
            // NB: "site" is expected to be provided by the embedding Grafana page; it is not declared in this file
            var sakuliUrl = site[1] + box[0].innerHTML;
            var svcoutput;
            var imagename;
            jQuery.when(
                // fetch Sakuli serviceoutput file
                $.get(sakuliUrl + "output.txt").always(function (data, state) {
                    if (state != "success") {
                        data = "Could not find Sakuli service outputfile at " + sakuliUrl + "output.txt !";
                    }
                    console.log(data);
                    svcoutput = $("<div>").text(data).html().replace(/['"]+/g, '');
                    console.log("Sakuli service output: " + svcoutput);
                }) &&
                // fetch Sakuli screenshot (jpg/png)
                $.get(sakuliUrl).always(function (imgdata, state) {
                    if (state != "success") {
                        imgdata = "Could not access screenshot list page at " + sakuliUrl + "!";
                    }
                    // the 3rd href on the apache index page contains the img name
                    imagename = $(imgdata).find('a')[2].text.trim();
                    console.log("Sakuli screenshot image name: " + imagename);
                })
            ).then(function () {
                box[0].innerHTML = '<a href="' + sakuliUrl + imagename + '" data-lightbox="sakuli" data-title="' + svcoutput + '"><img src="' + sakuliUrl + imagename + '" alt="Sakuli error image" width=250px /></a>';
            });
        }
    });

    $.ajax(
        {
            method: 'GET',
            url: configUrl,
            dataType: "jsonp",
        }
    ).done(
        function (result) {
            console.log(result);
            callback(result);
        }
    ).fail(
        function (result) {
            console.log(result);
            console.log(configUrl);
            if (result.status == 200) {
                callback(createErrorDashboard('# HTTP code: ' + result.status + '\n# Message: ' + result.statusText + '\n# Url: ' + configUrl + '\n# Probably the output is not valid json, because the returncode is 200!'));
            } else {
                callback(createErrorDashboard('# HTTP code: ' + result.status + '\n# Message: ' + result.statusText + '\n# Url: ' + configUrl));
            }
        }
    );
}

function createErrorDashboard(message)
{
    return {
        rows: [{
            title: 'Chart',
            height: '300px',
            panels: [{
                title: 'Error Message below',
                type: 'text',
                span: 12,
                fill: 1,
                content: message,
            }]
        }],
        services: {},
        title: 'JS Error / HTTP Error'
    };
}

function parseArgs()
{
    if (!_.isUndefined(ARGS.reduce)) {
        $('head').append('<style>.panel-fullscreen {top:0}</style>');

        // change ui to our needs
        clearUi();
    }

    if (!_.isUndefined(ARGS.dynUnit)) {
        dynUnit = true;
    } else {
        dynUnit = false;
    }

    if (!_.isUndefined(ARGS.host)) {
        host = ARGS.host;
    } else {
        host = "";
    }

    if (!_.isUndefined(ARGS.service)) {
        service = ARGS.service;
    } else {
        service = "";
    }

    if (!_.isUndefined(ARGS.command)) {
        command = ARGS.command;
    } else {
        command = "";
    }

    if (!_.isUndefined(ARGS.perf)) {
        perf = ARGS.perf;
    } else {
        perf = "";
    }

    if (!_.isUndefined(ARGS.height)) {
        height = ARGS.height;
    } else {
        height = "";
    }

    if (_.isUndefined(ARGS.debug)) {
        debug = '';
    } else {
        debug = "&debug";
    }

    if (!_.isUndefined(ARGS.legend)) {
        legend = ARGS.legend;
    } else {
        legend = true;
    }

    if (!_.isUndefined(ARGS.annotations)) {
        annotations = ARGS.annotations;
    } else {
        annotations = false;
    }

    if (_.isUndefined(ARGS.disablePanelTitle)) {
        disablePanelTitle = '';
    } else {
        disablePanelTitle = "&disablePanelTitle";
    }

    if (_.isUndefined(ARGS.disablePerfdataLookup)) {
        disablePerfdataLookup = '';
    } else {
        disablePerfdataLookup = "&disablePerfdataLookup";
    }

    if (_.isUndefined(ARGS.specificTemplate)) {
        specificTemplate = '';
    } else {
        specificTemplate = "&specificTemplate=" + ARGS.specificTemplate;
    }
}

function clearUi()
{
    // removes white space
    var checkExist = setInterval(
        function () {
            if ($('.panel-content').length) {
                clearInterval(checkExist);
                document.getElementsByClassName("panel-content")[0].style.paddingBottom = '0px';
            }
        },
        100
    );
    /*
    .panel-header removes the headline of the graphs
    .navbar-static-top removes the menubar on the top
    .row-control-inner removes the row control button on the left
    .span12 removes the add new row button on the bottom
    */
    var divs = ['.panel-header', '.navbar-static-top', '.row-control-inner', '.span12'];
    for (var index = 0; index < divs.length; index++) {
        waitForDivAndDeleteIt(divs[index]);
    }
    function waitForDivAndDeleteIt(div)
    {
        var checkExist = setInterval(
            function () {
                if ($(div).length) {
                    clearInterval(checkExist);
                    $(div).remove();
                }
            },
            100
        );
    }
}
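
Grafana loads this file through its scripted-dashboard endpoint, which is exactly what the action_url templates in histou.cfg.erb point at; a request looks like this (the host, service and domain are illustrative):

    https://monitor.example.se/grafana/dashboard/script/histou.js?host=drive-test1&service=PING&theme=light&annotations=true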
8
templates/monitor/hostgroups_nagios4.cfg.erb
Normal file
@ -0,0 +1,8 @@
# Some generic hostgroup definitions

# A simple wildcard hostgroup
define hostgroup {
    hostgroup_name    all
    alias             All Servers
    members           *
}
122
templates/monitor/localhost.cfg.erb
Normal file
@ -0,0 +1,122 @@
###############################################################################
# LOCALHOST.CFG - SAMPLE OBJECT CONFIG FILE FOR MONITORING THIS MACHINE
#
#
# NOTE: This config file is intended to serve as an *extremely* simple
#       example of how you can create configuration entries to monitor
#       the local (Linux) machine.
#
###############################################################################

###############################################################################
###############################################################################
#
# HOST DEFINITION
#
###############################################################################
###############################################################################

# Define a host for the local machine
# This host definition will inherit all variables that are defined
# in (or inherited by) the linux-server host template definition.
#define host {
#    host_name    localhost
#    alias        localhost
#    address      127.0.0.1
#    use          linux-server    ; Name of host template to use
#}
#
################################################################################
################################################################################
##
## HOST GROUP DEFINITION
##
################################################################################
################################################################################

## Define an optional hostgroup for Linux machines
define hostgroup {
    hostgroup_name    linux-servers    ; The name of the hostgroup
    alias             Linux Servers    ; Long name of the group
#   members           localhost        ; Comma separated list of hosts that belong to this group
}
#
################################################################################
################################################################################
##
## SERVICE DEFINITIONS
##
################################################################################
################################################################################

## Define a service to "ping" the local machine
#define service {
#    service_description    PING
#    host_name              localhost
#    use                    local-service    ; Name of service template to use
#    check_command          check_ping!100.0,20%!500.0,60%
#}
#
## Define a service to check the disk space of the root partition
## on the local machine. Warning if < 20% free, critical if
## < 10% free space on partition.
#define service {
#    service_description    Root Partition
#    host_name              localhost
#    use                    local-service    ; Name of service template to use
#    check_command          check_local_disk!20%!10%!/
#}
#
## Define a service to check the number of currently logged in
## users on the local machine. Warning if > 20 users, critical
## if > 50 users.
#define service {
#    service_description    Current Users
#    host_name              localhost
#    use                    local-service    ; Name of service template to use
#    check_command          check_local_users!20!50
#}
#
## Define a service to check the number of currently running procs
## on the local machine. Warning if > 250 processes, critical if
## > 400 processes.
#define service {
#    service_description    Total Processes
#    host_name              localhost
#    use                    local-service    ; Name of service template to use
#    check_command          check_local_procs!250!400!RSZDT
#}
#
## Define a service to check the load on the local machine.
#define service {
#    service_description    Current Load
#    host_name              localhost
#    use                    local-service    ; Name of service template to use
#    check_command          check_local_load!5.0,4.0,3.0!10.0,6.0,4.0
#}
#
## Define a service to check the swap usage of the local machine.
## Critical if less than 10% of swap is free, warning if less than 20% is free
#define service {
#    service_description    Swap Usage
#    host_name              localhost
#    use                    local-service    ; Name of service template to use
#    check_command          check_local_swap!20!10
#}
#
## Define a service to check SSH on the local machine.
## Disable notifications for this service by default, as not all users may have SSH enabled.
#define service {
#    service_description    SSH
#    host_name              localhost
#    use                    local-service    ; Name of service template to use
#    check_command          check_ssh
#    notifications_enabled  0
#}
#
## Define a service to check HTTP on the local machine.
## Disable notifications for this service by default, as not all users may have HTTP enabled.
#define service {
#    service_description    HTTP
#    host_name              localhost
#    use                    local-service    ; Name of service template to use
#    check_command          check_http!-u /naemon/
#    notifications_enabled  0
#}
#
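
All of the service samples above are commented out; enabling one is a matter of removing the leading # signs. A sketch of a live root-partition check (the host_name must match a host object that is actually registered, which the commented-out localhost above is not):

    define service {
        service_description    Root Partition
        host_name              localhost
        use                    local-service
        check_command          check_local_disk!20%!10%!/
    }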
19
templates/monitor/monitor-host_nagios4.cfg.erb
Normal file
@ -0,0 +1,19 @@
# Generic host definition template - This is NOT a real host, just a template!

define host {
    name                            monitor-host    ; The name of this host template
    action_url                      /grafana/dashboard/script/histou.js?host=$HOSTNAME$&theme=light&annotations=true
    notifications_enabled           1    ; Host notifications are enabled
    event_handler_enabled           1    ; Host event handler is enabled
    flap_detection_enabled          1    ; Flap detection is enabled
    process_perf_data               1    ; Process performance data
    retain_status_information       1    ; Retain status information across program restarts
    retain_nonstatus_information    1    ; Retain non-status information across program restarts
    check_command                   check-host-alive
    max_check_attempts              10
    notification_interval           0
    notification_period             24x7
    notification_options            d,u,r
    contact_groups                  admins
    register                        0    ; DONT REGISTER THIS DEFINITION - ITS NOT A REAL HOST, JUST A TEMPLATE!
}
50
templates/monitor/monitor-ssl.conf.erb
Normal file
@ -0,0 +1,50 @@
<IfModule mod_ssl.c>
<VirtualHost *:443>
    ServerName <%= @certname %>
    SSLEngine on
    SSLCertificateFile /etc/dehydrated/certs/<%= @certname %>/fullchain.pem
    SSLCertificateKeyFile /etc/dehydrated/certs/<%= @certname %>/privkey.pem

    Header always set Strict-Transport-Security "max-age=63072000"

    SSLProtocol all -SSLv3 -TLSv1 -TLSv1.1
    SSLCipherSuite ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384
    SSLHonorCipherOrder off
    SSLSessionTickets off

    SSLUseStapling On
    SSLCompression off

    SSLOptions +StrictRequire

    # Add vhost name to log entries:
    LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\"" vhost_combined
    LogFormat "%v %h %l %u %t \"%r\" %>s %b" vhost_common
    BrowserMatch "MSIE [2-6]" \
        nokeepalive ssl-unclean-shutdown \
        downgrade-1.0 force-response-1.0
    BrowserMatch "MSIE [17-9]" ssl-unclean-shutdown
    RedirectMatch ^/$ /thruk/

    <Location /grafana>
        AuthName "Thruk Monitoring"
        AuthType Basic
        AuthUserFile /etc/thruk/htpasswd
        Require valid-user
        ProxyPass http://127.0.0.1:3000 retry=0 disablereuse=On
        ProxyPassReverse http://127.0.0.1:3000/grafana
        RewriteEngine On
        RewriteRule .* - [E=PROXY_USER:%{LA-U:REMOTE_USER},NS]
        SetEnvIf Request_Protocol ^HTTPS.* IS_HTTPS=1
        SetEnvIf Authorization "^.+$" IS_BASIC_AUTH=1
        # without thruk cookie auth, use the proxy user from the rewrite rule above
        RequestHeader set X-WEBAUTH-USER "%{PROXY_USER}s" env=IS_HTTPS
        RequestHeader set X-WEBAUTH-USER "%{PROXY_USER}e" env=!IS_HTTPS
        # when thruk cookie auth is used, fall back to the remote user directly
        RequestHeader set X-WEBAUTH-USER "%{REMOTE_USER}e" env=!IS_BASIC_AUTH
        RequestHeader unset Authorization
    </Location>

</VirtualHost>
SSLStaplingCache "shmcb:logs/ssl_stapling(32768)"
</IfModule>
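
Since SSLProtocol disables everything below TLS 1.2, a handshake with an older protocol should be refused after a reload. A quick check from a shell, assuming the client's openssl build still supports the legacy flags (the host name is illustrative):

    openssl s_client -connect monitor.example.se:443 -tls1_1 </dev/null   # expect a handshake failure
    openssl s_client -connect monitor.example.se:443 -tls1_2 </dev/null   # expect the certificate chain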
14
templates/monitor/nagflux.service.erb
Normal file
@ -0,0 +1,14 @@
[Unit]
Description=A connector which transforms performance data from Nagios/Icinga(2)/Naemon to InfluxDB/Elasticsearch
Documentation=https://github.com/Griesbacher/nagflux
After=network-online.target

[Service]
User=root
Group=root
ExecStart=/usr/local/bin/nagflux -configPath /etc/nagflux/config.gcfg
Restart=on-failure

[Install]
WantedBy=multi-user.target
Alias=nagflux.service
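
Once Puppet has dropped this unit into place, the usual activation steps apply (standard systemd commands, not part of the template):

    systemctl daemon-reload
    systemctl enable --now nagflux.service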
14
templates/monitor/nagflux.yaml.erb
Normal file
@ -0,0 +1,14 @@
apiVersion: 1

deleteDatasources:
  - name: nagflux

datasources:
  - name: nagflux
    type: influxdb
    url: http://localhost:8086
    access: proxy
    database: nagflux
    isDefault: true
    version: 1
    editable: true
36
templates/monitor/nagflux_config.gcfg.erb
Normal file
36
templates/monitor/nagflux_config.gcfg.erb
Normal file
|
@ -0,0 +1,36 @@
|
||||||
|
[main]
|
||||||
|
NagiosSpoolfileFolder = "/var/nagflux/perfdata"
|
||||||
|
NagiosSpoolfileWorker = 1
|
||||||
|
InfluxWorker = 2
|
||||||
|
MaxInfluxWorker = 5
|
||||||
|
DumpFile = "/var/log/nagflux/nagflux.dump"
|
||||||
|
NagfluxSpoolfileFolder = "/var/nagflux/spool"
|
||||||
|
FieldSeparator = "&"
|
||||||
|
BufferSize = 1000
|
||||||
|
FileBufferSize = 65536
|
||||||
|
DefaultTarget = "Influxdb"
|
||||||
|
|
||||||
|
[Log]
|
||||||
|
LogFile = "/var/log/nagflux/nagflux.log"
|
||||||
|
MinSeverity = "INFO"
|
||||||
|
|
||||||
|
[InfluxDBGlobal]
|
||||||
|
CreateDatabaseIfNotExists = true
|
||||||
|
NastyString = ""
|
||||||
|
NastyStringToReplace = ""
|
||||||
|
HostcheckAlias = "hostcheck"
|
||||||
|
|
||||||
|
[InfluxDB "nagflux"]
|
||||||
|
Enabled = true
|
||||||
|
Version = 1.0
|
||||||
|
Address = "http://localhost:8086"
|
||||||
|
Arguments = "precision=ms&db=nagflux&u=admin&p=<%= @influx_passwd%>"
|
||||||
|
StopPullingDataIfDown = true
|
||||||
|
|
||||||
|
[Livestatus]
|
||||||
|
#tcp or file
|
||||||
|
Type = "file"
|
||||||
|
#tcp: 127.0.0.1:6557 or file /var/run/live
|
||||||
|
Address = "/var/cache/naemon/live"
|
||||||
|
MinutesToWait = 3
|
||||||
|
Version = ""
Some files were not shown because too many files have changed in this diff.