Compare commits
No commits in common. "1e647d34bf6b4d4bf354179b611d88f7e3a53bb0" and "028ba3d60872c4147953082f8da08a0a8c3762be" have entirely different histories.
1e647d34bf
...
028ba3d608
1
.gitignore
vendored
Normal file
1
.gitignore
vendored
Normal file
|
@ -0,0 +1 @@
|
||||||
|
*.pyc
|
16
Makefile
Normal file
16
Makefile
Normal file
|
@ -0,0 +1,16 @@
|
||||||
|
# Convenience targets for day-to-day cosmos operations.
# NOTE(review): recipe lines below are indented with hard tabs, as make
# requires -- the scraped source had lost all indentation.
DIST := "ubuntu:latest"

cosmos:
	fab all cosmos

upgrade:
	fab upgrade

tag:
	./bump-tag

test_in_docker:
	docker run --rm -it \
		-v ${CURDIR}:/multiverse:ro \
		\
		$(DIST) /multiverse/scripts/test-in-docker.sh
|
3
README
Normal file
3
README
Normal file
|
@ -0,0 +1,3 @@
|
||||||
|
|
||||||
|
The system documentation is in the docs directory of the multiverse repository.
|
||||||
|
|
69
addhost
Executable file
69
addhost
Executable file
|
@ -0,0 +1,69 @@
|
||||||
|
#!/bin/bash
#
# addhost - create the per-host overlay for a new host in this cosmos
# repository and, optionally (-b), bootstrap cosmos on the host via ssh.

cmd_hostname=""
cmd_do_bootstrap="no"
cmd_fqdn=""
cmd_proxy=""

function usage() {
  echo "Usage: $0 [-h] [-b] [-n fqdn] [--] [<host>]"
  echo "  -h  show help"
  echo "  -b  bootstrap <host> (using ssh)"
  echo "  -n  specify FQDN (if not the same as <host>)"
  echo ""
  echo "  <host> can be an IP number, or something that resolves to one"
}

# BUGFIX: both 'n' and 'p' take an argument, so both need a ':' in the
# getopts spec (was "bhnp:", which left OPTARG stale/unset for -n).  The
# stray 'shift' calls inside the case arms were removed as well: getopts
# tracks consumed arguments through OPTIND, and the single
# 'shift $((OPTIND-1))' after the loop is all that is needed.
while getopts "bhn:p:" this; do
  case "${this}" in
    h) usage; exit 0 ;;
    b) cmd_do_bootstrap="yes" ;;
    n) cmd_fqdn="${OPTARG}" ;;
    p) cmd_proxy="${OPTARG}" ;;
    *) echo "Unknown option ${this}"; echo ""; usage; exit 1 ;;
  esac
done
shift $((OPTIND-1))

if [[ ! $cmd_hostname ]]; then
  cmd_hostname="$1"
fi

# The FQDN defaults to the host argument unless overridden with -n.
if [[ ! $cmd_fqdn ]]; then
  cmd_fqdn="$cmd_hostname"
fi

if test -z "$cmd_hostname"; then
  usage
  exit 1
fi

# Build the ProxyJump option as an array so it expands to zero words when
# no proxy was requested (avoids relying on unquoted word splitting).
proxyjump=()
if [[ -n $cmd_proxy ]]; then
  proxyjump=(-o "ProxyJump=${cmd_proxy}")
fi

test -f cosmos.conf && . ./cosmos.conf

# Default the repository URL from the read-only git remote unless $repo
# is set in cosmos.conf.  ${var:='default'} assigns as a side effect.
_remote=${remote:='ro'}
defrepo=$(git remote get-url "${_remote}" 2>/dev/null)
rrepo=${repo:="$defrepo"}
rtag=${tag:="changeme"}

if [[ ! $rrepo ]]; then
  echo "$0: repo not set in cosmos.conf and no git remote named '${_remote}' found"
  exit 1
fi

# Create the per-host overlay directory from the 'default' template,
# commit it, and sign a new tag so hosts will pick up the change.
if [ ! -d "$cmd_fqdn" ]; then
  cp -pr default "$cmd_fqdn"
  git add "$cmd_fqdn"
  git commit -m "$cmd_fqdn added" "$cmd_fqdn"
  ./bump-tag
fi

if [ "$cmd_do_bootstrap" = "yes" ]; then
  # Ship the newest cosmos package plus the bootstrap script to the host
  # and run the initial update/apply cycle as root.
  cosmos_deb=$(find apt/ -maxdepth 1 -name 'cosmos_*.deb' | sort -V | tail -1)
  scp "${proxyjump[@]}" "$cosmos_deb" apt/bootstrap-cosmos.sh root@"$cmd_hostname":
  ssh root@"$cmd_hostname" "${proxyjump[@]}" ./bootstrap-cosmos.sh "$cmd_fqdn" "$rrepo" "$rtag"
  ssh root@"$cmd_hostname" "${proxyjump[@]}" cosmos update
  ssh root@"$cmd_hostname" "${proxyjump[@]}" cosmos apply
fi
|
275
bump-tag
Executable file
275
bump-tag
Executable file
|
@ -0,0 +1,275 @@
|
||||||
|
#!/bin/bash
#
# bump-tag - verify the repository's signature chain and sign a new
# release tag.  This preamble syncs with the origin and derives the tag
# prefix to use.

echo "Fetching updates from $(git remote get-url origin) ..."
echo ""
if ! git pull --verify-signatures; then
  echo "WARNING: git pull did not exit successfully."
  echo ""
  echo "EXITING the script. In order to tag your changes,"
  echo "investigate and then run bump-tag again."
  exit 1
fi

if [[ -f ./cosmos.conf ]]; then
  # shellcheck disable=SC1091
  source ./cosmos.conf
fi

# A tab will be used in multiple commands for git
t=$'\t'

# Default tag name: the repository directory name, unless an explicit
# name was passed as the first argument.
deftag="${1:-$(basename "${PWD}")}"

# Tag prefix: $tag from cosmos.conf when set and non-empty, else $deftag.
# shellcheck disable=SC2154
tagpfx="${tag:-${deftag}}"

# This is the current branch that Git will diff against.
this_branch=$(git rev-parse --abbrev-ref HEAD)
|
||||||
|
|
||||||
|
# Check why the tag couldn't be verified
|
||||||
|
# First argument: the tag to investigate
|
||||||
|
# Explain to the operator why a tag failed verification, then abort.
# First argument: the tag to investigate.
# Always exits with status 1.
check_tag_sig_failure()
{
  local failed_tag="${1}"
  local raw_status

  # --raw exposes gpg status lines (VALIDSIG, EXPKEYSIG, ...).
  raw_status="$(git verify-tag --raw "${failed_tag}" 2>&1)"

  if ! grep -q "VALIDSIG" <<< "${raw_status}"; then
    echo ""
    echo "WARNING: The signature of the tag could not be verified."
    echo "Please make sure that you have imported the key and that"
    echo "the key is signed by a trusted party."
    echo "Keys used for signing in a Cosmos repo can be found at:"
    echo "global/overlay/etc/cosmos/keys/"
  elif grep -q "EXPKEYSIG" <<< "${raw_status}"; then
    echo ""
    echo "WARNING: The tag was correctly signed, but the copy of"
    echo "the key that you have stored on your computer has expired."
    echo "Check for an updated key in:"
    echo "global/overlay/etc/cosmos/keys/"
  else
    echo ""
    echo "WARNING: The tag was probably correctly signed,"
    echo "but it still didn't pass the verification check."
  fi

  echo ""
  echo "EXITING the script. In order to tag your changes,"
  echo "investigate and then run bump-tag again."
  exit 1
}
|
||||||
|
|
||||||
|
# Explain to the operator why a commit failed verification, then abort.
# First argument: the commit to investigate.
# Second argument: the file whose history led to this commit.
# Always exits with status 1.
check_commit_sig_failure()
{
  local failed_commit="${1}"
  local commit_file="${2}"
  local raw_status

  # --raw exposes gpg status lines (VALIDSIG, EXPKEYSIG, ...).
  raw_status="$(git verify-commit --raw "${failed_commit}" 2>&1)"

  echo "WARNING: The commit to ${commit_file}"
  if ! grep -q "VALIDSIG" <<< "${raw_status}"; then
    echo "could not be verified. Please make sure that you have"
    echo "imported the key and that the key is signed by a trusted party."
  elif grep -q "EXPKEYSIG" <<< "${raw_status}"; then
    echo "was correctly signed, but the copy of the key that"
    echo "you have stored on your computer has expired."
    echo "Check for an updated key in:"
    echo "global/overlay/etc/cosmos/keys/"
  else
    echo "was probably correctly signed, but it still didn't"
    echo "pass the verification check."
  fi

  echo ""
  echo "EXITING the script. In order to tag your changes,"
  echo "investigate and then run bump-tag again."
  exit 1
}
|
||||||
|
|
||||||
|
# Verify the last commit of a file
|
||||||
|
# First argument: the file to verify
|
||||||
|
# Verify the signature on the last commit that touched a file.
# First argument: the file to verify.
# Returns 1 (without checking signatures) if the file is missing or has
# local modifications; on a bad signature it prints gpg's diagnostics
# and exits via check_commit_sig_failure.
verify_last_commit()
{
  local __file_to_verify="${1}"

  if [[ ! -f "${__file_to_verify}" ]]; then
    return 1
  fi

  # Skip the check when the work tree already differs from HEAD for this
  # file -- the last commit no longer reflects its contents.
  if [[ -n "$(git status --porcelain "${__file_to_verify}")" ]]; then
    echo ""
    echo "INFO: local changes detected in ${__file_to_verify},"
    echo "Not checking the signature of the last commit to ${__file_to_verify}."
    echo ""
    return 1
  fi

  # shellcheck disable=SC2155
  local __last_commit="$(git log -n 1 --pretty=format:%H -- "${__file_to_verify}")"

  if ! git verify-commit "${__last_commit}" 2> /dev/null; then
    echo ""
    echo "WARNING: Untrusted modification to ${__file_to_verify}:"
    echo "----------------------------"
    # Re-run verification without discarding stderr so the operator sees
    # gpg's own diagnostics.  BUGFIX: reuse the hash computed above
    # instead of running 'git log' a second time to recompute it.
    git verify-commit "${__last_commit}"
    echo "----------------------------"

    check_commit_sig_failure "${__last_commit}" "${__file_to_verify}"
  fi
}
|
||||||
|
|
||||||
|
# Collect all existing tags with our prefix; if there are none, fall back
# to verifying every commit on the branch (unless explicitly disabled).
tag_list="$(git tag -l "${tagpfx}-*")"
# shellcheck disable=SC2181
if [[ ${?} -ne 0 ]] || [[ -z "${tag_list}" ]]; then

  if [[ -z ${ALLOW_UNSIGNED_COMMITS_WITHOUT_TAGS} ]]; then
    echo "No tags found, verifying all commits instead."
    echo "Please set environment variable ALLOW_UNSIGNED_COMMITS_WITHOUT_TAGS if you want to disable this check."
    # %H = commit hash
    # %G? = show "G" for a good (valid) signature
    git_log="$(git log --pretty="format:%H${t}%G?" \
               --first-parent \
               | grep -v "${t}G$")"
  fi

else

  # NOTE(review): plain lexicographic 'sort' orders the dated tags
  # (<pfx>-YYYY-MM-DD-vNN) correctly while the counter stays two digits.
  last_tag="$(echo "${tag_list}" | sort | tail -1)"
  echo "Verifying last tag: ${last_tag} and the commits after that"

  if ! git verify-tag "${last_tag}"; then
    check_tag_sig_failure "${last_tag}"
  fi

  tag_object="$(git verify-tag -v "${last_tag}" 2>&1 | grep ^object | cut -d' ' -f2)"

  # The commits after the last valid signed git tag that we need to check
  revision_range="${tag_object}..HEAD"

  # Filter out the commits that are unsigned or untrusted
  # %H = commit hash
  # %G? = show "G" for a good (valid) signature
  git_log="$(git log --pretty="format:%H${t}%G?" "${revision_range}" \
             --first-parent \
             | grep -v "${t}G$")"

fi

if [[ -n "${git_log}" ]]; then
  echo ""
  echo -e "------WARNING: unsigned or untrusted commits after the last tag------"
  echo "${git_log}"
  echo -e "---------------------------------------------------------------------"
  # BUGFIX: typos corrected in the operator-facing help text
  # (referens -> reference, prefered -> preferred, malicous -> malicious).
  echo "Quick reference on how to configure signing of commits in ~/.gitconfig:"
  echo "[user]"
  echo "    signingkey = your-preferred-key-id"
  echo "[commit]"
  echo "    gpgsign = true"
  echo ""
  echo "EXITING the script. In order to tag your changes,"
  echo "please make sure that you have configured signing of"
  echo "your own commits and that the listed unsigned commits"
  echo "have been made by a trusted party and are not malicious."
  exit 1
fi

# Always check that the last commit of certain
# sensitive files is trusted, without taking into
# account whether the last tag was trusted or not.
verify_last_commit "./scripts/jsonyaml-no-output.py"
verify_last_commit "./bump-tag"

# Test the syntax of each YAML-file to be tagged.
# BUGFIX: read the file list line by line instead of word-splitting
# $(git diff ...), which broke on paths containing whitespace.
# NOTE(review): ${last_tag} is unset when no tags existed above, making
# the range degenerate to "..branch"; behavior preserved -- confirm intent.
while IFS= read -r file; do
  if [[ -f "${file}" ]]; then
    ./scripts/jsonyaml-no-output.py yaml "${file}"
  fi
done < <(git diff --name-only "${last_tag}..${this_branch}" | grep -E "^.*\.(yaml|yml)$")

echo "Differences between tag ${last_tag} and what you are about to sign:"
# With PAGER=cat, git diff will simply dump the output to the screen.
# shellcheck disable=SC2037
PAGER="cat" git diff --color "${last_tag}..${this_branch}"

# Iterate over the $last_tag until $this_tag is set to a later version
iter=1
ok=
while [[ -z "${ok}" ]]; do
  this_tag="$(date +"${tagpfx}-%Y-%m-%d-v$(printf "%02d" "${iter}")")"
  iter="$(( iter + 1))"

  case "$( (echo "${this_tag}"; echo "${last_tag}") | sort | tail -1 )" in
    "${last_tag}")
      ;;
    "${this_tag}")
      ok=yes
      ;;
  esac
done

if [[ "${deftag}" != "${tagpfx}" ]]; then
  echo -e "Using new tag \e[94m${this_tag}\e[0m according to pattern in cosmos.conf"
else
  echo -e "Using new tag \e[94m${this_tag}\e[0m"
fi

echo -e "\e[1mONLY SIGN IF YOU APPROVE OF VERIFICATION AND DIFF ABOVE\e[0m"

# GITTAGEXTRA is for putting things like "-u 2117364A"
# Note that this variable cannot be quoted if left empty.
# shellcheck disable=SC2086
git tag ${GITTAGEXTRA} -m bump. -s "${this_tag}"

git push
git push --tags
|
1
cosmos-rules.yaml
Symbolic link
1
cosmos-rules.yaml
Symbolic link
|
@ -0,0 +1 @@
|
||||||
|
global/overlay/etc/puppet/cosmos-rules.yaml
|
2
cosmos.conf
Normal file
2
cosmos.conf
Normal file
|
@ -0,0 +1,2 @@
|
||||||
|
tag="cosmos-ops"
|
||||||
|
#repo=git://override-repo-URL
|
1
default/README
Symbolic link
1
default/README
Symbolic link
|
@ -0,0 +1 @@
|
||||||
|
../README
|
2
docs/.gitignore
vendored
Normal file
2
docs/.gitignore
vendored
Normal file
|
@ -0,0 +1,2 @@
|
||||||
|
*.html
|
||||||
|
*.pdf
|
10
docs/Makefile
Normal file
10
docs/Makefile
Normal file
|
@ -0,0 +1,10 @@
|
||||||
|
|
||||||
|
# Render every Markdown (.mkd) document in this directory to PDF.
# NOTE(review): recipe lines are indented with hard tabs, as make
# requires -- the scraped source had lost all indentation.
.PHONY: all
DOCS := $(wildcard *.mkd)
all: $(DOCS:.mkd=.pdf)

%.pdf: %.mkd
	pandoc -o $@ $<

clean:
	rm -f *.html *.pdf *~
|
486
docs/cosmos-puppet-ops.mkd
Normal file
486
docs/cosmos-puppet-ops.mkd
Normal file
|
@ -0,0 +1,486 @@
|
||||||
|
% System Operations using Cosmos & Puppet
|
||||||
|
% Leif Johansson / SUNET / 2017 / v0.0.5
|
||||||
|
|
||||||
|
|
||||||
|
Introduction
|
||||||
|
============
|
||||||
|
|
||||||
|
This document describes how to setup and run systems and service operations for a small to mid-sized
|
||||||
|
systems collection while maintaining scalability, security and auditability for changes.
|
||||||
|
The process described below is based on open source components and assumes a Linux-based hosting
|
||||||
|
infrastructure. These limitations could easily be removed though. This document describes the
|
||||||
|
multiverse template for combining cosmos and puppet.
|
||||||
|
|
||||||
|
|
||||||
|
Design Requirements
|
||||||
|
===================
|
||||||
|
|
||||||
|
The cosmos system has been used to operate security-critical infrastructure for a few years before
|
||||||
|
it was combined with puppet into the multiverse template.
|
||||||
|
|
||||||
|
Several of the design requirements below are fulfilled by cosmos alone, while some (eg consistency)
|
||||||
|
are easier to achieve using puppet than with cosmos alone.
|
||||||
|
|
||||||
|
Consistency
|
||||||
|
-----------
|
||||||
|
|
||||||
|
Changes should be applied atomically (locally on each host) across multiple system components on multiple
|
||||||
|
physical and logical hosts (aka system state). The change mechanism should permit verification of state
|
||||||
|
consistency and all modifications should be idempotent, i.e. the same operation
|
||||||
|
performed twice on the same system state should not in itself cause a problem.
|
||||||
|
|
||||||
|
Auditability
|
||||||
|
------------
|
||||||
|
|
||||||
|
It must be possible to review changes in advance of applying them to system state. It
|
||||||
|
must also be possible to trace changes that have already been applied to privileged
|
||||||
|
system operators.
|
||||||
|
|
||||||
|
Authenticity
|
||||||
|
------------
|
||||||
|
|
||||||
|
All changes must be authenticated by private keys in the personal possession of privileged
|
||||||
|
system operators before applied to system state as well as at any point in the future.
|
||||||
|
|
||||||
|
Simplicity
|
||||||
|
----------
|
||||||
|
|
||||||
|
The system must be simple and must not rely on external services to be online to maintain
|
||||||
|
state except when new state is being requested and applied. When new state is being requested
|
||||||
|
external dependencies must be kept to a minimum.
|
||||||
|
|
||||||
|
Architecture
|
||||||
|
============
|
||||||
|
|
||||||
|
The basic architecture of puppet is to use a VCS (git) to manage and distribute changes to a
|
||||||
|
staging area on each managed host. At the staging area the changes are authenticated (using
|
||||||
|
tag signatures) and if valid, distributed to the host using local rsync. Before and after
|
||||||
|
hooks (using run-parts) are used to provide programmatic hooks.
|
||||||
|
|
||||||
|
Administrative Scope
|
||||||
|
--------------------
|
||||||
|
|
||||||
|
The repository constitutes the administrative domain of a multiverse setup: each host is
|
||||||
|
connected to (i.e runs cosmos off of) a single GIT repository and derives trust from signed
|
||||||
|
tags on that repository. A host cannot belong to more than 1 administrative domain but each
|
||||||
|
administrative domains can host multiple DNS domains - all hosts in a single repository
|
||||||
|
doesn't need to be in the same zone.
|
||||||
|
|
||||||
|
The role of Puppet
|
||||||
|
------------------
|
||||||
|
|
||||||
|
In the multiverse template, the cosmos system is used to authenticate and distribute changes
|
||||||
|
and prepare the system state for running puppet. Puppet is used to apply idempotent changes
|
||||||
|
to the system state using "puppet apply".
|
||||||
|
|
||||||
|
~~~~~ {.ditaa .no-separation}
|
||||||
|
+------------+ +------+
|
||||||
|
| cosmo repo |---->| host |-----+
|
||||||
|
+------------+ +------+ |
|
||||||
|
^ |
|
||||||
|
| |
|
||||||
|
(change) (manifests)
|
||||||
|
| |
|
||||||
|
+--------+ |
|
||||||
|
| puppet |<---+
|
||||||
|
+--------+
|
||||||
|
~~~~~
|
||||||
|
|
||||||
|
Note that there is no puppet master in this setup so collective resources cannot be used
|
||||||
|
in multiverse. Instead 'fabric' is used to provide a simple way to loop over subsets of
|
||||||
|
the hosts in a managed domain.
|
||||||
|
|
||||||
|
Private data (eg system credentials, application passwords, or private keys) are encrypted
|
||||||
|
to a master host-specific PGP key before stored in the cosmos repo.
|
||||||
|
|
||||||
|
System state can be tied to classes used to classify systems into roles (eg "database server"
|
||||||
|
or "webserver"). System classes can be assigned by regular expressions on the fqdn (eg all
|
||||||
|
hosts named db-\* is assigned to the "database server" class) using a custom puppet ENC.
|
||||||
|
|
||||||
|
The system classes are also made available to 'fabric' in a custom fabfile. Fabric (or fab)
|
||||||
|
is a simple frontend to ssh that allows an operator to run commands on multiple remote
|
||||||
|
hosts at once.
|
||||||
|
|
||||||
|
Trust
|
||||||
|
-----
|
||||||
|
|
||||||
|
All data in the system is maintained in a cosmos GIT repository. A change is
|
||||||
|
requested by signing a tag in the repository with a system-wide well-known name-prefix.
|
||||||
|
The tag name typically includes the date and a counter to make it unique.
|
||||||
|
|
||||||
|
The signature on the tag is authenticated against a set of trusted keys maintained in the
|
||||||
|
repository itself - so that one trusted system operator must be present to authenticate addition or
|
||||||
|
removal of another trusted system operator. This authentication of tags is done in addition
|
||||||
|
to authenticating access to the GIT repository when the changes are pushed. Trust is typically
|
||||||
|
bootstrapped when a repository is first established. This model also serves to provide auditability
|
||||||
|
of all changes for as long as repository history is retained.
|
||||||
|
|
||||||
|
Access to hosts is done through ssh with ssh-key access. The ssh keys are typically maintained
|
||||||
|
using either puppet or cosmos natively.
|
||||||
|
|
||||||
|
Consistency
|
||||||
|
-----------
|
||||||
|
|
||||||
|
As a master-less architecture, multiverse relies on _eventual consistency_: changes will eventually
|
||||||
|
be applied to all hosts. In such a model it becomes very important that changes are idempotent, so
|
||||||
|
that applying a change multiple times (in an effort to get dependent changes through) won't cause
|
||||||
|
an issue. Using native cosmos, such changes are achieved using timestamp-files that control entry
|
||||||
|
into code-blocks:
|
||||||
|
|
||||||
|
```
|
||||||
|
stamp="${COSMOS_BASE}/stamps/foo-v04.stamp"
|
||||||
|
if ! test -f $stamp; then
|
||||||
|
# do something here
|
||||||
|
touch $stamp
|
||||||
|
fi
|
||||||
|
```
|
||||||
|
|
||||||
|
This pattern is mostly replaced in multiverse by using puppet manifests and modules that
|
||||||
|
are inherently idempotent but it can nevertheless be a useful addition to the toolchain.
|
||||||
|
|
||||||
|
Implementation
|
||||||
|
==============
|
||||||
|
|
||||||
|
Implementation is based on two major components: cosmos and puppet. The cosmos system was
|
||||||
|
created by Simon Josefsson and Fredrik Thulin as a simple and secure way to distribute files
|
||||||
|
and run pre- and post-processors (using run-parts). This allows for a simple, yet complete
|
||||||
|
mechanism for updating system state.
|
||||||
|
|
||||||
|
The second component is puppet which is run in masterless (aka puppet apply) mode on files
|
||||||
|
distributed and authenticated using cosmos. Puppet is a widely deployed way to describe
|
||||||
|
system state using a set of idempotent operations. In theory, anything that can de done
|
||||||
|
using puppet can be done using cosmos post-processors but puppet allows for greater
|
||||||
|
abstraction which greatly increases readability.
|
||||||
|
|
||||||
|
The combination of puppet and cosmos is maintained on github in the 'SUNET/multiverse'
|
||||||
|
project.
|
||||||
|
|
||||||
|
The Cosmos Puppet Module
|
||||||
|
========================
|
||||||
|
|
||||||
|
Although not necessary, a few nice-to-have utilities in the form of puppet modules have
|
||||||
|
been collected as the cosmos puppet module (for want of a better name). The source for
|
||||||
|
this module is at https://github.com/SUNET/puppet-cosmos and it is included (but commented
|
||||||
|
out) in the cosmos-modules.conf file (cf below) for easy inclusion.
|
||||||
|
|
||||||
|
|
||||||
|
Operations
|
||||||
|
==========
|
||||||
|
|
||||||
|
Setting up a new administrative domain
|
||||||
|
--------------------------------------
|
||||||
|
|
||||||
|
The simplest way is to clone the multiverse repository. First install 'git'. On ubuntu/debian
|
||||||
|
this is in the 'git-core' package:
|
||||||
|
|
||||||
|
```
|
||||||
|
# apt-get install git-core
|
||||||
|
```
|
||||||
|
|
||||||
|
Also install 'fabric' - a very useful tool for multiple-host-ssh that is integrated into
|
||||||
|
multiverse. Fabric provides the 'fab' command which will be introduced later on.
|
||||||
|
|
||||||
|
```
|
||||||
|
# apt-get install fabric
|
||||||
|
```
|
||||||
|
|
||||||
|
These two tools (git & fabric) are only needed on machines where system operators work.
|
||||||
|
|
||||||
|
Next clone git@github.com:SUNET/multiverse.git - this will form the basis of your cosmos+puppet
|
||||||
|
repository:
|
||||||
|
|
||||||
|
```
|
||||||
|
# git clone git@github.com:SUNET/multiverse.git myproj-cosmos
|
||||||
|
# cd myproj-cosmos
|
||||||
|
```
|
||||||
|
|
||||||
|
Next rename the upstream from github - you will want to keep this around to get new
|
||||||
|
features as the multiverse codebase evolves.
|
||||||
|
|
||||||
|
```
|
||||||
|
# git remote rename origin multiverse
|
||||||
|
```
|
||||||
|
|
||||||
|
Now add a new remote pointing to the git repo where you are going to be pushing
|
||||||
|
changes for your administrative domain. Also add a read-only version of this remote
|
||||||
|
as 'ro'. The read-only remote is used by multiverse scripts during host bootstrap.
|
||||||
|
|
||||||
|
```
|
||||||
|
# git remote add origin git+ssh://git@yourhost:myproj-cosmos.git
|
||||||
|
# git remote add ro https://yourhost/myproj-cosmos.git
|
||||||
|
```
|
||||||
|
|
||||||
|
Now edit .git/config and rename the 'main' branch to use the new 'origin' remote or
|
||||||
|
you'll try to push to the multiverse remote!
|
||||||
|
|
||||||
|
```
|
||||||
|
[branch "main"]
|
||||||
|
remote = origin
|
||||||
|
merge = refs/heads/main
|
||||||
|
```
|
||||||
|
|
||||||
|
Now create a branch for the 'multiverse' upstream so you can merge changes to multiverse:
|
||||||
|
|
||||||
|
```
|
||||||
|
# git checkout -b multiverse --track multiverse/main
|
||||||
|
```
|
||||||
|
|
||||||
|
Finally, you might need to push you main branch upstream to the new origin
|
||||||
|
```
|
||||||
|
# git checkout main
|
||||||
|
# git push -u origin main
|
||||||
|
```
|
||||||
|
|
||||||
|
Note that you can maintain your repo on just about any git hosting platform, including
|
||||||
|
github, gitorious or your own local setup as long as it supports read-only access to your
|
||||||
|
repository. It is important that the remotes called 'origin' and 'ro' refer to your
|
||||||
|
repository and not to anything else (like a private version of multiverse).
|
||||||
|
|
||||||
|
Now add at least one key to 'global/overlay/etc/cosmos/keys/' in a file with a .pub extension
|
||||||
|
(eg 'operator.pub') - the name of the file doesn't matter other than the extension.
|
||||||
|
|
||||||
|
```
|
||||||
|
# cp mykey.pub global/overlay/etc/cosmos/keys/
|
||||||
|
# git add global/overlay/etc/cosmos/keys/mykey.pub
|
||||||
|
# git commit -m "initial trust" global/overlay/etc/cosmos/keys/mykey.pub
|
||||||
|
```
|
||||||
|
|
||||||
|
At this point you should create and sign your first tag:
|
||||||
|
|
||||||
|
```
|
||||||
|
# ./bump-tag
|
||||||
|
```
|
||||||
|
|
||||||
|
If Git complains during the first run of bump-tag that "Your configuration specifies to
|
||||||
|
merge with the ref 'main' from the remote, but no such ref was fetched." then you
|
||||||
|
have to run 'git push' to initialize the connection with the remote repository.
|
||||||
|
|
||||||
|
Make sure that you are using the key whose public key you just added to the repository! You
|
||||||
|
can now start adding hosts.
|
||||||
|
|
||||||
|
Adding a host
|
||||||
|
-------------
|
||||||
|
|
||||||
|
Bootstrapping a host is done using the 'addhost' command:
|
||||||
|
|
||||||
|
```
|
||||||
|
# ./addhost -b $fqdn
|
||||||
|
```
|
||||||
|
|
||||||
|
The -b flag causes addhost to attempt to bootstrap cosmos on the remote host using
|
||||||
|
ssh as root. This requires that root key trust be established in advance. The addhost
|
||||||
|
command creates and commits the necessary changes to the repository to add a host named
|
||||||
|
$fqdn. Only fully qualified hostnames should ever be used in cosmos+puppet.
|
||||||
|
|
||||||
|
The bootstrap process will create a cron-job on $fqdn that runs
|
||||||
|
|
||||||
|
```
|
||||||
|
# cosmos update && cosmos apply
|
||||||
|
```
|
||||||
|
|
||||||
|
every 15 minutes. This should be a good starting point for your domain. Now you may
|
||||||
|
want to add some 'naming rules'.
|
||||||
|
|
||||||
|
To bootstrap a machine that is not yet configured in DNS, use the following options:
|
||||||
|
|
||||||
|
```
|
||||||
|
# ./addhost -b -n $fqdn-to-add-later-in-dns -- IP-address
|
||||||
|
```
|
||||||
|
|
||||||
|
Defining naming rules
|
||||||
|
---------------------
|
||||||
|
|
||||||
|
A naming rule is a mapping from a name to a set of puppet classes. These are defined in
|
||||||
|
the file 'global/overlay/etc/puppet/cosmos-rules.yaml' (linked to the top level directory
|
||||||
|
in multiverse). This is a YAML format file whose keys are regular expressions and whose
|
||||||
|
values are lists of puppet class definitions. Here is an example that assigns all hosts
|
||||||
|
with names on the form ns\<number\>.example.com to the 'nameserver' class.
|
||||||
|
|
||||||
|
```
|
||||||
|
'ns[0-9]?.example.com$':
|
||||||
|
nameserver:
|
||||||
|
```
|
||||||
|
|
||||||
|
Note that the value is a hash with an empty value ('nameserver:') and not just a string
|
||||||
|
value.
|
||||||
|
|
||||||
|
Since regular expressions can also match on whole strings so the following is also
|
||||||
|
valid:
|
||||||
|
|
||||||
|
```
|
||||||
|
smtp.example.com:
|
||||||
|
mailserver:
|
||||||
|
relay: smtp.upstream.example.com
|
||||||
|
```
|
||||||
|
|
||||||
|
In this example the mailserver puppet class is given the relay argument (cf puppet
|
||||||
|
documentation).
|
||||||
|
|
||||||
|
Fabric integration
|
||||||
|
------------------
|
||||||
|
|
||||||
|
Given the above example the following command would reload all nameservers:
|
||||||
|
|
||||||
|
```
|
||||||
|
# fab --roles=nameservers -- rndc reload
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
Creating a change-request
|
||||||
|
-------------------------
|
||||||
|
|
||||||
|
After performing whatever changes you want to the repository, commit the changes as usual
|
||||||
|
and then sign an appropriately formatted tag. This last operation is wrapped in the 'bump-tag' command:
|
||||||
|
|
||||||
|
```
|
||||||
|
# git commit -m "some changes" global/overlay/something or/other/files
|
||||||
|
# ./bump-tag
|
||||||
|
```
|
||||||
|
|
||||||
|
The bump-tag command will ask for confirmation before signing and will rely on the git and
|
||||||
|
gpg commands to create, sign and push the correct tag.
|
||||||
|
|
||||||
|
Puppet modules
|
||||||
|
--------------
|
||||||
|
|
||||||
|
Puppet modules can be maintained using a designated cosmos pre-task that reads the file
|
||||||
|
/etc/puppet/cosmos-modules.conf. This file is a simple text-format file
|
||||||
|
with either three (for puppetlabs modules) or four columns:
|
||||||
|
|
||||||
|
```
|
||||||
|
#
|
||||||
|
# name source (puppetlabs fq name or git url) upgrade (yes/no) tag_pattern
|
||||||
|
#
|
||||||
|
apt puppetlabs/apt no
|
||||||
|
concat puppetlabs/concat no
|
||||||
|
cosmos https://github.com/SUNET/puppet-cosmos.git yes sunet-2*
|
||||||
|
#golang elithrar/golang yes
|
||||||
|
python https://github.com/SUNET/puppet-python.git yes sunet-2*
|
||||||
|
stdlib puppetlabs/stdlib no
|
||||||
|
ufw https://github.com/SUNET/puppet-module-ufw.git yes sunet-2*
|
||||||
|
vcsrepo puppetlabs/vcsrepo no
|
||||||
|
xinetd puppetlabs/xinetd no
|
||||||
|
```
|
||||||
|
|
||||||
|
This is an example file - the first field is the name of the module, the second is
|
||||||
|
the source: either a puppetlabs path or a git URL. The third field is 'yes' if the
|
||||||
|
module should be automatically updated or 'no' if it should only be installed. The
|
||||||
|
fourth field is a tag pattern to use (same style as the cosmos tag pattern).
|
||||||
|
As usual lines beginning with '#' are silently ignored.
|
||||||
|
|
||||||
|
This file is processed in a cosmos pre-hook so the modules should be available for
|
||||||
|
use in the puppet post-hook. By default the file contains several lines that are
|
||||||
|
commented out so review this file as you start a new multiverse setup.
|
||||||
|
|
||||||
|
In order to add a new module, the best way is to commit a change to this file and
|
||||||
|
tag this change, allowing time for the module to get installed everywhere before
|
||||||
|
adding a change that relies on this module.
|
||||||
|
|
||||||
|
As there might be a need to use different sets of modules (or different tag patterns)
|
||||||
|
on different hosts in an ops-repo, the contents of this file can be controlled in
|
||||||
|
different ways:
|
||||||
|
|
||||||
|
1. If the file is present in the model, it is used as such.
|
||||||
|
2. If there is a script called /etc/puppet/setup_cosmos_modules, that script is executed.
|
||||||
|
If the file /etc/puppet/cosmos-modules.conf does not exist after this script runs,
|
||||||
|
proceed to step 3, otherwise use this dynamically generated list of modules.
|
||||||
|
3. Use a (very small) default set of modules from the pre-hook global/post-tasks.d/010cosmos-modules.
|
||||||
|
|
||||||
|
There is an example implementation of the script to help you get started with writing your own,
|
||||||
|
available in docs/setup_cosmos_modules.example.
|
||||||
|
|
||||||
|
HOWTO and Common Tasks
|
||||||
|
======================
|
||||||
|
|
||||||
|
Adding a new operator
|
||||||
|
---------------------
|
||||||
|
|
||||||
|
Add the ascii-armoured key in a file in `global/overlay/etc/cosmos/keys` with a `.pub` extension
|
||||||
|
|
||||||
|
```
|
||||||
|
# git add global/overlay/etc/cosmos/keys/thenewoperator.pub
|
||||||
|
# git commit -m "the new operator" \
|
||||||
|
global/overlay/etc/cosmos/keys/thenewoperator.pub
|
||||||
|
# ./bump-tag
|
||||||
|
```
|
||||||
|
|
||||||
|
Removing an operator
|
||||||
|
--------------------
|
||||||
|
|
||||||
|
Identify the public key file in `global/overlay/etc/cosmos/keys`
|
||||||
|
|
||||||
|
```
|
||||||
|
# git rm global/overlay/etc/cosmos/keys/X.pub
|
||||||
|
# git commit -m "remove operator X" \
|
||||||
|
global/overlay/etc/cosmos/keys/X.pub
|
||||||
|
# ./bump-tag
|
||||||
|
```
|
||||||
|
|
||||||
|
Merging new features from multiverse
|
||||||
|
------------------------------------
|
||||||
|
|
||||||
|
The multiverse template will continue to evolve and sometimes it may be desirable to fetch a new feature from the upstream multiverse repository. If you followed the setup guide and kept the 'multiverse' remote this how you go about synchronizing with that version:
|
||||||
|
|
||||||
|
```
|
||||||
|
# git checkout multiverse
|
||||||
|
# git pull
|
||||||
|
# git checkout main
|
||||||
|
# git merge multiverse
|
||||||
|
```
|
||||||
|
|
||||||
|
Now resolve any conflicts (hopefully few and far between) and you should end up with a _combination_ of the features in your domain and those in multiverse. Note that you can optionally add more remotes referencing other development branches of multiverse and merge changes from more than one upstream. The sky is the limit.
|
||||||
|
|
||||||
|
Changing administrative domain for a host
|
||||||
|
-----------------------------------------
|
||||||
|
|
||||||
|
Below `$old` and `$new` refers to local copies (git clone) of the old and new repository.
|
||||||
|
|
||||||
|
In the `$new` repository add the host and use fabric to change the repository of the host to the git URL of the new repository.
|
||||||
|
|
||||||
|
```
|
||||||
|
# ./addhost -b $hostname
|
||||||
|
# fab -H $hostname chrepo repository:git://other/repo.git
|
||||||
|
```
|
||||||
|
|
||||||
|
In the `$old` repository:
|
||||||
|
|
||||||
|
```
|
||||||
|
# rsync -avz $hostname/ $new/$hostname/
|
||||||
|
```
|
||||||
|
|
||||||
|
In the `$new` repository:
|
||||||
|
|
||||||
|
```
|
||||||
|
# git add $hostname/
|
||||||
|
# git commit -m "transfer from $old" $hostname
|
||||||
|
# ./bump-tag
|
||||||
|
```
|
||||||
|
|
||||||
|
In the `$old` repository:
|
||||||
|
|
||||||
|
```
|
||||||
|
# git rm -rf $hostname
|
||||||
|
# git commit -m "remove $hostname"
|
||||||
|
# ./bump-tag
|
||||||
|
```
|
||||||
|
|
||||||
|
Running a command on multiple hosts
|
||||||
|
-----------------------------------
|
||||||
|
|
||||||
|
On a single host:
|
||||||
|
|
||||||
|
```
|
||||||
|
# fab -H $hostname -- command -a one -b another -c
|
||||||
|
```
|
||||||
|
|
||||||
|
On multiple hosts based on category:
|
||||||
|
|
||||||
|
```
|
||||||
|
# fab --roles=webserver -- ls /tmp
|
||||||
|
```
|
||||||
|
|
||||||
|
On all hosts:
|
||||||
|
|
||||||
|
```
|
||||||
|
# fab -- reboot  # danger Will Robinson!
|
||||||
|
```
|
300
docs/setup_cosmos_modules.eduid.example
Executable file
300
docs/setup_cosmos_modules.eduid.example
Executable file
|
@ -0,0 +1,300 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
#
|
||||||
|
# This script is responsible for creating/updating /etc/puppet/cosmos-modules.conf.
|
||||||
|
#
|
||||||
|
# If this script exits without creating that file, a default list of modules will be
|
||||||
|
# selected (by post-tasks.d/010cosmos-modules, the script that invokes this script).
|
||||||
|
#
|
||||||
|
# NOTES ABOUT THE IMPLEMENTATION:
|
||||||
|
#
|
||||||
|
# - Avoid any third party modules. We want this script to be re-usable in all ops-repos.
|
||||||
|
# - To make merging easier, try to keep all local alterations in the local_* functions.
|
||||||
|
# - Format with black and isort. Line width 120.
|
||||||
|
# - You probably ONLY want to change things in the local_get_modules_hook() function.
|
||||||
|
#
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import csv
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import logging.handlers
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import socket
|
||||||
|
import sys
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Dict, NewType, Optional, cast
|
||||||
|
|
||||||
|
from pkg_resources import parse_version
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__) # will be overwritten by _setup_logging()
|
||||||
|
|
||||||
|
# Set up types for data that is passed around in functions in this script.
|
||||||
|
# Need to use Dict (not dict) here since these aren't stripped by strip-hints, and doesn't work on Ubuntu <= 20.04.
|
||||||
|
Arguments = NewType("Arguments", argparse.Namespace)
|
||||||
|
OSInfo = Dict[str, str]
|
||||||
|
HostInfo = Dict[str, Optional[str]]
|
||||||
|
Modules = Dict[str, Dict[str, str]]
|
||||||
|
|
||||||
|
|
||||||
|
def parse_args() -> Arguments:
    """Build and evaluate the command line parser.

    Recognized options:
      --debug      enable verbose (DEBUG level) logging
      --filename   path of the cosmos-modules.conf file to maintain
    """
    arg_parser = argparse.ArgumentParser(
        description="Setup cosmos-modules.conf",
        add_help=True,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    arg_parser.add_argument(
        "--debug",
        dest="debug",
        action="store_true",
        default=False,
        help="Enable debug operation",
    )
    arg_parser.add_argument(
        "--filename",
        dest="filename",
        type=str,
        default="/etc/puppet/cosmos-modules.conf",
        help="Filename to write to",
    )

    # cast() so callers get the narrower Arguments type instead of Namespace
    return cast(Arguments, arg_parser.parse_args())
|
||||||
|
|
||||||
|
|
||||||
|
def get_os_info() -> OSInfo:
    """Collect OS facts (distro, release etc.) from /etc/os-release.

    Keys are lowercased, and the result is passed through
    local_os_info_hook() so an OPS repo can adjust it before use.
    """
    facts: OSInfo = {}
    release_file = Path("/etc/os-release")
    if release_file.exists():
        for key, value in _parse_bash_vars("/etc/os-release").items():
            facts[key.lower()] = value
    hooked = local_os_info_hook(facts)
    logger.debug(f"OS info:\n{json.dumps(hooked, sort_keys=True, indent=4)}")
    return hooked
|
||||||
|
|
||||||
|
|
||||||
|
def get_host_info() -> HostInfo:
    """Collect facts about this host: hostname, FQDN and domain name.

    If the resolver calls fail with OSError, an empty dict is passed to
    the local hook instead.
    """
    try:
        _fqdn = socket.getfqdn()
        _hostname = socket.gethostname()
    except OSError:
        host_info: HostInfo = {}
    else:
        # Whatever follows "hostname." in the FQDN is the domain part
        # (empty string when the FQDN equals the bare hostname).
        host_info = {
            "domainname": _fqdn[len(_hostname) + 1 :],
            "fqdn": _fqdn,
            "hostname": _hostname,
        }
    res = local_host_info_hook(host_info)
    logger.debug(f"Host info: {json.dumps(res, sort_keys=True, indent=4)}")
    return res
|
||||||
|
|
||||||
|
|
||||||
|
def _parse_bash_vars(path: str) -> dict[str, str]:
|
||||||
|
"""
|
||||||
|
Parses a bash script and returns a dictionary representing the
|
||||||
|
variables declared in that script.
|
||||||
|
|
||||||
|
Source: https://dev.to/htv2012/how-to-parse-bash-variables-b4f
|
||||||
|
|
||||||
|
:param path: The path to the bash script
|
||||||
|
:return: Variables as a dictionary
|
||||||
|
"""
|
||||||
|
with open(path) as stream:
|
||||||
|
contents = stream.read().strip()
|
||||||
|
|
||||||
|
var_declarations = re.findall(r"^[a-zA-Z0-9_]+=.*$", contents, flags=re.MULTILINE)
|
||||||
|
reader = csv.reader(var_declarations, delimiter="=")
|
||||||
|
bash_vars = dict(reader)
|
||||||
|
return bash_vars
|
||||||
|
|
||||||
|
|
||||||
|
def get_modules(os_info: OSInfo, host_info: HostInfo) -> Modules:
    """Load the list of default modules.

    This is more or less an inventory of all the modules we have. If you don't want
    to use all modules in your OPS repo, you can filter them in the local hook.

    If you want to use a different tag for a module on a specific host/os, you can
    do that in the local hook as well.

    :param os_info: Facts from /etc/os-release, keys lowercased ('name', 'version_id', ...)
    :param host_info: Facts about this host ('hostname', 'fqdn', ...)
    :return: Mapping of module name -> {'repo', 'upgrade', 'tag'}
    """
    # Whitespace-separated inventory. Columns: module name, git repository,
    # auto-upgrade (yes/no), git tag glob used to select releases.
    default_modules = """
      # name                 repo                                                 upgrade  tag
      apparmor       https://github.com/SUNET/puppet-apparmor.git          yes  sunet-2*
      apt            https://github.com/SUNET/puppetlabs-apt.git           yes  sunet-2*
      augeas         https://github.com/SUNET/puppet-augeas.git            yes  sunet-2*
      bastion        https://github.com/SUNET/puppet-bastion.git           yes  sunet-2*
      concat         https://github.com/SUNET/puppetlabs-concat.git        yes  sunet-2*
      cosmos         https://github.com/SUNET/puppet-cosmos.git            yes  sunet-2*
      dhcp           https://github.com/SUNET/puppetlabs-dhcp.git          yes  sunet_dev-2*
      docker         https://github.com/SUNET/garethr-docker.git           yes  sunet-2*
      hiera-gpg      https://github.com/SUNET/hiera-gpg.git                yes  sunet-2*
      munin          https://github.com/SUNET/ssm-munin.git                yes  sunet-2*
      nagioscfg      https://github.com/SUNET/puppet-nagioscfg.git         yes  sunet-2*
      network        https://github.com/SUNET/attachmentgenie-network.git  yes  sunet-2*
      pound          https://github.com/SUNET/puppet-pound.git             yes  sunet-2*
      pyff           https://github.com/samlbits/puppet-pyff.git           yes  puppet-pyff-*
      python         https://github.com/SUNET/puppet-python.git            yes  sunet-2*
      stdlib         https://github.com/SUNET/puppetlabs-stdlib.git        yes  sunet-2*
      sunet          https://github.com/SUNET/puppet-sunet.git             yes  sunet-2*
      sysctl         https://github.com/SUNET/puppet-sysctl.git            yes  sunet-2*
      ufw            https://github.com/SUNET/puppet-module-ufw.git        yes  sunet-2*
      varnish        https://github.com/samlbits/puppet-varnish.git        yes  puppet-varnish-*
      vcsrepo        https://github.com/SUNET/puppetlabs-vcsrepo.git       yes  sunet-2*
      xinetd         https://github.com/SUNET/puppetlabs-xinetd.git        yes  sunet-2*
    """
    modules: Modules = {}
    for line in default_modules.splitlines():
        try:
            # Skip blank lines and comments; everything else must have exactly
            # four whitespace-separated fields.
            if not line.strip() or line.strip().startswith("#"):
                continue
            _name, _url, _upgrade, _tag = line.split()
            modules[_name] = {
                "repo": _url,
                "upgrade": _upgrade,
                "tag": _tag,
            }
        except ValueError:
            # A malformed inventory line is a programming error in this file;
            # log which line and abort rather than silently dropping it.
            logger.error(f"Failed to parse line: {repr(line)}")
            raise

    # Remove the UFW module on Ubuntu >= 22.04 (nftables is used there instead)
    # NOTE(review): pkg_resources.parse_version is deprecated in newer
    # setuptools; consider packaging.version.parse when available — TODO confirm.
    if os_info.get("name") == "Ubuntu":
        ver = os_info.get("version_id")
        if ver:
            if parse_version(ver) >= parse_version("22.04"):
                logger.debug("Removing UFW module for Ubuntu >= 22.04")
                del modules["ufw"]
            else:
                logger.debug("Keeping UFW module for Ubuntu < 22.04")
        else:
            logger.debug("Unknown Ubuntu module version, keeping UFW module")

    # Let the OPS repo filter / retag the inventory before it is used.
    return local_get_modules_hook(os_info, host_info, modules)
|
||||||
|
|
||||||
|
|
||||||
|
def local_os_info_hook(os_info: OSInfo) -> OSInfo:
    """Hook point where an OPS repo can amend the collected OS facts.

    The default implementation is a no-op passthrough.
    """
    # Start local changes in this repository
    # End local changes
    return os_info
|
||||||
|
|
||||||
|
|
||||||
|
def local_host_info_hook(host_info: HostInfo) -> HostInfo:
    """Local hook to modify host_info in an OPS repo.

    For eduID, hostnames follow the pattern function-site-number
    (e.g. 'idp-tug-1'); this hook teases those parts out and records
    them as extra facts. Number 1 means a staging host.
    """
    # Start local changes in this repository

    # Regular expression to tease apart an eduID hostname
    eduid_hostname_re = re.compile(
        r"""^
        (\w+)   # function ('idp', 'apps', ...)
        -
        (\w+)   # site ('tug', 'sthb', ...)
        -
        (\d+)   # 1 for staging, 3 for production
        """,
        re.VERBOSE,
    )

    _hostname = host_info.get("hostname")
    if _hostname:
        match = eduid_hostname_re.match(_hostname)
        if match:
            host_info["function"] = match.group(1)
            host_info["site"] = match.group(2)
            if match.group(3) == "1":
                host_info["environment"] = "staging"

    # End local changes
    return host_info
|
||||||
|
|
||||||
|
|
||||||
|
def local_get_modules_hook(os_info: OSInfo, host_info: HostInfo, modules: Modules) -> Modules:
    """Local hook to modify default set of modules in an OPS repo.

    eduID keeps only a subset of the inventory and pins puppet-sunet
    (and selected other modules) to eduID/staging specific tag globs.
    """
    # Start local changes in this repository

    wanted = {
        "apparmor",
        "apt",
        "augeas",
        "bastion",
        "concat",
        "docker",
        "munin",
        "stdlib",
        "sunet",
        "ufw",
    }
    # Only keep the modules eduID actually uses
    modules = {name: info for name, info in modules.items() if name in wanted}
    logger.debug(f"Adding modules: {json.dumps(modules, sort_keys=True, indent=4)}")

    staging = host_info.get("environment") == "staging"

    # Use eduID tag for puppet-sunet (dev tag on staging hosts)
    modules["sunet"]["tag"] = "eduid_dev-2*" if staging else "eduid-stable-2*"

    # use sunet_dev-2* for some modules in staging
    for tracked in ("munin",):
        if staging and tracked in modules:
            modules[tracked]["tag"] = "sunet_dev-2*"

    # End local changes
    return modules
|
||||||
|
|
||||||
|
|
||||||
|
def update_cosmos_modules(filename: str, modules: Modules) -> None:
    """Create/update the cosmos-modules.conf file.

    First, check if the file already has the right content. If so, do nothing
    (so the file's mtime indicates when the content actually last changed).

    :param filename: Path of the config file to maintain
    :param modules: Mapping of module name -> {'repo', 'upgrade', 'tag'}
    """
    content = "# This file is automatically generated by the setup_cosmos_modules script.\n# Do not edit it manually.\n"
    # Fixed-width columns so the generated file stays diff-friendly.
    for k, v in sorted(modules.items()):
        content += f"{k:15} {v['repo']:55} {v['upgrade']:5} {v['tag']}\n"
    _file = Path(filename)
    if _file.exists():
        # Check if the content is already correct, and avoid updating the file if so
        with _file.open("r") as f:
            current = f.read()
        if current == content:
            # Fix: these debug messages were f-strings with no placeholder and
            # logged a literal "(unknown)" instead of the managed file's path.
            logger.debug(f"{filename} is up to date")
            return

    # Create/update the file by writing the content to a temporary file and then
    # renaming it, so readers never see a partially written file.
    # NOTE(review): with_suffix() replaces the existing suffix, so
    # '.../cosmos-modules.conf' is staged as '.../cosmos-modules.tmp' — confirm intended.
    _tmp_file = _file.with_suffix(".tmp")
    with _tmp_file.open("w") as f:
        f.write(content)
    _tmp_file.rename(_file)
    logger.debug(f"Updated {filename}")
|
||||||
|
|
||||||
|
|
||||||
|
def _setup_logging(my_name: str, args: Arguments):
    """Configure logging for this script.

    Console output (stderr) is demoted to ERROR-only when stderr is not a
    TTY (e.g. when run from cron/cosmos), unless --debug was given.
    """
    base_level = logging.DEBUG if args.debug else logging.INFO
    logging.basicConfig(
        level=base_level,
        stream=sys.stderr,
        format="{asctime} | {levelname:7} | {message}",
        style="{",
    )
    global logger
    logger = logging.getLogger(my_name)

    # If stderr is not a TTY, raise the StreamHandler threshold so only
    # errors reach the console.
    if not args.debug and not sys.stderr.isatty():
        for handler in logging.getLogger("").handlers:
            handler.setLevel(logging.ERROR)
    if args.debug:
        logger.setLevel(logging.DEBUG)
|
||||||
|
|
||||||
|
|
||||||
|
def main(my_name: str, args: Arguments) -> bool:
    """Gather OS and host facts, compute the module list and write the config.

    :return: True on success (used by the caller to pick the exit status)
    """
    _setup_logging(my_name, args)

    os_facts = get_os_info()
    host_facts = get_host_info()
    update_cosmos_modules(args.filename, get_modules(os_facts, host_facts))

    return True
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    # Script entry point: exit status 0 on success, 1 on failure.
    program_name = os.path.basename(sys.argv[0])
    cli_args = parse_args()
    sys.exit(0 if main(program_name, args=cli_args) else 1)
|
216
docs/setup_cosmos_modules.example
Executable file
216
docs/setup_cosmos_modules.example
Executable file
|
@ -0,0 +1,216 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
""" Write out a puppet cosmos-modules.conf """
|
||||||
|
|
||||||
|
import hashlib
|
||||||
|
import os
|
||||||
|
import os.path
|
||||||
|
import sys
|
||||||
|
|
||||||
|
try:
|
||||||
|
from configobj import ConfigObj
|
||||||
|
|
||||||
|
OS_INFO = ConfigObj("/etc/os-release")
|
||||||
|
except (IOError, ModuleNotFoundError):
|
||||||
|
OS_INFO = None
|
||||||
|
|
||||||
|
|
||||||
|
def get_file_hash(modulesfile):
    """
    Return the sha256 hexdigest of a file's contents, or "" if the file
    does not exist.

    Based on https://github.com/python/cpython/pull/31930: should use
    hashlib.file_digest() but it is only available in python 3.11
    """
    digestobj = hashlib.sha256()
    chunk_size = 2**18  # read in large chunks to keep memory use bounded
    try:
        with open(modulesfile, "rb") as fileobj:
            for chunk in iter(lambda: fileobj.read(chunk_size), b""):
                digestobj.update(chunk)
    except FileNotFoundError:
        return ""

    return digestobj.hexdigest()
|
||||||
|
|
||||||
|
|
||||||
|
def get_list_hash(file_lines):
    """Return the sha256 hexdigest of the concatenation of a list of byte strings."""
    digest = hashlib.sha256()
    for chunk in file_lines:
        digest.update(chunk)
    return digest.hexdigest()
|
||||||
|
|
||||||
|
|
||||||
|
def create_file_content(modules):
    """
    Render the expected file contents as a list of byte strings, so the
    caller can hash the expected content before writing anything.

    :param modules: Mapping of module name -> {'repo', 'upgrade', 'tag'}
    :return: List of utf-8 encoded lines (header comment first)
    """
    script_name = os.path.basename(sys.argv[0])
    file_lines = ["# Generated by {}\n".format(script_name).encode("utf-8")]

    for name in modules:
        entry = modules[name]
        rendered = "{0:11} {1} {2} {3}\n".format(
            name,
            entry["repo"],
            entry["upgrade"],
            entry["tag"],
        )
        file_lines.append(rendered.encode("utf-8"))

    return file_lines
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """Starting point of the program.

    Builds the desired cosmos-modules.conf content in memory, compares its
    hash with the file on disk, and atomically replaces the file only when
    the content differs.
    """

    # Target file, and the sibling temp file used for the atomic replace.
    modulesfile: str = "/etc/puppet/cosmos-modules.conf"
    modulesfile_tmp: str = modulesfile + ".tmp"

    # Inventory of puppet modules to install: name -> repo / upgrade / tag.
    # 'upgrade': 'yes' means the module is updated automatically; 'tag' is
    # the git tag glob selecting which releases may be checked out.
    modules: dict = {
        "concat": {
            "repo": "https://github.com/SUNET/puppetlabs-concat.git",
            "upgrade": "yes",
            "tag": "sunet-2*",
        },
        "stdlib": {
            "repo": "https://github.com/SUNET/puppetlabs-stdlib.git",
            "upgrade": "yes",
            "tag": "sunet-2*",
        },
        "cosmos": {
            "repo": "https://github.com/SUNET/puppet-cosmos.git",
            "upgrade": "yes",
            "tag": "sunet-2*",
        },
        "ufw": {
            "repo": "https://github.com/SUNET/puppet-module-ufw.git",
            "upgrade": "yes",
            "tag": "sunet-2*",
        },
        "apt": {
            "repo": "https://github.com/SUNET/puppetlabs-apt.git",
            "upgrade": "yes",
            "tag": "sunet-2*",
        },
        "vcsrepo": {
            "repo": "https://github.com/SUNET/puppetlabs-vcsrepo.git",
            "upgrade": "yes",
            "tag": "sunet-2*",
        },
        "xinetd": {
            "repo": "https://github.com/SUNET/puppetlabs-xinetd.git",
            "upgrade": "yes",
            "tag": "sunet-2*",
        },
        "python": {
            "repo": "https://github.com/SUNET/puppet-python.git",
            "upgrade": "yes",
            "tag": "sunet-2*",
        },
        "hiera-gpg": {
            "repo": "https://github.com/SUNET/hiera-gpg.git",
            "upgrade": "yes",
            "tag": "sunet-2*",
        },
        "pound": {
            "repo": "https://github.com/SUNET/puppet-pound.git",
            "upgrade": "yes",
            "tag": "sunet-2*",
        },
        "augeas": {
            "repo": "https://github.com/SUNET/puppet-augeas.git",
            "upgrade": "yes",
            "tag": "sunet-2*",
        },
        "bastion": {
            "repo": "https://github.com/SUNET/puppet-bastion.git",
            "upgrade": "yes",
            "tag": "sunet-2*",
        },
        # pyff and varnish track samlbits release tags, not sunet-2*
        "pyff": {
            "repo": "https://github.com/samlbits/puppet-pyff.git",
            "upgrade": "yes",
            "tag": "puppet-pyff-*",
        },
        "dhcp": {
            "repo": "https://github.com/SUNET/puppetlabs-dhcp.git",
            "upgrade": "yes",
            "tag": "sunet_dev-2*",
        },
        "varnish": {
            "repo": "https://github.com/samlbits/puppet-varnish.git",
            "upgrade": "yes",
            "tag": "puppet-varnish-*",
        },
        "apparmor": {
            "repo": "https://github.com/SUNET/puppet-apparmor.git",
            "upgrade": "yes",
            "tag": "sunet-2*",
        },
        "docker": {
            "repo": "https://github.com/SUNET/garethr-docker.git",
            "upgrade": "yes",
            "tag": "sunet-2*",
        },
        "network": {
            "repo": "https://github.com/SUNET/attachmentgenie-network.git",
            "upgrade": "yes",
            "tag": "sunet-2*",
        },
        "sunet": {
            "repo": "https://github.com/SUNET/puppet-sunet.git",
            "upgrade": "yes",
            "tag": "sunet-2*",
        },
        "sysctl": {
            "repo": "https://github.com/SUNET/puppet-sysctl.git",
            "upgrade": "yes",
            "tag": "sunet-2*",
        },
        "nagioscfg": {
            "repo": "https://github.com/SUNET/puppet-nagioscfg.git",
            "upgrade": "yes",
            "tag": "sunet-2*",
        },
    }

    # When/if we want we can do stuff to modules here
    # NOTE(review): raises KeyError if /etc/os-release exists but lacks
    # VERSION_CODENAME — confirm that is acceptable for the target hosts.
    if OS_INFO:
        if OS_INFO["VERSION_CODENAME"] == "bullseye":
            pass

    # Build list of expected file content
    file_lines = create_file_content(modules)

    # Get hash of the list
    list_hash = get_list_hash(file_lines)

    # Get hash of the existing file on disk ("" if the file is missing)
    file_hash = get_file_hash(modulesfile)

    # Update the file if necessary
    if list_hash != file_hash:
        # Since we are reading the file with 'rb' when computing our hash use 'wb' when
        # writing so we dont end up creating a file that does not match the
        # expected hash
        with open(modulesfile_tmp, "wb") as fileobj:
            for line in file_lines:
                fileobj.write(line)

        # Rename it in place so the update is atomic for anything else trying to
        # read the file
        os.rename(modulesfile_tmp, modulesfile)
|
||||||
|
|
||||||
|
|
||||||
|
# Run only when executed as a script (not when imported).
if __name__ == "__main__":
    main()
|
307
edit-secrets
Executable file
307
edit-secrets
Executable file
|
@ -0,0 +1,307 @@
|
||||||
|
#!/bin/bash
#
# Script to edit secrets for a host.
#
# This script is used by an administrator on his/hers local machine. The
# general principle is for this script to ssh to the target host, decrypt
# the secrets and allow changes to be made, and then fetch the encrypted
# secrets from the host and add it to the Cosmos repository on the
# administrators machine.
#
# Funnily enough, this script will execute itself (with the argument
# '--on-host') on the target host in order to do the decryption etc. Don't
# allow this to confuse you and everything will be fine.
#

set -e
umask 077

# Where the on-host invocation leaves its result for the local side to fetch.
LAST_OUTPUT_FILENAME="/root/.last_edit-secrets_output"

# Prefer RAM-backed storage for decrypted secrets when available.
test -d /dev/shm && export TMPDIR='/dev/shm'

TMPFILE=$(mktemp edit-secrets.XXXXXXXXXX)
TMPFILE2=$(mktemp edit-secrets.XXXXXXXXXX)

# Fix: quote the expansions — an unquoted $TMPFILE breaks the tests if
# TMPDIR contains whitespace.
if [ ! -f "$TMPFILE" ]; then
    echo "$TMPFILE"
    echo "$0: Failed creating temporary file"
    exit 1
fi
if [ ! -f "$TMPFILE2" ]; then
    echo "$TMPFILE2"
    echo "$0: Failed creating temporary file 2"
    exit 1
fi

# Remove the temporary files on any exit path.
trap 'rm -f "$TMPFILE" "$TMPFILE2"' EXIT


if [[ ! $1 ]]; then
    # deliberately don't mention the --on-host argument
    echo "Syntax: $0 fqdn"
    exit 1
fi
|
||||||
|
|
||||||
|
# Work around known-broken hiera-eyaml packages on Ubuntu 22.04 and 24.04.
# Each patch is applied only when the installed file's sha256 matches the
# known-broken version, so re-runs and fixed packages are left untouched.
function patch_broken_eyaml {
    #
    # Ubuntu 22.04 (jammy) has a broken hiera-eyaml package, a bug report
    # exists here: https://bugs.launchpad.net/ubuntu/+source/hiera-eyaml/+bug/1974059
    #

    if [ "$(lsb_release -cs)" == "jammy" ]; then
        plugins_file="/usr/share/rubygems-integration/all/gems/hiera-eyaml-3.2.2/lib/hiera/backend/eyaml/plugins.rb"
        if [ -f $plugins_file ]; then
            # We only want to try patching the file if it is the known broken version
            bad_sum="1d0f14765ebcfcdae300d8ac5d715845ef9b283345d19114a23d96161556618f"
            sum=$(sha256sum $plugins_file | awk '{print $1}')
            if [ "$sum" == "$bad_sum" ]; then
                # Make plugins.rb cope with Gem stub specifications (the jammy bug).
                patch --fuzz=0 --directory=/ --strip=0 <<'EOF'
--- /usr/share/rubygems-integration/all/gems/hiera-eyaml-3.2.2/lib/hiera/backend/eyaml/plugins.rb.orig	2023-01-18 08:20:22.140338419 +0000
+++ /usr/share/rubygems-integration/all/gems/hiera-eyaml-3.2.2/lib/hiera/backend/eyaml/plugins.rb	2023-01-18 08:21:05.654053501 +0000
@@ -32,6 +32,7 @@
           specs = Gem::VERSION >= "1.6.0" ? source.latest_specs(true) : source.latest_specs
 
           specs.each do |spec|
+            spec = spec.to_spec if spec.respond_to?(:to_spec)
             next if @@plugins.include? spec
 
             dependency = spec.dependencies.find { |d| d.name == "hiera-eyaml" }
EOF
            fi
        fi
    fi

    #
    # Ubuntu 24.04 (noble) has a hiera-eyaml version that is incompatible with ruby 3.2+ (default in ubuntu24).
    # This is fixed in hiera-eyaml version 3.3.0: https://github.com/voxpupuli/hiera-eyaml/pull/340/files
    # https://github.com/voxpupuli/hiera-eyaml/blob/master/CHANGELOG.md
    # But there is no modern version of hiera-eyaml packaged in debian or ubuntu.
    # https://github.com/puppetlabs/puppet/wiki/Puppet-8-Compatibility#filedirexists-removed
    #

    . /etc/os-release
    if [ "${VERSION_CODENAME}" == "noble" ]; then
        plugins_file="/usr/share/rubygems-integration/all/gems/hiera-eyaml-3.3.0/lib/hiera/backend/eyaml/subcommands/edit.rb"
        if [ -f $plugins_file ]; then
            # We only want to try patching the file if it is the known broken version
            bad_sum="59c6eb910ab2eb44f8c75aeaa79bff097038feb673b5c6bdccde23d9b2a393e2"
            sum=$(sha256sum $plugins_file | awk '{print $1}')
            if [ "$sum" == "$bad_sum" ]; then
                # File.exists? was removed in ruby 3.2; switch to File.exist?
                patch --fuzz=0 --directory=/ --strip=0 <<'EOF'
--- /usr/share/rubygems-integration/all/gems/hiera-eyaml-3.3.0/lib/hiera/backend/eyaml/subcommands/edit.rb.orig	2022-06-11 16:30:10.000000000 +0000
+++ /usr/share/rubygems-integration/all/gems/hiera-eyaml-3.3.0/lib/hiera/backend/eyaml/subcommands/edit.rb	2024-09-09 14:13:19.306342025 +0000
@@ -59,7 +59,7 @@
         Optimist::die "You must specify an eyaml file" if ARGV.empty?
         options[:source] = :eyaml
         options[:eyaml] = ARGV.shift
-        if File.exists? options[:eyaml]
+        if File.exist? options[:eyaml]
           begin
             options[:input_data] = File.read options[:eyaml]
           rescue
EOF
            fi
        fi
    fi
}
|
||||||
|
|
||||||
|
# Drive the whole edit cycle from the administrator's machine:
# run this script remotely with --on-host, fetch the result file,
# extract the encrypted payload and stage it in the Cosmos repo.
function edit_copy_and_commit()
{
    #
    # This code runs on the administrators local machine
    #
    local host=$1

    # Use the admin's $EDITOR remotely if set, otherwise a minimal vim.
    if [[ ${EDITOR} ]]; then
        declare -r REMOTE_EDITOR="${EDITOR}"
    else
        declare -r REMOTE_EDITOR='/usr/bin/vim.tiny'
    fi

    # Execute this script, on a remote host
    ssh -t root@"${host}" EDITOR="${REMOTE_EDITOR}" /var/cache/cosmos/repo/edit-secrets --on-host
    # The remote side records its status and payload in LAST_OUTPUT_FILENAME.
    scp -q root@"${host}:${LAST_OUTPUT_FILENAME}" ${TMPFILE}

    local save_to
    if grep ^"STATUS=UPDATED" $TMPFILE > /dev/null; then
        # Legacy GPG-encrypted secrets file was updated on the host.
        save_to="${host}/overlay/etc/hiera/data/secrets.yaml.asc"

        # extract the GPG output (everything between BEGIN and END PGP MESSAGE)
        perl -e '$a = 0; while (<>) { $a = 1 if ($_ =~ /-+BEGIN PGP MESSAGE-+/);
                 print $_ if $a; $a = 0 if ($_ =~ /-+END PGP MESSAGE-+/); }' < $TMPFILE > $TMPFILE2

        if ! grep "END PGP MESSAGE" $TMPFILE2 > /dev/null; then
            echo "$0: Failed extracting PGP output from file $TMPFILE into $TMPFILE2"
            exit 1
        fi
    elif grep ^"STATUS=EYAML_UPDATED" $TMPFILE > /dev/null; then
        # eyaml secrets file was updated on the host.
        save_to="${host}/overlay/etc/hiera/data/local.eyaml"

        # extract the eyaml output (everything from the '---' document marker on)
        perl -e '$a = 0; while (<>) { $a = 1 if ($_ =~ /^---$/);
                 print $_ if $a }' < $TMPFILE > $TMPFILE2

        if ! grep "^---$" $TMPFILE2 > /dev/null; then
            echo "$0: Failed extracting yaml output from file $TMPFILE into $TMPFILE2"
            exit 1
        fi
    else
        # Remote side reported no change; nothing to commit.
        echo ""
        echo "Not updated"
        echo ""

        exit 0
    fi

    # use cat to preserve permissions etc.
    mkdir -p "`dirname ${save_to}`"
    cat $TMPFILE2 > "${save_to}"
    git add "${save_to}"

    # eyaml files are not armored blobs, so a staged diff is meaningful to show.
    if grep ^"STATUS=EYAML_UPDATED" $TMPFILE > /dev/null; then
        git diff --cached "${save_to}"
    fi

    echo ""
    echo "$save_to updated"
    echo ""

    exit 0
}
|
||||||
|
|
||||||
|
function edit_file_on_host() {
|
||||||
|
#
|
||||||
|
# Local execution on a host
|
||||||
|
#
|
||||||
|
|
||||||
|
local SECRETFILE=/etc/hiera/data/secrets.yaml.asc
|
||||||
|
local EYAMLFILE=/etc/hiera/data/local.eyaml
|
||||||
|
|
||||||
|
if [ -f "${EYAMLFILE}" ]; then
|
||||||
|
edit_eyaml_file ${EYAMLFILE}
|
||||||
|
elif [ -f "${SECRETFILE}" ]; then
|
||||||
|
edit_gpg_file ${SECRETFILE}
|
||||||
|
elif [ -f /etc/hiera/eyaml/public_certkey.pkcs7.pem ]; then
|
||||||
|
# default to eyaml if the key exists and none of the secrets-file above exist
|
||||||
|
echo "---" > ${EYAMLFILE}
|
||||||
|
edit_eyaml_file ${EYAMLFILE}
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
function edit_gpg_file()
|
||||||
|
{
|
||||||
|
local SECRETFILE=$1
|
||||||
|
|
||||||
|
GNUPGHOME=/etc/hiera/gpg/
|
||||||
|
export GNUPGHOME
|
||||||
|
|
||||||
|
local GPG=`which gpg2 || true`
|
||||||
|
if [ ! -x "$GPG" ]; then
|
||||||
|
GPG=`which gpg || true`
|
||||||
|
if [ ! -x "$GPG" ]; then
|
||||||
|
echo "$0: gpg2 or gpg not found"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
if ! $GPG --list-secret-keys | grep -q ^"sec\s"; then
|
||||||
|
echo "$0: Secret key does not exist (in $GNUPGHOME)."
|
||||||
|
echo ""
|
||||||
|
echo "Generate it with /var/cache/cosmos/model/pre-tasks.d/040hiera-gpg"
|
||||||
|
echo ""
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -s $SECRETFILE ]; then
|
||||||
|
$GPG -d $SECRETFILE > $TMPFILE
|
||||||
|
fi
|
||||||
|
|
||||||
|
cp $TMPFILE $TMPFILE2
|
||||||
|
sensible-editor $TMPFILE
|
||||||
|
rm -f ${TMPFILE}~ ${TMPFILE2}~
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
local status=0
|
||||||
|
cmp -s $TMPFILE $TMPFILE2 || status=1
|
||||||
|
if [ $status -eq 0 ]; then
|
||||||
|
(
|
||||||
|
echo "STATUS=NOT_CHANGED"
|
||||||
|
) > $LAST_OUTPUT_FILENAME
|
||||||
|
echo ""
|
||||||
|
echo "$0: No changes detected"
|
||||||
|
else
|
||||||
|
# figure out this hosts gpg key id
|
||||||
|
if lsb_release -r | grep -qE '(18|20).04'; then
|
||||||
|
recipient=$($GPG --list-secret-keys | grep -A1 '^sec' | tail -1 | awk '{print $1}')
|
||||||
|
else
|
||||||
|
recipient=$($GPG --list-secret-key | grep ^sec | head -1 | awk '{print $2}' | cut -d / -f 2)
|
||||||
|
fi
|
||||||
|
|
||||||
|
save_to="`hostname --fqdn`/overlay${SECRETFILE}"
|
||||||
|
echo ""
|
||||||
|
(
|
||||||
|
echo "STATUS=UPDATED"
|
||||||
|
echo ""
|
||||||
|
) > $LAST_OUTPUT_FILENAME
|
||||||
|
$GPG --output - --armor --recipient $recipient --sign --encrypt $TMPFILE >> $LAST_OUTPUT_FILENAME
|
||||||
|
echo ""
|
||||||
|
echo "GPG output saved in $LAST_OUTPUT_FILENAME - save it in Cosmos as"
|
||||||
|
echo ""
|
||||||
|
echo " $save_to"
|
||||||
|
echo ""
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
function edit_eyaml_file()
|
||||||
|
{
|
||||||
|
local EYAMLFILE=$1
|
||||||
|
|
||||||
|
local FQDN=$(hostname --fqdn)
|
||||||
|
local privkey='/etc/hiera/eyaml/private_key.pkcs7.pem'
|
||||||
|
local pubkey='/etc/hiera/eyaml/public_certkey.pkcs7.pem'
|
||||||
|
for f in $privkey $pubkey; do
|
||||||
|
test -f "${f}" || { echo "$0: eyaml key file ${f} not found"; exit 1; }
|
||||||
|
done
|
||||||
|
|
||||||
|
patch_broken_eyaml
|
||||||
|
|
||||||
|
# save source file for comparision afterwards
|
||||||
|
cp "${EYAMLFILE}" "${TMPFILE}"
|
||||||
|
eyaml edit --pkcs7-private-key "${privkey}" --pkcs7-public-key "${pubkey}" "${EYAMLFILE}"
|
||||||
|
|
||||||
|
local status=0
|
||||||
|
cmp -s "${EYAMLFILE}" $TMPFILE || status=1
|
||||||
|
if [ $status -eq 0 ]; then
|
||||||
|
(
|
||||||
|
echo "STATUS=NOT_CHANGED"
|
||||||
|
) > $LAST_OUTPUT_FILENAME
|
||||||
|
echo ""
|
||||||
|
echo "$0: No changes detected"
|
||||||
|
else
|
||||||
|
echo ""
|
||||||
|
(
|
||||||
|
echo "STATUS=EYAML_UPDATED"
|
||||||
|
echo ""
|
||||||
|
) > $LAST_OUTPUT_FILENAME
|
||||||
|
cat "${EYAMLFILE}" >> $LAST_OUTPUT_FILENAME
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
if [[ $1 == '--on-host' ]]; then
|
||||||
|
edit_file_on_host
|
||||||
|
else
|
||||||
|
host=$(echo $1 | sed -e 's!/*$!!') # remove trailing slashes
|
||||||
|
|
||||||
|
if [ ! -d $host ]; then
|
||||||
|
echo "$0: No host-directory for '$host' found - execute in top-level cosmos dir"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
edit_copy_and_commit $host
|
||||||
|
fi
|
||||||
|
|
||||||
|
exit 0
|
35
fabfile/__init__.py
Normal file
35
fabfile/__init__.py
Normal file
|
@ -0,0 +1,35 @@
|
||||||
|
from fabric.api import run,env
|
||||||
|
from fabric.operations import get,put
|
||||||
|
import os
|
||||||
|
import yaml
|
||||||
|
import re
|
||||||
|
import sys
|
||||||
|
from fabfile.db import cosmos_db
|
||||||
|
|
||||||
|
env.user = 'root'
|
||||||
|
env.timeout = 30
|
||||||
|
env.connection_attempts = 3
|
||||||
|
env.warn_only = True
|
||||||
|
env.skip_bad_hosts = True
|
||||||
|
env.roledefs = cosmos_db()['members']
|
||||||
|
|
||||||
|
def all():
|
||||||
|
env.hosts = cosmos_db()['members']['all']
|
||||||
|
|
||||||
|
def cosmos():
|
||||||
|
run("/usr/local/bin/run-cosmos");
|
||||||
|
|
||||||
|
def upgrade():
|
||||||
|
run("apt-get -qq update && apt-get -y -q dist-upgrade");
|
||||||
|
|
||||||
|
def facts():
|
||||||
|
get("/var/run/facts.yaml",local_path="facts/%(host)s.yaml")
|
||||||
|
|
||||||
|
def chassis():
|
||||||
|
run("ipmi-chassis --get-chassis-status")
|
||||||
|
|
||||||
|
def newvm(fqdn,ip,domain):
|
||||||
|
run("vmbuilder kvm ubuntu --domain %s --dest /var/lib/libvirt/images/%s.img --arch x86_64 --hostname %s --mem 512 --ip %s --addpkg openssh-server" % (domain,fqdn,fqdn,ip))
|
||||||
|
|
||||||
|
def cp(local,remote):
|
||||||
|
put(local,remote)
|
BIN
fabfile/__init__.pyc
Normal file
BIN
fabfile/__init__.pyc
Normal file
Binary file not shown.
118
global/overlay/etc/cosmos/apt/bootstrap-cosmos.sh
Executable file
118
global/overlay/etc/cosmos/apt/bootstrap-cosmos.sh
Executable file
|
@ -0,0 +1,118 @@
|
||||||
|
#!/bin/sh
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
cmd_hostname="$1"
|
||||||
|
if test -z "$cmd_hostname"; then
|
||||||
|
echo "Usage: $0 HOSTNAME REPO TAGPATTERN"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
cmd_repo="$2"
|
||||||
|
if test -z "$cmd_repo"; then
|
||||||
|
echo "Usage $0 HOSTNAME REPO TAGPATTERN"
|
||||||
|
exit 2
|
||||||
|
fi
|
||||||
|
|
||||||
|
cmd_tags="$3"
|
||||||
|
if test -z "$cmd_tags"; then
|
||||||
|
echo "Usage $0 HOSTNAME REPO TAGPATTERN"
|
||||||
|
exit 3
|
||||||
|
fi
|
||||||
|
|
||||||
|
set -x
|
||||||
|
|
||||||
|
|
||||||
|
# cloud-init runs with LANG='US-ASCII' which is likely to fail because of non-US-ASCII chars in the manifest
|
||||||
|
export LANG='en_US.UTF-8'
|
||||||
|
|
||||||
|
export DEBIAN_FRONTEND='noninteractive'
|
||||||
|
|
||||||
|
apt-get -y update
|
||||||
|
apt-get -y upgrade
|
||||||
|
for pkg in rsync git git-core wget gpg jq; do
|
||||||
|
# script is running with "set -e", use "|| true" to allow packages to not
|
||||||
|
# exist without stopping the script
|
||||||
|
apt-get -y install $pkg || true
|
||||||
|
done
|
||||||
|
cosmos_deb=$(find ./ -maxdepth 1 -name 'cosmos_*.deb' | sort -V | tail -1)
|
||||||
|
dpkg -i "$cosmos_deb"
|
||||||
|
|
||||||
|
if ! test -d /var/cache/cosmos/repo; then
|
||||||
|
cosmos clone "$cmd_repo"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Re-run cosmos at reboot until it succeeds - use bash -l to get working proxy settings.
|
||||||
|
# It is possible the file does not exist or contains no matching lines,
|
||||||
|
# both cases are OK
|
||||||
|
grep -v "^exit 0" /etc/rc.local > /etc/rc.local.new || true
|
||||||
|
(echo ""
|
||||||
|
echo "test -f /etc/run-cosmos-at-boot && (bash -l cosmos -v update; bash -l cosmos -v apply && rm /etc/run-cosmos-at-boot)"
|
||||||
|
echo ""
|
||||||
|
echo "exit 0"
|
||||||
|
) >> /etc/rc.local.new
|
||||||
|
mv -f /etc/rc.local.new /etc/rc.local
|
||||||
|
|
||||||
|
touch /etc/run-cosmos-at-boot
|
||||||
|
|
||||||
|
# If this cloud-config is set, it will interfere with our changes to /etc/hosts
|
||||||
|
# The configuration seems to move around between cloud-config versions
|
||||||
|
for file in /etc/cloud/cloud.cfg /etc/cloud/cloud.cfg.d/01_debian_cloud.cfg; do
|
||||||
|
if [ -f ${file} ]; then
|
||||||
|
sed -i 's/manage_etc_hosts: true/manage_etc_hosts: false/g' ${file}
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
# Remove potential $hostname.novalocal, added by cloud-init or Debian default
|
||||||
|
# from /etc/hosts. We add our own further down.
|
||||||
|
#
|
||||||
|
# From # https://www.debian.org/doc/manuals/debian-reference/ch05.en.html#_the_hostname_resolution:
|
||||||
|
# "For a system with a permanent IP address, that permanent IP address should
|
||||||
|
# be used here instead of 127.0.1.1."
|
||||||
|
sed -i.bak -e "/127\.0\.1\.1/d" /etc/hosts
|
||||||
|
|
||||||
|
vendor=$(lsb_release -is)
|
||||||
|
version=$(lsb_release -rs)
|
||||||
|
min_version=1337
|
||||||
|
host_ip=127.0.1.1
|
||||||
|
if [ "${vendor}" = "Ubuntu" ]; then
|
||||||
|
min_version=20.04
|
||||||
|
elif [ "${vendor}" = "Debian" ]; then
|
||||||
|
min_version=11
|
||||||
|
fi
|
||||||
|
|
||||||
|
hostname $cmd_hostname
|
||||||
|
short=`echo ${cmd_hostname} | awk -F. '{print $1}'`
|
||||||
|
# Only change behavior on modern OS where `ip -j` outputs a json predictuble
|
||||||
|
# enought to work with.
|
||||||
|
#
|
||||||
|
# Use `dpkg` to easier compare ubuntu versions.
|
||||||
|
if dpkg --compare-versions "${version}" "ge" "${min_version}"; then
|
||||||
|
# When hostname pointed to loopback in /etc/hosts containers running on the
|
||||||
|
# host tried to connect to the container itself instead of the host.
|
||||||
|
host_ip=$(ip -j address show "$(ip -j route show default | jq -r '.[0].dev')" | jq -r .[0].addr_info[0].local)
|
||||||
|
fi
|
||||||
|
echo "${host_ip} ${cmd_hostname} ${short}" >> /etc/hosts
|
||||||
|
|
||||||
|
# Set up cosmos models. They are in the order of most significant first, so we want
|
||||||
|
# <host> <group (if it exists)> <global>
|
||||||
|
_host_type=`echo $cmd_hostname | cut -d - -f 1`
|
||||||
|
models=$(
|
||||||
|
echo -n '\\$COSMOS_REPO/'"$cmd_hostname/:"
|
||||||
|
test -d /var/cache/cosmos/repo/${_host_type}-common && echo -n '\\$COSMOS_REPO/'"${_host_type}-common/:"
|
||||||
|
echo -n '\\$COSMOS_REPO/global/'
|
||||||
|
)
|
||||||
|
echo "Configuring cosmos with the following models:"
|
||||||
|
echo "${models}"
|
||||||
|
|
||||||
|
perl -pi -e "s,#COSMOS_REPO_MODELS=.*,COSMOS_REPO_MODELS=\"${models}\"," /etc/cosmos/cosmos.conf
|
||||||
|
perl -pi -e "s,#COSMOS_UPDATE_VERIFY_GIT_TAG_PATTERN=.*,COSMOS_UPDATE_VERIFY_GIT_TAG_PATTERN=\"${cmd_tags}*\"," /etc/cosmos/cosmos.conf
|
||||||
|
|
||||||
|
env COSMOS_BASE=/var/cache/cosmos COSMOS_KEYS=/var/cache/cosmos/repo/global/overlay/etc/cosmos/keys /var/cache/cosmos/repo/global/post-tasks.d/015cosmos-trust
|
||||||
|
|
||||||
|
mkdir -p /var/cache/scriptherder
|
||||||
|
|
||||||
|
(date; nohup cosmos -v update && nohup cosmos -v apply && rm /etc/run-cosmos-at-boot; date) 2>&1 | tee /var/log/cosmos.log
|
||||||
|
|
||||||
|
|
||||||
|
exit 0
|
BIN
global/overlay/etc/cosmos/apt/cosmos_1.5-1_all.deb
Normal file
BIN
global/overlay/etc/cosmos/apt/cosmos_1.5-1_all.deb
Normal file
Binary file not shown.
BIN
global/overlay/etc/cosmos/apt/cosmos_1.5-2~sunet20220414_all.deb
Normal file
BIN
global/overlay/etc/cosmos/apt/cosmos_1.5-2~sunet20220414_all.deb
Normal file
Binary file not shown.
0
global/overlay/etc/cosmos/keys/.placeholder
Normal file
0
global/overlay/etc/cosmos/keys/.placeholder
Normal file
6
global/overlay/etc/cron.d/cosmos
Normal file
6
global/overlay/etc/cron.d/cosmos
Normal file
|
@ -0,0 +1,6 @@
|
||||||
|
SHELL=/bin/sh
|
||||||
|
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
|
||||||
|
|
||||||
|
*/15 * * * * root /usr/local/libexec/cosmos-cron-wrapper
|
||||||
|
|
||||||
|
@reboot root sleep 30; /usr/local/libexec/cosmos-cron-wrapper
|
3
global/overlay/etc/puppet/cosmos-rules.yaml
Normal file
3
global/overlay/etc/puppet/cosmos-rules.yaml
Normal file
|
@ -0,0 +1,3 @@
|
||||||
|
# Note that the matching is done with re.match()
|
||||||
|
'^ns[0-9]?.mnt.se$':
|
||||||
|
nameserver:
|
38
global/overlay/etc/puppet/cosmos_enc.py
Executable file
38
global/overlay/etc/puppet/cosmos_enc.py
Executable file
|
@ -0,0 +1,38 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
#
|
||||||
|
# Puppet 'External Node Classifier' to tell puppet what classes to apply to this node.
|
||||||
|
#
|
||||||
|
# Docs: https://puppet.com/docs/puppet/5.3/nodes_external.html
|
||||||
|
#
|
||||||
|
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import sys
|
||||||
|
|
||||||
|
import yaml
|
||||||
|
|
||||||
|
rules_path = os.environ.get("COSMOS_RULES_PATH", "/etc/puppet")
|
||||||
|
|
||||||
|
node_name = sys.argv[1]
|
||||||
|
|
||||||
|
rules = dict()
|
||||||
|
for p in rules_path.split(":"):
|
||||||
|
rules_file = os.path.join(p, "cosmos-rules.yaml")
|
||||||
|
if os.path.exists(rules_file):
|
||||||
|
with open(rules_file) as fd:
|
||||||
|
rules.update(yaml.safe_load(fd))
|
||||||
|
|
||||||
|
found = False
|
||||||
|
classes = dict()
|
||||||
|
for reg, cls in rules.items():
|
||||||
|
if re.search(reg, node_name):
|
||||||
|
if cls:
|
||||||
|
classes.update(cls)
|
||||||
|
found = True
|
||||||
|
|
||||||
|
if not found:
|
||||||
|
sys.stderr.write(f"{sys.argv[0]}: {node_name} not found in cosmos-rules.yaml\n")
|
||||||
|
|
||||||
|
print("---\n" + yaml.dump(dict(classes=classes)))
|
||||||
|
|
||||||
|
sys.exit(0)
|
27
global/overlay/etc/puppet/hiera.yaml
Normal file
27
global/overlay/etc/puppet/hiera.yaml
Normal file
|
@ -0,0 +1,27 @@
|
||||||
|
# Hiera version 5 configuration
|
||||||
|
#
|
||||||
|
---
|
||||||
|
version: 5
|
||||||
|
defaults:
|
||||||
|
datadir: /etc/hiera/data
|
||||||
|
data_hash: yaml_data
|
||||||
|
|
||||||
|
hierarchy:
|
||||||
|
- name: "Per-node data"
|
||||||
|
path: "local.yaml"
|
||||||
|
|
||||||
|
- name: "Per-group data"
|
||||||
|
path: "group.yaml"
|
||||||
|
|
||||||
|
- name: "Per-host secrets"
|
||||||
|
path: "local.eyaml"
|
||||||
|
lookup_key: eyaml_lookup_key
|
||||||
|
options:
|
||||||
|
pkcs7_private_key: /etc/hiera/eyaml/private_key.pkcs7.pem
|
||||||
|
pkcs7_public_key: /etc/hiera/eyaml/public_certkey.pkcs7.pem
|
||||||
|
|
||||||
|
- name: "Overrides per distribution"
|
||||||
|
path: "dist_%{facts.os.distro.codename}_override.yaml"
|
||||||
|
|
||||||
|
- name: "Data common to whole environment"
|
||||||
|
path: "common.yaml"
|
51
global/overlay/etc/puppet/manifests/cosmos-site.pp
Normal file
51
global/overlay/etc/puppet/manifests/cosmos-site.pp
Normal file
|
@ -0,0 +1,51 @@
|
||||||
|
# This manifest is managed using cosmos
|
||||||
|
|
||||||
|
Exec {
|
||||||
|
path => "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
|
||||||
|
}
|
||||||
|
|
||||||
|
# include some of this stuff for additional features
|
||||||
|
|
||||||
|
#include cosmos::tools
|
||||||
|
#include cosmos::motd
|
||||||
|
#include cosmos::ntp
|
||||||
|
#include cosmos::rngtools
|
||||||
|
#include cosmos::preseed
|
||||||
|
#include ufw
|
||||||
|
#include apt
|
||||||
|
#include cosmos
|
||||||
|
|
||||||
|
# you need a default node
|
||||||
|
|
||||||
|
node default {
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
# edit and uncomment to manage ssh root keys in a simple way
|
||||||
|
|
||||||
|
#class { 'cosmos::access':
|
||||||
|
# keys => [
|
||||||
|
# "ssh-rsa ..."
|
||||||
|
# ]
|
||||||
|
#}
|
||||||
|
|
||||||
|
# example config for the nameserver class which is matched in cosmos-rules.yaml
|
||||||
|
|
||||||
|
#class nameserver {
|
||||||
|
# package {'bind9':
|
||||||
|
# ensure => latest
|
||||||
|
# }
|
||||||
|
# service {'bind9':
|
||||||
|
# ensure => running
|
||||||
|
# }
|
||||||
|
# ufw::allow { "allow-dns-udp":
|
||||||
|
# ip => 'any',
|
||||||
|
# port => 53,
|
||||||
|
# proto => "udp"
|
||||||
|
# }
|
||||||
|
# ufw::allow { "allow-dns-tcp":
|
||||||
|
# ip => 'any',
|
||||||
|
# port => 53,
|
||||||
|
# proto => "tcp"
|
||||||
|
# }
|
||||||
|
#}
|
15
global/overlay/etc/puppet/puppet.conf
Normal file
15
global/overlay/etc/puppet/puppet.conf
Normal file
|
@ -0,0 +1,15 @@
|
||||||
|
[main]
|
||||||
|
logdir=/var/log/puppet
|
||||||
|
vardir=/var/lib/puppet
|
||||||
|
ssldir=/var/lib/puppet/ssl
|
||||||
|
rundir=/var/run/puppet
|
||||||
|
factpath=$vardir/lib/facter
|
||||||
|
templatedir=$confdir/templates
|
||||||
|
node_terminus = exec
|
||||||
|
external_nodes = /etc/puppet/cosmos_enc.py
|
||||||
|
basemodulepath = /etc/puppet/modules:/etc/puppet/cosmos-modules:/usr/share/puppet/modules
|
||||||
|
|
||||||
|
[master]
|
||||||
|
# These are needed when the puppetmaster is run by passenger
|
||||||
|
# and can safely be removed if webrick is used.
|
||||||
|
ssl_client_header = SSL_CLIENT_S_DN
|
|
@ -0,0 +1,11 @@
|
||||||
|
[Unit]
|
||||||
|
Description=run-cosmos fleetlock unlocker
|
||||||
|
After=network-online.target
|
||||||
|
Wants=network-online.target
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Type=oneshot
|
||||||
|
ExecStart=/usr/local/bin/run-cosmos fleetlock-unlock
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=multi-user.target
|
152
global/overlay/usr/local/bin/run-cosmos
Executable file
152
global/overlay/usr/local/bin/run-cosmos
Executable file
|
@ -0,0 +1,152 @@
|
||||||
|
#!/bin/bash
|
||||||
|
#
|
||||||
|
# Simplify running cosmos, with serialization if flock is available.
|
||||||
|
#
|
||||||
|
|
||||||
|
readonly PROGNAME=$(basename "$0")
|
||||||
|
readonly LOCKFILE_DIR=/tmp
|
||||||
|
readonly LOCK_FD=200
|
||||||
|
readonly FLEETLOCK_CONFIG=/etc/run-cosmos-fleetlock-conf
|
||||||
|
readonly FLEETLOCK_DISABLE_FILE=/etc/run-cosmos-fleetlock-disable
|
||||||
|
readonly FLEETLOCK_TOOL=/usr/local/bin/sunet-fleetlock
|
||||||
|
readonly FLEETLOCK_UNLOCK_SERVICE=run-cosmos-fleetlock-unlocker.service
|
||||||
|
readonly HEALTHCHECK_TOOL=/usr/local/bin/sunet-machine-healthy
|
||||||
|
readonly HEALTHCHECK_DISABLE_FILE=/etc/run-cosmos-healthcheck-disable
|
||||||
|
|
||||||
|
lock() {
|
||||||
|
local prefix=$1
|
||||||
|
local fd=${2:-$LOCK_FD}
|
||||||
|
local lock_file=$LOCKFILE_DIR/$prefix.lock
|
||||||
|
|
||||||
|
# create lock file
|
||||||
|
eval "exec $fd>$lock_file"
|
||||||
|
|
||||||
|
# acquier the lock
|
||||||
|
flock -n "$fd" \
|
||||||
|
&& return 0 \
|
||||||
|
|| return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
eexit() {
|
||||||
|
local error_str="$*"
|
||||||
|
|
||||||
|
echo "$error_str"
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
|
||||||
|
oexit() {
|
||||||
|
local info_str="$*"
|
||||||
|
|
||||||
|
echo "$info_str"
|
||||||
|
exit 0
|
||||||
|
}
|
||||||
|
|
||||||
|
fleetlock_enable_unlock_service() {
|
||||||
|
# In case e.g. the unit file has been removed "FragmentPath" will still
|
||||||
|
# return the old filename until daemon-reload is called, so do that here
|
||||||
|
# before we try checking for the FragmentPath.
|
||||||
|
need_reload=$(systemctl show --property NeedDaemonReload $FLEETLOCK_UNLOCK_SERVICE | awk -F= '{print $2}')
|
||||||
|
if [ "$need_reload" = "yes" ]; then
|
||||||
|
systemctl daemon-reload
|
||||||
|
fi
|
||||||
|
|
||||||
|
unit_file=$(systemctl show --property FragmentPath $FLEETLOCK_UNLOCK_SERVICE | awk -F= '{print $2}')
|
||||||
|
if [ -z "$unit_file" ]; then
|
||||||
|
# No unit file matching the service name, do nothing
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Enable the service if needed
|
||||||
|
systemctl is-enabled --quiet $FLEETLOCK_UNLOCK_SERVICE || systemctl enable --quiet $FLEETLOCK_UNLOCK_SERVICE
|
||||||
|
}
|
||||||
|
|
||||||
|
fleetlock_lock() {
|
||||||
|
if [ ! -f $FLEETLOCK_DISABLE_FILE ] && [ -f $FLEETLOCK_CONFIG ] && [ -x $FLEETLOCK_TOOL ]; then
|
||||||
|
# Make sure the unlock service is enabled before we take a lock if
|
||||||
|
# cosmos ends up rebooting the machine before fleetlock_unlock() is
|
||||||
|
# called.
|
||||||
|
fleetlock_enable_unlock_service || return 1
|
||||||
|
local fleetlock_group=""
|
||||||
|
local optional_args=()
|
||||||
|
# shellcheck source=/dev/null
|
||||||
|
. $FLEETLOCK_CONFIG || return 1
|
||||||
|
if [ -z "$fleetlock_group" ]; then
|
||||||
|
echo "Unable to set fleetlock_group"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
if [ -n "$fleetlock_lock_timeout" ]; then
|
||||||
|
optional_args+=("--timeout")
|
||||||
|
optional_args+=("$fleetlock_lock_timeout")
|
||||||
|
fi
|
||||||
|
echo "Getting fleetlock lock"
|
||||||
|
$FLEETLOCK_TOOL --lock-group "$fleetlock_group" --lock "${optional_args[@]}" || return 1
|
||||||
|
fi
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
fleetlock_unlock() {
|
||||||
|
if [ ! -f $FLEETLOCK_DISABLE_FILE ] && [ -f $FLEETLOCK_CONFIG ] && [ -x $FLEETLOCK_TOOL ]; then
|
||||||
|
local fleetlock_group=""
|
||||||
|
local optional_args=()
|
||||||
|
# shellcheck source=/dev/null
|
||||||
|
. $FLEETLOCK_CONFIG || return 1
|
||||||
|
if [ -z "$fleetlock_group" ]; then
|
||||||
|
echo "Unable to set fleetlock_group"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
if [ -n "$fleetlock_unlock_timeout" ]; then
|
||||||
|
optional_args+=("--timeout")
|
||||||
|
optional_args+=("$fleetlock_unlock_timeout")
|
||||||
|
fi
|
||||||
|
machine_is_healthy || return 1
|
||||||
|
echo "Releasing fleetlock lock"
|
||||||
|
$FLEETLOCK_TOOL --lock-group "$fleetlock_group" --unlock "${optional_args[@]}" || return 1
|
||||||
|
fi
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
machine_is_healthy() {
|
||||||
|
if [ ! -f $HEALTHCHECK_DISABLE_FILE ] && [ -x $HEALTHCHECK_TOOL ]; then
|
||||||
|
local fleetlock_healthcheck_timeout=""
|
||||||
|
local optional_args=()
|
||||||
|
# shellcheck source=/dev/null
|
||||||
|
. $FLEETLOCK_CONFIG || return 1
|
||||||
|
if [ -n "$fleetlock_healthcheck_timeout" ]; then
|
||||||
|
optional_args+=("--timeout")
|
||||||
|
optional_args+=("$fleetlock_healthcheck_timeout")
|
||||||
|
fi
|
||||||
|
echo "Running any health checks"
|
||||||
|
$HEALTHCHECK_TOOL "${optional_args[@]}" || return 1
|
||||||
|
fi
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
main () {
|
||||||
|
lock "$PROGNAME" || eexit "Only one instance of $PROGNAME can run at one time."
|
||||||
|
fleetlock_lock || eexit "Unable to acquire fleetlock lock."
|
||||||
|
cosmos "$@" update
|
||||||
|
cosmos "$@" apply
|
||||||
|
fleetlock_unlock || eexit "Unable to release fleetlock lock."
|
||||||
|
|
||||||
|
touch /var/run/last-cosmos-ok.stamp
|
||||||
|
|
||||||
|
find /var/lib/puppet/reports/ -type f -mtime +10 -print0 | xargs -0 rm -f
|
||||||
|
|
||||||
|
if [ -f /cosmos-reboot ]; then
|
||||||
|
rm -f /cosmos-reboot
|
||||||
|
reboot
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Most of the time we just pass on any arguments to the underlying cosmos
|
||||||
|
# tools, if adding special cases here make sure to not shadow any arguments
|
||||||
|
# (like "-v") which users expect to be passed on to cosmos.
|
||||||
|
case "$1" in
|
||||||
|
"fleetlock-unlock")
|
||||||
|
lock "$PROGNAME" || oexit "$PROGNAME appears locked by a running run-cosmos, let it handle unlocking instead."
|
||||||
|
fleetlock_unlock || eexit "Unable to release fleetlock lock."
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
main "$@"
|
||||||
|
;;
|
||||||
|
esac
|
251
global/overlay/usr/local/bin/sunet-fleetlock
Executable file
251
global/overlay/usr/local/bin/sunet-fleetlock
Executable file
|
@ -0,0 +1,251 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
# pylint: disable=invalid-name
|
||||||
|
# pylint: enable=invalid-name
|
||||||
|
""" Tool for taking and releasing fleetlock locks, used by run-cosmos if fleetlock is configured """
|
||||||
|
|
||||||
|
#
|
||||||
|
# You need a config file in "configparser" format with a section for the
|
||||||
|
# lock group you are using, so if the file describes two lock groups where one
|
||||||
|
# is called "fl-test1" and the other "fl-test2" then example contents would
|
||||||
|
# look like this:
|
||||||
|
# ===
|
||||||
|
# [fl-test1]
|
||||||
|
# server = https://fleetlock-server1.example.com
|
||||||
|
# password = mysecret1
|
||||||
|
#
|
||||||
|
# [fl-test2]
|
||||||
|
# server = https://fleetlock-server2.example.com
|
||||||
|
# password = mysecret2
|
||||||
|
# ===
|
||||||
|
#
|
||||||
|
# The password needs to match an acl configured for the lock group in the
|
||||||
|
# knubbis-fleetlock service.
|
||||||
|
#
|
||||||
|
# When modifying this code please make sure it is passed through the following
|
||||||
|
# tools:
|
||||||
|
# ===
|
||||||
|
# isort
|
||||||
|
# black
|
||||||
|
# pylint
|
||||||
|
# mypy --strict
|
||||||
|
# ===
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import configparser
|
||||||
|
import os.path
|
||||||
|
import platform
|
||||||
|
import signal
|
||||||
|
import sys
|
||||||
|
import time
|
||||||
|
from types import FrameType
|
||||||
|
from typing import Optional, Union
|
||||||
|
|
||||||
|
import requests
|
||||||
|
|
||||||
|
|
||||||
|
class TimeoutException(Exception):
|
||||||
|
"""Exception raised when we hit tool timeout"""
|
||||||
|
|
||||||
|
|
||||||
|
def timeout_handler(signum: int, frame: Optional[FrameType]) -> None:
|
||||||
|
"""This is called if the tool takes too long to run"""
|
||||||
|
raise TimeoutException(f"{os.path.basename(sys.argv[0])} hit --timeout limit")
|
||||||
|
|
||||||
|
|
||||||
|
def do_fleetlock_request(
|
||||||
|
config: configparser.ConfigParser, args: argparse.Namespace, operation: str
|
||||||
|
) -> bool:
|
||||||
|
"""Perform fleetlock request based on given operation and return true if it succeeded"""
|
||||||
|
fleetlock_data = {
|
||||||
|
"client_params": {
|
||||||
|
"group": args.lock_group,
|
||||||
|
"id": args.lock_id,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
fleetlock_headers = {
|
||||||
|
"fleet-lock-protocol": "true",
|
||||||
|
}
|
||||||
|
|
||||||
|
if operation == "lock":
|
||||||
|
fleetlock_path = "/v1/pre-reboot"
|
||||||
|
url = config[args.lock_group]["server"] + fleetlock_path
|
||||||
|
elif operation == "unlock":
|
||||||
|
fleetlock_path = "/v1/steady-state"
|
||||||
|
url = config[args.lock_group]["server"] + fleetlock_path
|
||||||
|
else:
|
||||||
|
raise ValueError(f"unsupported operation: {operation}")
|
||||||
|
|
||||||
|
# Log the request-id header from responses so we can track requests in
|
||||||
|
# the knubbis-fleetlock logs more easily
|
||||||
|
request_id_key = "request-id"
|
||||||
|
request_id = None
|
||||||
|
|
||||||
|
retry_sleep_delay = 1
|
||||||
|
|
||||||
|
# Loop forever: we depend on the SIGALRM timout to raise an error if it
|
||||||
|
# takes too long
|
||||||
|
while True:
|
||||||
|
if args.verbose:
|
||||||
|
print(f"{operation} POST at url {url}")
|
||||||
|
|
||||||
|
try:
|
||||||
|
resp = requests.post(
|
||||||
|
url,
|
||||||
|
headers=fleetlock_headers,
|
||||||
|
json=fleetlock_data,
|
||||||
|
timeout=args.request_timeout,
|
||||||
|
auth=("", config[args.lock_group]["password"]),
|
||||||
|
)
|
||||||
|
except (
|
||||||
|
requests.exceptions.ConnectionError,
|
||||||
|
requests.exceptions.ReadTimeout,
|
||||||
|
) as e:
|
||||||
|
print(f"POST request failed: {e}")
|
||||||
|
time.sleep(retry_sleep_delay)
|
||||||
|
continue
|
||||||
|
|
||||||
|
if request_id_key in resp.headers:
|
||||||
|
request_id = resp.headers[request_id_key]
|
||||||
|
|
||||||
|
if resp.status_code == requests.codes.ok: # pylint: disable=no-member
|
||||||
|
if args.verbose:
|
||||||
|
print(
|
||||||
|
f"successful {operation} request for lock ID '{args.lock_id}'",
|
||||||
|
f"in lock group '{args.lock_group}' ({request_id_key}: {request_id})",
|
||||||
|
)
|
||||||
|
|
||||||
|
return True
|
||||||
|
|
||||||
|
# If the request is unauthorized this means we probably either try to
|
||||||
|
# use a lock group that does not exist, or we are using the wrong
|
||||||
|
# credentials and in either case we can give up immediately
|
||||||
|
if resp.status_code == requests.codes.unauthorized: # pylint: disable=no-member
|
||||||
|
print(
|
||||||
|
f"{operation} request unauthorized: incorrect lock group name '{args.lock_group}'",
|
||||||
|
f"or wrong credentials? ({request_id_key}: {request_id})",
|
||||||
|
)
|
||||||
|
return False
|
||||||
|
|
||||||
|
# If the request failed in some other way we expect a JSON formatted
|
||||||
|
# response message:
|
||||||
|
print(
|
||||||
|
f"{operation} request failed:"
|
||||||
|
+ " "
|
||||||
|
+ resp.content.decode("utf-8").rstrip()
|
||||||
|
+ " "
|
||||||
|
+ f"({request_id_key}: {request_id})"
|
||||||
|
)
|
||||||
|
|
||||||
|
time.sleep(retry_sleep_delay)
|
||||||
|
|
||||||
|
|
||||||
|
def read_config(args: argparse.Namespace) -> Union[configparser.ConfigParser, None]:
|
||||||
|
"""Read lock group specific settings from config file"""
|
||||||
|
config = configparser.ConfigParser()
|
||||||
|
with open(args.config, encoding="utf-8") as config_fileobj:
|
||||||
|
config.read_file(config_fileobj)
|
||||||
|
|
||||||
|
if args.lock_group not in config:
|
||||||
|
print(f"missing required config section for lock group '{args.lock_group}'")
|
||||||
|
return None
|
||||||
|
|
||||||
|
required_settings = {
|
||||||
|
"server",
|
||||||
|
"password",
|
||||||
|
}
|
||||||
|
|
||||||
|
have_required_settings = True
|
||||||
|
for setting in required_settings:
|
||||||
|
if setting not in config[args.lock_group]:
|
||||||
|
print(
|
||||||
|
f"missing required setting '{setting}' in lock group '{args.lock_group}'"
|
||||||
|
)
|
||||||
|
have_required_settings = False
|
||||||
|
|
||||||
|
if not have_required_settings:
|
||||||
|
return None
|
||||||
|
|
||||||
|
return config
|
||||||
|
|
||||||
|
|
||||||
|
def main() -> None:
|
||||||
|
"""Starting point of the program"""
|
||||||
|
|
||||||
|
# How long to wait per HTTP request to fleetlock service
|
||||||
|
default_request_timeout = 5
|
||||||
|
|
||||||
|
# How to long before giving up and exiting the tool with a failure
|
||||||
|
default_timeout = 60
|
||||||
|
|
||||||
|
default_config_file = "/etc/sunet-fleetlock/sunet-fleetlock.conf"
|
||||||
|
parser = argparse.ArgumentParser(description="Take and release fleetlock lock.")
|
||||||
|
parser.add_argument("--verbose", help="print more information", action="store_true")
|
||||||
|
parser.add_argument(
|
||||||
|
"--config",
|
||||||
|
help=f"the conf file to read (default: {default_config_file})",
|
||||||
|
default=default_config_file,
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--lock-group", required=True, help="the group to take a lock in"
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--lock-id",
|
||||||
|
help=f"the lock ID to use in the group (default: {platform.node()})",
|
||||||
|
default=platform.node(),
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--timeout",
|
||||||
|
type=int,
|
||||||
|
help=f"how many seconds before giving up and exiting tool (default: {default_timeout}s)",
|
||||||
|
default=default_timeout,
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--request_timeout",
|
||||||
|
type=int,
|
||||||
|
help=f"individal fleetlock HTTP request timeout (default: {default_request_timeout}s)",
|
||||||
|
default=default_request_timeout,
|
||||||
|
)
|
||||||
|
action_group = parser.add_mutually_exclusive_group(required=True)
|
||||||
|
action_group.add_argument("--lock", action="store_true", help="lock a reboot slot")
|
||||||
|
action_group.add_argument(
|
||||||
|
"--unlock", action="store_true", help="unlock a reboot slot"
|
||||||
|
)
|
||||||
|
args = parser.parse_args()
|
||||||
|
|
||||||
|
config = read_config(args)
|
||||||
|
|
||||||
|
if config is None:
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
# Give up if tool has been running for more than --timeout seconds:
|
||||||
|
signal.signal(signal.SIGALRM, timeout_handler)
|
||||||
|
signal.alarm(args.timeout)
|
||||||
|
|
||||||
|
if args.lock:
|
||||||
|
locked = False
|
||||||
|
|
||||||
|
try:
|
||||||
|
locked = do_fleetlock_request(config, args, "lock")
|
||||||
|
except TimeoutException as exc:
|
||||||
|
print(exc)
|
||||||
|
|
||||||
|
if locked:
|
||||||
|
sys.exit(0)
|
||||||
|
|
||||||
|
if args.unlock:
|
||||||
|
unlocked = False
|
||||||
|
|
||||||
|
try:
|
||||||
|
unlocked = do_fleetlock_request(config, args, "unlock")
|
||||||
|
except TimeoutException as exc:
|
||||||
|
print(exc)
|
||||||
|
|
||||||
|
if unlocked:
|
||||||
|
sys.exit(0)
|
||||||
|
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
103
global/overlay/usr/local/bin/sunet-machine-healthy
Executable file
103
global/overlay/usr/local/bin/sunet-machine-healthy
Executable file
|
@ -0,0 +1,103 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
# pylint: disable=invalid-name
|
||||||
|
# pylint: enable=invalid-name
|
||||||
|
|
||||||
|
""" Run any check tools in a directory to decide if the machine is considered
|
||||||
|
healthy, called by run-cosmos if fleetlock locking is configured """
|
||||||
|
|
||||||
|
import pathlib
|
||||||
|
import os
|
||||||
|
import os.path
|
||||||
|
import subprocess
|
||||||
|
import sys
|
||||||
|
import signal
|
||||||
|
import argparse
|
||||||
|
|
||||||
|
from typing import List, Optional
|
||||||
|
from types import FrameType
|
||||||
|
|
||||||
|
|
||||||
|
class TimeoutException(Exception):
|
||||||
|
"""Exception returned when checks takes too long"""
|
||||||
|
|
||||||
|
|
||||||
|
def timeout_handler(signum: int, frame: Optional[FrameType]) -> None:
|
||||||
|
"""This is called if the tool takes too long to run"""
|
||||||
|
raise TimeoutException(f"{os.path.basename(sys.argv[0])} hit --timeout limit")
|
||||||
|
|
||||||
|
|
||||||
|
def find_checks(check_dir: str) -> List[pathlib.Path]:
|
||||||
|
"""Find all executable .check files in the given directory"""
|
||||||
|
check_files = []
|
||||||
|
|
||||||
|
dirobj = pathlib.Path(check_dir)
|
||||||
|
|
||||||
|
# iterdir() will raise error if the directory does not exist, and in this
|
||||||
|
# case we will just return an empty list
|
||||||
|
try:
|
||||||
|
for entry in dirobj.iterdir():
|
||||||
|
if entry.is_file():
|
||||||
|
if str(entry).endswith(".check") and os.access(entry, os.X_OK):
|
||||||
|
check_files.append(entry)
|
||||||
|
|
||||||
|
# run checks in alphabetical order
|
||||||
|
check_files = sorted(check_files)
|
||||||
|
except FileNotFoundError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
return check_files
|
||||||
|
|
||||||
|
|
||||||
|
def run_checks(check_files: List[pathlib.Path]) -> bool:
|
||||||
|
"""Run all checks"""
|
||||||
|
for check_file in check_files:
|
||||||
|
try:
|
||||||
|
subprocess.run([str(check_file)], check=True)
|
||||||
|
except subprocess.CalledProcessError as exc:
|
||||||
|
print(f"error: {exc}")
|
||||||
|
return False
|
||||||
|
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
def main() -> None:
|
||||||
|
"""Starting point of the program"""
|
||||||
|
|
||||||
|
default_timeout = 60
|
||||||
|
default_health_check_dir = "/etc/sunet-machine-healthy/health-checks.d"
|
||||||
|
|
||||||
|
parser = argparse.ArgumentParser(
|
||||||
|
description="Determine if machine is considered healthy."
|
||||||
|
)
|
||||||
|
parser.add_argument("--verbose", help="print more information", action="store_true")
|
||||||
|
parser.add_argument(
|
||||||
|
"--health-check-dir",
|
||||||
|
help=f"directory to run checks from (default: {default_health_check_dir}",
|
||||||
|
default=default_health_check_dir,
|
||||||
|
)
|
||||||
|
parser.add_argument(
|
||||||
|
"--timeout",
|
||||||
|
type=int,
|
||||||
|
help=f"seconds before giving up and exiting tool (default: {default_timeout}s)",
|
||||||
|
default=default_timeout,
|
||||||
|
)
|
||||||
|
args = parser.parse_args()
|
||||||
|
|
||||||
|
checks_ok = False
|
||||||
|
|
||||||
|
# Give up if checks has been running for more than --timeout seconds:
|
||||||
|
signal.signal(signal.SIGALRM, timeout_handler)
|
||||||
|
signal.alarm(args.timeout)
|
||||||
|
|
||||||
|
check_files = find_checks(args.health_check_dir)
|
||||||
|
|
||||||
|
checks_ok = run_checks(check_files)
|
||||||
|
|
||||||
|
if checks_ok:
|
||||||
|
sys.exit(0)
|
||||||
|
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
12
global/overlay/usr/local/libexec/cosmos-cron-wrapper
Executable file
12
global/overlay/usr/local/libexec/cosmos-cron-wrapper
Executable file
|
@ -0,0 +1,12 @@
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
test -f /etc/no-automatic-cosmos && exit 0
|
||||||
|
|
||||||
|
RUN_COSMOS='/usr/local/bin/run-cosmos'
|
||||||
|
SCRIPTHERDER_CMD=''
|
||||||
|
|
||||||
|
if [ -x /usr/local/bin/scriptherder ]; then
|
||||||
|
SCRIPTHERDER_CMD='/usr/local/bin/scriptherder --mode wrap --syslog --name cosmos --'
|
||||||
|
fi
|
||||||
|
|
||||||
|
exec ${SCRIPTHERDER_CMD} ${RUN_COSMOS} "$@"
|
124
global/overlay/usr/local/sbin/cosmos_vm
Executable file
124
global/overlay/usr/local/sbin/cosmos_vm
Executable file
|
@ -0,0 +1,124 @@
|
||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -- $(getopt h?H:D?s:B:M:C:R:i:g:n:I:G:N: "$@")
|
||||||
|
|
||||||
|
src_image=""
|
||||||
|
size="1G"
|
||||||
|
dhcp="no"
|
||||||
|
hostname="default"
|
||||||
|
bridge="br0"
|
||||||
|
cpus="1"
|
||||||
|
mem="1024"
|
||||||
|
repo="https://yourhost/myproj-cosmos.git"
|
||||||
|
tag="cosmos-ops"
|
||||||
|
ip=""
|
||||||
|
gateway=""
|
||||||
|
netmask=""
|
||||||
|
resolver=""
|
||||||
|
ip6=""
|
||||||
|
gateway6=""
|
||||||
|
netmask6="64"
|
||||||
|
|
||||||
|
while [ $# -gt 0 ]; do
|
||||||
|
case "$1" in
|
||||||
|
(-h) echo "Usage: $0 [-h] [-H hostname] [-M <memory>] [-C <#cpus>] [-B <bridge>] [-D (dhcp)] [-i/-I <ip4/6>] [-n/-N <mask4/6>] [-g/-G <gw4/6>] [-R <resolver(s)>] [-s <src image>]"; exit 0;;
|
||||||
|
(-H) hostname="$2"; shift;;
|
||||||
|
(-s) src_image="$2"; shift;;
|
||||||
|
(-D) dhcp="yes" ;;
|
||||||
|
(-S) size="$2"; shift;;
|
||||||
|
(-B) bridge="$2"; shift;;
|
||||||
|
(-M) mem="$2"; shift;;
|
||||||
|
(-C) cpus="$2"; shift;;
|
||||||
|
(-R) resolver="$2"; shift;;
|
||||||
|
(-i) ip="$2"; shift;;
|
||||||
|
(-g) gateway="$2"; shift;;
|
||||||
|
(-n) netmask="$2"; shift;;
|
||||||
|
(-I) ip6="$2"; shift;;
|
||||||
|
(-G) gateway6="$2"; shift;;
|
||||||
|
(-N) netmask6="$2"; shift;;
|
||||||
|
(--) shift; break;;
|
||||||
|
(-*) echo "Unknown option $1\nUsage: $0 [-h] [-H hostname] [-M <memory>] [-C <#cpus>] [-B <bridge>] [-D (dhcp)] [-i/-I <ip4/6>] [-n/-N <mask4/6>] [-g/-G <gw4/6>] [-R <resolver(s)>] [-s <src image>]"; exit 1;;
|
||||||
|
(*) break;;
|
||||||
|
esac
|
||||||
|
shift
|
||||||
|
done
|
||||||
|
|
||||||
|
id=$(uuidgen)
|
||||||
|
|
||||||
|
seed=${id}_seed.img
|
||||||
|
disk=${id}.img
|
||||||
|
|
||||||
|
rm -f ${seed}
|
||||||
|
truncate --size 2M ${seed}
|
||||||
|
mkfs.vfat -n cidata ${seed} 2>/dev/null
|
||||||
|
|
||||||
|
user_data=$(mktemp)
|
||||||
|
cat > ${user_data} <<EOF
|
||||||
|
#cloud-config
|
||||||
|
password: ubuntu
|
||||||
|
chpasswd: { expire: False }
|
||||||
|
ssh_pwauth: True
|
||||||
|
ssh_authorized_keys:
|
||||||
|
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDVvB4gdJ6EWRmx8xUSxrhoUNnWxEf8ZwAqhzC1+7XBY/hSd/cbEotLB9gxgqt0CLW56VU4FPLTw8snD8tgsyZN6KH1Da7UXno8oMk8tJdwLQM0Ggx3aWuztItkDfBc3Lfvq5T07YfphqJO7rcSGbS4QQdflXuOM9JLi6NStVao0ia4aE6Tj68pVVb3++XYvqvbU6NtEICvkTxEY93YpnRSfeAi64hsbaqSTN4kpeltzoSD1Rikz2aQFtFXE03ZC48HtGGhdMFA/Ade6KWBDaXxHGARVQ9/UccfhaR2XSjVxSZ8FBNOzNsH4k9cQIb2ndkEOXZXnjF5ZjdI4ZU0F+t7 leifj+00060AD478D6@sunet.se
|
||||||
|
runcmd:
|
||||||
|
- ["mkdir","/tmp/seed"]
|
||||||
|
- ["mount","/dev/vdb","/tmp/seed"]
|
||||||
|
- ["cp","/tmp/seed/bootstrap-cosmos.sh","/tmp/seed/cosmos_1.5-1_all.deb","/root"]
|
||||||
|
- ["cd","/root"]
|
||||||
|
- "cd /root && /root/bootstrap-cosmos.sh ${hostname} ${repo} ${tag}"
|
||||||
|
|
||||||
|
EOF
|
||||||
|
|
||||||
|
meta_data=$(mktemp)
|
||||||
|
cat > ${meta_data} <<EOF
|
||||||
|
#cloud-config
|
||||||
|
instance-id: iid-${id}
|
||||||
|
local-hostname: ${hostname}
|
||||||
|
EOF
|
||||||
|
|
||||||
|
if [ "x${dhcp}" = "xyes" ]; then
|
||||||
|
cat >> ${meta_data} <<EOF
|
||||||
|
network-interfaces: |
|
||||||
|
auto eth0
|
||||||
|
iface eth0 inet dhcp
|
||||||
|
EOF
|
||||||
|
else
|
||||||
|
if [ "x${ip}" != "x" ]; then
|
||||||
|
cat >> ${meta_data} <<EOF
|
||||||
|
network-interfaces: |
|
||||||
|
auto eth0
|
||||||
|
iface eth0 inet static
|
||||||
|
address ${ip}
|
||||||
|
netmask ${netmask}
|
||||||
|
gateway ${gateway}
|
||||||
|
dns-nameservers ${resolver}
|
||||||
|
|
||||||
|
EOF
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "x${ip6}" != "x" ]; then
|
||||||
|
cat >> ${meta_data} <<EOF
|
||||||
|
network-interfaces: |
|
||||||
|
auto eth0
|
||||||
|
iface eth0 inet6 static
|
||||||
|
address ${ip6}
|
||||||
|
netmask ${netmask6}
|
||||||
|
gateway ${gateway6}
|
||||||
|
dns-nameservers ${resolver}
|
||||||
|
|
||||||
|
EOF
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
mcopy -i ${seed} ${user_data} ::user-data 2>/dev/null
|
||||||
|
mcopy -i ${seed} ${meta_data} ::meta-data 2>/dev/null
|
||||||
|
mcopy -i ${seed} /etc/cosmos/apt/bootstrap-cosmos.sh /etc/cosmos/apt/cosmos_1.5-1_all.deb ::
|
||||||
|
mv ${seed} /var/lib/libvirt/images/
|
||||||
|
virsh pool-refresh default
|
||||||
|
|
||||||
|
virsh vol-clone --pool default ${src_image} ${disk}
|
||||||
|
|
||||||
|
virt-install -r ${mem} -n ${hostname} --vcpus=${cpus} --autostart --memballoon virtio --network bridge=${bridge} --boot hd --disk vol=default/${disk},format=qcow2,bus=virtio --disk vol=default/${seed},bus=virtio
|
||||||
|
|
||||||
|
rm -f ${user_data}
|
||||||
|
rm -f ${meta_data}
|
39
global/post-tasks.d/010cosmos-modules
Executable file
39
global/post-tasks.d/010cosmos-modules
Executable file
|
@ -0,0 +1,39 @@
|
||||||
|
#!/bin/sh
|
||||||
|
#
|
||||||
|
# Dynamically configure /etc/puppet/cosmos-modules.conf
|
||||||
|
#
|
||||||
|
# The content of that file is chosen according to:
|
||||||
|
#
|
||||||
|
# 1. If the file is actually present in the model, use that.
|
||||||
|
# 2. If there is a script called /etc/puppet/setup_cosmos_modules, run that.
|
||||||
|
# 3. If the file still doesn't exist, create it with the defaults in this script.
|
||||||
|
#
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
if [ -f "${COSMOS_MODEL}/overlay/etc/puppet/cosmos-modules.conf" ]; then
|
||||||
|
test "$COSMOS_VERBOSE" = "y" && \
|
||||||
|
echo "$0: /etc/puppet/cosmos-modules.conf is present in the model, exiting"
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -x /etc/puppet/setup_cosmos_modules ]; then
|
||||||
|
test "$COSMOS_VERBOSE" = "y" && \
|
||||||
|
echo "$0: Updating /etc/puppet/cosmos-modules.conf with /etc/puppet/setup_cosmos_modules"
|
||||||
|
/etc/puppet/setup_cosmos_modules
|
||||||
|
|
||||||
|
test -f /etc/puppet/cosmos-modules.conf && exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
test "$COSMOS_VERBOSE" = "y" && \
|
||||||
|
echo "$0: Creating/updating /etc/puppet/cosmos-modules.conf with defaults from this script"
|
||||||
|
|
||||||
|
cat > /etc/puppet/cosmos-modules.conf << EOF
|
||||||
|
# File created/updated by $0
|
||||||
|
#
|
||||||
|
concat puppetlabs/concat yes
|
||||||
|
stdlib puppetlabs/stdlib yes
|
||||||
|
#ufw attachmentgenie/ufw yes
|
||||||
|
#apt puppetlabs/apt yes
|
||||||
|
#cosmos https://github.com/SUNET/puppet-cosmos.git yes
|
||||||
|
EOF
|
23
global/post-tasks.d/010fix-ssh-perms
Executable file
23
global/post-tasks.d/010fix-ssh-perms
Executable file
|
@ -0,0 +1,23 @@
|
||||||
|
#!/bin/sh
|
||||||
|
#
|
||||||
|
# Re-used example from SJD
|
||||||
|
#
|
||||||
|
|
||||||
|
if test -d /root/.ssh && \
|
||||||
|
test `stat -t /root/.ssh | cut -d\ -f5` != 0; then
|
||||||
|
chown root.root /root/.ssh
|
||||||
|
fi
|
||||||
|
|
||||||
|
if test -d /root/.ssh && \
|
||||||
|
test `stat -c %a /root/.ssh` != 700; then
|
||||||
|
chmod 700 /root/.ssh
|
||||||
|
fi
|
||||||
|
|
||||||
|
if test -f /root/.ssh/authorized_keys; then
|
||||||
|
if test `stat -t /root/.ssh/authorized_keys | cut -d\ -f5` != 0; then
|
||||||
|
chown root.root /root/.ssh/authorized_keys
|
||||||
|
fi
|
||||||
|
if test `stat --printf=%a /root/.ssh/authorized_keys` != 440; then
|
||||||
|
chmod 440 /root/.ssh/authorized_keys
|
||||||
|
fi
|
||||||
|
fi
|
24
global/post-tasks.d/014set-cosmos-permissions
Executable file
24
global/post-tasks.d/014set-cosmos-permissions
Executable file
|
@ -0,0 +1,24 @@
|
||||||
|
#!/bin/sh
|
||||||
|
#
|
||||||
|
# Set Cosmos directory permissions so that
|
||||||
|
# the files cannot be read by anyone but root,
|
||||||
|
# since it's possible that the directory
|
||||||
|
# can contain files that after applying the
|
||||||
|
# overlay to / only should be read or writable
|
||||||
|
# by root.
|
||||||
|
|
||||||
|
set -e
|
||||||
|
self=$(basename "$0")
|
||||||
|
|
||||||
|
if ! test -d "$COSMOS_BASE"; then
|
||||||
|
test -z "$COSMOS_VERBOSE" || echo "$self: COSMOS_BASE was not found. Aborting change of permissions."
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
args=""
|
||||||
|
if [ "x$COSMOS_VERBOSE" = "xy" ]; then
|
||||||
|
args="-v"
|
||||||
|
fi
|
||||||
|
|
||||||
|
chown ${args} root:root "$COSMOS_BASE"
|
||||||
|
chmod ${args} 750 "$COSMOS_BASE"
|
78
global/post-tasks.d/015cosmos-trust
Executable file
78
global/post-tasks.d/015cosmos-trust
Executable file
|
@ -0,0 +1,78 @@
|
||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
gnupg_show_options='--import --import-options show-only,import-minimal'
|
||||||
|
if [[ $(lsb_release -sr | awk -F . '{ print $1 }') -le 16 ]]; then
|
||||||
|
# gpg on Ubuntu 16 and less is gnupg < 2, which doesn't have --import-options show-only
|
||||||
|
# but on the other hand defaults to this mode (https://dev.gnupg.org/T2943)
|
||||||
|
gnupg_show_options='--dry-run'
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$COSMOS_KEYS" ]; then
|
||||||
|
COSMOS_KEYS=/etc/cosmos/keys
|
||||||
|
fi
|
||||||
|
|
||||||
|
bold='\e[1m'
|
||||||
|
reset='\e[0m'
|
||||||
|
red='\033[01;31m'
|
||||||
|
|
||||||
|
# Associative array of fingerprints in the GPG keyring
|
||||||
|
declare -A KEYRING
|
||||||
|
|
||||||
|
# Associative array with expired keys in the GPG keyring
|
||||||
|
declare -A EXPIRED
|
||||||
|
|
||||||
|
# associative array with non-expired keys found in $COSMOS_KEYS directory
|
||||||
|
declare -A SEEN
|
||||||
|
|
||||||
|
# Load information about all keys present in the GPG keyring
|
||||||
|
for line in $(cosmos gpg --with-colons --fingerprint | awk -F: '$1 == "pub" { print $2 ":" $5 }'); do
|
||||||
|
IFS=':' read -r expired fp <<< $line
|
||||||
|
KEYRING[$fp]='1'
|
||||||
|
if [[ $expired == 'e' ]]; then
|
||||||
|
EXPIRED[$fp]=1
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
# Install new keys discovered in the $COSMOS_KEYS directory
|
||||||
|
for k in $COSMOS_KEYS/*.pub; do
|
||||||
|
if [[ ! -s $k ]]; then
|
||||||
|
# Silently ignore empty files
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
pubkeys_in_file=$(cosmos gpg ${gnupg_show_options} \
|
||||||
|
--with-colons --with-fingerprint --quiet < $k \
|
||||||
|
| grep "^pub:")
|
||||||
|
non_expired_pubkeys_in_file=$(echo ${pubkeys_in_file} | awk -F: '$2 != "e" { print $0 }')
|
||||||
|
if [[ ! $non_expired_pubkeys_in_file ]]; then
|
||||||
|
echo -e "$0: ${red}Ignoring file with expired pubkey: ${k}${reset}"
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
|
||||||
|
fp=$(echo ${pubkeys_in_file} | awk -F: '{print $5}')
|
||||||
|
|
||||||
|
# Remember that we saw fingerprint $fp in file $k
|
||||||
|
SEEN[$fp]=$k
|
||||||
|
|
||||||
|
if [[ ! ${KEYRING[$fp]} ]]; then
|
||||||
|
echo -e "$0: ${bold}Importing new key ${fp}${reset} from ${k}"
|
||||||
|
cosmos gpg --no-tty --import < $k
|
||||||
|
elif [[ ${EXPIRED[$fp]} ]]; then
|
||||||
|
echo -e "$0: ${bold}Re-importing expired key ${fp}${reset} from ${k}"
|
||||||
|
cosmos gpg --no-tty --import < $k
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
if [[ ! ${#SEEN[@]} ]]; then
|
||||||
|
echo "$0: ${red}NO trusted keys found in directory ${COSMOS_KEYS} - aborting${reset}"
|
||||||
|
echo "(this is probably a syntax problem with the gpg commands in this script)"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Delete keys no longer present (or expired) in $COSMOS_KEYS directory
|
||||||
|
for fp in ${!KEYRING[@]}; do
|
||||||
|
if [[ ! ${SEEN[$fp]} ]]; then
|
||||||
|
echo -e "$0: ${bold}Deleting key${reset} ${fp} not present (or expired) in ${COSMOS_KEYS}"
|
||||||
|
cosmos gpg --fingerprint $fp
|
||||||
|
cosmos gpg --yes --batch --delete-key $fp || true
|
||||||
|
fi
|
||||||
|
done
|
112
global/post-tasks.d/018packages
Executable file
112
global/post-tasks.d/018packages
Executable file
|
@ -0,0 +1,112 @@
|
||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
CONFIG=${CONFIG:=/etc/puppet/cosmos-modules.conf}
|
||||||
|
LOCALCONFIG=${LOCALCONFIG:=/etc/puppet/cosmos-modules_local.conf}
|
||||||
|
CACHE_DIR=/var/cache/puppet-modules
|
||||||
|
MODULES_DIR=${MODULES_DIR:=/etc/puppet/cosmos-modules}
|
||||||
|
export GNUPGHOME=/etc/cosmos/gnupg
|
||||||
|
|
||||||
|
# /etc/puppet/cosmos_enc.py needs the YAML module
|
||||||
|
python3 -c "import yaml" 2>/dev/null || apt-get -y install python3-yaml
|
||||||
|
|
||||||
|
bold='\e[1m'
|
||||||
|
reset='\e[0m'
|
||||||
|
red='\033[01;31m'
|
||||||
|
|
||||||
|
stage_module() {
|
||||||
|
rm -rf $CACHE_DIR/staging/$1
|
||||||
|
git archive --format=tar --prefix=$1/ $2 | (cd $CACHE_DIR/staging/ && tar xf -)
|
||||||
|
}
|
||||||
|
|
||||||
|
if [ -f $CONFIG -o $LOCALCONFIG ]; then
|
||||||
|
if [ ! -d $MODULES_DIR ]; then
|
||||||
|
mkdir -p $MODULES_DIR
|
||||||
|
fi
|
||||||
|
if [ ! -d $CACHE_DIR ]; then
|
||||||
|
mkdir -p $CACHE_DIR/{scm,staging}
|
||||||
|
fi
|
||||||
|
|
||||||
|
test -f $CONFIG || CONFIG=''
|
||||||
|
test -f $LOCALCONFIG || LOCALCONFIG=''
|
||||||
|
|
||||||
|
# First pass to clone any new modules, and update those marked for updating.
|
||||||
|
grep -h -E -v "^#" $CONFIG $LOCALCONFIG | sort | (
|
||||||
|
while read module src update pattern; do
|
||||||
|
# We only support git://, file:/// and https:// urls at the moment
|
||||||
|
if [ "${src:0:6}" = "git://" -o "${src:0:8}" = "file:///" -o "${src:0:8}" = "https://" ]; then
|
||||||
|
if [ ! -d $CACHE_DIR/scm/$module ]; then
|
||||||
|
git clone -q $src $CACHE_DIR/scm/$module
|
||||||
|
elif [ -d $CACHE_DIR/scm/$module/.git ]; then
|
||||||
|
if [ "$update" = "yes" ]; then
|
||||||
|
cd $CACHE_DIR/scm/$module
|
||||||
|
if [ "$src" != "$(git config remote.origin.url)" ]; then
|
||||||
|
git config remote.origin.url $src
|
||||||
|
fi
|
||||||
|
# Support master branch being renamed to main
|
||||||
|
git branch --all | grep -q '^[[:space:]]*remotes/origin/main$' && git checkout main
|
||||||
|
# Update repo and clean out any local inconsistencies
|
||||||
|
git pull -q || (git fetch && git reset --hard)
|
||||||
|
else
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo -e "${red}ERROR: Ignoring non-git repository${reset}"
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
elif [[ "$src" =~ .*:// ]]; then
|
||||||
|
echo -e "${red}ERROR: Don't know how to install '${src}'${reset}"
|
||||||
|
continue
|
||||||
|
else
|
||||||
|
echo -e "${bold}WARNING - attempting UNSAFE installation/upgrade of puppet-module ${module} from ${src}${reset}"
|
||||||
|
if [ ! -d /etc/puppet/modules/$module ]; then
|
||||||
|
puppet module install $src
|
||||||
|
elif [ "$update" = "yes" ]; then
|
||||||
|
puppet module upgrade $src
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
)
|
||||||
|
|
||||||
|
# Second pass to verify the signatures on all modules and stage those that
|
||||||
|
# have good signatures.
|
||||||
|
grep -h -E -v "^#" $CONFIG $LOCALCONFIG | sort | (
|
||||||
|
while read module src update pattern; do
|
||||||
|
# We only support git://, file:/// and https:// urls at the moment
|
||||||
|
if [ "${src:0:6}" = "git://" -o "${src:0:8}" = "file:///" -o "${src:0:8}" = "https://" ]; then
|
||||||
|
# Verify git tag
|
||||||
|
cd $CACHE_DIR/scm/$module
|
||||||
|
TAG=$(git tag -l "${pattern:-*}" | sort | tail -1)
|
||||||
|
if [ "$COSMOS_VERBOSE" = "y" ]; then
|
||||||
|
echo -e "Checking signature on puppet-module:tag ${bold}${module}:${TAG}${reset}"
|
||||||
|
fi
|
||||||
|
if [ -z "$TAG" ]; then
|
||||||
|
echo -e "${red}ERROR: No git tag found for pattern '${pattern:-*}' on puppet-module ${module}${reset}"
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
git tag -v $TAG &> /dev/null
|
||||||
|
if [ $? == 0 ]; then
|
||||||
|
#if [ "$COSMOS_VERBOSE" = "y" ]; then
|
||||||
|
# # short output on good signature
|
||||||
|
# git tag -v $TAG 2>&1 | grep "gpg: Good signature"
|
||||||
|
#fi
|
||||||
|
# Put archive in staging since tag verified OK
|
||||||
|
stage_module $module $TAG
|
||||||
|
else
|
||||||
|
echo -e "${red}FAILED signature check on puppet-module ${module}${reset}"
|
||||||
|
git tag -v $TAG
|
||||||
|
echo ''
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
)
|
||||||
|
|
||||||
|
# Cleanup removed puppet modules from CACHE_DIR
|
||||||
|
for MODULE in $(ls -1 $CACHE_DIR/staging/); do
|
||||||
|
if ! grep -h -E -q "^$MODULE\s+" $CONFIG $LOCALCONFIG; then
|
||||||
|
rm -rf $CACHE_DIR/{scm,staging}/$MODULE
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
# Installing verified puppet modules
|
||||||
|
rsync --archive --delete $CACHE_DIR/staging/ $MODULES_DIR/
|
||||||
|
fi
|
5
global/post-tasks.d/020reports
Executable file
5
global/post-tasks.d/020reports
Executable file
|
@ -0,0 +1,5 @@
|
||||||
|
#!/bin/sh
|
||||||
|
|
||||||
|
#rm -f /var/run/facts.json
|
||||||
|
#facter -p -y > /var/run/facts.yaml
|
||||||
|
rm -f /var/run/facts.yaml
|
16
global/post-tasks.d/030puppet
Executable file
16
global/post-tasks.d/030puppet
Executable file
|
@ -0,0 +1,16 @@
|
||||||
|
#!/bin/sh
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
if [ "x$COSMOS_VERBOSE" = "xy" ]; then
|
||||||
|
args="--verbose --show_diff"
|
||||||
|
else
|
||||||
|
args="--logdest=syslog"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -f /usr/bin/puppet ] && [ -d /etc/puppet/manifests ]; then
|
||||||
|
find /etc/puppet/manifests -name \*.pp | while read -r m; do
|
||||||
|
test "x$COSMOS_VERBOSE" = "xy" && echo "$0: Applying Puppet manifest $m"
|
||||||
|
puppet apply $args $m
|
||||||
|
done
|
||||||
|
fi
|
8
global/post-tasks.d/099autoremove
Executable file
8
global/post-tasks.d/099autoremove
Executable file
|
@ -0,0 +1,8 @@
|
||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
export DEBIAN_FRONTEND='noninteractive'
|
||||||
|
|
||||||
|
if (( $RANDOM % 20 == 0)); then
|
||||||
|
apt-get -qq update
|
||||||
|
apt-get -qq -y autoremove
|
||||||
|
fi
|
26
global/post-tasks.d/999reboot
Executable file
26
global/post-tasks.d/999reboot
Executable file
|
@ -0,0 +1,26 @@
|
||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
if [[ -f /var/run/reboot-required && -f /etc/cosmos-automatic-reboot ]]; then
|
||||||
|
|
||||||
|
if [[ $HOSTNAME =~ -tug- ]]; then
|
||||||
|
# Reboot hosts in site TUG with 15 seconds delay (enough to manually
|
||||||
|
# cancel the reboot if logged in and seeing the 'emerg' message broadcasted to console)
|
||||||
|
sleep=15
|
||||||
|
elif [[ $HOSTNAME =~ -fre- ]]; then
|
||||||
|
# reboot hosts in site FRE with 15+180 to 15+180+180 seconds delay
|
||||||
|
sleep=$(( 180 + ($RANDOM % 180)))
|
||||||
|
elif [[ $HOSTNAME =~ -lla- ]]; then
|
||||||
|
# reboot hosts in site LLA with 15+180+180 to 15+180+180+180 seconds delay
|
||||||
|
sleep=$(( 375 + ($RANDOM % 180)))
|
||||||
|
else
|
||||||
|
# reboot hosts in any other site with 15 to 315 seconds delay
|
||||||
|
sleep=$(( 15 + ($RANDOM % 300)))
|
||||||
|
fi
|
||||||
|
|
||||||
|
logger -p local0.emerg -i -t cosmos-automatic-reboot "Rebooting automatically in $sleep seconds (if /var/run/reboot-required still exists)"
|
||||||
|
sleep $sleep
|
||||||
|
if [ -f /var/run/reboot-required ]; then
|
||||||
|
logger -p local0.crit -i -t cosmos-automatic-reboot "Rebooting automatically"
|
||||||
|
reboot
|
||||||
|
fi
|
||||||
|
fi
|
5
global/pre-tasks.d/010fix-package-manager
Executable file
5
global/pre-tasks.d/010fix-package-manager
Executable file
|
@ -0,0 +1,5 @@
|
||||||
|
#!/bin/sh
|
||||||
|
|
||||||
|
# dpkg frequently breaks with automatic reboots.
|
||||||
|
# Make an attempt to get it back into working order.
|
||||||
|
dpkg --configure -a
|
24
global/pre-tasks.d/014set-cosmos-permissions
Executable file
24
global/pre-tasks.d/014set-cosmos-permissions
Executable file
|
@ -0,0 +1,24 @@
|
||||||
|
#!/bin/sh
|
||||||
|
#
|
||||||
|
# Set Cosmos directory permissions so that
|
||||||
|
# the files cannot be read by anyone but root,
|
||||||
|
# since it's possible that the directory
|
||||||
|
# can contain files that after applying the
|
||||||
|
# overlay to / only should be read or writable
|
||||||
|
# by root.
|
||||||
|
|
||||||
|
set -e
|
||||||
|
self=$(basename "$0")
|
||||||
|
|
||||||
|
if ! test -d "$COSMOS_BASE"; then
|
||||||
|
test -z "$COSMOS_VERBOSE" || echo "$self: COSMOS_BASE was not found. Aborting change of permissions."
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
args=""
|
||||||
|
if [ "x$COSMOS_VERBOSE" = "xy" ]; then
|
||||||
|
args="-v"
|
||||||
|
fi
|
||||||
|
|
||||||
|
chown ${args} root:root "$COSMOS_BASE"
|
||||||
|
chmod ${args} 750 "$COSMOS_BASE"
|
30
global/pre-tasks.d/015set-overlay-permissions
Executable file
30
global/pre-tasks.d/015set-overlay-permissions
Executable file
|
@ -0,0 +1,30 @@
|
||||||
|
#!/bin/sh
|
||||||
|
#
|
||||||
|
# Set overlay file permissions in model directory before apply.d/60overlay
|
||||||
|
# rsyncs it to /
|
||||||
|
#
|
||||||
|
|
||||||
|
set -e
|
||||||
|
self=$(basename "$0")
|
||||||
|
|
||||||
|
MODEL_OVERLAY="$COSMOS_MODEL/overlay"
|
||||||
|
|
||||||
|
if ! test -d "$MODEL_OVERLAY"; then
|
||||||
|
test -z "$COSMOS_VERBOSE" || echo "$self: overlay is a no-op"
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
args=""
|
||||||
|
if [ "x$COSMOS_VERBOSE" = "xy" ]; then
|
||||||
|
args="-v"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -d "$MODEL_OVERLAY/root" ]; then
|
||||||
|
chown ${args} root:root "$MODEL_OVERLAY"/root
|
||||||
|
chmod ${args} 0700 "$MODEL_OVERLAY"/root
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -d "$MODEL_OVERLAY/root/.ssh" ]; then
|
||||||
|
chown ${args} -R root:root "$MODEL_OVERLAY"/root/.ssh
|
||||||
|
chmod ${args} 0700 "$MODEL_OVERLAY"/root/.ssh
|
||||||
|
fi
|
16
global/pre-tasks.d/020common-tools
Executable file
16
global/pre-tasks.d/020common-tools
Executable file
|
@ -0,0 +1,16 @@
|
||||||
|
#!/bin/sh
|
||||||
|
#
|
||||||
|
# Re-used example from SJD
|
||||||
|
#
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
stamp="$COSMOS_BASE/stamps/common-tools-v01.stamp"
|
||||||
|
|
||||||
|
if ! test -f $stamp; then
|
||||||
|
apt-get -y install vim traceroute tcpdump molly-guard less rsync git-core unattended-upgrades ntp
|
||||||
|
update-alternatives --set editor /usr/bin/vim.basic
|
||||||
|
|
||||||
|
mkdir -p `dirname $stamp`
|
||||||
|
touch $stamp
|
||||||
|
fi
|
31
global/pre-tasks.d/030puppet
Executable file
31
global/pre-tasks.d/030puppet
Executable file
|
@ -0,0 +1,31 @@
|
||||||
|
#!/bin/sh
|
||||||
|
#
|
||||||
|
# Re-used example from SJD
|
||||||
|
#
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
stamp="$COSMOS_BASE/stamps/puppet-tools-v01.stamp"
|
||||||
|
|
||||||
|
if ! test -f "${stamp}" -a -f /usr/bin/puppet; then
|
||||||
|
apt-get update
|
||||||
|
apt-get -y install puppet
|
||||||
|
. /etc/os-release
|
||||||
|
|
||||||
|
# Note: in posix shell, string comparison is done with a single =
|
||||||
|
if [ "${ID}" = "debian" ] && [ "${VERSION_ID}" -ge 12 ] || ([ "${ID}" = "ubuntu" ] && $(dpkg --compare-versions ${VERSION_ID} ge 24.04)) ; then
|
||||||
|
apt-get -y install \
|
||||||
|
cron \
|
||||||
|
puppet-module-camptocamp-augeas \
|
||||||
|
puppet-module-puppetlabs-apt \
|
||||||
|
puppet-module-puppetlabs-concat \
|
||||||
|
puppet-module-puppetlabs-cron-core \
|
||||||
|
puppet-module-puppetlabs-stdlib \
|
||||||
|
puppet-module-puppetlabs-vcsrepo
|
||||||
|
|
||||||
|
fi
|
||||||
|
|
||||||
|
mkdir -p "$(dirname "${stamp}")"
|
||||||
|
touch "${stamp}"
|
||||||
|
fi
|
||||||
|
|
39
global/pre-tasks.d/040hiera-eyaml
Executable file
39
global/pre-tasks.d/040hiera-eyaml
Executable file
|
@ -0,0 +1,39 @@
|
||||||
|
#!/bin/sh
|
||||||
|
#
|
||||||
|
# Set up eyaml for Hiera
|
||||||
|
#
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
EYAMLDIR=/etc/hiera/eyaml
|
||||||
|
|
||||||
|
vendor=$(lsb_release -is)
|
||||||
|
version=$(lsb_release -rs)
|
||||||
|
# eyaml is only used on Ubuntu 20.04 and newer, and Debian 11 and newer (earlier OSes use hiera-gpg instead)
|
||||||
|
test "${vendor}" = "Ubuntu" && dpkg --compare-versions "${version}" "lt" "18.04" && exit 0
|
||||||
|
test "${vendor}" = "Debian" && dpkg --compare-versions "${version}" "lt" "10" && exit 0
|
||||||
|
|
||||||
|
stamp="$COSMOS_BASE/stamps/hiera-eyaml-v01.stamp"
|
||||||
|
|
||||||
|
test -f "$stamp" && exit 0
|
||||||
|
|
||||||
|
if [ ! -f /usr/bin/eyaml ] || [ ! -d /usr/share/doc/yaml-mode ]; then
|
||||||
|
apt-get update
|
||||||
|
# If we don't install emacs before yaml-mode the default emacs package
|
||||||
|
# will be emacs-gtk which brings x11 with friends which we don't need.
|
||||||
|
apt-get -y install emacs-nox
|
||||||
|
apt-get -y install hiera-eyaml yaml-mode
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ ! -f ${EYAMLDIR}/public_certkey.pkcs7.pem ] || [ ! -f ${EYAMLDIR}/private_key.pkcs7.pem ]; then
|
||||||
|
# hiera-eyaml wants a certificate and public key, not just a public key oddly enough
|
||||||
|
echo "$0: Generating eyaml key in ${EYAMLDIR} - this might take a while..."
|
||||||
|
mkdir -p /etc/hiera/eyaml
|
||||||
|
openssl req -x509 -newkey rsa:4096 -keyout ${EYAMLDIR}/private_key.pkcs7.pem \
|
||||||
|
-out ${EYAMLDIR}/public_certkey.pkcs7.pem -days 3653 -nodes -sha256 \
|
||||||
|
-subj "/C=SE/O=SUNET/OU=EYAML/CN=$(hostname)"
|
||||||
|
rm -f ${EYAMLDIR}/public_key.pkcs7.pem # cleanup
|
||||||
|
fi
|
||||||
|
|
||||||
|
mkdir -p "$(dirname "${stamp}")"
|
||||||
|
touch "$stamp"
|
64
global/pre-tasks.d/040hiera-gpg
Executable file
64
global/pre-tasks.d/040hiera-gpg
Executable file
|
@ -0,0 +1,64 @@
|
||||||
|
#!/bin/sh
|
||||||
|
#
|
||||||
|
# Set up a keyring for Hiera GPG
|
||||||
|
# https://github.com/crayfishx/hiera-gpg
|
||||||
|
#
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
GNUPGHOME=/etc/hiera/gpg
|
||||||
|
export GNUPGHOME
|
||||||
|
|
||||||
|
vendor=$(lsb_release -is)
|
||||||
|
version=$(lsb_release -rs)
|
||||||
|
# If the OS is Ubuntu 18.04 or newer, or Debian 10 or newer, we don't need to do anything (those use eyaml instead)
|
||||||
|
test "${vendor}" = "Ubuntu" && dpkg --compare-versions "${version}" "ge" "18.04" && exit 0
|
||||||
|
test "${vendor}" = "Debian" && dpkg --compare-versions "${version}" "ge" "10" && exit 0
|
||||||
|
|
||||||
|
stamp="$COSMOS_BASE/stamps/hiera-gpg-v01.stamp"
|
||||||
|
|
||||||
|
test -f "$stamp" && exit 0
|
||||||
|
|
||||||
|
if [ ! -f /usr/lib/ruby/vendor_ruby/gpgme.rb ]; then
|
||||||
|
apt-get update
|
||||||
|
apt-get -y install ruby-gpgme
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ ! -s $GNUPGHOME/secring.gpg ]; then
|
||||||
|
|
||||||
|
if [ "x$1" != "x--force" ]; then
|
||||||
|
echo ""
|
||||||
|
echo "Automatic Hiera-GPG key generation DISABLED (to not block on missing entropy)"
|
||||||
|
echo ""
|
||||||
|
echo " Run \`$0 --force' manually"
|
||||||
|
echo ""
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ ! -f /usr/bin/gpg2 ]; then
|
||||||
|
apt-get update
|
||||||
|
apt-get -y install gnupg2
|
||||||
|
fi
|
||||||
|
|
||||||
|
mkdir -p $GNUPGHOME
|
||||||
|
chmod 700 $GNUPGHOME
|
||||||
|
|
||||||
|
TMPFILE=$(mktemp /tmp/hiera-gpg.XXXXXX)
|
||||||
|
cat > "$TMPFILE" <<EOF
|
||||||
|
%echo Generating a default key
|
||||||
|
Key-Type: default
|
||||||
|
Subkey-Type: default
|
||||||
|
Name-Real: Cosmos Puppet
|
||||||
|
Name-Comment: Hiera GPG key
|
||||||
|
Name-Email: root@$(hostname --fqdn)
|
||||||
|
Expire-Date: 0
|
||||||
|
# Do a commit here, so that we can later print "done" :-)
|
||||||
|
%commit
|
||||||
|
%echo done
|
||||||
|
EOF
|
||||||
|
gpg2 --batch --gen-key "$TMPFILE"
|
||||||
|
rm -f "$TMPFILE"
|
||||||
|
fi
|
||||||
|
|
||||||
|
mkdir -p "$(dirname "${stamp}")"
|
||||||
|
touch "$stamp"
|
43
host-puppet-conf-test
Executable file
43
host-puppet-conf-test
Executable file
|
@ -0,0 +1,43 @@
|
||||||
|
#!/bin/bash
#
# Sync the puppet configuration from the current cosmos checkout to a
# host and run "puppet apply" there, for testing uncommitted changes.
#
# Usage: host-puppet-conf-test <fqdn> [puppet-args]
#
set +x

HOSTNAME=$1
PUPPET_ARGS=$2

if [ -z "$HOSTNAME" ]; then
    echo "Usage: $0 fqdn"
    exit 1
fi

# The per-host overlay directory doubles as a sanity check that we are
# executing from the top-level cosmos directory.
if [ ! -d "$HOSTNAME" ]; then
    echo "$0: No host-directory for '$HOSTNAME' found - execute in top-level cosmos dir"
    exit 1
fi

PUPPET_ARGS=${PUPPET_ARGS-"--verbose"}

# Check if cosmos or puppet is already running on host
echo "Checking if puppet or cosmos is already running..."
ssh root@"$HOSTNAME" ps aux | grep -Ev "grep|edit-secrets|gpg-agent" | grep -Eq "cosmos|puppet"

# grep -q exits 1 when nothing matched, i.e. no cosmos/puppet running.
if [ $? -eq 1 ]
then
    echo "Copying files to host..."
    rsync -av --exclude '*~' global/overlay/etc/puppet/cosmos-rules.yaml root@"$HOSTNAME":/etc/puppet/cosmos-rules.yaml
    rsync -av --exclude '*~' global/overlay/etc/puppet/manifests/cosmos-site.pp root@"$HOSTNAME":/etc/puppet/manifests/cosmos-site.pp
    rsync -av --exclude '*~' global/overlay/etc/hiera/data/common.yaml root@"$HOSTNAME":/etc/hiera/data/common.yaml

    # Test if the user has symlinked puppet-sunet correctly
    # by first checking if the link exits and then whether
    # or not the directory contains any files.
    if [ -L global/overlay/etc/puppet/cosmos-modules/sunet ] && \
           [ -n "$(ls -A global/overlay/etc/puppet/cosmos-modules/sunet/*)" ]
    then
        # BUGFIX: $HOSTNAME was unquoted here (the only unquoted use in
        # the script); quoted for consistency and SC2086 safety.
        rsync -av --delete --exclude '*~' global/overlay/etc/puppet/cosmos-modules/sunet/* root@"$HOSTNAME":/etc/puppet/cosmos-modules/sunet/.
    fi

    echo "Running puppet apply..."
    # $PUPPET_ARGS is intentionally unquoted so that multiple arguments
    # (e.g. "--verbose --noop") word-split into separate ssh arguments.
    ssh root@"$HOSTNAME" /usr/bin/puppet apply $PUPPET_ARGS /etc/puppet/manifests/cosmos-site.pp
else
    echo "Cosmos or puppet already running. Exiting."
    exit 1
fi
|
17
iaas-enable-root.sh
Executable file
17
iaas-enable-root.sh
Executable file
|
@ -0,0 +1,17 @@
|
||||||
|
#!/usr/bin/env bash
#
# This script is called from prepare-iaas-$os after logging in via ssh as
# the default user existing in cloud images
#
set -ex

# Lower-cased distributor id ("ubuntu" or "debian"), which is also the
# name of the default cloud-image user whose keys we copy below.
os=$(lsb_release -si | tr '[:upper:]' '[:lower:]')
case "$os" in
    ubuntu|debian)
        ;;
    *)
        echo "unsupported os: '$os'"
        exit 1
        ;;
esac

# Give root the default user's authorized ssh keys, owned by root and
# with the permissions sshd insists on.
sudo cp -r /home/"$os"/.ssh /root/
sudo chown -R root:root /root/.ssh
sudo chmod 700 /root/.ssh
sudo chmod 600 /root/.ssh/authorized_keys
|
103
iaas-setup.sh
Executable file
103
iaas-setup.sh
Executable file
|
@ -0,0 +1,103 @@
|
||||||
|
#!/usr/bin/env bash
#
# This script is called from prepare-iaas-$os after logging in over ssh as
# the root user
#
set -x

os=$(lsb_release -si | tr '[:upper:]' '[:lower:]')
if [ "$os" != "ubuntu" ] && [ "$os" != "debian" ]; then
    echo "unsupported os: '$os'"
    exit 1
fi

# Get rid of ugly perl messages when running from macOS:
# ===
# apt-listchanges: Reading changelogs...
# perl: warning: Setting locale failed.
# perl: warning: Please check that your locale settings:
#         LANGUAGE = (unset),
#         LC_ALL = (unset),
#         LC_CTYPE = "UTF-8",
#         LC_TERMINAL = "iTerm2",
#         LANG = "C.UTF-8"
#     are supported and installed on your system.
# perl: warning: Falling back to a fallback locale ("C.UTF-8").
# ===
export LC_CTYPE=C.UTF-8

# Remove default user if present
if id "$os"; then
    # Make sure there is no systemd process running as the initial cloud image user
    # after the "enable root" step in prepare-iaas-$os. If there are any
    # proceses still running as the specified user the "userdel" command
    # below will fail.
    #
    # Depending on how long we have waited between running the "enable root"
    # script and this one it is possible the process has timed out on its own,
    # so run this command before doing "set -e" in case there is no process
    # to match.
    pkill -u "$os" -xf "/lib/systemd/systemd --user"

    # Make sure the process has gone away before continuing
    sleep_seconds=1
    attempt=1
    max_attempts=10
    while pgrep -u "$os" -xf "/lib/systemd/systemd --user"; do
        if [ "$attempt" -gt "$max_attempts" ]; then
            echo "failed waiting for systemd process to exit, please investigate"
            exit 1
        fi
        echo "systemd process still running as '$os' user, this is attempt $attempt out of $max_attempts, sleeping for $sleep_seconds seconds..."
        sleep "$sleep_seconds"
        attempt=$((attempt + 1))
    done

    # While the man page for "userdel" recommends using "deluser" we can not
    # run "deluser" with "--remove-home" without installing more than the
    # already included `perl-base` package on debian, so stick with the low
    # level utility.
    if ! userdel --remove "$os"; then
        exit 1
    fi
fi

# From this point we expect all commands to succeed
set -e

# BUGFIX: use "rm -f" so that an empty /etc/sudoers.d does not abort the
# script: with no match the glob is passed literally to rm, which fails,
# and "set -e" would then kill the whole setup. "rm -f" exits 0 in that
# case.
rm -f /etc/sudoers.d/*

# Make sure en_US.UTF-8 is present in the system, expected by at least
# bootstrap-cosmos.sh
locale_gen_file=/etc/locale.gen
if grep -q '^# en_US.UTF-8 UTF-8$' "$locale_gen_file"; then
    sed -i 's/^# \(en_US.UTF-8 UTF-8\)$/\1/' "$locale_gen_file"
    locale-gen
fi

if [ "$(lsb_release -is)" == "Debian" ] && [ "$(lsb_release -cs)" == "bullseye" ]; then
    interfaces_file='/etc/network/interfaces.d/50-cloud-init'

    if [ -f "${interfaces_file}" ]; then
        interface_string='iface ens3 inet6 dhcp'
        # NOTE(review): the leading whitespace below must match the
        # indentation actually written to the interfaces file — confirm
        # against a deployed image before changing it.
        accept_ra_string=' accept_ra 2'

        if ! grep -qPz "${interface_string}\n${accept_ra_string}" "${interfaces_file}" ; then

            # By default net.ipv6.conf.ens3.accept_ra is set to 1 which
            # makes the kernel throw a way the IPv6 route when
            # net.ipv6.conf.all.forwarding is set to 1 by our service for
            # Docker.
            echo "Configuring interfaces to always accept Router Advertisements even with IP Forwarding enabled"
            sed -i -r "s/(${interface_string})/\1\n${accept_ra_string}/" "${interfaces_file}"
        else
            echo "WARN: Configuration already applied or no match for \"${interface_string}\" in ${interfaces_file}"
        fi
    else
        echo "WARN: ${interfaces_file} not found. File renamed in this image?"
    fi
fi

# Bring the image fully up to date, then reboot into the new kernel.
DEBIAN_FRONTEND="noninteractive" apt-get -y update
DEBIAN_FRONTEND="noninteractive" apt-get -o Dpkg::Options::="--force-confnew" --fix-broken --assume-yes dist-upgrade
reboot
|
29
prepare-iaas-debian
Executable file
29
prepare-iaas-debian
Executable file
|
@ -0,0 +1,29 @@
|
||||||
|
#!/bin/bash
#
# Prepare a freshly created Debian cloud-image host for cosmos:
# enable root login, remove the default user, upgrade and reboot.
#
ip="${1}"
ssh_proxy="${2}"

if [[ -z "${ip}" ]]; then
    echo "Please specify a cloud image host that the script should do the following on:"
    echo " #1 enable root-login"
    echo " #2 remove the default user"
    echo " #3 run apt-get update and dist-upgrade without interaction"
    echo " #4 reboot to start using the new kernel, updated packages etc."
    exit 1
fi

# BUGFIX: build the optional ProxyJump option as an array instead of an
# unquoted string (SC2086): an empty array expands to no words at all,
# and a proxy spec containing spaces stays intact.
proxyjump=()
if [[ -n "${ssh_proxy}" ]]; then
    proxyjump=(-o "ProxyJump=${ssh_proxy}")
fi

set -x

# Make sure we read the additional scripts from the same directory as
# this script is located at
script_dir=$(dirname "$0")

# The reason for running two separate logins is that it is tricky to
# remove the initial user while logged in as that same user:
# ===
# userdel: user debian is currently used by process 1082
# ===
ssh "debian@${ip}" "${proxyjump[@]}" "bash -s" < "$script_dir"/iaas-enable-root.sh
ssh "root@${ip}" "${proxyjump[@]}" "bash -s" < "$script_dir"/iaas-setup.sh
|
29
prepare-iaas-ubuntu
Executable file
29
prepare-iaas-ubuntu
Executable file
|
@ -0,0 +1,29 @@
|
||||||
|
#!/bin/bash
#
# Prepare a freshly created Ubuntu cloud-image host for cosmos:
# enable root login, remove the default user, upgrade and reboot.
#
ip="${1}"
ssh_proxy="${2}"

if [[ -z "${ip}" ]]; then
    echo "Please specify a cloud image host that the script should do the following on:"
    echo " #1 enable root-login"
    echo " #2 remove the default user"
    echo " #3 run apt-get update and dist-upgrade without interaction"
    echo " #4 reboot to start using the new kernel, updated packages etc."
    exit 1
fi

# BUGFIX: build the optional ProxyJump option as an array instead of an
# unquoted string (SC2086): an empty array expands to no words at all,
# and a proxy spec containing spaces stays intact.
proxyjump=()
if [[ -n "${ssh_proxy}" ]]; then
    proxyjump=(-o "ProxyJump=${ssh_proxy}")
fi

set -x

# Make sure we read the additional scripts from the same directory as
# this script is located at
script_dir=$(dirname "$0")

# The reason for running two separate logins is that it is tricky to
# remove the initial user while logged in as that same user:
# ===
# userdel: user ubuntu is currently used by process 44063
# ===
ssh "ubuntu@${ip}" "${proxyjump[@]}" "bash -s" < "$script_dir"/iaas-enable-root.sh
ssh "root@${ip}" "${proxyjump[@]}" "bash -s" < "$script_dir"/iaas-setup.sh
|
42
scripts/test-in-docker.sh
Executable file
42
scripts/test-in-docker.sh
Executable file
|
@ -0,0 +1,42 @@
|
||||||
|
#!/bin/bash
#
# This script runs in a Docker container (started with the 'make test_in_docker' command)
# and installs multiverse as it is in your source directory.
#
set -e

# Use apt-get rather than apt: apt's CLI is not guaranteed stable for
# scripting and prints a warning when run non-interactively.
apt-get -y update
apt-get -y install git rsync gpg

# Install the newest cosmos package found in the repo's apt/ directory.
cosmos_deb=$(find /multiverse/apt/ -maxdepth 1 -name 'cosmos_*.deb' | sort -V | tail -1)
dpkg -i "$cosmos_deb"

test -d /var/cache/cosmos/repo || mkdir -p /var/cache/cosmos/repo
test -d /var/cache/cosmos/model || mkdir -p /var/cache/cosmos/model

# Make every "cosmos update" copy the contents from /multiverse
# without requiring the changes in there to be checked into git.
# The delimiter is quoted so the hook body is written verbatim (it
# contains no expansions today; this guards against future edits).
cat >/etc/cosmos/update.d/50update-while-testing << 'EOF'
#!/bin/sh

rsync -a --delete --exclude .git /multiverse/ /var/cache/cosmos/repo
EOF
chmod 755 /etc/cosmos/update.d/50update-while-testing

# Point COSMOS_REPO_MODELS at the global overlay. The \$ makes sed write
# a literal $COSMOS_REPO so it is expanded when cosmos.conf is sourced.
sed -i -e 's!^#COSMOS_REPO_MODELS=.*!COSMOS_REPO_MODELS="\$COSMOS_REPO/global/"!' /etc/cosmos/cosmos.conf

export DEBIAN_FRONTEND=noninteractive

echo ""
echo "***"
echo ""
echo "$0: Configured docker container for testing of files in /multiverse."
echo ""
echo "You should now be able to do"
echo ""
echo " cosmos -v update"
echo " cosmos -v apply"
echo ""

# Hand over to an interactive login shell inside the configured container.
exec bash -l
|
Loading…
Reference in a new issue