Compare commits


1 commit

2b0d99eb23: keycloak(host): update to 26.1 & postgres to 15.12 (2025-02-22 22:56:40 +01:00)
All checks were successful: Ansible Lint (push) successful in 1m58s, Ansible Lint (pull_request) successful in 1m48s
52 changed files with 491 additions and 1381 deletions


@@ -45,8 +45,3 @@ These things need to be added in the Ansible repo:
* Individual config for the service. If Docker Compose is used, forward from here to the actual service in Compose.
* Adjust the cert file names
* `resources/chaosknoten/`*host*`/docker_compose/compose.yaml.j2`: config for Docker Compose (if used)
## License
This CCCHH ansible-ccchh repository is licensed under the [MIT License](./LICENSE).
[`custom_pipeline_oidc_group_and_role_mapping.py`](./roles/netbox/files/custom_pipeline_oidc_group_and_role_mapping.py) is licensed under the Creative Commons: CC BY-SA 4.0 license.
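The `compose.yaml.j2` item in the checklist above is the per-host Docker Compose template. A minimal illustrative sketch of what such a template could look like; the service name, image, variable and port below are placeholders and not taken from this repository:

```yaml
# resources/chaosknoten/<host>/docker_compose/compose.yaml.j2 (hypothetical example)
services:
  exampleservice:  # placeholder service name
    image: example/exampleservice:{{ exampleservice__version | default('latest') }}
    restart: unless-stopped
    ports:
      # Bind to localhost only; the host NGINX config forwards requests to this port.
      - "127.0.0.1:8080:8080"
```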


@@ -1,16 +0,0 @@
netbox__version: "v4.1.7"
netbox__db_password: "{{ lookup('community.general.passwordstore', 'noc/vm-secrets/chaosknoten/eh22-netbox/DATABASE_PASSWORD', create=false, missing='error') }}"
netbox__config: "{{ lookup('ansible.builtin.template', 'resources/chaosknoten/eh22-netbox/netbox/configuration.py.j2') }}"
netbox__custom_pipeline_oidc_group_and_role_mapping: true
nginx__version_spec: ""
nginx__configurations:
- name: netbox.eh22.easterhegg.eu
content: "{{ lookup('ansible.builtin.file', 'resources/chaosknoten/eh22-netbox/nginx/netbox.eh22.easterhegg.eu.conf') }}"
certbot__version_spec: ""
certbot__acme_account_email_address: j+letsencrypt-ccchh@jsts.xyz
certbot__certificate_domains:
- "netbox.eh22.easterhegg.eu"
certbot__new_cert_commands:
- "systemctl reload nginx.service"


@@ -1,14 +0,0 @@
nginx__version_spec: ""
nginx__configurations:
- name: eh22.easterhegg.eu
content: "{{ lookup('ansible.builtin.file', 'resources/chaosknoten/eh22-wiki/nginx/eh22.easterhegg.eu.conf') }}"
certbot__version_spec: ""
certbot__acme_account_email_address: j+letsencrypt-ccchh@jsts.xyz
certbot__certificate_domains:
- "eh22.easterhegg.eu"
certbot__new_cert_commands:
- "systemctl reload nginx.service"
dokuwiki__custom_theme_git_url: "https://git.hamburg.ccc.de/EH22/dokuwiki-template-sprintdoc-modified.git"
dokuwiki__custom_theme_version: "eh22"
dokuwiki__custom_theme_name: "sprintdoc"


@@ -1,16 +0,0 @@
netbox__version: "v4.1.7"
netbox__db_password: "{{ lookup('community.general.passwordstore', 'noc/vm-secrets/chaosknoten/netbox/DATABASE_PASSWORD', create=false, missing='error') }}"
netbox__config: "{{ lookup('ansible.builtin.template', 'resources/chaosknoten/netbox/netbox/configuration.py.j2') }}"
netbox__custom_pipeline_oidc_group_and_role_mapping: true
nginx__version_spec: ""
nginx__configurations:
- name: netbox.hamburg.ccc.de
content: "{{ lookup('ansible.builtin.file', 'resources/chaosknoten/netbox/nginx/netbox.hamburg.ccc.de.conf') }}"
certbot__version_spec: ""
certbot__acme_account_email_address: j+letsencrypt-ccchh@jsts.xyz
certbot__certificate_domains:
- "netbox.hamburg.ccc.de"
certbot__new_cert_commands:
- "systemctl reload nginx.service"


@@ -1,173 +1,135 @@
  all:
-   hosts:
-     ccchoir:
-       ansible_host: ccchoir-intern.hamburg.ccc.de
-       ansible_user: chaos
-       ansible_ssh_common_args: -J ssh://chaos@public-reverse-proxy.hamburg.ccc.de
-     chaosknoten:
-       ansible_host: chaosknoten.hamburg.ccc.de
-     cloud:
-       ansible_host: cloud-intern.hamburg.ccc.de
-       ansible_user: chaos
-       ansible_ssh_common_args: -J ssh://chaos@public-reverse-proxy.hamburg.ccc.de
-     eh22-netbox:
-       ansible_host: eh22-netbox-intern.hamburg.ccc.de
-       ansible_user: chaos
-       ansible_ssh_common_args: -J ssh://chaos@public-reverse-proxy.hamburg.ccc.de
-     eh22-wiki:
-       ansible_host: eh22-wiki-intern.hamburg.ccc.de
-       ansible_user: chaos
-       ansible_ssh_common_args: -J ssh://chaos@public-reverse-proxy.hamburg.ccc.de
-     grafana:
-       ansible_host: grafana-intern.hamburg.ccc.de
-       ansible_user: chaos
-       ansible_ssh_common_args: -J ssh://chaos@public-reverse-proxy.hamburg.ccc.de
-     tickets:
-       ansible_host: tickets-intern.hamburg.ccc.de
-       ansible_user: chaos
-       ansible_ssh_common_args: -J ssh://chaos@public-reverse-proxy.hamburg.ccc.de
-     keycloak:
-       ansible_host: keycloak-intern.hamburg.ccc.de
-       ansible_user: chaos
-       ansible_ssh_common_args: -J ssh://chaos@public-reverse-proxy.hamburg.ccc.de
-     lists:
-       ansible_host: lists.hamburg.ccc.de
-       ansible_user: chaos
-     mumble:
-       ansible_host: mumble.hamburg.ccc.de
-       ansible_user: chaos
-     netbox:
-       ansible_host: netbox-intern.hamburg.ccc.de
-       ansible_user: chaos
-       ansible_ssh_common_args: -J ssh://chaos@public-reverse-proxy.hamburg.ccc.de
-     onlyoffice:
-       ansible_host: onlyoffice-intern.hamburg.ccc.de
-       ansible_user: chaos
-       ansible_ssh_common_args: -J ssh://chaos@public-reverse-proxy.hamburg.ccc.de
-     pad:
-       ansible_host: pad-intern.hamburg.ccc.de
-       ansible_user: chaos
-       ansible_ssh_common_args: -J ssh://chaos@public-reverse-proxy.hamburg.ccc.de
-     pretalx:
-       ansible_host: pretalx-intern.hamburg.ccc.de
-       ansible_user: chaos
-       ansible_ssh_common_args: -J ssh://chaos@public-reverse-proxy.hamburg.ccc.de
-     public-reverse-proxy:
-       ansible_host: public-reverse-proxy.hamburg.ccc.de
-       ansible_user: chaos
-     wiki:
-       ansible_host: wiki-intern.hamburg.ccc.de
-       ansible_user: chaos
-       ansible_ssh_common_args: -J ssh://chaos@public-reverse-proxy.hamburg.ccc.de
-     zammad:
-       ansible_host: zammad-intern.hamburg.ccc.de
-       ansible_user: chaos
-       ansible_ssh_common_args: -J ssh://chaos@public-reverse-proxy.hamburg.ccc.de
- hypervisors:
-   hosts:
-     chaosknoten:
- base_config_hosts:
-   hosts:
-     ccchoir:
-     cloud:
-     eh22-netbox:
-     eh22-wiki:
-     grafana:
-     keycloak:
-     lists:
-     mumble:
-     netbox:
-     onlyoffice:
-     pad:
-     pretalx:
-     public-reverse-proxy:
-     tickets:
-     wiki:
-     zammad:
- docker_compose_hosts:
-   hosts:
-     ccchoir:
-     grafana:
-     tickets:
-     keycloak:
-     lists:
-     onlyoffice:
-     pad:
-     pretalx:
-     zammad:
- nextcloud_hosts:
-   hosts:
-     cloud:
- nginx_hosts:
-   hosts:
-     ccchoir:
-     eh22-netbox:
-     eh22-wiki:
-     grafana:
-     tickets:
-     keycloak:
-     lists:
-     mumble:
-     netbox:
-     onlyoffice:
-     pad:
-     pretalx:
-     public-reverse-proxy:
-     wiki:
-     zammad:
- public_reverse_proxy_hosts:
-   hosts:
-     public-reverse-proxy:
- certbot_hosts:
-   hosts:
-     ccchoir:
-     eh22-netbox:
-     eh22-wiki:
-     grafana:
-     tickets:
-     keycloak:
-     lists:
-     mumble:
-     netbox:
-     onlyoffice:
-     pad:
-     pretalx:
-     wiki:
-     zammad:
- prometheus_node_exporter_hosts:
-   hosts:
-     ccchoir:
-     eh22-netbox:
-     eh22-wiki:
-     tickets:
-     keycloak:
-     netbox:
-     onlyoffice:
-     pad:
-     pretalx:
-     wiki:
-     zammad:
- infrastructure_authorized_keys_hosts:
-   hosts:
-     ccchoir:
-     eh22-netbox:
-     eh22-wiki:
-     grafana:
-     tickets:
-     cloud:
-     keycloak:
-     netbox:
-     onlyoffice:
-     pad:
-     pretalx:
-     public-reverse-proxy:
-     wiki:
-     zammad:
- wiki_hosts:
-   hosts:
-     eh22-wiki:
-     wiki:
- netbox_hosts:
-   hosts:
-     eh22-netbox:
-     netbox:
+   children:
+     debian_12:
+       hosts:
+         ccchoir:
+           ansible_host: ccchoir-intern.hamburg.ccc.de
+           ansible_user: chaos
+           ansible_ssh_common_args: -J ssh://chaos@public-reverse-proxy.hamburg.ccc.de
+         cloud:
+           ansible_host: cloud-intern.hamburg.ccc.de
+           ansible_user: chaos
+           ansible_ssh_common_args: -J ssh://chaos@public-reverse-proxy.hamburg.ccc.de
+         grafana:
+           ansible_host: grafana-intern.hamburg.ccc.de
+           ansible_user: chaos
+           ansible_ssh_common_args: -J ssh://chaos@public-reverse-proxy.hamburg.ccc.de
+         tickets:
+           ansible_host: tickets-intern.hamburg.ccc.de
+           ansible_user: chaos
+           ansible_ssh_common_args: -J ssh://chaos@public-reverse-proxy.hamburg.ccc.de
+         keycloak:
+           ansible_host: keycloak-intern.hamburg.ccc.de
+           ansible_user: chaos
+           ansible_ssh_common_args: -J ssh://chaos@public-reverse-proxy.hamburg.ccc.de
+         lists:
+           ansible_host: lists.hamburg.ccc.de
+           ansible_user: chaos
+         mumble:
+           ansible_host: mumble.hamburg.ccc.de
+           ansible_user: chaos
+         onlyoffice:
+           ansible_host: onlyoffice-intern.hamburg.ccc.de
+           ansible_user: chaos
+           ansible_ssh_common_args: -J ssh://chaos@public-reverse-proxy.hamburg.ccc.de
+         pad:
+           ansible_host: pad-intern.hamburg.ccc.de
+           ansible_user: chaos
+           ansible_ssh_common_args: -J ssh://chaos@public-reverse-proxy.hamburg.ccc.de
+         pretalx:
+           ansible_host: pretalx-intern.hamburg.ccc.de
+           ansible_user: chaos
+           ansible_ssh_common_args: -J ssh://chaos@public-reverse-proxy.hamburg.ccc.de
+         public-reverse-proxy:
+           ansible_host: public-reverse-proxy.hamburg.ccc.de
+           ansible_user: chaos
+         wiki:
+           ansible_host: wiki-intern.hamburg.ccc.de
+           ansible_user: chaos
+           ansible_ssh_common_args: -J ssh://chaos@public-reverse-proxy.hamburg.ccc.de
+         zammad:
+           ansible_host: zammad-intern.hamburg.ccc.de
+           ansible_user: chaos
+           ansible_ssh_common_args: -J ssh://chaos@public-reverse-proxy.hamburg.ccc.de
+     base_config_hosts:
+       hosts:
+         ccchoir:
+         cloud:
+         grafana:
+         keycloak:
+         lists:
+         mumble:
+         onlyoffice:
+         pad:
+         pretalx:
+         public-reverse-proxy:
+         tickets:
+         wiki:
+         zammad:
+     docker_compose_hosts:
+       hosts:
+         ccchoir:
+         grafana:
+         tickets:
+         keycloak:
+         lists:
+         onlyoffice:
+         pad:
+         pretalx:
+         zammad:
+     nextcloud_hosts:
+       hosts:
+         cloud:
+     nginx_hosts:
+       hosts:
+         ccchoir:
+         grafana:
+         tickets:
+         keycloak:
+         lists:
+         mumble:
+         onlyoffice:
+         pad:
+         pretalx:
+         public-reverse-proxy:
+         wiki:
+         zammad:
+     public_reverse_proxy_hosts:
+       hosts:
+         public-reverse-proxy:
+     certbot_hosts:
+       hosts:
+         ccchoir:
+         grafana:
+         tickets:
+         keycloak:
+         lists:
+         mumble:
+         onlyoffice:
+         pad:
+         pretalx:
+         wiki:
+         zammad:
+     prometheus_node_exporter_hosts:
+       hosts:
+         ccchoir:
+         tickets:
+         keycloak:
+         onlyoffice:
+         pad:
+         pretalx:
+         wiki:
+         zammad:
+     infrastructure_authorized_keys_hosts:
+       hosts:
+         ccchoir:
+         grafana:
+         tickets:
+         cloud:
+         keycloak:
+         onlyoffice:
+         pad:
+         pretalx:
+         public-reverse-proxy:
+         wiki:
+         zammad:


@@ -1,21 +1,25 @@
  all:
-   hosts:
-     light:
-       ansible_host: light.z9.ccchh.net
-       ansible_user: chaos
-     authoritative-dns:
-       ansible_host: authoritative-dns.z9.ccchh.net
-       ansible_user: chaos
- nginx_hosts:
-   hosts:
-     light:
- ola_hosts:
-   hosts:
-     light:
- foobazdmx_hosts:
-   hosts:
-     light:
- infrastructure_authorized_keys_hosts:
-   hosts:
-     light:
-     authoritative-dns:
+   children:
+     debian_11:
+       hosts:
+         light:
+           ansible_host: light.z9.ccchh.net
+           ansible_user: chaos
+         authoritative-dns:
+           ansible_host: authoritative-dns.z9.ccchh.net
+           ansible_user: chaos
+     debian_12:
+       hosts:
+     nginx_hosts:
+       hosts:
+         light:
+     ola_hosts:
+       hosts:
+         light:
+     foobazdmx_hosts:
+       hosts:
+         light:
+     infrastructure_authorized_keys_hosts:
+       hosts:
+         light:
+         authoritative-dns:


@@ -29,14 +29,3 @@
- name: Print .dpkg-* files list
ansible.builtin.debug:
var: check__dpkg_files_list
- name: Get all held packages
ansible.builtin.command: apt-mark showhold
when: ansible_facts['pkg_mgr'] == "apt"
changed_when: false
register: check__apt_mark_showhold
- name: Print all held packages
ansible.builtin.debug:
var: check__apt_mark_showhold.stdout_lines
when: check__apt_mark_showhold.stdout_lines != []


@@ -25,15 +25,10 @@
      - foobazdmx
  - name: Ensure Dokuwiki config
-   hosts: wiki_hosts
+   hosts: wiki
    roles:
      - dokuwiki
- - name: Ensure NetBox deployment on netbox_hosts
-   hosts: netbox_hosts
-   roles:
-     - netbox
  - name: Ensure NGINX deployment on nginx_hosts, which are also public_reverse_proxy_hosts, before certbot role runs
    hosts: nginx_hosts:&public_reverse_proxy_hosts
    roles:
@@ -59,8 +54,8 @@
    roles:
      - prometheus_node_exporter
- - name: Configure unattended upgrades for all non-hypervisors
-   hosts: all:!hypervisors
+ - name: Configure unattended upgrades
+   hosts: all
    become: true
    roles:
      - role: debops.debops.unattended_upgrades
@@ -69,6 +64,3 @@
        - "o=${distro_id},n=${distro_codename}"
        - "o=Docker,n=${distro_codename}"
        - "o=nginx,n=${distro_codename}"
- - name: Run ensure_eh22_styleguide_dir Playbook
-   ansible.builtin.import_playbook: ensure_eh22_styleguide_dir.yaml
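The plays above rely on Ansible host patterns: `nginx_hosts:&public_reverse_proxy_hosts` targets only hosts that are members of both groups, while `all:!hypervisors` (on the removed side) targets every host except the `hypervisors` group. A minimal illustrative play showing the intersection pattern; this sketch is not part of the repository:

```yaml
- name: Show which hosts are in both nginx_hosts and public_reverse_proxy_hosts
  hosts: nginx_hosts:&public_reverse_proxy_hosts
  gather_facts: false
  tasks:
    - name: Print the matched host
      ansible.builtin.debug:
        msg: "{{ inventory_hostname }} matched the intersection pattern"
```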


@@ -1,40 +0,0 @@
---
# TODO: This should really be handled through a role at some point.
# This role is also needed for migrating public-web-static to Ansible.
- name: Ensure base for working EH22 Styleguide CI deployment
hosts: eh22-wiki
tasks:
- name: Ensure deployment user group
ansible.builtin.group:
name: eh22-styleguide-deploy
system: false
become: true
- name: Ensure deployment user
ansible.builtin.user:
name: eh22-styleguide-deploy
group: eh22-styleguide-deploy
password: '!'
system: false
become: true
- name: Ensure SSH key is set for deployment user
ansible.posix.authorized_key:
user: eh22-styleguide-deploy
exclusive: true
key: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOEgdXYZUq6SUDeKpX0Q8d1WYJ5WduHPMEaRuj0yfJTl deploy key for eh22 styleguide"
become: true
- name: Ensure deployment directory
ansible.builtin.file:
path: /var/www/eh22-styleguide
state: directory
mode: "0755"
owner: eh22-styleguide-deploy
group: eh22-styleguide-deploy
become: true
- name: Ensure rsync is present for deployment
ansible.builtin.apt:
name: rsync
become: true


@@ -1,31 +1,33 @@
- - name: Ensure NGINX repo setup and nginx install on relevant hosts
- hosts: nginx_hosts:nextcloud_hosts
+ - name: Ensure NGINX repo and install on nginx_hosts
+ hosts: nginx_hosts
  tasks:
- - name: Ensure NGINX repo is setup
+ - name: make sure NGINX repos are setup
  ansible.builtin.include_role:
  name: nginx
- tasks_from: main/02_repo_setup.yaml
+ tasks_from: main/repo_setup
- - name: Ensure nginx is installed
+ - name: make sure NGINX is installed
  ansible.builtin.include_role:
  name: nginx
- tasks_from: main/03_nginx_install.yaml
+ tasks_from: main/nginx_install
- - name: Ensure Docker repo setup and package install on relevant hosts
- hosts: docker_compose_hosts:nextcloud_hosts
+ - name: Ensure NGINX repo and install on nextcloud_hosts
+ hosts: nextcloud_hosts:!nginx_hosts
  tasks:
- - name: Ensure Docker repo is setup
+ - name: make sure NGINX repos are setup
  ansible.builtin.include_role:
- name: docker
+ name: nginx
- tasks_from: main/01_repo_setup.yaml
+ tasks_from: main/repo_setup
- - name: Ensure Docker Engine and other related packages are installed
+ - name: make sure NGINX is installed
  ansible.builtin.include_role:
- name: docker
+ name: nginx
- tasks_from: main/02_docker_install.yaml
+ tasks_from: main/nginx_install
+ vars:
+ nginx__version_spec: "{{ nextcloud__nginx_version_spec | default('') }}"
- - name: Make Sure System Package Are Up-To-Date for all non-hypervisors
- hosts: all:!hypervisors
+ - name: Make Sure System Package Are Up-To-Date
+ hosts: all
  roles:
  - apt_update_and_upgrade


@@ -1,60 +0,0 @@
ALLOWED_HOSTS = [ "netbox.eh22.easterhegg.eu" ]
DATABASE = {
"HOST": "localhost",
"NAME": "netbox",
"USER": "netbox",
"PASSWORD": "{{ lookup('community.general.passwordstore', 'noc/vm-secrets/chaosknoten/eh22-netbox/DATABASE_PASSWORD', create=false, missing='error') }}",
}
REDIS = {
"tasks": {
"HOST": "localhost",
"PORT": 6379,
"USERNAME": "",
"PASSWORD": "",
"DATABASE": 0,
"SSL": False,
},
"caching": {
"HOST": "localhost",
"PORT": 6379,
"USERNAME": "",
"PASSWORD": "",
"DATABASE": 1,
"SSL": False,
},
}
SECRET_KEY = "{{ lookup('community.general.passwordstore', 'noc/vm-secrets/chaosknoten/eh22-netbox/SECRET_KEY', create=false, missing='error') }}"
SESSION_COOKIE_SECURE = True
# CCCHH ID (Keycloak) integration.
# https://github.com/python-social-auth/social-core/blob/0925304a9e437f8b729862687d3a808c7fb88a95/social_core/backends/keycloak.py#L7
# https://python-social-auth.readthedocs.io/en/latest/backends/keycloak.html
REMOTE_AUTH_BACKEND = "social_core.backends.keycloak.KeycloakOAuth2"
SOCIAL_AUTH_KEYCLOAK_ACCESS_TOKEN_URL = (
"https://id.hamburg.ccc.de/realms/ccchh/protocol/openid-connect/token"
)
SOCIAL_AUTH_KEYCLOAK_AUTHORIZATION_URL = (
"https://id.hamburg.ccc.de/realms/ccchh/protocol/openid-connect/auth"
)
SOCIAL_AUTH_KEYCLOAK_KEY = "eh22-netbox"
SOCIAL_AUTH_KEYCLOAK_PUBLIC_KEY = "MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAi/Shi+b2OyYNGVFPsa6qf9SesEpRl5U5rpwgmt8H7NawMvwpPUYVW9o46QW0ulYcDmysT3BzpP3tagO/SFNoOjZdYe0D9nJ7vEp8KHbzR09KCfkyQIi0wLssKnDotVHL5JeUY+iKk+gjiwF9FSFSHPBqsST7hXVAut9LkOvs2aDod9AzbTH/uYbt4wfUm5l/1Ii8D+K7YcsFGUIqxv4XS/ylKqObqN4M2dac69iIwapoh6reaBQEm66vrOzJ+3yi4DZuPrkShJqi2hddtoyZihyCkF+eJJKEI5LrBf1KZB3Ec2YUrqk93ZGUGs/XY6R87QSfR3hJ82B1wnF+c2pw+QIDAQAB"
SOCIAL_AUTH_KEYCLOAK_SECRET = "{{ lookup('community.general.passwordstore', 'noc/vm-secrets/chaosknoten/eh22-netbox/SOCIAL_AUTH_KEYCLOAK_SECRET', create=false, missing='error') }}"
# Use custom OIDC group and role mapping pipeline functions added in via
# netbox__custom_pipeline_oidc_group_and_role_mapping.
# The default pipeline this is based on can be found here:
# https://github.com/netbox-community/netbox/blob/main/netbox/netbox/settings.py
SOCIAL_AUTH_PIPELINE = [
"social_core.pipeline.social_auth.social_details",
"social_core.pipeline.social_auth.social_uid",
"social_core.pipeline.social_auth.social_user",
"social_core.pipeline.user.get_username",
"social_core.pipeline.user.create_user",
"social_core.pipeline.social_auth.associate_user",
"netbox.authentication.user_default_groups_handler",
"social_core.pipeline.social_auth.load_extra_data",
"social_core.pipeline.user.user_details",
# Custom OIDC group and role mapping functions.
"netbox.custom_pipeline_oidc_mapping.add_groups",
"netbox.custom_pipeline_oidc_mapping.remove_groups",
"netbox.custom_pipeline_oidc_mapping.set_roles",
]


@@ -1,48 +0,0 @@
# partly generated 2022-01-08, Mozilla Guideline v5.6, nginx 1.17.7, OpenSSL 1.1.1k, intermediate configuration
# https://ssl-config.mozilla.org/#server=nginx&version=1.17.7&config=intermediate&openssl=1.1.1k&guideline=5.6
server {
# Listen on a custom port for the proxy protocol.
listen 8443 ssl http2 proxy_protocol;
# Make use of the ngx_http_realip_module to set the $remote_addr and
# $remote_port to the client address and client port, when using proxy
# protocol.
# First set our proxy protocol proxy as trusted.
set_real_ip_from 172.31.17.140;
# Then tell the realip_module to get the addreses from the proxy protocol
# header.
real_ip_header proxy_protocol;
server_name netbox.eh22.easterhegg.eu;
ssl_certificate /etc/letsencrypt/live/netbox.eh22.easterhegg.eu/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/netbox.eh22.easterhegg.eu/privkey.pem;
# verify chain of trust of OCSP response using Root CA and Intermediate certs
ssl_trusted_certificate /etc/letsencrypt/live/netbox.eh22.easterhegg.eu/chain.pem;
# HSTS (ngx_http_headers_module is required) (63072000 seconds)
add_header Strict-Transport-Security "max-age=63072000" always;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Port 443;
# This is https in any case.
proxy_set_header X-Forwarded-Proto https;
# Hide the X-Forwarded header.
proxy_hide_header X-Forwarded;
# Assume we are the only Reverse Proxy (well using Proxy Protocol, but that
# is transparent).
# Also provide "_hidden" for by, since it's not relevant.
proxy_set_header Forwarded "for=$remote_addr;proto=https;host=$host;by=_hidden";
client_max_body_size 25m;
location /static/ {
alias /opt/netbox/netbox/static/;
}
location / {
proxy_pass http://127.0.0.1:8001;
}
}


@@ -1,79 +0,0 @@
# partly generated 2022-01-08, Mozilla Guideline v5.6, nginx 1.17.7, OpenSSL 1.1.1k, intermediate configuration
# https://ssl-config.mozilla.org/#server=nginx&version=1.17.7&config=intermediate&openssl=1.1.1k&guideline=5.6
server {
# Listen on a custom port for the proxy protocol.
listen 8443 ssl http2 proxy_protocol;
# Make use of the ngx_http_realip_module to set the $remote_addr and
# $remote_port to the client address and client port, when using proxy
# protocol.
# First set our proxy protocol proxy as trusted.
set_real_ip_from 172.31.17.140;
# Then tell the realip_module to get the addreses from the proxy protocol
# header.
real_ip_header proxy_protocol;
server_name eh22.easterhegg.eu;
ssl_certificate /etc/letsencrypt/live/eh22.easterhegg.eu/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/eh22.easterhegg.eu/privkey.pem;
# verify chain of trust of OCSP response using Root CA and Intermediate certs
ssl_trusted_certificate /etc/letsencrypt/live/eh22.easterhegg.eu/chain.pem;
# HSTS (ngx_http_headers_module is required) (63072000 seconds)
add_header Strict-Transport-Security "max-age=63072000" always;
# Maximum file upload size is 20MB - change accordingly if needed
# See: https://www.dokuwiki.org/faq:uploadsize
client_max_body_size 20M;
client_body_buffer_size 128k;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# This is https in any case.
proxy_set_header X-Forwarded-Proto https;
root /var/www/dokuwiki;
index doku.php;
#Remember to comment the below out when you're installing, and uncomment it when done.
location ~ /(conf/|bin/|inc/|vendor/|install.php) { deny all; }
#Support for X-Accel-Redirect
location ~ ^/data/ { internal ; }
location ~ ^/lib.*\.(js|css|gif|png|ico|jpg|jpeg)$ {
expires 365d;
}
location / { try_files $uri $uri/ @dokuwiki; }
location @dokuwiki {
# rewrites "doku.php/" out of the URLs if you set the userwrite setting to .htaccess in dokuwiki config page
rewrite ^/_media/(.*) /lib/exe/fetch.php?media=$1 last;
rewrite ^/_detail/(.*) /lib/exe/detail.php?media=$1 last;
rewrite ^/_export/([^/]+)/(.*) /doku.php?do=export_$1&id=$2 last;
rewrite ^/(.*) /doku.php?id=$1&$args last;
}
location ~ \.php$ {
try_files $uri $uri/ /doku.php;
include fastcgi_params;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
fastcgi_param REDIRECT_STATUS 200;
fastcgi_pass unix:/var/run/php/php-fpm-dokuwiki.sock;
}
location = /design {
# Disable port in redirect as NGINX would redirect to the PROXY Protocol port 8443 for locations like https://eh22.easterhegg.eu/design
port_in_redirect off;
return 302 /design/;
}
location /design/ {
# Disable port in redirect as NGINX would redirect to the PROXY Protocol port 8443 for locations like https://eh22.easterhegg.eu/design
port_in_redirect off;
alias /var/www/eh22-styleguide/;
index index.html;
}
}


@ -83,8 +83,8 @@ scrape_configs:
- public-web-static-intern.hamburg.ccc.de:9100 - public-web-static-intern.hamburg.ccc.de:9100
- git-intern.hamburg.ccc.de:9100 - git-intern.hamburg.ccc.de:9100
- forgejo-actions-runner-intern.hamburg.ccc.de:9100 - forgejo-actions-runner-intern.hamburg.ccc.de:9100
- eh22-netbox-intern.hamburg.ccc.de:9100
- eh22-wiki-intern.hamburg.ccc.de:9100 - eh22-wiki-intern.hamburg.ccc.de:9100
- nix-box-june-intern.hamburg.ccc.de:9100
- mjolnir-intern.hamburg.ccc.de:9100 - mjolnir-intern.hamburg.ccc.de:9100
- woodpecker-intern.hamburg.ccc.de:9100 - woodpecker-intern.hamburg.ccc.de:9100
- penpot-intern.hamburg.ccc.de:9100 - penpot-intern.hamburg.ccc.de:9100


@@ -1,7 +1,7 @@
  # Links & Resources:
  # - https://samber.github.io/awesome-prometheus-alerts/rules
  groups:
- - name: node-exporter-memory
+ - name: node-exporter
  rules:
  - alert: HostOutOfMemory
  expr: (node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100 < 10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
@@ -28,41 +28,6 @@ groups:
annotations:
summary: Host Memory is underutilized (instance {{ $labels.instance }})
description: "Node memory is < 10% for 1 week. Consider reducing memory space. (instance {{ $labels.instance }})\n VALUE = {{ $value }}"
- alert: HostSwapIsFillingUp
expr: ((1 - (node_memory_SwapFree_bytes / node_memory_SwapTotal_bytes)) * 100 > 80) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
severity: warning
annotations:
summary: Host swap is filling up (instance {{ $labels.instance }})
description: "Swap is filling up (>80%)\n VALUE = {{ $value }}"
- alert: HostOomKillDetected
expr: (increase(node_vmstat_oom_kill[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 0m
labels:
severity: warning
annotations:
summary: Host OOM kill detected (instance {{ $labels.instance }})
description: "OOM kill detected\n VALUE = {{ $value }}"
- alert: HostEdacCorrectableErrorsDetected
expr: (increase(node_edac_correctable_errors_total[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 0m
labels:
severity: info
annotations:
summary: Host EDAC Correctable Errors detected (instance {{ $labels.instance }})
description: "Host {{ $labels.instance }} has had {{ printf \"%.0f\" $value }} correctable memory errors reported by EDAC in the last 5 minutes.\n VALUE = {{ $value }}"
- alert: HostEdacUncorrectableErrorsDetected
expr: (node_edac_uncorrectable_errors_total > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 0m
labels:
severity: warning
annotations:
summary: Host EDAC Uncorrectable Errors detected (instance {{ $labels.instance }})
description: "Host {{ $labels.instance }} has had {{ printf \"%.0f\" $value }} uncorrectable memory errors reported by EDAC in the last 5 minutes.\n VALUE = {{ $value }}"
- name: node-exporter-network
rules:
- alert: HostUnusualNetworkThroughputIn
expr: (sum by (instance) (rate(node_network_receive_bytes_total[2m])) / 1024 / 1024 > 100) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 5m
@@ -79,107 +44,56 @@ groups:
  annotations:
  summary: Host unusual network throughput out (instance {{ $labels.instance }})
  description: "Host network interfaces are probably sending too much data (> 100 MB/s)\n VALUE = {{ $value }}"
- # General network receive error alerts.
- # Excluding: OPNsense hosts
- - alert: HostNetworkReceiveErrors
- expr: (rate(node_network_receive_errs_total[2m]) / rate(node_network_receive_packets_total[2m]) > 0.01) * on(instance) group_left (nodename) node_uname_info{nodename=~".+", nodename!="OPNsense"}
- for: 2m
- labels:
- severity: warning
- annotations:
- summary: Host Network Receive Errors (instance {{ $labels.instance }})
- description: "Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf \"%.0f\" $value }} receive errors in the last two minutes.\n VALUE = {{ $value }}"
- # OPNsense network receive error alerts.
- # This is the same as the regular network receive error alerts, but excluding the WireGuard interfaces as they like to throw errors, but which aren't of importance.
- - alert: OPNsenseHostNetworkReceiveErrors
- expr: (rate(node_network_receive_errs_total{device!~"wg.+"}[2m]) / rate(node_network_receive_packets_total{device!~"wg.+"}[2m]) > 0.01) * on(instance) group_left (nodename) node_uname_info{nodename="OPNsense"}
- for: 2m
- labels:
- severity: warning
- annotations:
- summary: OPNsense host Network Receive Errors (instance {{ $labels.instance }})
- description: "Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf \"%.0f\" $value }} receive errors in the last two minutes.\n VALUE = {{ $value }}"
- - alert: HostNetworkTransmitErrors
- expr: (rate(node_network_transmit_errs_total[2m]) / rate(node_network_transmit_packets_total[2m]) > 0.01) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
- for: 2m
- labels:
- severity: warning
- annotations:
- summary: Host Network Transmit Errors (instance {{ $labels.instance }})
- description: "Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf \"%.0f\" $value }} transmit errors in the last two minutes.\n VALUE = {{ $value }}"
- - alert: HostNetworkBondDegraded
- expr: ((node_bonding_active - node_bonding_slaves) != 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
- for: 2m
- labels:
- severity: warning
- annotations:
- summary: Host Network Bond Degraded (instance {{ $labels.instance }})
- description: "Bond \"{{ $labels.device }}\" degraded on \"{{ $labels.instance }}\".\n VALUE = {{ $value }}"
- - alert: HostConntrackLimit
- expr: (node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
- for: 5m
- labels:
- severity: warning
- annotations:
- summary: Host conntrack limit (instance {{ $labels.instance }})
- description: "The number of conntrack is approaching limit\n VALUE = {{ $value }}"
- - name: node-exporter-disk
- rules:
- # General high disk read and write rate alerts.
- # Excluding: hypervisor hosts, CI hosts
- - alert: HostUnusualDiskReadRate
- expr: (sum by (instance) (rate(node_disk_read_bytes_total[2m])) / 1024 / 1024 > 50) * on(instance) group_left (nodename) node_uname_info{nodename=~".+", nodename!="forgejo-actions-runner", nodename!="woodpecker", nodename!="chaosknoten"}
- for: 5m
- labels:
- severity: warning
- annotations:
- summary: Host unusual disk read rate (instance {{ $labels.instance }})
- description: "Disk is probably reading too much data (> 50 MB/s)\n VALUE = {{ $value }}"
- - alert: HostUnusualDiskWriteRate
- expr: (sum by (instance) (rate(node_disk_written_bytes_total[2m])) / 1024 / 1024 > 50) * on(instance) group_left (nodename) node_uname_info{nodename=~".+", nodename!="forgejo-actions-runner", nodename!="woodpecker", nodename!="chaosknoten"}
- for: 2m
- labels:
- severity: warning
- annotations:
- summary: Host unusual disk write rate (instance {{ $labels.instance }})
- description: "Disk is probably writing too much data (> 50 MB/s)\n VALUE = {{ $value }}"
- # CI hosts high disk read and write alerts.
- # Longer intervals to account for disk intensive CI tasks.
- - alert: CIHostUnusualDiskReadRate
- expr: (sum by (instance) (rate(node_disk_read_bytes_total[2m])) / 1024 / 1024 > 50) * on(instance) group_left (nodename) node_uname_info{nodename="forgejo-actions-runner", nodename="woodpecker"}
- for: 10m
- labels:
- severity: warning
- annotations:
- summary: CI host unusual disk read rate for 10 min (instance {{ $labels.instance }})
- description: "Disk is probably reading too much data (> 50 MB/s)\n VALUE = {{ $value }}"
- - alert: VirtualHostUnusualDiskWriteRate
- expr: (sum by (instance) (rate(node_disk_written_bytes_total[2m])) / 1024 / 1024 > 50) * on(instance) group_left (nodename) node_uname_info{nodename="forgejo-actions-runner", nodename="woodpecker"}
- for: 4m
- labels:
- severity: warning
- annotations:
- summary: CI host unusual disk write rate for 4 min (instance {{ $labels.instance }})
- description: "Disk is probably writing too much data (> 50 MB/s)\n VALUE = {{ $value }}"
- # Hypervisor host high disk read and write alerts.
- # Longer intervals to account for disk intensive hypervisor tasks (backups, moving VMs, etc.).
- - alert: HypervisorHostUnusualDiskReadRate
- expr: (sum by (instance) (rate(node_disk_read_bytes_total[2m])) / 1024 / 1024 > 50) * on(instance) group_left (nodename) node_uname_info{nodename="chaosknoten"}
- for: 2h
- labels:
- severity: warning
- annotations:
- summary: Hypervisor host unusual disk read rate (instance {{ $labels.instance }})
- description: "Disk is probably reading too much data (> 50 MB/s)\n VALUE = {{ $value }}"
- - alert: HypervisorHostUnusualDiskWriteRate
- expr: (sum by (instance) (rate(node_disk_written_bytes_total[2m])) / 1024 / 1024 > 50) * on(instance) group_left (nodename) node_uname_info{nodename="chaosknoten"}
- for: 2h
- labels:
- severity: warning
- annotations:
- summary: Hypervisor host unusual disk write rate (instance {{ $labels.instance }})
- description: "Disk is probably writing too much data (> 50 MB/s)\n VALUE = {{ $value }}"
+ # Have different disk read and write rate alerts for VMs and physical machines.
+ - alert: VirtualHostUnusualDiskReadRate
+ expr: (sum by (instance) (rate(node_disk_read_bytes_total[2m])) / 1024 / 1024 > 50) * on(instance) group_left (nodename) node_uname_info{ype="virtual_machine", nodename=~".+", nodename!="forgejo-actions-runner", nodename!="woodpecker"}
+ for: 5m
+ labels:
+ severity: warning
+ annotations:
+ summary: Virtual host unusual disk read rate (instance {{ $labels.instance }})
+ description: "Disk is probably reading too much data (> 50 MB/s)\n VALUE = {{ $value }}"
+ - alert: VirtualHostUnusualDiskWriteRate
+ expr: (sum by (instance) (rate(node_disk_written_bytes_total[2m])) / 1024 / 1024 > 50) * on(instance) group_left (nodename) node_uname_info{type="virtual_machine", nodename=~".+", nodename!="forgejo-actions-runner", nodename!="woodpecker"}
+ for: 2m
+ labels:
+ severity: warning
+ annotations:
+ summary: Virtual host unusual disk write rate (instance {{ $labels.instance }})
+ description: "Disk is probably writing too much data (> 50 MB/s)\n VALUE = {{ $value }}"
+ # Some VMs are expected to have high Read / Write rates z.B. CI servers
+ - alert: VirtualHostUnusualDiskReadRate
+ expr: (sum by (instance) (rate(node_disk_read_bytes_total[2m])) / 1024 / 1024 > 50) * on(instance) group_left (nodename) node_uname_info{ype="virtual_machine", nodename="forgejo-actions-runner", nodename="woodpecker"}
+ for: 10m
+ labels:
+ severity: warning
+ annotations:
+ summary: Virtual host unusual disk read rate for 10 min (instance {{ $labels.instance }})
+ description: "Disk is probably reading too much data (> 50 MB/s)\n VALUE = {{ $value }}"
+ - alert: VirtualHostUnusualDiskWriteRate
+ expr: (sum by (instance) (rate(node_disk_written_bytes_total[2m])) / 1024 / 1024 > 50) * on(instance) group_left (nodename) node_uname_info{type="virtual_machine", nodename="forgejo-actions-runner", nodename="woodpecker"}
+ for: 4m
+ labels:
+ severity: warning
+ annotations:
+ summary: Virtual host unusual disk write rate for 4 min (instance {{ $labels.instance }})
+ description: "Disk is probably writing too much data (> 50 MB/s)\n VALUE = {{ $value }}"
+ - alert: PhysicalHostUnusualDiskReadRate
+ expr: (sum by (instance) (rate(node_disk_read_bytes_total[2m])) / 1024 / 1024 > 100) * on(instance) group_left (nodename) node_uname_info{type="physical_machine", nodename=~".+"}
+ for: 20m
+ labels:
+ severity: warning
+ annotations:
+ summary: Physical host unusual disk read rate (instance {{ $labels.instance }})
+ description: "Disk is probably reading too much data (> 100 MB/s)\n VALUE = {{ $value }}"
+ - alert: PhysicalHostUnusualDiskWriteRate
+ expr: (sum by (instance) (rate(node_disk_written_bytes_total[2m])) / 1024 / 1024 > 100) * on(instance) group_left (nodename) node_uname_info{type="physical_machine", nodename=~".+"}
+ for: 15m
+ labels:
+ severity: warning
+ annotations:
+ summary: Physical host unusual disk write rate (instance {{ $labels.instance }})
+ description: "Disk is probably writing too much data (> 100 MB/s)\n VALUE = {{ $value }}"
  # Please add ignored mountpoints in node_exporter parameters like
  # "--collector.filesystem.ignored-mount-points=^/(sys|proc|dev|run)($|/)".
  # Same rule using "node_filesystem_free_bytes" will fire when disk fills for non-root users.
@@ -242,55 +156,6 @@
annotations:
summary: Host unusual disk write latency (instance {{ $labels.instance }})
description: "Disk latency is growing (write operations > 100ms)\n VALUE = {{ $value }}"
# General unusual disk io alerts.
# Excluding: hypervisor hosts
- alert: HostUnusualDiskIo
expr: (rate(node_disk_io_time_seconds_total[1m]) > 0.5) * on(instance) group_left (nodename) node_uname_info{nodename=~".+", nodename!="chaosknoten"}
for: 5m
labels:
severity: warning
annotations:
summary: Host unusual disk IO (instance {{ $labels.instance }})
description: "Time spent in IO is too high on {{ $labels.instance }}. Check storage for issues.\n VALUE = {{ $value }}"
# Hypervisor host unusual hard disk io alerts.
# Since hard disks on the hypervisor can easily have their IO saturated by hypervisor tasks (backups, moving VMs, etc.), alert when the IO is above the regular threshold for a very long time.
- alert: HypervisorHostUnusualHardDiskIo
expr: (rate(node_disk_io_time_seconds_total{device=~"s.+"}[1m]) > 0.5) * on(instance) group_left (nodename) node_uname_info{nodename="chaosknoten"}
for: 2h
labels:
severity: warning
annotations:
summary: Hypervisor host unusual hard disk IO (instance {{ $labels.instance }})
description: "Time spent in IO is too high on {{ $labels.instance }}. Check storage for issues.\n VALUE = {{ $value }}"
# Hypervisor host unusual other (non-hard) disk io alerts.
# This is the same as the regular unsual disk io alerts.
- alert: HypervisorHostUnusualOtherDiskIo
expr: (rate(node_disk_io_time_seconds_total{device!~"s.+"}[1m]) > 0.5) * on(instance) group_left (nodename) node_uname_info{nodename="chaosknoten"}
for: 5m
labels:
severity: warning
annotations:
summary: Hypervisor host unusual other (non-hard) disk IO (instance {{ $labels.instance }})
description: "Time spent in IO is too high on {{ $labels.instance }}. Check storage for issues.\n VALUE = {{ $value }}"
- alert: HostRaidArrayGotInactive
expr: (node_md_state{state="inactive"} > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 0m
labels:
severity: critical
annotations:
summary: Host RAID array got inactive (instance {{ $labels.instance }})
description: "RAID array {{ $labels.device }} is in a degraded state due to one or more disk failures. The number of spare drives is insufficient to fix the issue automatically.\n VALUE = {{ $value }}"
- alert: HostRaidDiskFailure
expr: (node_md_disks{state="failed"} > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
severity: warning
annotations:
summary: Host RAID disk failure (instance {{ $labels.instance }})
description: "At least one device in RAID array on {{ $labels.instance }} failed. Array {{ $labels.md_device }} needs attention and possibly a disk swap\n VALUE = {{ $value }}"
- name: node-exporter-cpu
rules:
- alert: HostHighCpuLoad
expr: (sum by (instance) (avg by (mode, instance) (rate(node_cpu_seconds_total{mode!="idle"}[2m]))) > 0.8) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 10m
@@ -325,6 +190,31 @@
annotations:
summary: Host CPU high iowait (instance {{ $labels.instance }})
description: "CPU iowait > 10%. A high iowait means that you are disk or network bound.\n VALUE = {{ $value }}"
# Have different disk IO alerts for VMs and physical machines and for physical machines different ones for hard and other disks.
- alert: PhysicalHostUnusualHardDiskIo
expr: (rate(node_disk_io_time_seconds_total{device=~"s.+"}[1m]) > 0.75) * on(instance) group_left (nodename) node_uname_info{type="physical_machine", nodename=~".+"}
for: 5m
labels:
severity: warning
annotations:
summary: Physical host unusual hard disk IO (instance {{ $labels.instance }})
description: "Time spent in IO is too high on {{ $labels.instance }}. Check storage for issues.\n VALUE = {{ $value }}"
- alert: PhysicalHostUnusualOtherDiskIo
expr: (rate(node_disk_io_time_seconds_total{device!~"s.+"}[1m]) > 0.5) * on(instance) group_left (nodename) node_uname_info{type="physical_machine", nodename=~".+"}
for: 5m
labels:
severity: warning
annotations:
summary: Physical host unusual other (non-hard) disk IO (instance {{ $labels.instance }})
description: "Time spent in IO is too high on {{ $labels.instance }}. Check storage for issues.\n VALUE = {{ $value }}"
- alert: VirtualHostUnusualDiskIo
expr: (rate(node_disk_io_time_seconds_total[1m]) > 0.5) * on(instance) group_left (nodename) node_uname_info{type="virtual_machine", nodename=~".+"}
for: 5m
labels:
severity: warning
annotations:
summary: Virtual host unusual disk IO (instance {{ $labels.instance }})
description: "Time spent in IO is too high on {{ $labels.instance }}. Check storage for issues.\n VALUE = {{ $value }}"
# # x2 context switches is an arbitrary number.
# # The alert threshold depends on the nature of the application.
# # Please read: https://github.com/samber/awesome-prometheus-alerts/issues/58
@ -336,28 +226,14 @@ groups:
# annotations: # annotations:
# summary: Host context switching high (instance {{ $labels.instance }}) # summary: Host context switching high (instance {{ $labels.instance }})
# description: "Context switching is growing on the node (twice the daily average during the last 15m)\n VALUE = {{ $value }}" # description: "Context switching is growing on the node (twice the daily average during the last 15m)\n VALUE = {{ $value }}"
- alert: HostSwapIsFillingUp
- name: node-exporter-physical expr: ((1 - (node_memory_SwapFree_bytes / node_memory_SwapTotal_bytes)) * 100 > 80) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
rules: for: 2m
- alert: HostNodeOvertemperatureAlarm
expr: ((node_hwmon_temp_crit_alarm_celsius == 1) or (node_hwmon_temp_alarm == 1)) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 0m
labels:
severity: critical
annotations:
summary: Host node overtemperature alarm (instance {{ $labels.instance }})
description: "Physical node temperature alarm triggered\n VALUE = {{ $value }}"
- alert: HostKernelVersionDeviations
expr: (count(sum(label_replace(node_uname_info, "kernel", "$1", "release", "([0-9]+.[0-9]+.[0-9]+).*")) by (kernel)) > 1) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 6h
labels: labels:
severity: warning severity: warning
annotations: annotations:
summary: Host kernel version deviations (instance {{ $labels.instance }}) summary: Host swap is filling up (instance {{ $labels.instance }})
description: "Different kernel versions are running\n VALUE = {{ $value }}" description: "Swap is filling up (>80%)\n VALUE = {{ $value }}"
- name: node-exporter-misc
rules:
- alert: HostSystemdServiceCrashed - alert: HostSystemdServiceCrashed
expr: (node_systemd_unit_state{state="failed"} == 1) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"} expr: (node_systemd_unit_state{state="failed"} == 1) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 0m for: 0m
@@ -374,6 +250,94 @@
annotations:
summary: Host physical component too hot (instance {{ $labels.instance }})
description: "Physical hardware component too hot\n VALUE = {{ $value }}"
- alert: HostNodeOvertemperatureAlarm
expr: ((node_hwmon_temp_crit_alarm_celsius == 1) or (node_hwmon_temp_alarm == 1)) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 0m
labels:
severity: critical
annotations:
summary: Host node overtemperature alarm (instance {{ $labels.instance }})
description: "Physical node temperature alarm triggered\n VALUE = {{ $value }}"
- alert: HostRaidArrayGotInactive
expr: (node_md_state{state="inactive"} > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 0m
labels:
severity: critical
annotations:
summary: Host RAID array got inactive (instance {{ $labels.instance }})
description: "RAID array {{ $labels.device }} is in a degraded state due to one or more disk failures. The number of spare drives is insufficient to fix the issue automatically.\n VALUE = {{ $value }}"
- alert: HostRaidDiskFailure
expr: (node_md_disks{state="failed"} > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
severity: warning
annotations:
summary: Host RAID disk failure (instance {{ $labels.instance }})
description: "At least one device in RAID array on {{ $labels.instance }} failed. Array {{ $labels.md_device }} needs attention and possibly a disk swap\n VALUE = {{ $value }}"
- alert: HostKernelVersionDeviations
expr: (count(sum(label_replace(node_uname_info, "kernel", "$1", "release", "([0-9]+.[0-9]+.[0-9]+).*")) by (kernel)) > 1) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 6h
labels:
severity: warning
annotations:
summary: Host kernel version deviations (instance {{ $labels.instance }})
description: "Different kernel versions are running\n VALUE = {{ $value }}"
- alert: HostOomKillDetected
expr: (increase(node_vmstat_oom_kill[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 0m
labels:
severity: warning
annotations:
summary: Host OOM kill detected (instance {{ $labels.instance }})
description: "OOM kill detected\n VALUE = {{ $value }}"
- alert: HostEdacCorrectableErrorsDetected
expr: (increase(node_edac_correctable_errors_total[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 0m
labels:
severity: info
annotations:
summary: Host EDAC Correctable Errors detected (instance {{ $labels.instance }})
description: "Host {{ $labels.instance }} has had {{ printf \"%.0f\" $value }} correctable memory errors reported by EDAC in the last 5 minutes.\n VALUE = {{ $value }}"
- alert: HostEdacUncorrectableErrorsDetected
expr: (node_edac_uncorrectable_errors_total > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 0m
labels:
severity: warning
annotations:
summary: Host EDAC Uncorrectable Errors detected (instance {{ $labels.instance }})
description: "Host {{ $labels.instance }} has had {{ printf \"%.0f\" $value }} uncorrectable memory errors reported by EDAC in the last 5 minutes.\n VALUE = {{ $value }}"
- alert: HostNetworkReceiveErrors
expr: (rate(node_network_receive_errs_total[2m]) / rate(node_network_receive_packets_total[2m]) > 0.01) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
severity: warning
annotations:
summary: Host Network Receive Errors (instance {{ $labels.instance }})
description: "Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf \"%.0f\" $value }} receive errors in the last two minutes.\n VALUE = {{ $value }}"
- alert: HostNetworkTransmitErrors
expr: (rate(node_network_transmit_errs_total[2m]) / rate(node_network_transmit_packets_total[2m]) > 0.01) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
severity: warning
annotations:
summary: Host Network Transmit Errors (instance {{ $labels.instance }})
description: "Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf \"%.0f\" $value }} transmit errors in the last two minutes.\n VALUE = {{ $value }}"
- alert: HostNetworkBondDegraded
expr: ((node_bonding_active - node_bonding_slaves) != 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
severity: warning
annotations:
summary: Host Network Bond Degraded (instance {{ $labels.instance }})
description: "Bond \"{{ $labels.device }}\" degraded on \"{{ $labels.instance }}\".\n VALUE = {{ $value }}"
- alert: HostConntrackLimit
expr: (node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 5m
labels:
severity: warning
annotations:
summary: Host conntrack limit (instance {{ $labels.instance }})
description: "The number of conntrack is approaching limit\n VALUE = {{ $value }}"
- alert: HostClockSkew
expr: ((node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 10m
@@ -398,7 +362,6 @@
annotations:
summary: Host requires reboot (instance {{ $labels.instance }})
description: "{{ $labels.instance }} requires a reboot.\n VALUE = {{ $value }}"
- name: prometheus
rules:
- alert: PrometheusJobMissing
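The VirtualHost*/PhysicalHost* alerts added above filter `node_uname_info` on a `type` label (`virtual_machine` or `physical_machine`). node_exporter does not export such a label by itself, so it presumably has to be attached as a target label in the Prometheus scrape configuration. A hedged sketch; the job and target names here are illustrative assumptions, not taken from the repository:

```yaml
scrape_configs:
  - job_name: node_exporter
    static_configs:
      # Labels set here are attached to every metric scraped from these targets,
      # which is what lets the alert expressions select on type="...".
      - labels:
          type: virtual_machine
        targets:
          - example-vm-intern.hamburg.ccc.de:9100
      - labels:
          type: physical_machine
        targets:
          - example-hypervisor.hamburg.ccc.de:9100
```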


@@ -1,60 +0,0 @@
ALLOWED_HOSTS = [ "netbox.hamburg.ccc.de" ]
DATABASE = {
"HOST": "localhost",
"NAME": "netbox",
"USER": "netbox",
"PASSWORD": "{{ lookup('community.general.passwordstore', 'noc/vm-secrets/chaosknoten/netbox/DATABASE_PASSWORD', create=false, missing='error') }}",
}
REDIS = {
"tasks": {
"HOST": "localhost",
"PORT": 6379,
"USERNAME": "",
"PASSWORD": "",
"DATABASE": 0,
"SSL": False,
},
"caching": {
"HOST": "localhost",
"PORT": 6379,
"USERNAME": "",
"PASSWORD": "",
"DATABASE": 1,
"SSL": False,
},
}
SECRET_KEY = "{{ lookup('community.general.passwordstore', 'noc/vm-secrets/chaosknoten/netbox/SECRET_KEY', create=false, missing='error') }}"
SESSION_COOKIE_SECURE = True
# CCCHH ID (Keycloak) integration.
# https://github.com/python-social-auth/social-core/blob/0925304a9e437f8b729862687d3a808c7fb88a95/social_core/backends/keycloak.py#L7
# https://python-social-auth.readthedocs.io/en/latest/backends/keycloak.html
REMOTE_AUTH_BACKEND = "social_core.backends.keycloak.KeycloakOAuth2"
SOCIAL_AUTH_KEYCLOAK_ACCESS_TOKEN_URL = (
"https://id.hamburg.ccc.de/realms/ccchh/protocol/openid-connect/token"
)
SOCIAL_AUTH_KEYCLOAK_AUTHORIZATION_URL = (
"https://id.hamburg.ccc.de/realms/ccchh/protocol/openid-connect/auth"
)
SOCIAL_AUTH_KEYCLOAK_KEY = "netbox"
SOCIAL_AUTH_KEYCLOAK_PUBLIC_KEY = "MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAi/Shi+b2OyYNGVFPsa6qf9SesEpRl5U5rpwgmt8H7NawMvwpPUYVW9o46QW0ulYcDmysT3BzpP3tagO/SFNoOjZdYe0D9nJ7vEp8KHbzR09KCfkyQIi0wLssKnDotVHL5JeUY+iKk+gjiwF9FSFSHPBqsST7hXVAut9LkOvs2aDod9AzbTH/uYbt4wfUm5l/1Ii8D+K7YcsFGUIqxv4XS/ylKqObqN4M2dac69iIwapoh6reaBQEm66vrOzJ+3yi4DZuPrkShJqi2hddtoyZihyCkF+eJJKEI5LrBf1KZB3Ec2YUrqk93ZGUGs/XY6R87QSfR3hJ82B1wnF+c2pw+QIDAQAB"
SOCIAL_AUTH_KEYCLOAK_SECRET = "{{ lookup('community.general.passwordstore', 'noc/vm-secrets/chaosknoten/netbox/SOCIAL_AUTH_KEYCLOAK_SECRET', create=false, missing='error') }}"
# Use custom OIDC group and role mapping pipeline functions added in via
# netbox__custom_pipeline_oidc_group_and_role_mapping.
# The default pipeline this is based on can be found here:
# https://github.com/netbox-community/netbox/blob/main/netbox/netbox/settings.py
SOCIAL_AUTH_PIPELINE = [
"social_core.pipeline.social_auth.social_details",
"social_core.pipeline.social_auth.social_uid",
"social_core.pipeline.social_auth.social_user",
"social_core.pipeline.user.get_username",
"social_core.pipeline.user.create_user",
"social_core.pipeline.social_auth.associate_user",
"netbox.authentication.user_default_groups_handler",
"social_core.pipeline.social_auth.load_extra_data",
"social_core.pipeline.user.user_details",
# Custom OIDC group and role mapping functions.
"netbox.custom_pipeline_oidc_mapping.add_groups",
"netbox.custom_pipeline_oidc_mapping.remove_groups",
"netbox.custom_pipeline_oidc_mapping.set_roles",
]


@@ -1,48 +0,0 @@
# partly generated 2022-01-08, Mozilla Guideline v5.6, nginx 1.17.7, OpenSSL 1.1.1k, intermediate configuration
# https://ssl-config.mozilla.org/#server=nginx&version=1.17.7&config=intermediate&openssl=1.1.1k&guideline=5.6
server {
# Listen on a custom port for the proxy protocol.
listen 8443 ssl http2 proxy_protocol;
# Make use of the ngx_http_realip_module to set the $remote_addr and
# $remote_port to the client address and client port, when using proxy
# protocol.
# First set our proxy protocol proxy as trusted.
set_real_ip_from 172.31.17.140;
# Then tell the realip_module to get the addreses from the proxy protocol
# header.
real_ip_header proxy_protocol;
server_name netbox.hamburg.ccc.de;
ssl_certificate /etc/letsencrypt/live/netbox.hamburg.ccc.de/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/netbox.hamburg.ccc.de/privkey.pem;
# verify chain of trust of OCSP response using Root CA and Intermediate certs
ssl_trusted_certificate /etc/letsencrypt/live/netbox.hamburg.ccc.de/chain.pem;
# HSTS (ngx_http_headers_module is required) (63072000 seconds)
add_header Strict-Transport-Security "max-age=63072000" always;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Port 443;
# This is https in any case.
proxy_set_header X-Forwarded-Proto https;
# Hide the X-Forwarded header.
proxy_hide_header X-Forwarded;
# Assume we are the only Reverse Proxy (well using Proxy Protocol, but that
# is transparent).
# Also provide "_hidden" for by, since it's not relevant.
proxy_set_header Forwarded "for=$remote_addr;proto=https;host=$host;by=_hidden";
client_max_body_size 25m;
location /static/ {
alias /opt/netbox/netbox/static/;
}
location / {
proxy_pass http://127.0.0.1:8001;
}
}


@@ -17,7 +17,7 @@ map $host $upstream_acme_challenge_host {
  invite.hamburg.ccc.de 172.31.17.144:31820;
  keycloak-admin.hamburg.ccc.de 172.31.17.144:31820;
  matrix.hamburg.ccc.de 172.31.17.150:31820;
- netbox.hamburg.ccc.de 172.31.17.167:31820;
+ netbox.hamburg.ccc.de 172.31.17.149:31820;
  onlyoffice.hamburg.ccc.de 172.31.17.147:31820;
  pad.hamburg.ccc.de 172.31.17.141:31820;
  pretalx.hamburg.ccc.de 172.31.17.157:31820;
@@ -35,7 +35,7 @@ map $host $upstream_acme_challenge_host {
  eh11.easterhegg.eu 172.31.17.151:31820;
  eh20.easterhegg.eu 172.31.17.151:31820;
  www.eh20.easterhegg.eu 172.31.17.151:31820;
- eh22.easterhegg.eu 172.31.17.165:31820;
+ eh22.easterhegg.eu 172.31.17.159:31820;
  easterheggxxxx.hamburg.ccc.de 172.31.17.151:31820;
  eh2003.hamburg.ccc.de 172.31.17.151:31820;
  www.eh2003.hamburg.ccc.de 172.31.17.151:31820;
@@ -71,7 +71,6 @@ map $host $upstream_acme_challenge_host {
  hydra.hamburg.ccc.de 172.31.17.163:31820;
  cfp.eh22.easterhegg.eu 172.31.17.157:31820;
  hub.eh22.easterhegg.eu eh22hub-intern.hamburg.ccc.de:31820;
- netbox.eh22.easterhegg.eu eh22-netbox-intern.hamburg.ccc.de:31820;
  default "";
  }


@@ -32,7 +32,7 @@ stream {
  onlyoffice.hamburg.ccc.de 172.31.17.147:8443;
  hackertours.hamburg.ccc.de 172.31.17.151:8443;
  staging.hackertours.hamburg.ccc.de 172.31.17.151:8443;
- netbox.hamburg.ccc.de 172.31.17.167:8443;
+ netbox.hamburg.ccc.de 172.31.17.149:8443;
  matrix.hamburg.ccc.de 172.31.17.150:8443;
  element.hamburg.ccc.de 172.31.17.151:8443;
  branding-resources.hamburg.ccc.de 172.31.17.151:8443;
@@ -53,7 +53,7 @@
  eh11.easterhegg.eu 172.31.17.151:8443;
  eh20.easterhegg.eu 172.31.17.151:8443;
  www.eh20.easterhegg.eu 172.31.17.151:8443;
- eh22.easterhegg.eu 172.31.17.165:8443;
+ eh22.easterhegg.eu 172.31.17.159:8443;
  easterheggxxxx.hamburg.ccc.de 172.31.17.151:8443;
  eh2003.hamburg.ccc.de 172.31.17.151:8443;
  www.eh2003.hamburg.ccc.de 172.31.17.151:8443;
@@ -89,7 +89,6 @@
  hydra.hamburg.ccc.de 172.31.17.163:8443;
  cfp.eh22.easterhegg.eu pretalx-intern.hamburg.ccc.de:8443;
  hub.eh22.easterhegg.eu eh22hub-intern.hamburg.ccc.de:8443;
- netbox.eh22.easterhegg.eu eh22-netbox-intern.hamburg.ccc.de:8443;
  }
  server {

View file

@ -38,11 +38,7 @@ server {
location = / { location = / {
#return 302 https://wiki.hamburg.ccc.de/infrastructure:service-overview#tickets_pretix; #return 302 https://wiki.hamburg.ccc.de/infrastructure:service-overview#tickets_pretix;
return 302 https://tickets.hamburg.ccc.de/hackertours/eh22ht/; return 302 https://tickets.hamburg.ccc.de/hackertours/38c3/;
}
location = /hackertours/eh22/ {
return 302 https://tickets.hamburg.ccc.de/hackertours/eh22ht/;
} }
location / { location / {

View file

@ -1,22 +1,26 @@
# Role `docker` # Role `docker`
Ensures the Docker Engine and other related packages are installed from the Docker repos. Makes sure Docker Engine and other related packages are installed from the Docker repos on the specified hosts.
For a detailed list of packages see: [`tasks/main/02_docker_install.yaml`](./tasks/main/02_docker_install.yaml). For details see: [`tasks/main/02_docker_install.yaml`](./tasks/main/02_docker_install.yaml).
## Supported Distributions ## Supported Distributions
The following distributions are supported: The following distributions are supported:
- Debian 11 - Debian 11
- Debian 12
## Required Arguments ## Required Arguments
None. None.
## Optional Arguments ## Updates
None. This role doesn't handle updates.
However, it uses the system package manager for installing Docker Engine and the other related packages, so when you're making sure the system packages are up-to-date, you're handling updates for the packages installed by this role as well.
## `hosts`
The `hosts` for this role need to be the machines for which you want to make sure Docker Engine and other related packages are installed from the Docker repos.
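As an illustration, a minimal playbook applying this role might look like the following sketch; the group name `docker_hosts` is an assumption and not something defined by this repository.

```yaml
# Hypothetical playbook: apply the docker role to an inventory group.
# "docker_hosts" is a placeholder group name.
- name: Make sure Docker Engine is installed from the Docker repos
  hosts: docker_hosts
  roles:
    - docker
```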
## Links & Resources ## Links & Resources

View file

@ -1,11 +1,11 @@
- name: Ensure Docker repo is setup - name: make sure the Docker repo is setup
ansible.builtin.import_tasks: ansible.builtin.import_tasks:
file: main/01_repo_setup.yaml file: main/01_repo_setup.yaml
- name: Ensure Docker Engine and other related packages are installed - name: make sure Docker Engine and other related packages are installed
ansible.builtin.import_tasks: ansible.builtin.import_tasks:
file: main/02_docker_install.yaml file: main/02_docker_install.yaml
- name: Ensure Docker daemon configuration - name: configure the Docker daemon
ansible.builtin.import_tasks: ansible.builtin.import_tasks:
file: main/03_docker_config.yaml file: main/03_docker_config.yaml

View file

@ -1,4 +1,4 @@
- name: Ensure Dockers GPG key is added - name: make sure Dockers GPG key is added
ansible.builtin.get_url: ansible.builtin.get_url:
url: https://download.docker.com/linux/debian/gpg url: https://download.docker.com/linux/debian/gpg
dest: /etc/apt/trusted.gpg.d/docker.asc dest: /etc/apt/trusted.gpg.d/docker.asc
@ -7,7 +7,7 @@
group: root group: root
become: true become: true
- name: Ensure Docker APT repository is added - name: make sure Dockers APT repository is added
ansible.builtin.apt_repository: ansible.builtin.apt_repository:
repo: "deb [arch=amd64 signed-by=/etc/apt/trusted.gpg.d/docker.asc] https://download.docker.com/linux/debian {{ ansible_distribution_release }} stable" repo: "deb [arch=amd64 signed-by=/etc/apt/trusted.gpg.d/docker.asc] https://download.docker.com/linux/debian {{ ansible_distribution_release }} stable"
filename: docker filename: docker

View file

@ -1,4 +1,4 @@
- name: Ensure Docker Engine and other related packages are installed - name: make sure Docker Engine and other related packages are installed
ansible.builtin.apt: ansible.builtin.apt:
name: name:
- docker-ce - docker-ce

View file

@ -1,7 +1,7 @@
# Ensure the Docker daemon is configured with the following settings: # Configure the Docker daemon with the following settings:
# - log to systemd journal # - log to systemd journal
# https://docs.docker.com/engine/logging/drivers/journald/ # https://docs.docker.com/engine/logging/drivers/journald/
- name: Ensure Docker daemon configuration - name: configure Docker daemon
ansible.builtin.copy: ansible.builtin.copy:
src: daemon.json src: daemon.json
dest: /etc/docker/daemon.json dest: /etc/docker/daemon.json

View file

@ -104,18 +104,18 @@
name: anacron name: anacron
state: present state: present
- name: ensure automatic update cron job is present - name: Install automatic update cron job
become: true become: true
ansible.builtin.cron: ansible.builtin.cron:
name: 'ansible docker compose auto update' name: 'docker compose auto update'
minute: "0" minute: "0"
hour: "5" hour: "5"
job: "cd /ansible_docker_compose; docker compose pull && docker compose up -d" job: "cd /ansible_docker_compose; docker compose pull && docker compose up -d"
- name: ensure automatic cleanup cron job is present - name: Install automatic cleanup cron job
become: true become: true
ansible.builtin.cron: ansible.builtin.cron:
name: 'ansible docker compose auto cleanup' name: 'docker compose auto cleanup'
minute: "23" minute: "23"
hour: "4" hour: "4"
job: "docker system prune -a -f" job: "docker system prune -a -f"

View file

@ -22,12 +22,6 @@ None.
- `dokuwiki__php_version`: Your PHP version, default `7.4` for Debian 11 and `8.2` for Debian 12 - `dokuwiki__php_version`: Your PHP version, default `7.4` for Debian 11 and `8.2` for Debian 12
- `dokuwiki__php_user`: User of your php-fpm process, default `www-data` - `dokuwiki__php_user`: User of your php-fpm process, default `www-data`
- `dokuwiki__nginx_user`: User of your nginx process, default `nginx` - `dokuwiki__nginx_user`: User of your nginx process, default `nginx`
- `dokuwiki__custom_theme_git_url`: Clone URL of custom theme (HTTPS only)
- `dokuwiki__custom_theme_version`: Version (git branch, tag, ...) to checkout, default main
- `dokuwiki__custom_theme_name`: Name of the directory into which the custom theme is cloned
Warning: if a directory with the same name as `dokuwiki__custom_theme_name` already exists under `lib/tpl/`, the task will fail.
It needs to be deleted manually before the first run with the custom theme enabled.
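For illustration only, host variables enabling a custom theme might look like the following sketch; the clone URL, version and theme name are placeholders, not values used anywhere in this repository.

```yaml
# Hypothetical example values for the custom theme arguments.
dokuwiki__custom_theme_git_url: "https://git.example.org/themes/mytheme.git"  # HTTPS clone URL (placeholder)
dokuwiki__custom_theme_version: "main"     # git branch or tag to check out
dokuwiki__custom_theme_name: "mytheme"     # directory name created under lib/tpl/
```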
## nginx Configuration ## nginx Configuration

View file

@ -3,6 +3,3 @@ dokuwiki__installpath: "/var/www/dokuwiki"
dokuwiki__php_version: "{{ dokuwiki__php_versions[ansible_distribution + '-' + ansible_distribution_major_version] }}" dokuwiki__php_version: "{{ dokuwiki__php_versions[ansible_distribution + '-' + ansible_distribution_major_version] }}"
dokuwiki__php_user: "www-data" dokuwiki__php_user: "www-data"
dokuwiki__nginx_user: "nginx" dokuwiki__nginx_user: "nginx"
dokuwiki__custom_theme_git_url: ""
dokuwiki__custom_theme_version: "main"
dokuwiki__custom_theme_name: ""

View file

@ -1,4 +1,4 @@
- name: Install dependencies - name: Install php-fpm
become: true become: true
ansible.builtin.apt: ansible.builtin.apt:
name: name:
@ -9,7 +9,6 @@
- php-intl - php-intl
- php-gd - php-gd
- php-sqlite3 - php-sqlite3
- git
diff: false diff: false
- name: Ensure `php-fpm` is enabled - name: Ensure `php-fpm` is enabled
@ -43,17 +42,3 @@
owner: root owner: root
group: root group: root
mode: "0644" mode: "0644"
- name: checkout custom theme git repo
become: true
ansible.builtin.git:
repo: "{{ dokuwiki__custom_theme_git_url }}"
dest: "{{ dokuwiki__installpath }}/lib/tpl/{{ dokuwiki__custom_theme_name }}"
version: "{{ dokuwiki__custom_theme_version }}"
force: true
depth: 1
single_branch: true
track_submodules: true
when:
- dokuwiki__custom_theme_git_url != ""
- dokuwiki__custom_theme_name != ""

View file

@ -1,88 +0,0 @@
# `netbox` role
A role for setting up NetBox.
It automatically pulls in all required dependencies like Redis and PostgreSQL, deploys the provided systemd services and gunicorn config and sets up a PostgreSQL database named `netbox` with an owner named `netbox` and the specified password.
However, providing the [NetBox configuration](#netbox-configuration), [setting up a web server like nginx to proxy to gunicorn](#web-server-setup) and tasks like creating users are things you have to do yourself.
## Supported Distributions
Should work on Debian-based distributions.
## Required Arguments
- `netbox__version`: The NetBox version to deploy.
- `netbox__db_password`: The password to use for connection to the database.
This is required since the upgrade script runs as root and therefore peer authentication doesn't work.
- `netbox__config`: The NetBox config to deploy.
See [NetBox Configuration](#netbox-configuration) for more infos.
## Optional Arguments
- `netbox__custom_pipeline_oidc_group_and_role_mapping`: Whether or not to have custom pipeline code for OIDC group and role mapping present.
See [Custom Pipeline Code for OIDC Group and Role Mapping](#custom-pipeline-code-for-oidc-group-and-role-mapping) for more infos.
Defaults to `false`.
## NetBox Configuration
The NetBox configuration should include a connection to Redis as well as a connection to PostgreSQL.
Configuration for the Redis connection:
```python
REDIS = {
"tasks": {
"HOST": "localhost",
"PORT": 6379,
"USERNAME": "",
"PASSWORD": "",
"DATABASE": 0,
"SSL": False,
},
"caching": {
"HOST": "localhost",
"PORT": 6379,
"USERNAME": "",
"PASSWORD": "",
"DATABASE": 1,
"SSL": False,
},
}
```
Configuration for the PostgreSQL connection:
```python
DATABASE = {
"HOST": "localhost",
"NAME": "netbox",
"USER": "netbox",
"PASSWORD": "<same as netbox__db_password>",
}
```
Further configuration beyond these connection settings will typically be necessary. Some relevant resources can be found here:
- Installation guide configuration docs: <https://netboxlabs.com/docs/netbox/en/stable/installation/3-netbox/#configuration>
- Configuration docs: <https://netboxlabs.com/docs/netbox/en/stable/configuration/>
- Example configuration: <https://github.com/netbox-community/netbox/blob/main/netbox/netbox/configuration_example.py>
## Web Server Setup
As this role just sets up gunicorn, but doesn't set up a web server, you need to do that yourself.
The relevant documentation on how to do that can be found here:
- Web server setup docs: <https://netboxlabs.com/docs/netbox/en/stable/installation/5-http-server/>
- Example base nginx config: <https://github.com/netbox-community/netbox/blob/main/contrib/nginx.conf>
## Custom Pipeline Code for OIDC Group and Role Mapping
Setting the option `netbox__custom_pipeline_oidc_group_and_role_mapping` to `true` makes this role ensure custom pipeline code for OIDC group and role mapping is present.
Note that this role uses code for NetBox >= 4.0.0.
The code is available in `files/custom_pipeline_oidc_group_and_role_mapping.py`, licensed under the CC BY-SA 4.0 license and taken from [this authentik NetBox documentation](https://docs.goauthentik.io/integrations/services/netbox/).
The documentation also shows how to use the pipeline code by defining a custom `SOCIAL_AUTH_PIPELINE`, which you also need to do, as the configuration isn't provided by this role.
However, with this role the functions are available under `netbox.custom_pipeline_oidc_mapping.` instead of under `netbox.custom_pipeline.`.
See also [the default settings.py](https://github.com/netbox-community/netbox/blob/main/netbox/netbox/settings.py) for the default `SOCIAL_AUTH_PIPELINE`.
## Links & Resources
- The NetBox Git Repo: <https://github.com/netbox-community/netbox>
- The NetBox installation docs: <https://netboxlabs.com/docs/netbox/en/stable/installation/>

View file

@ -1 +0,0 @@
netbox__custom_pipeline_oidc_group_and_role_mapping: false

View file

@ -1,55 +0,0 @@
# Licensed under Creative Commons: CC BY-SA 4.0 license.
# https://github.com/goauthentik/authentik/blob/main/LICENSE
# https://github.com/goauthentik/authentik/blob/main/website/integrations/services/netbox/index.md
# https://docs.goauthentik.io/integrations/services/netbox/
from netbox.authentication import Group
class AuthFailed(Exception):
pass
def add_groups(response, user, backend, *args, **kwargs):
try:
groups = response['groups']
except KeyError:
return  # no groups claim in the token, nothing to add
# Add all groups from oAuth token
for group in groups:
group, created = Group.objects.get_or_create(name=group)
user.groups.add(group)
def remove_groups(response, user, backend, *args, **kwargs):
try:
groups = response['groups']
except KeyError:
# Remove all groups if no groups in oAuth token
user.groups.clear()
return  # nothing more to do without a groups claim
# Get all groups of user
user_groups = [item.name for item in user.groups.all()]
# Get groups of user which are not part of oAuth token
delete_groups = list(set(user_groups) - set(groups))
# Delete non oAuth token groups
for delete_group in delete_groups:
group = Group.objects.get(name=delete_group)
user.groups.remove(group)
def set_roles(response, user, backend, *args, **kwargs):
# Remove roles temporarily; they are re-added below based on the groups claim
user.is_superuser = False
user.is_staff = False
try:
groups = response['groups']
except KeyError:
# When no groups are set
# save the user without Roles
user.save()
return  # no groups claim, keep the user without roles
# Set roles if the corresponding group (superusers or staff) is in the groups claim
user.is_superuser = True if 'superusers' in groups else False
user.is_staff = True if 'staff' in groups else False
user.save()

View file

@ -1,24 +0,0 @@
- name: Run upgrade script
ansible.builtin.command: /opt/netbox/upgrade.sh
become: true
# When it runs, this should always report changed.
changed_when: true
- name: Ensure netbox systemd services are set up and up-to-date
ansible.builtin.systemd_service:
daemon_reload: true
name: "{{ item }}"
enabled: true
state: restarted
become: true
loop:
- "netbox.service"
- "netbox-rq.service"
- name: Ensure netbox housekeeping timer is set up and up-to-date
ansible.builtin.systemd_service:
daemon_reload: true
name: "netbox-housekeeping.timer"
enabled: true
state: restarted
become: true

View file

@ -1,16 +0,0 @@
argument_specs:
main:
options:
netbox__version:
type: str
required: true
netbox__db_password:
type: str
required: true
netbox__config:
type: str
required: true
netbox__custom_pipeline_oidc_group_and_role_mapping:
type: bool
required: false
default: false

View file

@ -1,11 +0,0 @@
---
dependencies:
- role: redis
- role: postgresql
vars:
postgresql__dbs:
- name: netbox
owner: netbox
postgresql__users:
- name: netbox
password: "{{ netbox__db_password }}"

View file

@ -1,124 +0,0 @@
- name: Ensure all dependencies are installed
ansible.builtin.apt:
name:
- python3
- python3-pip
- python3-venv
- python3-dev
- build-essential
- libxml2-dev
- libxslt1-dev
- libffi-dev
- libpq-dev
- libssl-dev
- zlib1g-dev
- git
become: true
- name: Ensure NetBox source is present
ansible.builtin.git:
repo: https://github.com/netbox-community/netbox.git
dest: /opt/netbox/
version: "{{ netbox__version }}"
become: true
notify:
- Run upgrade script
- Ensure netbox systemd services are set up and up-to-date
- name: Ensure custom pipeline code for OIDC group and role mapping is present
ansible.builtin.copy:
src: custom_pipeline_oidc_group_and_role_mapping.py
dest: /opt/netbox/netbox/netbox/custom_pipeline_oidc_mapping.py
mode: "0644"
owner: root
group: root
when: netbox__custom_pipeline_oidc_group_and_role_mapping
become: true
notify:
- Ensure netbox systemd services are set up and up-to-date
- name: Ensure custom pipeline code for OIDC group and role mapping is not present
ansible.builtin.file:
path: /opt/netbox/netbox/netbox/custom_pipeline_oidc_mapping.py
state: absent
when: not netbox__custom_pipeline_oidc_group_and_role_mapping
become: true
notify:
- Ensure netbox systemd services are set up and up-to-date
- name: Ensure netbox user
block:
- name: Ensure netbox group exists
ansible.builtin.group:
name: netbox
system: true
become: true
- name: Ensure netbox user exists
ansible.builtin.user:
name: netbox
group: netbox
password: '!'
system: true
become: true
- name: Ensure relevant directories are owned by netbox user
ansible.builtin.file:
path: "{{ item }}"
state: directory
owner: netbox
recurse: true
become: true
loop:
- "/opt/netbox/netbox/media/"
- "/opt/netbox/netbox/reports/"
- "/opt/netbox/netbox/scripts/"
- name: Deploy configuration.py
ansible.builtin.copy:
content: "{{ netbox__config }}"
dest: "/opt/netbox/netbox/netbox/configuration.py"
mode: "0644"
owner: root
group: root
become: true
notify: Ensure netbox systemd services are set up and up-to-date
- name: Ensure provided gunicorn config is copied
ansible.builtin.copy:
remote_src: true
src: "/opt/netbox/contrib/gunicorn.py"
dest: "/opt/netbox/gunicorn.py"
mode: "0644"
owner: root
group: root
become: true
notify: Ensure netbox systemd services are set up and up-to-date
- name: Ensure provided netbox systemd service files are copied
ansible.builtin.copy:
remote_src: true
src: "/opt/netbox/contrib/{{ item }}"
dest: "/etc/systemd/system/{{ item }}"
mode: "0644"
owner: root
group: root
become: true
loop:
- "netbox.service"
- "netbox-rq.service"
notify: Ensure netbox systemd services are set up and up-to-date
- name: Ensure provided housekeeping systemd service and timer are copied
ansible.builtin.copy:
remote_src: true
src: "/opt/netbox/contrib/{{ item }}"
dest: "/etc/systemd/system/{{ item }}"
mode: "0644"
owner: root
group: root
become: true
loop:
- "netbox-housekeeping.service"
- "netbox-housekeeping.timer"
notify: Ensure netbox housekeeping timer is set up and up-to-date

View file

@ -1,39 +1,32 @@
# Role `nginx` # Role `nginx`
Ensures nginx is installed from the NGINX repos and set up as specified via the arguments. Makes sure the `nginx` package is installed from the NGINX repos on the specified hosts.
Also makes sure a desirable baseline of NGINX configs is deployed on the specified hosts.
For the NGINX site configurations the config template below can be used.
## Entry Points
The entry points available for external use are:
- `main`
## Supported Distributions ## Supported Distributions
The following distributions are supported: The following distributions are supported:
- Debian 11 - Debian 11
- Debian 12
## Required Arguments ## Required Arguments
None. For the required arguments look at the [`argument_specs.yaml`](./meta/argument_specs.yaml).
## Optional Arguments ## Updates
- `nginx__deploy_redirect_conf`: Whether or not to deploy a config redirecting from HTTP to HTTPS, while still forwarding `/.well-known/acme-challenge/` to localhost port 31820 for certificate issuing. This role updates NGINX to the latest version covered by the provided version spec, if needed.
See [`files/redirect.conf`](./files/redirect.conf) for the configuration that would be deployed.
Defaults to `true`. ## `hosts`
- `nginx__deploy_tls_conf`: Whether or not to deploy a config configuring some TLS settings reasonably.
See [`files/tls.conf`](./files/tls.conf) for the configuration that would be deployed. The `hosts` for this role need to be the machines for which you want to make sure the `nginx` package is installed from the NGINX repos and a desirable baseline of NGINX configs is deployed.
Defaults to `true`.
- `nginx__deploy_logging_conf`: Whether or not to deploy a config configuring logging to journald.
See [`files/logging.conf`](./files/logging.conf) for the configuration that would be deployed.
Defaults to `true`.
- `nginx__configurations`: List of nginx configurations to ensure are deployed.
- `nginx__configurations.*.name`: This name with `.conf` appended will be used for the configuration's file name under `/etc/nginx/conf.d/`.
`tls`, `redirect` and `logging` are reserved names.
- `nginx__configurations.*.content`: This configuration's content.
- `nginx__use_custom_nginx_conf`: Whether or not to use a custom `/etc/nginx/nginx.conf`.
If set to true, you must provide the content for a custom `nginx.conf` via `nginx__custom_nginx_conf`.
Defaults to `false`.
- `nginx__custom_nginx_conf`: The content to use for the custom `nginx.conf`.
Needs `nginx__use_custom_nginx_conf` to be set to true to work.
You should probably still make sure that your custom `nginx.conf` includes `/etc/nginx/conf.d/*.conf`, so that the other configuration files still work.
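For illustration, host or group variables for this role might look like the following sketch; the site name, port and server block content are placeholders, not configuration used anywhere in this repository.

```yaml
# Hypothetical example values for the nginx role arguments.
nginx__deploy_redirect_conf: true
nginx__deploy_tls_conf: true
nginx__deploy_logging_conf: true
nginx__configurations:
  - name: www.example.org   # deployed to /etc/nginx/conf.d/www.example.org.conf
    content: |
      # Placeholder site config; replace with your actual server block.
      server {
          listen 8080;
          server_name www.example.org;
          return 200 "hello\n";
      }
```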
## Config Template ## Config Template

View file

@ -1,5 +1,10 @@
- name: Restart nginx - name: Restart `nginx.service`
ansible.builtin.systemd: ansible.builtin.systemd:
name: nginx.service name: nginx.service
state: restarted state: restarted
become: true become: true
- name: apt-get update
ansible.builtin.apt:
update_cache: true
become: true

View file

@ -1,15 +1,31 @@
argument_specs: argument_specs:
main: main:
options: options:
nginx__version_spec:
description: >-
The version specification to use for installing the `nginx` package. The
provided version specification will be used like the following: `nginx={{
nginx__version_spec }}*`. This makes it possible to e.g. pin only a
minor version (like `1.3.`) and then have patch versions be
installed automatically (like `1.3.1` and so on).
type: str
required: true
nginx__deploy_redirect_conf: nginx__deploy_redirect_conf:
description: >-
Whether or not to deploy a `redirect.conf` to
`/etc/nginx/conf.d/redirect.conf`.
type: bool type: bool
required: false required: false
default: true default: true
nginx__deploy_tls_conf: nginx__deploy_tls_conf:
description: >-
Whether or not to deploy a `tls.conf` to `/etc/nginx/conf.d/tls.conf`.
type: bool type: bool
required: false required: false
default: true default: true
nginx__deploy_logging_conf: nginx__deploy_logging_conf:
description: >-
Whether or not to deploy a `logging.conf` to `/etc/nginx/conf.d/logging.conf`.
type: bool type: bool
required: false required: false
default: true default: true
@ -21,16 +37,34 @@ argument_specs:
default: [ ] default: [ ]
options: options:
name: name:
description: >-
The name of the configuration file, where the configuration should
be deployed to. The file will be placed under `/etc/nginx/conf.d/`
and `.conf` will be appended to the given name. So in the end the
path will be like this: `/etc/nginx/conf.d/\{\{ name \}\}.conf`.
Note that the names `tls`, `redirect` and `logging` aren't allowed.
type: str type: str
required: true required: true
content: content:
description: The content of the configuration.
type: str type: str
required: true required: true
nginx__use_custom_nginx_conf: nginx__use_custom_nginx_conf:
description: >-
Whether or not to use a custom `/etc/nginx/nginx.conf`. If set to
true, you must provide a custom `nginx.conf` via
`nginx__custom_nginx_conf`.
type: bool type: bool
required: false required: false
default: false default: false
nginx__custom_nginx_conf: nginx__custom_nginx_conf:
description: >-
The value for a `nginx.conf` to be placed at `/etc/nginx/nginx.conf`.
You must set `nginx__use_custom_nginx_conf` to true for this value to
be used.
You should probably make sure that your custom `nginx.conf` still
includes `/etc/nginx/conf.d/*.conf` so that the configurations provided
using `nginx__configurations` still work.
type: str type: str
required: false required: false
default: "" default: ""

View file

@ -1,15 +1,19 @@
- name: Ensure valid configuration names - name: make sure nginx configuration names are valid
ansible.builtin.import_tasks: ansible.builtin.include_role:
file: main/01_validate_config_names.yaml name: nginx
tasks_from: make_sure_nginx_configuration_names_are_valid
- name: Ensure NGINX repo is set up - name: make sure NGINX repos are setup
ansible.builtin.import_tasks: ansible.builtin.include_role:
file: main/02_repo_setup.yaml name: nginx
tasks_from: main/repo_setup
- name: Ensure nginx is installed - name: make sure NGINX is installed
ansible.builtin.import_tasks: ansible.builtin.include_role:
file: main/03_nginx_install.yaml name: nginx
tasks_from: main/nginx_install
- name: Ensure configuration deployment - name: make sure desirable NGINX configs are deployed
ansible.builtin.import_tasks: ansible.builtin.include_role:
file: main/04_config_deploy.yaml name: nginx
tasks_from: main/config_deploy

View file

@ -1,9 +0,0 @@
- name: Ensure that the given configuration names are valid
ansible.builtin.fail:
msg: "You used one of the reserved configuration names: '{{ item.name }}'."
when: item.name == "tls"
or item.name == "redirect"
or item.name == "logging"
loop: "{{ nginx__configurations }}"
loop_control:
label: "{{ item.name }}"

View file

@ -1,6 +0,0 @@
- name: Ensure nginx is installed
ansible.builtin.apt:
name: nginx
state: present
update_cache: true
become: true

View file

@ -1,13 +1,13 @@
- name: Check, if a save of a previous `nginx.conf` is present - name: check, if a save of a previous `nginx.conf` is present
ansible.builtin.stat: ansible.builtin.stat:
path: /etc/nginx/nginx.conf.ansiblesave path: /etc/nginx/nginx.conf.ansiblesave
register: nginx__nginx_conf_ansiblesave_stat register: nginx__nginx_conf_ansiblesave_stat_result
- name: Handle the case, where a custom `nginx.conf` is to be used - name: handle the case, where a custom `nginx.conf` is to be used
when: nginx__use_custom_nginx_conf when: nginx__use_custom_nginx_conf
block: block:
- name: When no `nginx.conf.ansiblesave` is present, save the current `nginx.conf` - name: when no `nginx.conf.ansiblesave` is present, save the current `nginx.conf`
when: not nginx__nginx_conf_ansiblesave_stat.stat.exists when: not nginx__nginx_conf_ansiblesave_stat_result.stat.exists
ansible.builtin.copy: ansible.builtin.copy:
force: true force: true
dest: /etc/nginx/nginx.conf.ansiblesave dest: /etc/nginx/nginx.conf.ansiblesave
@ -18,7 +18,7 @@
src: /etc/nginx/nginx.conf src: /etc/nginx/nginx.conf
become: true become: true
- name: Ensure the custom `nginx.conf` is deployed - name: deploy the custom `nginx.conf`
ansible.builtin.copy: ansible.builtin.copy:
content: "{{ nginx__custom_nginx_conf }}" content: "{{ nginx__custom_nginx_conf }}"
dest: "/etc/nginx/nginx.conf" dest: "/etc/nginx/nginx.conf"
@ -26,13 +26,13 @@
owner: root owner: root
group: root group: root
become: true become: true
notify: Restart nginx notify: Restart `nginx.service`
- name: Handle the case, where no custom `nginx.conf` is to be used - name: handle the case, where no custom `nginx.conf` is to be used
when: not nginx__use_custom_nginx_conf when: not nginx__use_custom_nginx_conf
block: block:
- name: When a `nginx.conf.ansiblesave` is present, copy it to `nginx.conf` - name: when a `nginx.conf.ansiblesave` is present, copy it to `nginx.conf`
when: nginx__nginx_conf_ansiblesave_stat.stat.exists when: nginx__nginx_conf_ansiblesave_stat_result.stat.exists
ansible.builtin.copy: ansible.builtin.copy:
force: true force: true
dest: /etc/nginx/nginx.conf dest: /etc/nginx/nginx.conf
@ -42,32 +42,32 @@
remote_src: true remote_src: true
src: /etc/nginx/nginx.conf.ansiblesave src: /etc/nginx/nginx.conf.ansiblesave
become: true become: true
notify: Restart nginx notify: Restart `nginx.service`
- name: Ensure no `nginx.conf.ansiblesave` is present - name: delete the `nginx.conf.ansiblesave`, if it is present
when: nginx__nginx_conf_ansiblesave_stat.stat.exists when: nginx__nginx_conf_ansiblesave_stat_result.stat.exists
ansible.builtin.file: ansible.builtin.file:
path: /etc/nginx/nginx.conf.ansiblesave path: /etc/nginx/nginx.conf.ansiblesave
state: absent state: absent
become: true become: true
- name: Ensure mozilla dhparam is deployed - name: make sure mozilla dhparam is deployed
ansible.builtin.get_url: ansible.builtin.get_url:
force: true force: true
dest: /etc/nginx-mozilla-dhparam dest: /etc/nginx-mozilla-dhparam
mode: "0644" mode: "0644"
url: https://ssl-config.mozilla.org/ffdhe2048.txt url: https://ssl-config.mozilla.org/ffdhe2048.txt
become: true become: true
notify: Restart nginx notify: Restart `nginx.service`
- name: Set `nginx__config_files_to_exist` fact initially to an empty list - name: set `nginx__config_files_to_exist` fact initially to an empty list
ansible.builtin.set_fact: ansible.builtin.set_fact:
nginx__config_files_to_exist: [ ] nginx__config_files_to_exist: [ ]
- name: Handle the case, where tls.conf should be deployed - name: handle the case, where tls.conf should be deployed
when: nginx__deploy_tls_conf when: nginx__deploy_tls_conf
block: block:
- name: Ensure tls.conf is deployed - name: make sure tls.conf is deployed
ansible.builtin.copy: ansible.builtin.copy:
force: true force: true
dest: /etc/nginx/conf.d/tls.conf dest: /etc/nginx/conf.d/tls.conf
@ -76,16 +76,16 @@
group: root group: root
src: tls.conf src: tls.conf
become: true become: true
notify: Restart nginx notify: Restart `nginx.service`
- name: Add tls.conf to nginx__config_files_to_exist - name: add tls.conf to nginx__config_files_to_exist
ansible.builtin.set_fact: ansible.builtin.set_fact:
nginx__config_files_to_exist: "{{ nginx__config_files_to_exist + [ 'tls.conf' ] }}" # noqa: jinja[spacing] nginx__config_files_to_exist: "{{ nginx__config_files_to_exist + [ 'tls.conf' ] }}" # noqa: jinja[spacing]
- name: Handle the case, where redirect.conf should be deployed - name: handle the case, where redirect.conf should be deployed
when: nginx__deploy_redirect_conf when: nginx__deploy_redirect_conf
block: block:
- name: Ensure redirect.conf is deployed - name: make sure redirect.conf is deployed
ansible.builtin.copy: ansible.builtin.copy:
force: true force: true
dest: /etc/nginx/conf.d/redirect.conf dest: /etc/nginx/conf.d/redirect.conf
@ -94,16 +94,16 @@
group: root group: root
src: redirect.conf src: redirect.conf
become: true become: true
notify: Restart nginx notify: Restart `nginx.service`
- name: Add redirect.conf to nginx__config_files_to_exist - name: add redirect.conf to nginx__config_files_to_exist
ansible.builtin.set_fact: ansible.builtin.set_fact:
nginx__config_files_to_exist: "{{ nginx__config_files_to_exist + [ 'redirect.conf' ] }}" # noqa: jinja[spacing] nginx__config_files_to_exist: "{{ nginx__config_files_to_exist + [ 'redirect.conf' ] }}" # noqa: jinja[spacing]
- name: Handle the case, where logging.conf should be deployed - name: handle the case, where logging.conf should be deployed
when: nginx__deploy_logging_conf when: nginx__deploy_logging_conf
block: block:
- name: Ensure logging.conf is deployed - name: make sure logging.conf is deployed
ansible.builtin.copy: ansible.builtin.copy:
force: true force: true
dest: /etc/nginx/conf.d/logging.conf dest: /etc/nginx/conf.d/logging.conf
@ -112,13 +112,13 @@
group: root group: root
src: logging.conf src: logging.conf
become: true become: true
notify: Restart nginx notify: Restart `nginx.service`
- name: Add logging.conf to nginx__config_files_to_exist - name: add logging.conf to nginx__config_files_to_exist
ansible.builtin.set_fact: ansible.builtin.set_fact:
nginx__config_files_to_exist: "{{ nginx__config_files_to_exist + [ 'logging.conf' ] }}" # noqa: jinja[spacing] nginx__config_files_to_exist: "{{ nginx__config_files_to_exist + [ 'logging.conf' ] }}" # noqa: jinja[spacing]
- name: Ensure all given configuration files are deployed - name: make sure all given configuration files are deployed
ansible.builtin.copy: ansible.builtin.copy:
content: "{{ item.content }}" content: "{{ item.content }}"
dest: "/etc/nginx/conf.d/{{ item.name }}.conf" dest: "/etc/nginx/conf.d/{{ item.name }}.conf"
@ -127,30 +127,24 @@
group: root group: root
become: true become: true
loop: "{{ nginx__configurations }}" loop: "{{ nginx__configurations }}"
loop_control: notify: Restart `nginx.service`
label: "{{ item.name }}"
notify: Restart nginx
- name: Add names with suffixes from `nginx__configurations` to `nginx__config_files_to_exist` fact - name: add names plus suffix from `nginx__configurations` to `nginx__config_files_to_exist` fact
ansible.builtin.set_fact: ansible.builtin.set_fact:
nginx__config_files_to_exist: "{{ nginx__config_files_to_exist + [ item.name + '.conf' ] }}" # noqa: jinja[spacing] nginx__config_files_to_exist: "{{ nginx__config_files_to_exist + [ item.name + '.conf' ] }}" # noqa: jinja[spacing]
loop: "{{ nginx__configurations }}" loop: "{{ nginx__configurations }}"
loop_control:
label: "{{ item.name }}"
- name: Find configuration files to remove - name: find configuration files to remove
ansible.builtin.find: ansible.builtin.find:
paths: /etc/nginx/conf.d/ paths: /etc/nginx/conf.d/
recurse: false recurse: false
excludes: "{{ nginx__config_files_to_exist }}" excludes: "{{ nginx__config_files_to_exist }}"
register: nginx__config_files_to_remove register: nginx__config_files_to_remove
- name: Remove all configuration files that should be removed - name: remove all configuration files that should be removed
ansible.builtin.file: ansible.builtin.file:
path: "{{ item.path }}" path: "{{ item.path }}"
state: absent state: absent
become: true become: true
loop: "{{ nginx__config_files_to_remove.files }}" loop: "{{ nginx__config_files_to_remove.files }}"
loop_control: notify: Restart `nginx.service`
label: "{{ item.path | ansible.builtin.basename }}"
notify: Restart nginx

View file

@ -0,0 +1,13 @@
- name: make sure the `nginx` package is installed
ansible.builtin.apt:
name: nginx={{ nginx__version_spec }}*
state: present
allow_change_held_packages: true
update_cache: true
become: true
- name: apt-mark hold `nginx`
ansible.builtin.dpkg_selections:
name: nginx
selection: hold
become: true

View file

@ -1,10 +1,16 @@
- name: Ensure gnupg is installed - name: gather package facts
ansible.builtin.package_facts:
manager: apt
- name: make sure `gnupg` package is installed
ansible.builtin.apt: ansible.builtin.apt:
name: gnupg name: gnupg
state: present state: present
update_cache: true
become: true become: true
when: "'gnupg' not in ansible_facts.packages"
- name: Ensure NGINX signing key is added - name: make sure NGINX signing key is added
ansible.builtin.get_url: ansible.builtin.get_url:
url: https://nginx.org/keys/nginx_signing.key url: https://nginx.org/keys/nginx_signing.key
dest: /etc/apt/trusted.gpg.d/nginx.asc dest: /etc/apt/trusted.gpg.d/nginx.asc
@ -12,20 +18,23 @@
owner: root owner: root
group: root group: root
become: true become: true
notify: apt-get update
- name: Ensure NGINX APT repository is added - name: make sure NGINX APT repository is added
ansible.builtin.apt_repository: ansible.builtin.apt_repository:
repo: "deb [arch=amd64 signed-by=/etc/apt/trusted.gpg.d/nginx.asc] https://nginx.org/packages/debian/ {{ ansible_distribution_release }} nginx" repo: "deb [arch=amd64 signed-by=/etc/apt/trusted.gpg.d/nginx.asc] https://nginx.org/packages/debian/ {{ ansible_distribution_release }} nginx"
state: present state: present
become: true become: true
notify: apt-get update
- name: Ensure NGINX APT source repository is added - name: make sure NGINX APT source repository is added
ansible.builtin.apt_repository: ansible.builtin.apt_repository:
repo: "deb-src [arch=amd64 signed-by=/etc/apt/trusted.gpg.d/nginx.asc] https://nginx.org/packages/debian/ {{ ansible_distribution_release }} nginx" repo: "deb-src [arch=amd64 signed-by=/etc/apt/trusted.gpg.d/nginx.asc] https://nginx.org/packages/debian/ {{ ansible_distribution_release }} nginx"
state: present state: present
become: true become: true
notify: apt-get update
- name: Ensure repository pinning to make sure nginx package gets installed from NGINX repositories is set up - name: set up repository pinning to make sure nginx package gets installed from NGINX repositories
ansible.builtin.copy: ansible.builtin.copy:
content: | content: |
Package: * Package: *
@ -37,3 +46,6 @@
group: root group: root
mode: "0644" mode: "0644"
become: true become: true
- name: Flush handlers to make sure "apt-get update" handler runs, if needed
ansible.builtin.meta: flush_handlers

View file

@ -0,0 +1,6 @@
- name: make sure nginx configuration names are valid
ansible.builtin.fail:
msg: "You used the following name: `{{ item.name }}`. Please make sure to not use the following names: `tls`, `redirect`, `logging`."
when: item.name == "tls"
or item.name == "redirect"
or item.name == "logging"
loop: "{{ nginx__configurations }}"

View file

@ -1,37 +0,0 @@
# Role `postgresql`
Ensures `postgresql` is installed by installing the distribution's package.
Also ensures the optionally given databases and users are set up as specified.
## Supported Distributions
Should work on Debian-based distributions.
## Required Arguments
None.
## Optional Arguments
- `postgresql__dbs`: List of databases with their owner to ensure are set up.
- `postgresql__dbs.*.name`: Name of the database.
- `postgresql__dbs.*.owner`: Owner of the database.
- `postgresql__users`: List of users to ensure are set up.
- `postgresql__users.*.name`: Name of the user.
- `postgresql__users.*.password`: Optional password for the user.
If left unset, the user will have no password set, but can still connect using [peer authentication](https://www.postgresql.org/docs/current/auth-peer.html) on the local system.
(Peer authentication works when a password is set as well.)
## Example Arguments
```yaml
postgresql__dbs:
- name: netbox
owner: netbox
- name: foo
owner: bar
postgresql__users:
- name: netbox
password: super_secret
- name: bar
```

View file

@ -1,2 +0,0 @@
postgresql__dbs: [ ]
postgresql__users: [ ]

View file

@ -1,28 +0,0 @@
argument_specs:
main:
options:
postgresql__dbs:
type: list
elements: dict
required: false
default: [ ]
options:
name:
type: str
required: true
owner:
type: str
required: true
postgresql__users:
type: list
elements: dict
required: false
default: [ ]
options:
name:
type: str
required: true
password:
type: str
required: false
default: ""

View file

@ -1,30 +0,0 @@
- name: Ensure postgresql is installed
ansible.builtin.apt:
name:
- postgresql
become: true
- name: Ensure Python library for community.postgresql is installed if needed
ansible.builtin.apt:
name:
- python3-psycopg
become: true
when: postgresql__dbs != [ ] or postgresql__users != [ ]
- name: Ensure users
community.postgresql.postgresql_user:
name: "{{ item.name }}"
password: "{{ item.password | default('') }}"
become: true
become_user: postgres
loop: "{{ postgresql__users }}"
loop_control:
label: "user {{ item.name }} with {{ 'a password' if item.password is defined else 'no password' }}"
- name: Ensure dbs with owners
community.postgresql.postgresql_db:
name: "{{ item.name }}"
owner: "{{ item.owner }}"
become: true
become_user: postgres
loop: "{{ postgresql__dbs }}"

View file

@ -1,15 +0,0 @@
# Role `redis`
Ensures `redis` is installed by installing the distribution's package.
## Supported Distributions
Should work on Debian-based distributions.
## Required Arguments
None.
## Optional Arguments
None.

View file

@ -1,5 +0,0 @@
- name: Ensure redis is installed
ansible.builtin.apt:
name:
- redis
become: true