Compare commits


2 commits

SHA1 | Message | Date
53b2f491f8 | fix: Uses root password | 2025-11-01 21:53:43 +01:00
    Some checks failed: Ansible Lint (pull_request) failing after 2m10s
0f8c0ffef9 | fix: Replaces password in healthcheck with dynamic secret | 2025-11-01 21:50:01 +01:00
    Some checks failed: Ansible Lint (pull_request) failing after 2m7s
61 changed files with 183 additions and 425 deletions

View file

@ -10,7 +10,7 @@ jobs:
name: Ansible Lint
runs-on: docker
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
- name: Install pip
run: |
apt update
@ -24,7 +24,7 @@ jobs:
# work in our environment.
# Rather manually setup python (pip) before instead.
- name: Run ansible-lint
uses: https://github.com/ansible/ansible-lint@v25.11.0
uses: https://github.com/ansible/ansible-lint@d7cd7cfa2469536527aceaef9ef2ec6f2fb331cb # v25.9.2
with:
setup_python: "false"
requirements_file: "requirements.yml"

View file

@ -1,11 +1,11 @@
# renovate: datasource=docker depName=git.hamburg.ccc.de/ccchh/oci-images/nextcloud
nextcloud__version: 32
# renovate: datasource=docker depName=docker.io/library/postgres
nextcloud__postgres_version: 15.15
nextcloud__postgres_version: 15.14
nextcloud__fqdn: cloud.hamburg.ccc.de
nextcloud__data_dir: /data/nextcloud
nextcloud__extra_configuration: "{{ lookup('ansible.builtin.template', 'resources/chaosknoten/cloud/nextcloud/extra_configuration.config.php.j2') }}"
nextcloud__use_custom_new_user_skeleton: true
nextcloud__custom_new_user_skeleton_directory: "resources/chaosknoten/cloud/nextcloud/new_user_skeleton_directory/"
nextcloud__proxy_protocol_reverse_proxy_ip: "2a00:14b0:4200:3000:125::1"
nextcloud__proxy_protocol_reverse_proxy_ip: 172.31.17.140
nextcloud__certbot_acme_account_email_address: le-admin@hamburg.ccc.de

View file

@ -53,6 +53,7 @@ nginx__configurations:
- name: metrics.hamburg.ccc.de
content: "{{ lookup('ansible.builtin.file', 'resources/chaosknoten/grafana/nginx/metrics.hamburg.ccc.de.conf') }}"
alloy_config: |
prometheus.remote_write "default" {
endpoint {

View file

@ -1,5 +1,5 @@
# renovate: datasource=github-releases depName=netbox packageName=netbox-community/netbox
netbox__version: "v4.4.6"
netbox__version: "v4.4.5"
netbox__config: "{{ lookup('ansible.builtin.template', 'resources/chaosknoten/netbox/netbox/configuration.py.j2') }}"
netbox__custom_pipeline_oidc_group_and_role_mapping: true

View file

@ -1,2 +0,0 @@
systemd_networkd__config_dir: 'resources/chaosknoten/router/systemd_networkd/'
nftables__config: "{{ lookup('ansible.builtin.file', 'resources/chaosknoten/router/nftables/nftables.conf') }}"

View file

@ -7,13 +7,13 @@ all:
chaosknoten:
ansible_host: chaosknoten.hamburg.ccc.de
cloud:
ansible_host: cloud.hosts.hamburg.ccc.de
ansible_host: cloud-intern.hamburg.ccc.de
ansible_user: chaos
ansible_ssh_common_args: -J ssh://chaos@router.hamburg.ccc.de
ansible_ssh_common_args: -J ssh://chaos@public-reverse-proxy.hamburg.ccc.de
eh22-wiki:
ansible_host: eh22-wiki.hosts.hamburg.ccc.de
ansible_host: eh22-wiki-intern.hamburg.ccc.de
ansible_user: chaos
ansible_ssh_common_args: -J ssh://chaos@router.hamburg.ccc.de
ansible_ssh_common_args: -J ssh://chaos@public-reverse-proxy.hamburg.ccc.de
grafana:
ansible_host: grafana-intern.hamburg.ccc.de
ansible_user: chaos
@ -23,9 +23,9 @@ all:
ansible_user: chaos
ansible_ssh_common_args: -J ssh://chaos@public-reverse-proxy.hamburg.ccc.de
keycloak:
ansible_host: keycloak.hosts.hamburg.ccc.de
ansible_host: keycloak-intern.hamburg.ccc.de
ansible_user: chaos
ansible_ssh_common_args: -J ssh://chaos@router.hamburg.ccc.de
ansible_ssh_common_args: -J ssh://chaos@public-reverse-proxy.hamburg.ccc.de
lists:
ansible_host: lists.hamburg.ccc.de
ansible_user: chaos
@ -37,13 +37,13 @@ all:
ansible_user: chaos
ansible_ssh_common_args: -J ssh://chaos@public-reverse-proxy.hamburg.ccc.de
onlyoffice:
ansible_host: onlyoffice.hosts.hamburg.ccc.de
ansible_host: onlyoffice-intern.hamburg.ccc.de
ansible_user: chaos
ansible_ssh_common_args: -J ssh://chaos@router.hamburg.ccc.de
ansible_ssh_common_args: -J ssh://chaos@public-reverse-proxy.hamburg.ccc.de
pad:
ansible_host: pad.hosts.hamburg.ccc.de
ansible_host: pad-intern.hamburg.ccc.de
ansible_user: chaos
ansible_ssh_common_args: -J ssh://chaos@router.hamburg.ccc.de
ansible_ssh_common_args: -J ssh://chaos@public-reverse-proxy.hamburg.ccc.de
pretalx:
ansible_host: pretalx-intern.hamburg.ccc.de
ansible_user: chaos
@ -51,13 +51,10 @@ all:
public-reverse-proxy:
ansible_host: public-reverse-proxy.hamburg.ccc.de
ansible_user: chaos
router:
ansible_host: router.hamburg.ccc.de
ansible_user: chaos
wiki:
ansible_host: wiki.hosts.hamburg.ccc.de
ansible_host: wiki-intern.hamburg.ccc.de
ansible_user: chaos
ansible_ssh_common_args: -J ssh://chaos@router.hamburg.ccc.de
ansible_ssh_common_args: -J ssh://chaos@public-reverse-proxy.hamburg.ccc.de
zammad:
ansible_host: zammad-intern.hamburg.ccc.de
ansible_user: chaos
@ -91,19 +88,12 @@ base_config_hosts:
pad:
pretalx:
public-reverse-proxy:
router:
tickets:
wiki:
zammad:
ntfy:
sunders:
renovate:
systemd_networkd_hosts:
hosts:
router:
nftables_hosts:
hosts:
router:
docker_compose_hosts:
hosts:
ccchoir:
@ -183,7 +173,6 @@ infrastructure_authorized_keys_hosts:
pad:
pretalx:
public-reverse-proxy:
router:
wiki:
zammad:
ntfy:

View file

@ -6,3 +6,4 @@ docker_compose__configuration_files:
content: "{{ lookup('ansible.builtin.template', 'resources/z9/yate/docker_compose/regexroute.conf.j2') }}"
- name: regfile.conf
content: "{{ lookup('ansible.builtin.template', 'resources/z9/yate/docker_compose/regfile.conf.j2') }}"
docker_compose__restart_cmd: "exec yate sh -c 'kill -1 1'"

View file

@ -4,7 +4,7 @@ all:
ansible_host: authoritative-dns.z9.ccchh.net
ansible_user: chaos
dooris:
ansible_host: dooris.z9.ccchh.net
ansible_host: 10.31.208.201
ansible_user: chaos
light:
ansible_host: light.z9.ccchh.net

View file

@ -4,16 +4,6 @@
roles:
- base_config
- name: Ensure systemd-networkd config deployment on systemd_networkd_hosts
hosts: systemd_networkd_hosts
roles:
- systemd_networkd
- name: Ensure nftables deployment on nftables_hosts
hosts: nftables_hosts
roles:
- nftables
- name: Ensure deployment of infrastructure authorized keys
hosts: infrastructure_authorized_keys_hosts
roles:

View file

@ -1,17 +1,13 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": [
"config:recommended",
// Parts from config:best-practices:
// https://docs.renovatebot.com/presets-config/#configbest-practices
":configMigration",
"abandonments:recommended",
"security:minimumReleaseAgeNpm",
"config:recommended", // Included in config:best-practices anyway, but added for clarity.
"config:best-practices",
":ignoreUnstable",
":disableRateLimiting",
":rebaseStalePrs",
":label(renovate)"
":label(renovate)",
"group:allDigest"
],
"semanticCommits": "disabled",
"packageRules": [
@ -32,6 +28,12 @@
"matchDatasources": ["docker"],
"matchPackageNames": ["docker.io/pretix/standalone"],
"versioning": "regex:^(?<major>\\d+\\.\\d+)(?:\\.(?<minor>\\d+))$"
},
// Since Forgejo seems to clean up older tag versions (and with them their digests), disable digest pinning for our images.
{
"matchDatasources": ["docker"],
"matchPackageNames": ["git.hamburg.ccc.de/*"],
"pinDigests": false
}
],
"customManagers": [

View file

@ -6,6 +6,3 @@ collections:
- name: community.sops
version: ">=2.2.4"
source: https://galaxy.ansible.com
- name: community.docker
version: ">=5.0.0"
source: https://galaxy.ansible.com

View file

@ -3,7 +3,7 @@
services:
database:
image: docker.io/library/mariadb:11
image: docker.io/library/mariadb:11@sha256:ae6119716edac6998ae85508431b3d2e666530ddf4e94c61a10710caec9b0f71
environment:
- "MARIADB_DATABASE=wordpress"
- "MARIADB_ROOT_PASSWORD={{ secret__mariadb_root_password }}"
@ -17,7 +17,7 @@ services:
restart: unless-stopped
app:
image: docker.io/library/wordpress:6-php8.1
image: docker.io/library/wordpress:6-php8.1@sha256:75f79f9c45a587b283e47fd21c6e51077d0c9dbbba529377faaa0c28d5b8f5a4
environment:
- "WORDPRESS_DB_HOST=database"
- "WORDPRESS_DB_NAME=wordpress"

View file

@ -3,12 +3,11 @@
server {
# Listen on a custom port for the proxy protocol.
listen 8443 ssl http2 proxy_protocol;
listen [::]:8443 ssl http2 proxy_protocol;
# Make use of the ngx_http_realip_module to set the $remote_addr and
# $remote_port to the client address and client port, when using proxy
# protocol.
# First set our proxy protocol proxy as trusted.
set_real_ip_from 2a00:14b0:4200:3000:125::1;
set_real_ip_from 172.31.17.140;
# Then tell the realip_module to get the addresses from the proxy protocol
# header.
real_ip_header proxy_protocol;

View file

@ -2,13 +2,12 @@
services:
prometheus:
image: docker.io/prom/prometheus:v3.7.3
image: docker.io/prom/prometheus:v3.7.2@sha256:23031bfe0e74a13004252caaa74eccd0d62b6c6e7a04711d5b8bf5b7e113adc7
container_name: prometheus
command:
- '--config.file=/etc/prometheus/prometheus.yml'
- '--web.enable-remote-write-receiver'
- '--enable-feature=promql-experimental-functions'
- '--storage.tsdb.retention.time=28d'
ports:
- 9090:9090
restart: unless-stopped
@ -19,7 +18,7 @@ services:
- prom_data:/prometheus
alertmanager:
image: docker.io/prom/alertmanager:v0.29.0
image: docker.io/prom/alertmanager:v0.28.1@sha256:27c475db5fb156cab31d5c18a4251ac7ed567746a2483ff264516437a39b15ba
container_name: alertmanager
command:
- '--config.file=/etc/alertmanager/alertmanager.yaml'
@ -32,7 +31,7 @@ services:
- alertmanager_data:/alertmanager
grafana:
image: docker.io/grafana/grafana:12.3.0
image: docker.io/grafana/grafana:12.2.1@sha256:35c41e0fd0295f5d0ee5db7e780cf33506abfaf47686196f825364889dee878b
container_name: grafana
ports:
- 3000:3000
@ -46,7 +45,7 @@ services:
- graf_data:/var/lib/grafana
pve-exporter:
image: docker.io/prompve/prometheus-pve-exporter:3.5.5
image: docker.io/prompve/prometheus-pve-exporter:3.5.5@sha256:79a5598906697b1a5a006d09f0200528a77c6ff1568faf018539ac65824454df
container_name: pve-exporter
ports:
- 9221:9221
@ -59,7 +58,7 @@ services:
- /dev/null:/etc/prometheus/pve.yml
loki:
image: docker.io/grafana/loki:3.6.0
image: docker.io/grafana/loki:3.5.7@sha256:0eaee7bf39cc83aaef46914fb58f287d4f4c4be6ec96b86c2ed55719a75e49c8
container_name: loki
ports:
- 13100:3100
@ -70,7 +69,7 @@ services:
- loki_data:/var/loki
ntfy-alertmanager-ccchh-critical:
image: docker.io/xenrox/ntfy-alertmanager:0.5.0
image: docker.io/xenrox/ntfy-alertmanager:0.5.0@sha256:5fea88db3bf0257d98c007ab0c4ef064c6d67d7b7ceead7d6956dfa0a5cb333b
container_name: ntfy-alertmanager-ccchh-critical
volumes:
- ./configs/ntfy-alertmanager-ccchh-critical:/etc/ntfy-alertmanager/config
@ -79,7 +78,7 @@ services:
restart: unless-stopped
ntfy-alertmanager-fux-critical:
image: docker.io/xenrox/ntfy-alertmanager:0.5.0
image: docker.io/xenrox/ntfy-alertmanager:0.5.0@sha256:5fea88db3bf0257d98c007ab0c4ef064c6d67d7b7ceead7d6956dfa0a5cb333b
container_name: ntfy-alertmanager-fux-critical
volumes:
- ./configs/ntfy-alertmanager-fux-critical:/etc/ntfy-alertmanager/config
@ -88,7 +87,7 @@ services:
restart: unless-stopped
ntfy-alertmanager-ccchh:
image: docker.io/xenrox/ntfy-alertmanager:0.5.0
image: docker.io/xenrox/ntfy-alertmanager:0.5.0@sha256:5fea88db3bf0257d98c007ab0c4ef064c6d67d7b7ceead7d6956dfa0a5cb333b
container_name: ntfy-alertmanager-ccchh
volumes:
- ./configs/ntfy-alertmanager-ccchh:/etc/ntfy-alertmanager/config
@ -97,7 +96,7 @@ services:
restart: unless-stopped
ntfy-alertmanager-fux:
image: docker.io/xenrox/ntfy-alertmanager:0.5.0
image: docker.io/xenrox/ntfy-alertmanager:0.5.0@sha256:5fea88db3bf0257d98c007ab0c4ef064c6d67d7b7ceead7d6956dfa0a5cb333b
container_name: ntfy-alertmanager-fux
volumes:
- ./configs/ntfy-alertmanager-fux:/etc/ntfy-alertmanager/config

View file

@ -46,7 +46,7 @@ services:
- "8080:8080"
db:
image: docker.io/library/postgres:15.15
image: docker.io/library/postgres:15.14@sha256:424e79b81868f5fc5cf515eaeac69d288692ebcca7db86d98f91b50d4bce64bb
restart: unless-stopped
networks:
- keycloak

View file

@ -4,12 +4,11 @@
server {
# Listen on a custom port for the proxy protocol.
listen 8443 ssl http2 proxy_protocol;
listen [::]:8443 ssl http2 proxy_protocol;
# Make use of the ngx_http_realip_module to set the $remote_addr and
# $remote_port to the client address and client port, when using proxy
# protocol.
# First set our proxy protocol proxy as trusted.
set_real_ip_from 2a00:14b0:4200:3000:125::1;
set_real_ip_from 172.31.17.140;
# Then tell the realip_module to get the addresses from the proxy protocol
# header.
real_ip_header proxy_protocol;

View file

@ -4,12 +4,11 @@
server {
# Listen on a custom port for the proxy protocol.
listen 8443 ssl http2 proxy_protocol;
listen [::]:8443 ssl http2 proxy_protocol;
# Make use of the ngx_http_realip_module to set the $remote_addr and
# $remote_port to the client address and client port, when using proxy
# protocol.
# First set our proxy protocol proxy as trusted.
set_real_ip_from 2a00:14b0:4200:3000:125::1;
set_real_ip_from 172.31.17.140;
# Then tell the realip_module to get the addresses from the proxy protocol
# header.
real_ip_header proxy_protocol;

View file

@ -7,13 +7,12 @@ server {
##listen [::]:443 ssl http2;
# Listen on a custom port for the proxy protocol.
listen 8443 ssl http2 proxy_protocol;
listen [::]:8443 ssl http2 proxy_protocol;
listen 8444 ssl http2 proxy_protocol;
# Make use of the ngx_http_realip_module to set the $remote_addr and
# $remote_port to the client address and client port, when using proxy
# protocol.
# First set our proxy protocol proxy as trusted.
set_real_ip_from 2a00:14b0:4200:3000:125::1;
set_real_ip_from 172.31.17.140;
# Then tell the realip_module to get the addresses from the proxy protocol
# header.
real_ip_header proxy_protocol;

View file

@ -1,7 +1,7 @@
services:
mailman-core:
restart: unless-stopped
image: docker.io/maxking/mailman-core:0.5 # Use a specific version tag (tag latest is not published)
image: docker.io/maxking/mailman-core:0.5@sha256:cb8e412bb18d74480f996da68f46e92473b6103995e71bc5aeba139b255cc3d2 # Use a specific version tag (tag latest is not published)
container_name: mailman-core
hostname: mailman-core
volumes:
@ -25,7 +25,7 @@ services:
mailman-web:
restart: unless-stopped
image: docker.io/maxking/mailman-web:0.5 # Use a specific version tag (tag latest is not published)
image: docker.io/maxking/mailman-web:0.5@sha256:014726db85586fb53541f66f6ce964bf07e939791cfd5ffc796cd6d243696a18 # Use a specific version tag (tag latest is not published)
container_name: mailman-web
hostname: mailman-web
depends_on:
@ -56,7 +56,7 @@ services:
- POSTGRES_DB=mailmandb
- POSTGRES_USER=mailman
- POSTGRES_PASSWORD=wvQjbMRnwFuxGEPz
image: docker.io/library/postgres:12-alpine
image: docker.io/library/postgres:12-alpine@sha256:7c8f4870583184ebadf7f17a6513620aac5f365a7938dc6a6911c1d5df2f481a
volumes:
- /opt/mailman/database:/var/lib/postgresql/data
networks:

View file

@ -1,7 +1,7 @@
---
services:
ntfy:
image: docker.io/binwiederhier/ntfy:v2.15.0
image: docker.io/binwiederhier/ntfy:v2.14.0@sha256:5a051798d14138c3ecb12c038652558ab6a077e1aceeb867c151cbf5fa8451ef
container_name: ntfy
command:
- serve

View file

@ -4,7 +4,7 @@
services:
onlyoffice:
image: docker.io/onlyoffice/documentserver:9.1.0
image: docker.io/onlyoffice/documentserver:9.1.0@sha256:34b92f4a67bfd939bd6b75893e8217556e3b977f81e49472f7e28737b741ba1d
restart: unless-stopped
volumes:
- "./onlyoffice/DocumentServer/logs:/var/log/onlyoffice"

View file

@ -3,13 +3,11 @@
server {
# Listen on a custom port for the proxy protocol.
listen 8443 ssl http2 proxy_protocol;
listen [::]:8443 ssl http2 proxy_protocol;
# Make use of the ngx_http_realip_module to set the $remote_addr and
# $remote_port to the client address and client port, when using proxy
# protocol.
# First set our proxy protocol proxy as trusted.
set_real_ip_from 2a00:14b0:4200:3000:125::1;
set_real_ip_from 172.31.17.140;
# Then tell the realip_module to get the addresses from the proxy protocol
# header.
real_ip_header proxy_protocol;

View file

@ -3,7 +3,7 @@
services:
database:
image: docker.io/library/postgres:15-alpine
image: docker.io/library/postgres:15-alpine@sha256:64583b3cb4f2010277bdd9749456de78e5c36f8956466ba14b0b96922e510950
environment:
- "POSTGRES_USER=hedgedoc"
- "POSTGRES_PASSWORD={{ secret__hedgedoc_db_password }}"
@ -13,7 +13,7 @@ services:
restart: unless-stopped
app:
image: quay.io/hedgedoc/hedgedoc:1.10.3
image: quay.io/hedgedoc/hedgedoc:1.10.3@sha256:ca58fd73ecf05c89559b384fb7a1519c18c8cbba5c21a0018674ed820b9bdb73
environment:
- "CMD_DB_URL=postgres://hedgedoc:{{ secret__hedgedoc_db_password }}@database:5432/hedgedoc"
- "CMD_DOMAIN=pad.hamburg.ccc.de"

View file

@ -3,12 +3,11 @@
server {
# Listen on a custom port for the proxy protocol.
listen 8443 ssl http2 proxy_protocol;
listen [::]:8443 ssl http2 proxy_protocol;
# Make use of the ngx_http_realip_module to set the $remote_addr and
# $remote_port to the client address and client port, when using proxy
# protocol.
# First set our proxy protocol proxy as trusted.
set_real_ip_from 2a00:14b0:4200:3000:125::1;
set_real_ip_from 172.31.17.140;
# Then tell the realip_module to get the addresses from the proxy protocol
# header.
real_ip_header proxy_protocol;

View file

@ -3,7 +3,7 @@
services:
database:
image: docker.io/library/postgres:15-alpine
image: docker.io/library/postgres:15-alpine@sha256:64583b3cb4f2010277bdd9749456de78e5c36f8956466ba14b0b96922e510950
environment:
- "POSTGRES_USER=pretalx"
- "POSTGRES_PASSWORD={{ secret__pretalx_db_password }}"
@ -15,7 +15,7 @@ services:
- pretalx_net
redis:
image: docker.io/library/redis:8.4.0
image: docker.io/library/redis:8.2.2@sha256:4521b581dbddea6e7d81f8fe95ede93f5648aaa66a9dacd581611bf6fe7527bd
restart: unless-stopped
volumes:
- redis:/data
@ -23,7 +23,7 @@ services:
- pretalx_net
static:
image: docker.io/library/nginx:1.29.3
image: docker.io/library/nginx:1.29.3@sha256:f547e3d0d5d02f7009737b284abc87d808e4252b42dceea361811e9fc606287f
restart: unless-stopped
volumes:
- public:/usr/share/nginx/html
@ -33,7 +33,7 @@ services:
- pretalx_net
pretalx:
image: docker.io/pretalx/standalone:v2025.1.0
image: docker.io/pretalx/standalone:v2025.1.0@sha256:fb2d15f11bcae8bb15430084ed81a150cfdf7c79705450583b51e352ba486e8e
entrypoint: gunicorn
command:
- "pretalx.wsgi"
@ -78,7 +78,7 @@ services:
- pretalx_net
celery:
image: docker.io/pretalx/standalone:v2025.1.0
image: docker.io/pretalx/standalone:v2025.1.0@sha256:fb2d15f11bcae8bb15430084ed81a150cfdf7c79705450583b51e352ba486e8e
command:
- taskworker
restart: unless-stopped

View file

@ -6,27 +6,27 @@ map $host $upstream_acme_challenge_host {
staging.c3cat.de 172.31.17.151:31820;
ccchoir.de ccchoir-intern.hamburg.ccc.de:31820;
www.ccchoir.de ccchoir-intern.hamburg.ccc.de:31820;
cloud.hamburg.ccc.de cloud.hosts.hamburg.ccc.de:31820;
cloud.hamburg.ccc.de 172.31.17.143:31820;
element.hamburg.ccc.de 172.31.17.151:31820;
git.hamburg.ccc.de 172.31.17.154:31820;
grafana.hamburg.ccc.de 172.31.17.145:31820;
hackertours.hamburg.ccc.de 172.31.17.151:31820;
staging.hackertours.hamburg.ccc.de 172.31.17.151:31820;
hamburg.ccc.de 172.31.17.151:31820;
id.hamburg.ccc.de keycloak.hosts.hamburg.ccc.de:31820;
invite.hamburg.ccc.de keycloak.hosts.hamburg.ccc.de:31820;
keycloak-admin.hamburg.ccc.de keycloak.hosts.hamburg.ccc.de:31820;
id.hamburg.ccc.de 172.31.17.144:31820;
invite.hamburg.ccc.de 172.31.17.144:31820;
keycloak-admin.hamburg.ccc.de 172.31.17.144:31820;
matrix.hamburg.ccc.de 172.31.17.150:31820;
mas.hamburg.ccc.de 172.31.17.150:31820;
element-admin.hamburg.ccc.de 172.31.17.151:31820;
netbox.hamburg.ccc.de 172.31.17.167:31820;
onlyoffice.hamburg.ccc.de onlyoffice.hosts.hamburg.ccc.de:31820;
pad.hamburg.ccc.de pad.hosts.hamburg.ccc.de:31820;
onlyoffice.hamburg.ccc.de 172.31.17.147:31820;
pad.hamburg.ccc.de 172.31.17.141:31820;
pretalx.hamburg.ccc.de 172.31.17.157:31820;
spaceapi.hamburg.ccc.de 172.31.17.151:31820;
staging.hamburg.ccc.de 172.31.17.151:31820;
wiki.ccchh.net wiki.hosts.hamburg.ccc.de:31820;
wiki.hamburg.ccc.de wiki.hosts.hamburg.ccc.de:31820;
wiki.ccchh.net 172.31.17.146:31820;
wiki.hamburg.ccc.de 172.31.17.146:31820;
www.hamburg.ccc.de 172.31.17.151:31820;
tickets.hamburg.ccc.de 172.31.17.148:31820;
sunders.hamburg.ccc.de 172.31.17.170:31820;
@ -38,7 +38,7 @@ map $host $upstream_acme_challenge_host {
eh11.easterhegg.eu 172.31.17.151:31820;
eh20.easterhegg.eu 172.31.17.151:31820;
www.eh20.easterhegg.eu 172.31.17.151:31820;
eh22.easterhegg.eu eh22-wiki.hosts.hamburg.ccc.de:31820;
eh22.easterhegg.eu 172.31.17.165:31820;
easterheggxxxx.hamburg.ccc.de 172.31.17.151:31820;
eh2003.hamburg.ccc.de 172.31.17.151:31820;
www.eh2003.hamburg.ccc.de 172.31.17.151:31820;

View file

@ -20,16 +20,16 @@ stream {
map $ssl_preread_server_name $address {
ccchoir.de ccchoir-intern.hamburg.ccc.de:8443;
www.ccchoir.de ccchoir-intern.hamburg.ccc.de:8443;
cloud.hamburg.ccc.de cloud.hosts.hamburg.ccc.de:8443;
pad.hamburg.ccc.de pad.hosts.hamburg.ccc.de:8443;
cloud.hamburg.ccc.de cloud-intern.hamburg.ccc.de:8443;
pad.hamburg.ccc.de pad-intern.hamburg.ccc.de:8443;
pretalx.hamburg.ccc.de pretalx-intern.hamburg.ccc.de:8443;
id.hamburg.ccc.de keycloak.hosts.hamburg.ccc.de:8443;
invite.hamburg.ccc.de keycloak.hosts.hamburg.ccc.de:8443;
keycloak-admin.hamburg.ccc.de keycloak.hosts.hamburg.ccc.de:8443;
id.hamburg.ccc.de 172.31.17.144:8443;
invite.hamburg.ccc.de 172.31.17.144:8443;
keycloak-admin.hamburg.ccc.de 172.31.17.144:8444;
grafana.hamburg.ccc.de 172.31.17.145:8443;
wiki.ccchh.net wiki.hosts.hamburg.ccc.de:8443;
wiki.hamburg.ccc.de wiki.hosts.hamburg.ccc.de:8443;
onlyoffice.hamburg.ccc.de onlyoffice.hosts.hamburg.ccc.de:8443;
wiki.ccchh.net 172.31.17.146:8443;
wiki.hamburg.ccc.de 172.31.17.146:8443;
onlyoffice.hamburg.ccc.de 172.31.17.147:8443;
hackertours.hamburg.ccc.de 172.31.17.151:8443;
staging.hackertours.hamburg.ccc.de 172.31.17.151:8443;
netbox.hamburg.ccc.de 172.31.17.167:8443;
@ -56,7 +56,7 @@ stream {
eh11.easterhegg.eu 172.31.17.151:8443;
eh20.easterhegg.eu 172.31.17.151:8443;
www.eh20.easterhegg.eu 172.31.17.151:8443;
eh22.easterhegg.eu eh22-wiki.hosts.hamburg.ccc.de:8443;
eh22.easterhegg.eu 172.31.17.165:8443;
easterheggxxxx.hamburg.ccc.de 172.31.17.151:8443;
eh2003.hamburg.ccc.de 172.31.17.151:8443;
www.eh2003.hamburg.ccc.de 172.31.17.151:8443;

View file

@ -1,79 +0,0 @@
#!/usr/sbin/nft -f
## Variables
# Interfaces
define if_net1_v4_wan = "net1"
define if_net2_v6_wan = "net2"
define if_net0_2_v4_nat = "net0.2"
define if_net0_3_ci_runner = "net0.3"
# Interface Groups
define wan_ifs = { $if_net1_v4_wan,
$if_net2_v6_wan }
define lan_ifs = { $if_net0_2_v4_nat,
$if_net0_3_ci_runner }
# define v4_exposed_ifs = { }
define v6_exposed_ifs = { $if_net0_2_v4_nat }
## Rules
table inet reverse-path-forwarding {
chain rpf-filter {
type filter hook prerouting priority mangle + 10; policy drop;
# Only allow packets if their source address is routed via their incoming interface.
# https://github.com/NixOS/nixpkgs/blob/d9d87c51960050e89c79e4025082ed965e770d68/nixos/modules/services/networking/firewall-nftables.nix#L100
fib saddr . mark . iif oif exists accept
}
}
table inet host {
chain input {
type filter hook input priority filter; policy drop;
iifname "lo" accept comment "allow loopback"
ct state invalid drop
ct state established,related accept
ip protocol icmp accept
ip6 nexthdr icmpv6 accept
# Allow SSH access.
tcp dport 22 accept comment "allow ssh access"
# Allow DHCP server access.
iifname $if_net0_3_ci_runner udp dport 67 accept comment "allow dhcp server access"
}
}
table ip v4nat {
chain prerouting {
type nat hook prerouting priority dstnat; policy accept;
}
chain postrouting {
type nat hook postrouting priority srcnat; policy accept;
oifname $if_net1_v4_wan masquerade
}
}
table inet forward {
chain forward {
type filter hook forward priority filter; policy drop;
ct state invalid drop
ct state established,related accept
# Allow internet access.
meta nfproto ipv6 iifname $lan_ifs oifname $if_net2_v6_wan accept comment "allow v6 internet access"
meta nfproto ipv4 iifname $lan_ifs oifname $if_net1_v4_wan accept comment "allow v4 internet access"
# Allow access to exposed networks from internet.
# meta nfproto ipv4 oifname $v4_exposed_ifs accept comment "allow v4 exposed network access"
meta nfproto ipv6 oifname $v6_exposed_ifs accept comment "allow v6 exposed network access"
}
}

View file

@ -1,6 +0,0 @@
[Match]
MACAddress=BC:24:11:54:11:15
Type=ether
[Link]
Name=net0

View file

@ -1,6 +0,0 @@
[Match]
MACAddress=BC:24:11:9A:FB:34
Type=ether
[Link]
Name=net1

View file

@ -1,6 +0,0 @@
[Match]
MACAddress=BC:24:11:AE:C7:04
Type=ether
[Link]
Name=net2

View file

@ -1,7 +0,0 @@
[NetDev]
Name=net0.2
Kind=vlan
[VLAN]
Id=2

View file

@ -1,7 +0,0 @@
[NetDev]
Name=net0.3
Kind=vlan
[VLAN]
Id=3

View file

@ -1,12 +0,0 @@
[Match]
Name=net0
[Link]
RequiredForOnline=no
[Network]
VLAN=net0.2
VLAN=net0.3
LinkLocalAddressing=no

View file

@ -1,14 +0,0 @@
[Match]
Name=net1
[Network]
DNS=212.12.50.158
IPForward=ipv4
IPv6AcceptRA=no
[Address]
Address=212.12.48.123/24
[Route]
Gateway=212.12.48.55

View file

@ -1,14 +0,0 @@
[Match]
Name=net2
[Network]
#DNS=212.12.50.158
IPForward=ipv6
IPv6AcceptRA=no
[Address]
Address=2a00:14b0:4200:3500::130:2/112
[Route]
Gateway=2a00:14b0:4200:3500::130:1

View file

@ -1,23 +0,0 @@
[Match]
Name=net0.2
Type=vlan
[Link]
RequiredForOnline=no
[Network]
Description=v4-NAT
# Masquerading done in nftables (nftables.conf).
IPv6SendRA=yes
[Address]
Address=10.32.2.1/24
[IPv6SendRA]
UplinkInterface=net2
[IPv6Prefix]
Prefix=2a00:14b0:42:102::/64
Assign=true
Token=static:::1

View file

@ -1,29 +0,0 @@
[Match]
Name=net0.3
Type=vlan
[Link]
RequiredForOnline=no
[Network]
Description=ci-runners
# Masquerading done in nftables (nftables.conf).
IPv6SendRA=yes
DHCPServer=true
[DHCPServer]
PoolOffset=100
PoolSize=150
[Address]
Address=10.32.3.1/24
[IPv6SendRA]
UplinkInterface=net2
[IPv6Prefix]
Prefix=2a00:14b0:42:103::/64
Assign=true
Token=static:::1

View file

@ -1,7 +1,7 @@
---
services:
database:
image: docker.io/library/postgres:15-alpine
image: docker.io/library/postgres:15-alpine@sha256:64583b3cb4f2010277bdd9749456de78e5c36f8956466ba14b0b96922e510950
environment:
- "POSTGRES_USER=pretix"
- "POSTGRES_PASSWORD={{ secret__pretix_db_password }}"
@ -13,7 +13,7 @@ services:
restart: unless-stopped
redis:
image: docker.io/library/redis:7.4.7
image: docker.io/library/redis:7.4.6@sha256:a9cc41d6d01da2aa26c219e4f99ecbeead955a7b656c1c499cce8922311b2514
ports:
- "6379:6379"
volumes:
@ -25,7 +25,7 @@ services:
backend:
pretix:
image: docker.io/pretix/standalone:2024.8
image: docker.io/pretix/standalone:2024.8@sha256:110bac37efa5f736227f158f38e421ed738d03dccc274dfb415b258ab0f75cfe
command: ["all"]
ports:
- "8345:80"

View file

@ -38,7 +38,11 @@ server {
location = / {
#return 302 https://wiki.hamburg.ccc.de/infrastructure:service-overview#tickets_pretix;
return 302 https://tickets.hamburg.ccc.de/hackertours/39c3ht/;
return 302 https://tickets.hamburg.ccc.de/hackertours/eh22ht/;
}
location = /hackertours/eh22/ {
return 302 https://tickets.hamburg.ccc.de/hackertours/eh22ht/;
}
location / {

View file

@ -3,12 +3,11 @@
server {
# Listen on a custom port for the proxy protocol.
listen 8443 ssl http2 proxy_protocol;
listen [::]:8443 ssl http2 proxy_protocol;
# Make use of the ngx_http_realip_module to set the $remote_addr and
# $remote_port to the client address and client port, when using proxy
# protocol.
# First set our proxy protocol proxy as trusted.
set_real_ip_from 2a00:14b0:4200:3000:125::1;
set_real_ip_from 172.31.17.140;
# Then tell the realip_module to get the addresses from the proxy protocol
# header.
real_ip_header proxy_protocol;
@ -22,6 +21,6 @@ server {
# HSTS (ngx_http_headers_module is required) (63072000 seconds)
add_header Strict-Transport-Security "max-age=63072000" always;
return 302 https://wiki.hamburg.ccc.de$request_uri;
}

View file

@ -3,12 +3,11 @@
server {
# Listen on a custom port for the proxy protocol.
listen 8443 ssl http2 proxy_protocol;
listen [::]:8443 ssl http2 proxy_protocol;
# Make use of the ngx_http_realip_module to set the $remote_addr and
# $remote_port to the client address and client port, when using proxy
# protocol.
# First set our proxy protocol proxy as trusted.
set_real_ip_from 2a00:14b0:4200:3000:125::1;
set_real_ip_from 172.31.17.140;
# Then tell the realip_module to get the addresses from the proxy protocol
# header.
real_ip_header proxy_protocol;

View file

@ -1,7 +1,7 @@
services:
# https://github.com/richardg867/WaybackProxy
waybackproxy:
image: cttynul/waybackproxy:latest
image: cttynul/waybackproxy:latest@sha256:e001d5b1d746522cd1ab2728092173c0d96f08086cbd3e49cdf1e298b8add22e
environment:
DATE: 19990101
DATE_TOLERANCE: 730

View file

@ -17,15 +17,7 @@ HostKey /etc/ssh/ssh_host_ed25519_key
HostKey /etc/ssh/ssh_host_rsa_key
HostKey /etc/ssh/ssh_host_ecdsa_key
{% if ansible_facts["distribution"] == "Debian" and ansible_facts["distribution_major_version"] == "13" %}
KexAlgorithms sntrup761x25519-sha512,mlkem768x25519-sha256,curve25519-sha256@libssh.org,ecdh-sha2-nistp521,ecdh-sha2-nistp384,ecdh-sha2-nistp256,diffie-hellman-group-exchange-sha256
{% elif ansible_facts["distribution"] == "Debian" and ansible_facts["distribution_major_version"] == "12" %}
KexAlgorithms sntrup761x25519-sha512,curve25519-sha256@libssh.org,ecdh-sha2-nistp521,ecdh-sha2-nistp384,ecdh-sha2-nistp256,diffie-hellman-group-exchange-sha256
{% else %}
KexAlgorithms curve25519-sha256@libssh.org,ecdh-sha2-nistp521,ecdh-sha2-nistp384,ecdh-sha2-nistp256,diffie-hellman-group-exchange-sha256
{% endif %}
Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr

View file

@ -7,18 +7,17 @@ A use case for the deployment of the additional configuration files is Composes
## Supported Distributions
Should work on Debian-based distributions.
The following distributions are supported:
- Debian 11
## Required Arguments
- `docker_compose__compose_file_content`: The content to deploy to the Compose file at `/ansible_docker_compose/compose.yaml`.
For the required arguments look at the [`argument_specs.yaml`](./meta/argument_specs.yaml).
## Optional Arguments
## `hosts`
- `docker_compose__env_file_content`: The content to deploy to the `.env` file at `/ansible_docker_compose/.env`.
- `docker_compose__configuration_files`: A list of configuration files to deploy to the `/ansible_docker_compose/configs/` directory.
- `docker_compose__configuration_files.*.name`: The name of the configuration file.
- `docker_compose__configuration_files.*.content`: The content to deploy to the configuration file.
The `hosts` for this role need to be the machines for which you want to make sure the given Compose file is deployed and all of its services are up to date and running.
## Links & Resources
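For orientation, here is a minimal, hypothetical host_vars sketch combining the variables this README and the argument spec describe; host, image, and file names are placeholders, not taken from the repo:

```yaml
# Hypothetical host_vars sketch for a docker_compose host; all values are placeholders.
docker_compose__compose_file_content: |
  services:
    app:
      image: docker.io/library/nginx:1.29.3
      restart: unless-stopped
docker_compose__env_file_content: |
  EXAMPLE_SECRET={{ secret__example_password }}
docker_compose__configuration_files:
  - name: app.conf
    content: "{{ lookup('ansible.builtin.file', 'resources/example/app/app.conf') }}"
# Optional: arguments appended to `docker compose` by the "docker compose reload script" handler.
docker_compose__restart_cmd: "exec app sh -c 'kill -HUP 1'"
```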

View file

@ -1 +1,2 @@
docker_compose__configuration_files: [ ]
docker_compose__restart_cmd: ""

View file

@ -1,11 +1,13 @@
- name: docker compose down
community.docker.docker_compose_v2:
project_src: /ansible_docker_compose
state: absent
ansible.builtin.command:
cmd: /usr/bin/docker compose down
chdir: /ansible_docker_compose
become: true
- name: docker compose restart
community.docker.docker_compose_v2:
project_src: /ansible_docker_compose
state: restarted
changed_when: true # This is always changed.
- name: docker compose reload script
ansible.builtin.command:
cmd: /usr/bin/docker compose {{ docker_compose__restart_cmd }}
chdir: /ansible_docker_compose
become: true
changed_when: true # Mark this as always changed (for now?).
when: docker_compose__restart_cmd != ""
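As a sketch of how this handler expands in practice: with the `docker_compose__restart_cmd: "exec yate sh -c 'kill -1 1'"` value set in the yate host vars earlier in this diff, the "docker compose reload script" handler would effectively run the following task (shown expanded for illustration only):

```yaml
# Illustration only: the handler above with docker_compose__restart_cmd already templated in.
- name: docker compose reload script
  ansible.builtin.command:
    cmd: /usr/bin/docker compose exec yate sh -c 'kill -1 1'
    chdir: /ansible_docker_compose
  become: true
  changed_when: true
```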

View file

@ -2,20 +2,31 @@ argument_specs:
main:
options:
docker_compose__compose_file_content:
description: >-
The content of the Compose file at
`/ansible_docker_compose/compose.yaml`.
type: str
required: true
docker_compose__env_file_content:
description: >-
The content of the .env file at
`/ansible_docker_compose/.env`.
type: str
required: false
docker_compose__configuration_files:
description: >-
A list of configuration files to be deployed in the
`/ansible_docker_compose/configs/` directory.
type: list
elements: dict
required: false
default: [ ]
options:
name:
description: The name of the configuration file.
type: str
required: true
content:
description: The content of the configuration file.
type: str
required: true

View file

@ -1,3 +1,10 @@
---
dependencies:
- role: distribution_check
vars:
distribution_check__distribution_support_spec:
- name: Debian
major_versions:
- 11
- 12
- role: docker

View file

@ -59,7 +59,7 @@
state: absent
become: true
loop: "{{ docker_compose__config_files_to_remove.files }}"
notify: docker compose restart
# notify: docker compose down
- name: make sure all given configuration files are deployed
ansible.builtin.copy:
@ -70,19 +70,45 @@
group: root
become: true
loop: "{{ docker_compose__configuration_files }}"
notify: docker compose restart
# notify: docker compose down
notify: docker compose reload script
- name: Flush handlers to make "docker compose down" and "docker compose restart" handlers run now
- name: Flush handlers to make "docker compose down" handler run now
ansible.builtin.meta: flush_handlers
- name: docker compose up
community.docker.docker_compose_v2:
project_src: /ansible_docker_compose
state: present
build: always
pull: always
remove_orphans: true
- name: docker compose ps --format json before docker compose up
ansible.builtin.command:
cmd: /usr/bin/docker compose ps --format json
chdir: /ansible_docker_compose
become: true
changed_when: false
register: docker_compose__ps_json_before_up
- name: docker compose up --detach --pull always --build
ansible.builtin.command:
cmd: /usr/bin/docker compose up --detach --pull always --build --remove-orphans
chdir: /ansible_docker_compose
become: true
changed_when: false
# The changed status for this task is meant to be determined by the
# "potentially report changed" task together with the "docker compose ps
# --format json [...]" tasks.
- name: docker compose ps --format json after docker compose up
ansible.builtin.command:
cmd: /usr/bin/docker compose ps --format json
chdir: /ansible_docker_compose
become: true
changed_when: false
register: docker_compose__ps_json_after_up
# Doesn't work anymore. Dunno why.
# TODO: Fix
# - name: potentially report changed
# ansible.builtin.debug:
# msg: "If this reports changed, then the docker compose containers changed."
# changed_when: (docker_compose__ps_json_before_up.stdout | from_json | community.general.json_query('[].ID') | sort)
# != (docker_compose__ps_json_after_up.stdout | from_json | community.general.json_query('[].ID') | sort)
- name: Make sure anacron is installed
become: true

View file

@ -0,0 +1,8 @@
---
dependencies:
- role: distribution_check
vars:
distribution_check__distribution_support_spec:
- name: Debian
major_versions:
- "11"

View file

@ -7,7 +7,11 @@
- python3
- python3-pip
- python3-setuptools
- python3-poetry
- name: Ensure python poetry is installed
become: true
ansible.builtin.pip:
name: poetry
- name: Ensure foobazdmx user exists
become: true

View file

@ -4,7 +4,6 @@
server {
# Listen on a custom port for the proxy protocol.
listen 8443 ssl http2 proxy_protocol;
listen [::]:8443 ssl http2 proxy_protocol;
# Make use of the ngx_http_realip_module to set the $remote_addr and
# $remote_port to the client address and client port, when using proxy
# protocol.

View file

@ -1,11 +0,0 @@
# Role `nftables`
Deploys nftables.
## Support Distributions
Should work on Debian-based distributions.
## Required Arguments
- `nftables__config`: nftables configuration to deploy.

View file

@ -1,5 +0,0 @@
- name: Restart nftables service
ansible.builtin.systemd_service:
name: nftables
state: restarted
become: true

View file

@ -1,6 +0,0 @@
argument_specs:
main:
options:
nftables__config:
type: str
required: true

View file

@ -1,15 +0,0 @@
- name: ensure nftables is installed
ansible.builtin.apt:
name: nftables
state: present
become: true
- name: deploy nftables configuration
ansible.builtin.copy:
content: "{{ nftables__config }}"
dest: "/etc/nftables.conf"
mode: "0644"
owner: root
group: root
become: true
notify: Restart nftables service

roles/ola/meta/main.yaml (new file, 8 additions)
View file

@ -0,0 +1,8 @@
---
dependencies:
- role: distribution_check
vars:
distribution_check__distribution_support_spec:
- name: Debian
major_versions:
- "11"

View file

@ -1,11 +0,0 @@
# Role `systemd_networkd`
Deploys the given systemd-networkd configuration files.
## Support Distributions
Should work on Debian-based distributions.
## Required Arguments
- `systemd_networkd__config_dir`: Directory with systemd-networkd configs to deploy.

View file

@ -1,6 +0,0 @@
argument_specs:
main:
options:
systemd_networkd__config_dir:
type: path
required: true

View file

@ -1,14 +0,0 @@
- name: ensure rsync is installed
ansible.builtin.apt:
name: rsync
state: present
become: true
- name: synchronize systemd-networkd configs
ansible.posix.synchronize:
src: "{{ systemd_networkd__config_dir }}"
dest: "/etc/systemd/network"
archive: false
recursive: true
delete: true
become: true