Compare commits

..

2 commits

Author SHA1 Message Date
53b2f491f8 fix: Uses root password
Some checks failed
/ Ansible Lint (pull_request) Failing after 2m10s
2025-11-01 21:53:43 +01:00
0f8c0ffef9 fix: Replaces password in healthcheck with dynamic secret
Some checks failed
/ Ansible Lint (pull_request) Failing after 2m7s
2025-11-01 21:50:01 +01:00
26 changed files with 139 additions and 78 deletions

View file

@ -10,7 +10,7 @@ jobs:
name: Ansible Lint
runs-on: docker
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
- name: Install pip
run: |
apt update
@ -24,7 +24,7 @@ jobs:
# work in our environment.
# Rather manually setup python (pip) before instead.
- name: Run ansible-lint
uses: https://github.com/ansible/ansible-lint@v25.9.2
uses: https://github.com/ansible/ansible-lint@d7cd7cfa2469536527aceaef9ef2ec6f2fb331cb # v25.9.2
with:
setup_python: "false"
requirements_file: "requirements.yml"

View file

@ -6,3 +6,4 @@ docker_compose__configuration_files:
content: "{{ lookup('ansible.builtin.template', 'resources/z9/yate/docker_compose/regexroute.conf.j2') }}"
- name: regfile.conf
content: "{{ lookup('ansible.builtin.template', 'resources/z9/yate/docker_compose/regfile.conf.j2') }}"
docker_compose__restart_cmd: "exec yate sh -c 'kill -1 1'"

View file

@ -4,7 +4,7 @@ all:
ansible_host: authoritative-dns.z9.ccchh.net
ansible_user: chaos
dooris:
ansible_host: dooris.z9.ccchh.net
ansible_host: 10.31.208.201
ansible_user: chaos
light:
ansible_host: light.z9.ccchh.net

View file

@ -1,17 +1,13 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": [
"config:recommended",
// Parts from config:best-practices:
// https://docs.renovatebot.com/presets-config/#configbest-practices
":configMigration",
"abandonments:recommended",
"security:minimumReleaseAgeNpm",
"config:recommended", // Included in config:best-practices anyway, but added for clarity.
"config:best-practices",
":ignoreUnstable",
":disableRateLimiting",
":rebaseStalePrs",
":label(renovate)"
":label(renovate)",
"group:allDigest"
],
"semanticCommits": "disabled",
"packageRules": [
@ -32,6 +28,12 @@
"matchDatasources": ["docker"],
"matchPackageNames": ["docker.io/pretix/standalone"],
"versioning": "regex:^(?<major>\\d+\\.\\d+)(?:\\.(?<minor>\\d+))$"
},
// Since Forgejo seems to clean up older tag versions, and with them their digests, disable digest pinning for our images.
{
"matchDatasources": ["docker"],
"matchPackageNames": ["git.hamburg.ccc.de/*"],
"pinDigests": false
}
],
"customManagers": [

View file

@ -6,6 +6,3 @@ collections:
- name: community.sops
version: ">=2.2.4"
source: https://galaxy.ansible.com
- name: community.docker
version: ">=5.0.0"
source: https://galaxy.ansible.com

View file

@ -3,7 +3,7 @@
services:
database:
image: docker.io/library/mariadb:11
image: docker.io/library/mariadb:11@sha256:ae6119716edac6998ae85508431b3d2e666530ddf4e94c61a10710caec9b0f71
environment:
- "MARIADB_DATABASE=wordpress"
- "MARIADB_ROOT_PASSWORD={{ secret__mariadb_root_password }}"
@ -17,7 +17,7 @@ services:
restart: unless-stopped
app:
image: docker.io/library/wordpress:6-php8.1
image: docker.io/library/wordpress:6-php8.1@sha256:75f79f9c45a587b283e47fd21c6e51077d0c9dbbba529377faaa0c28d5b8f5a4
environment:
- "WORDPRESS_DB_HOST=database"
- "WORDPRESS_DB_NAME=wordpress"

View file

@ -2,13 +2,12 @@
services:
prometheus:
image: docker.io/prom/prometheus:v3.7.2
image: docker.io/prom/prometheus:v3.7.2@sha256:23031bfe0e74a13004252caaa74eccd0d62b6c6e7a04711d5b8bf5b7e113adc7
container_name: prometheus
command:
- '--config.file=/etc/prometheus/prometheus.yml'
- '--web.enable-remote-write-receiver'
- '--enable-feature=promql-experimental-functions'
- '--storage.tsdb.retention.time=28d'
ports:
- 9090:9090
restart: unless-stopped
@ -19,7 +18,7 @@ services:
- prom_data:/prometheus
alertmanager:
image: docker.io/prom/alertmanager:v0.28.1
image: docker.io/prom/alertmanager:v0.28.1@sha256:27c475db5fb156cab31d5c18a4251ac7ed567746a2483ff264516437a39b15ba
container_name: alertmanager
command:
- '--config.file=/etc/alertmanager/alertmanager.yaml'
@ -32,7 +31,7 @@ services:
- alertmanager_data:/alertmanager
grafana:
image: docker.io/grafana/grafana:12.2.1
image: docker.io/grafana/grafana:12.2.1@sha256:35c41e0fd0295f5d0ee5db7e780cf33506abfaf47686196f825364889dee878b
container_name: grafana
ports:
- 3000:3000
@ -46,7 +45,7 @@ services:
- graf_data:/var/lib/grafana
pve-exporter:
image: docker.io/prompve/prometheus-pve-exporter:3.5.5
image: docker.io/prompve/prometheus-pve-exporter:3.5.5@sha256:79a5598906697b1a5a006d09f0200528a77c6ff1568faf018539ac65824454df
container_name: pve-exporter
ports:
- 9221:9221
@ -59,7 +58,7 @@ services:
- /dev/null:/etc/prometheus/pve.yml
loki:
image: docker.io/grafana/loki:3.5.7
image: docker.io/grafana/loki:3.5.7@sha256:0eaee7bf39cc83aaef46914fb58f287d4f4c4be6ec96b86c2ed55719a75e49c8
container_name: loki
ports:
- 13100:3100
@ -70,7 +69,7 @@ services:
- loki_data:/var/loki
ntfy-alertmanager-ccchh-critical:
image: docker.io/xenrox/ntfy-alertmanager:0.5.0
image: docker.io/xenrox/ntfy-alertmanager:0.5.0@sha256:5fea88db3bf0257d98c007ab0c4ef064c6d67d7b7ceead7d6956dfa0a5cb333b
container_name: ntfy-alertmanager-ccchh-critical
volumes:
- ./configs/ntfy-alertmanager-ccchh-critical:/etc/ntfy-alertmanager/config
@ -79,7 +78,7 @@ services:
restart: unless-stopped
ntfy-alertmanager-fux-critical:
image: docker.io/xenrox/ntfy-alertmanager:0.5.0
image: docker.io/xenrox/ntfy-alertmanager:0.5.0@sha256:5fea88db3bf0257d98c007ab0c4ef064c6d67d7b7ceead7d6956dfa0a5cb333b
container_name: ntfy-alertmanager-fux-critical
volumes:
- ./configs/ntfy-alertmanager-fux-critical:/etc/ntfy-alertmanager/config
@ -88,7 +87,7 @@ services:
restart: unless-stopped
ntfy-alertmanager-ccchh:
image: docker.io/xenrox/ntfy-alertmanager:0.5.0
image: docker.io/xenrox/ntfy-alertmanager:0.5.0@sha256:5fea88db3bf0257d98c007ab0c4ef064c6d67d7b7ceead7d6956dfa0a5cb333b
container_name: ntfy-alertmanager-ccchh
volumes:
- ./configs/ntfy-alertmanager-ccchh:/etc/ntfy-alertmanager/config
@ -97,7 +96,7 @@ services:
restart: unless-stopped
ntfy-alertmanager-fux:
image: docker.io/xenrox/ntfy-alertmanager:0.5.0
image: docker.io/xenrox/ntfy-alertmanager:0.5.0@sha256:5fea88db3bf0257d98c007ab0c4ef064c6d67d7b7ceead7d6956dfa0a5cb333b
container_name: ntfy-alertmanager-fux
volumes:
- ./configs/ntfy-alertmanager-fux:/etc/ntfy-alertmanager/config

View file

@ -46,7 +46,7 @@ services:
- "8080:8080"
db:
image: docker.io/library/postgres:15.14
image: docker.io/library/postgres:15.14@sha256:424e79b81868f5fc5cf515eaeac69d288692ebcca7db86d98f91b50d4bce64bb
restart: unless-stopped
networks:
- keycloak

View file

@ -1,7 +1,7 @@
services:
mailman-core:
restart: unless-stopped
image: docker.io/maxking/mailman-core:0.5 # Use a specific version tag (tag latest is not published)
image: docker.io/maxking/mailman-core:0.5@sha256:cb8e412bb18d74480f996da68f46e92473b6103995e71bc5aeba139b255cc3d2 # Use a specific version tag (tag latest is not published)
container_name: mailman-core
hostname: mailman-core
volumes:
@ -25,7 +25,7 @@ services:
mailman-web:
restart: unless-stopped
image: docker.io/maxking/mailman-web:0.5 # Use a specific version tag (tag latest is not published)
image: docker.io/maxking/mailman-web:0.5@sha256:014726db85586fb53541f66f6ce964bf07e939791cfd5ffc796cd6d243696a18 # Use a specific version tag (tag latest is not published)
container_name: mailman-web
hostname: mailman-web
depends_on:
@ -56,7 +56,7 @@ services:
- POSTGRES_DB=mailmandb
- POSTGRES_USER=mailman
- POSTGRES_PASSWORD=wvQjbMRnwFuxGEPz
image: docker.io/library/postgres:12-alpine
image: docker.io/library/postgres:12-alpine@sha256:7c8f4870583184ebadf7f17a6513620aac5f365a7938dc6a6911c1d5df2f481a
volumes:
- /opt/mailman/database:/var/lib/postgresql/data
networks:

View file

@ -1,7 +1,7 @@
---
services:
ntfy:
image: docker.io/binwiederhier/ntfy:v2.14.0
image: docker.io/binwiederhier/ntfy:v2.14.0@sha256:5a051798d14138c3ecb12c038652558ab6a077e1aceeb867c151cbf5fa8451ef
container_name: ntfy
command:
- serve

View file

@ -4,7 +4,7 @@
services:
onlyoffice:
image: docker.io/onlyoffice/documentserver:9.1.0
image: docker.io/onlyoffice/documentserver:9.1.0@sha256:34b92f4a67bfd939bd6b75893e8217556e3b977f81e49472f7e28737b741ba1d
restart: unless-stopped
volumes:
- "./onlyoffice/DocumentServer/logs:/var/log/onlyoffice"

View file

@ -3,7 +3,7 @@
services:
database:
image: docker.io/library/postgres:15-alpine
image: docker.io/library/postgres:15-alpine@sha256:64583b3cb4f2010277bdd9749456de78e5c36f8956466ba14b0b96922e510950
environment:
- "POSTGRES_USER=hedgedoc"
- "POSTGRES_PASSWORD={{ secret__hedgedoc_db_password }}"
@ -13,7 +13,7 @@ services:
restart: unless-stopped
app:
image: quay.io/hedgedoc/hedgedoc:1.10.3
image: quay.io/hedgedoc/hedgedoc:1.10.3@sha256:ca58fd73ecf05c89559b384fb7a1519c18c8cbba5c21a0018674ed820b9bdb73
environment:
- "CMD_DB_URL=postgres://hedgedoc:{{ secret__hedgedoc_db_password }}@database:5432/hedgedoc"
- "CMD_DOMAIN=pad.hamburg.ccc.de"

View file

@ -3,7 +3,7 @@
services:
database:
image: docker.io/library/postgres:15-alpine
image: docker.io/library/postgres:15-alpine@sha256:64583b3cb4f2010277bdd9749456de78e5c36f8956466ba14b0b96922e510950
environment:
- "POSTGRES_USER=pretalx"
- "POSTGRES_PASSWORD={{ secret__pretalx_db_password }}"
@ -15,7 +15,7 @@ services:
- pretalx_net
redis:
image: docker.io/library/redis:8.2.2
image: docker.io/library/redis:8.2.2@sha256:4521b581dbddea6e7d81f8fe95ede93f5648aaa66a9dacd581611bf6fe7527bd
restart: unless-stopped
volumes:
- redis:/data
@ -23,7 +23,7 @@ services:
- pretalx_net
static:
image: docker.io/library/nginx:1.29.3
image: docker.io/library/nginx:1.29.3@sha256:f547e3d0d5d02f7009737b284abc87d808e4252b42dceea361811e9fc606287f
restart: unless-stopped
volumes:
- public:/usr/share/nginx/html
@ -33,7 +33,7 @@ services:
- pretalx_net
pretalx:
image: docker.io/pretalx/standalone:v2025.1.0
image: docker.io/pretalx/standalone:v2025.1.0@sha256:fb2d15f11bcae8bb15430084ed81a150cfdf7c79705450583b51e352ba486e8e
entrypoint: gunicorn
command:
- "pretalx.wsgi"
@ -78,7 +78,7 @@ services:
- pretalx_net
celery:
image: docker.io/pretalx/standalone:v2025.1.0
image: docker.io/pretalx/standalone:v2025.1.0@sha256:fb2d15f11bcae8bb15430084ed81a150cfdf7c79705450583b51e352ba486e8e
command:
- taskworker
restart: unless-stopped

View file

@ -1,7 +1,7 @@
---
services:
database:
image: docker.io/library/postgres:15-alpine
image: docker.io/library/postgres:15-alpine@sha256:64583b3cb4f2010277bdd9749456de78e5c36f8956466ba14b0b96922e510950
environment:
- "POSTGRES_USER=pretix"
- "POSTGRES_PASSWORD={{ secret__pretix_db_password }}"
@ -13,7 +13,7 @@ services:
restart: unless-stopped
redis:
image: docker.io/library/redis:7.4.6
image: docker.io/library/redis:7.4.6@sha256:a9cc41d6d01da2aa26c219e4f99ecbeead955a7b656c1c499cce8922311b2514
ports:
- "6379:6379"
volumes:
@ -25,7 +25,7 @@ services:
backend:
pretix:
image: docker.io/pretix/standalone:2024.8
image: docker.io/pretix/standalone:2024.8@sha256:110bac37efa5f736227f158f38e421ed738d03dccc274dfb415b258ab0f75cfe
command: ["all"]
ports:
- "8345:80"

View file

@ -38,7 +38,11 @@ server {
location = / {
#return 302 https://wiki.hamburg.ccc.de/infrastructure:service-overview#tickets_pretix;
return 302 https://tickets.hamburg.ccc.de/hackertours/39c3ht/;
return 302 https://tickets.hamburg.ccc.de/hackertours/eh22ht/;
}
location = /hackertours/eh22/ {
return 302 https://tickets.hamburg.ccc.de/hackertours/eh22ht/;
}
location / {

View file

@ -1,7 +1,7 @@
services:
# https://github.com/richardg867/WaybackProxy
waybackproxy:
image: cttynul/waybackproxy:latest
image: cttynul/waybackproxy:latest@sha256:e001d5b1d746522cd1ab2728092173c0d96f08086cbd3e49cdf1e298b8add22e
environment:
DATE: 19990101
DATE_TOLERANCE: 730

View file

@ -17,15 +17,7 @@ HostKey /etc/ssh/ssh_host_ed25519_key
HostKey /etc/ssh/ssh_host_rsa_key
HostKey /etc/ssh/ssh_host_ecdsa_key
{% if ansible_facts["distribution"] == "Debian" and ansible_facts["distribution_major_version"] == "13" %}
KexAlgorithms sntrup761x25519-sha512,mlkem768x25519-sha256,curve25519-sha256@libssh.org,ecdh-sha2-nistp521,ecdh-sha2-nistp384,ecdh-sha2-nistp256,diffie-hellman-group-exchange-sha256
{% elif ansible_facts["distribution"] == "Debian" and ansible_facts["distribution_major_version"] == "12" %}
KexAlgorithms sntrup761x25519-sha512,curve25519-sha256@libssh.org,ecdh-sha2-nistp521,ecdh-sha2-nistp384,ecdh-sha2-nistp256,diffie-hellman-group-exchange-sha256
{% else %}
KexAlgorithms curve25519-sha256@libssh.org,ecdh-sha2-nistp521,ecdh-sha2-nistp384,ecdh-sha2-nistp256,diffie-hellman-group-exchange-sha256
{% endif %}
Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr

View file

@ -7,18 +7,17 @@ A use case for the deployment of the additional configuration files is Composes
## Supported Distributions
Should work on Debian-based distributions.
The following distributions are supported:
- Debian 11
## Required Arguments
- `docker_compose__compose_file_content`: The content to deploy to the Compose file at `/ansible_docker_compose/compose.yaml`.
For the required arguments look at the [`argument_specs.yaml`](./meta/argument_specs.yaml).
## Optional Arguments
## `hosts`
- `docker_compose__env_file_content`: The content to deploy to the `.env` file at `/ansible_docker_compose/.env`.
- `docker_compose__configuration_files`: A list of configuration files to deploy to the `/ansible_docker_compose/configs/` directory.
- `docker_compose__configuration_files.*.name`: The name of the configuration file.
- `docker_compose__configuration_files.*.content`: The content to deploy to the configuration file.
The `hosts` for this role need to be the machines for which you want to make sure the given Compose file is deployed and all of its services are up-to-date and running.
## Links & Resources

View file

@ -1 +1,2 @@
docker_compose__configuration_files: [ ]
docker_compose__restart_cmd: ""

View file

@ -1,11 +1,13 @@
- name: docker compose down
community.docker.docker_compose_v2:
project_src: /ansible_docker_compose
state: absent
ansible.builtin.command:
cmd: /usr/bin/docker compose down
chdir: /ansible_docker_compose
become: true
- name: docker compose restart
community.docker.docker_compose_v2:
project_src: /ansible_docker_compose
state: restarted
changed_when: true # This is always changed.
- name: docker compose reload script
ansible.builtin.command:
cmd: /usr/bin/docker compose {{ docker_compose__restart_cmd }}
chdir: /ansible_docker_compose
become: true
changed_when: true # Mark this as always changed (for now?).
when: docker_compose__restart_cmd != ""

View file

@ -2,20 +2,31 @@ argument_specs:
main:
options:
docker_compose__compose_file_content:
description: >-
The content of the Compose file at
`/ansible_docker_compose/compose.yaml`.
type: str
required: true
docker_compose__env_file_content:
description: >-
The content of the .env file at
`/ansible_docker_compose/.env`.
type: str
required: false
docker_compose__configuration_files:
description: >-
A list of configuration files to be deployed in the
`/ansible_docker_compose/configs/` directory.
type: list
elements: dict
required: false
default: [ ]
options:
name:
description: The name of the configuration file.
type: str
required: true
content:
description: The content of the configuration file.
type: str
required: true

View file

@ -1,3 +1,10 @@
---
dependencies:
- role: distribution_check
vars:
distribution_check__distribution_support_spec:
- name: Debian
major_versions:
- 11
- 12
- role: docker

View file

@ -59,7 +59,7 @@
state: absent
become: true
loop: "{{ docker_compose__config_files_to_remove.files }}"
notify: docker compose restart
# notify: docker compose down
- name: make sure all given configuration files are deployed
ansible.builtin.copy:
@ -70,19 +70,45 @@
group: root
become: true
loop: "{{ docker_compose__configuration_files }}"
notify: docker compose restart
# notify: docker compose down
notify: docker compose reload script
- name: Flush handlers to make "docker compose down" and "docker compose restart" handlers run now
- name: Flush handlers to make "docker compose down" handler run now
ansible.builtin.meta: flush_handlers
- name: docker compose up
community.docker.docker_compose_v2:
project_src: /ansible_docker_compose
state: present
build: always
pull: always
remove_orphans: true
- name: docker compose ps --format json before docker compose up
ansible.builtin.command:
cmd: /usr/bin/docker compose ps --format json
chdir: /ansible_docker_compose
become: true
changed_when: false
register: docker_compose__ps_json_before_up
- name: docker compose up --detach --pull always --build
ansible.builtin.command:
cmd: /usr/bin/docker compose up --detach --pull always --build --remove-orphans
chdir: /ansible_docker_compose
become: true
changed_when: false
# The changed status for this task would be determined by the "potentially
# report changed" task together with the "docker compose ps --format json
# [...]" tasks.
- name: docker compose ps --format json after docker compose up
ansible.builtin.command:
cmd: /usr/bin/docker compose ps --format json
chdir: /ansible_docker_compose
become: true
changed_when: false
register: docker_compose__ps_json_after_up
# This no longer works; the cause is unknown.
# TODO: Fix
# - name: potentially report changed
# ansible.builtin.debug:
# msg: "If this reports changed, then the docker compose containers changed."
# changed_when: (docker_compose__ps_json_before_up.stdout | from_json | community.general.json_query('[].ID') | sort)
# != (docker_compose__ps_json_after_up.stdout | from_json | community.general.json_query('[].ID') | sort)
- name: Make sure anacron is installed
become: true

View file

@ -0,0 +1,8 @@
---
dependencies:
- role: distribution_check
vars:
distribution_check__distribution_support_spec:
- name: Debian
major_versions:
- "11"

View file

@ -7,7 +7,11 @@
- python3
- python3-pip
- python3-setuptools
- python3-poetry
- name: Ensure python poetry is installed
become: true
ansible.builtin.pip:
name: poetry
- name: Ensure foobazdmx user exists
become: true

8
roles/ola/meta/main.yaml Normal file
View file

@ -0,0 +1,8 @@
---
dependencies:
- role: distribution_check
vars:
distribution_check__distribution_support_spec:
- name: Debian
major_versions:
- "11"