Compare commits


2 commits

Author SHA1 Message Date
385f625c10
ci: move Ansible Lint job to ubuntu-latest runner to make it work again
Some checks failed
/ Ansible Lint (push) Failing after 1m20s
Move Ansible Lint job to ubuntu-latest runner to make it work again.
Moving it to the ubuntu-latest runner also removes the need for manual
dependency setup.
2025-10-22 00:42:30 +02:00
96ecd033c8
(test) disable semantic commit prefixes
Some checks failed
/ Ansible Lint (push) Failing after 50s
2025-10-22 00:29:59 +02:00
43 changed files with 319 additions and 416 deletions


@ -8,25 +8,12 @@ on:
jobs:
ansible-lint:
name: Ansible Lint
runs-on: docker
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
- name: Install pip
run: |
apt update
apt install -y pip
- name: Install python jmespath
run: |
pip install jmespath
env:
PIP_BREAK_SYSTEM_PACKAGES: 1
# Don't let it set up Python, as the setup-python action it then calls
# doesn't work in our environment.
# Instead, manually set up Python (pip) beforehand.
- uses: actions/checkout@v4
- name: Run ansible-lint
uses: https://github.com/ansible/ansible-lint@d7cd7cfa2469536527aceaef9ef2ec6f2fb331cb # v25.9.2
uses: https://github.com/ansible/ansible-lint@v24.10.0
with:
setup_python: "false"
requirements_file: "requirements.yml"
env:
PIP_BREAK_SYSTEM_PACKAGES: 1
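Since the added/removed markers of this diff are lost in this view, here is a minimal sketch of the simplified job on the ubuntu-latest side, assuming the unpinned checkout@v4 and ansible-lint@v24.10.0 lines belong to that side (as the commit message suggests):

jobs:
  ansible-lint:
    name: Ansible Lint
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Run ansible-lint
        uses: https://github.com/ansible/ansible-lint@v24.10.0
        with:
          requirements_file: "requirements.yml"

With a standard runner image, Python is already available, so the manual apt/pip bootstrap and the `setup_python: "false"` workaround can be dropped.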


@ -1,6 +1,4 @@
# renovate: datasource=docker depName=git.hamburg.ccc.de/ccchh/oci-images/nextcloud
nextcloud__version: 32
# renovate: datasource=docker depName=docker.io/library/postgres
nextcloud__postgres_version: 15.14
nextcloud__fqdn: cloud.hamburg.ccc.de
nextcloud__data_dir: /data/nextcloud


@ -1,5 +1,4 @@
# renovate: datasource=github-releases depName=netbox packageName=netbox-community/netbox
netbox__version: "v4.4.5"
netbox__version: "v4.1.7"
netbox__config: "{{ lookup('ansible.builtin.template', 'resources/chaosknoten/netbox/netbox/configuration.py.j2') }}"
netbox__custom_pipeline_oidc_group_and_role_mapping: true


@ -1,7 +1,4 @@
ansible_pull__age_private_key: ENC[AES256_GCM,data:tP84jDYh2zeWjf7wqDoefm9zaeg/Q2TWUyIstOcrjYHgrZdGLk64skLuGyH5q4FxQL9QEhe9qBT+AAxxKE6fU630/M1LVOR4Sls=,iv:I9W6KxIoisJFFMtOrN5u8KgnsmuIgF9RvzWanLNGVVM=,tag:w9bhDahR4Ai4/nLLeR58lA==,type:str]
secret__sunders_db_root_password: ENC[AES256_GCM,data:m3Xt6dOKibRflon/rWG9KmdBPHEBbqE/GIpKdFI1Di7Lpl/THxzrgx12mTK6aZnwDrM=,iv:hD/UGwo88ye9CxyTCEQ0SVon2+ipPjeA9NF2/OhYwmc=,tag:DRdQ5hvTgUO5FVae/ul7kQ==,type:str]
secret__sunders_db_camera_password: ENC[AES256_GCM,data:tOt4ImpedgfGvRpcThPO30YyEl/bP244ruJQzAYodJIsEhFuk5LxHpPASEnsqlN6m3M=,iv:rQXBjiYWZlzeUdaqDdTlrdbSSqGaPDeZOPhUaMjgcjU=,tag:lkSlIdJWFowyPfWEjpC/Zg==,type:str]
secret__sunders_db_camera_select_password: ENC[AES256_GCM,data:PveGcD2WmvpMc8bafGY1c45aQ2XH/ym2yj5YacauQPeZO6Xem3kaxU0kwjs0Wd26ugc=,iv:tk288L9i0lxsJbTFq5ET5IiKkJfMQwc6uKNFXILcD7o=,tag:hOIivp3mOtDNBCsKvrSrBw==,type:str]
sops:
age:
- recipient: age1na0nh9ndnr9cxpnlvstrxskr4fxf4spnkw48ufl7m43f98y40y7shhnvgd
@ -13,8 +10,8 @@ sops:
S3NiK3R6UWQ5UU0xUmYwa1hqMUo5c28K4EVQwBcALc6k53CNsemfMy2s6AGO5LJf
3U1zeFtEcsvEnUfkvFT//M7cB6pUqQF0KIq1VnnFoQF7IpvSN23lxg==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2025-11-01T16:32:10Z"
mac: ENC[AES256_GCM,data:8Q6DBSFtzwHuVxduRlZYxlRWO0trSoesNGUR8r/dWnp9ashFBSZqVyffXb4Vq6DB5thANJ6/b3PCNsHdiAKn6Ai2UT8G0HimFjUUgNpZxo4xoNGmDhDvfdBgUL6O2pHhY+ojjguUXDYeYc99+eaxfKqZ3w+PAPaySltKm99foz8=,iv:ILOErdiWbUjk9kovXXZYcAqZFQp2Wo1Tm14sgK3niWg=,tag:Q2gT6wbQyhDXjoQEG2Lngw==,type:str]
lastmodified: "2025-10-14T23:43:05Z"
mac: ENC[AES256_GCM,data:15TRSKlDhjQy3yMcFhz2Den2YorcrpJmCw0BVl10qlG8u9G7Vw/7aV/hJnZdkCz3w1ZkEbNS6DCKxCLs1Qgf2SEPaG/cRraO2mcl+YH7k4gb5LMzu81fRkbCx66B4LG+DY8fsAJeO4mxui2m0ZAHb2SNFIP4Q4vdLav3jTaiwAc=,iv:71qa6JTc+S5MLynGc27tx1WBGrpvTCSCoEv01SZnPF8=,tag:ju4WP1MK1/sWw7TAitzM0Q==,type:str]
pgp:
- created_at: "2025-10-15T08:45:25Z"
enc: |-
@ -210,4 +207,4 @@ sops:
-----END PGP MESSAGE-----
fp: 878FEA3CB6A6F6E7CD80ECBE28506E3585F9F533
unencrypted_suffix: _unencrypted
version: 3.11.0
version: 3.10.2


@ -1,13 +0,0 @@
docker_compose__compose_file_content: "{{ lookup('ansible.builtin.template', 'resources/chaosknoten/sunders/docker_compose/compose.yaml.j2') }}"
certbot__version_spec: ""
certbot__acme_account_email_address: le-admin@hamburg.ccc.de
certbot__certificate_domains:
- "sunders.hamburg.ccc.de"
certbot__new_cert_commands:
- "systemctl reload nginx.service"
nginx__version_spec: ""
nginx__configurations:
- name: sunders.hamburg.ccc.de
content: "{{ lookup('ansible.builtin.file', 'resources/chaosknoten/sunders/nginx/sunders.hamburg.ccc.de.conf') }}"


@ -1,5 +1,4 @@
docker_compose__compose_file_content: "{{ lookup('ansible.builtin.file', 'resources/chaosknoten/zammad/docker_compose/compose.yaml') }}"
docker_compose__env_file_content: "{{ lookup('ansible.builtin.template', 'resources/chaosknoten/zammad/docker_compose/.env.j2') }}"
docker_compose__compose_file_content: "{{ lookup('ansible.builtin.template', 'resources/chaosknoten/zammad/docker_compose/compose.yaml.j2') }}"
docker_compose__configuration_files: [ ]
certbot__version_spec: ""


@ -106,7 +106,6 @@ docker_compose_hosts:
pretalx:
zammad:
ntfy:
sunders:
nextcloud_hosts:
hosts:
cloud:
@ -127,7 +126,6 @@ nginx_hosts:
wiki:
zammad:
ntfy:
sunders:
public_reverse_proxy_hosts:
hosts:
public-reverse-proxy:
@ -147,7 +145,6 @@ certbot_hosts:
wiki:
zammad:
ntfy:
sunders:
prometheus_node_exporter_hosts:
hosts:
ccchoir:


@ -6,3 +6,4 @@ docker_compose__configuration_files:
content: "{{ lookup('ansible.builtin.template', 'resources/z9/yate/docker_compose/regexroute.conf.j2') }}"
- name: regfile.conf
content: "{{ lookup('ansible.builtin.template', 'resources/z9/yate/docker_compose/regfile.conf.j2') }}"
docker_compose__restart_cmd: "exec yate sh -c 'kill -1 1'"


@ -4,7 +4,7 @@ all:
ansible_host: authoritative-dns.z9.ccchh.net
ansible_user: chaos
dooris:
ansible_host: dooris.z9.ccchh.net
ansible_host: 10.31.208.201
ansible_user: chaos
light:
ansible_host: light.z9.ccchh.net


@ -3,11 +3,7 @@
"extends": [
"config:recommended", // Included in config:best-practices anyway, but added for clarity.
"config:best-practices",
":ignoreUnstable",
":disableRateLimiting",
":rebaseStalePrs",
":label(renovate)",
"group:allDigest"
":ignoreUnstable"
],
"semanticCommits": "disabled",
"packageRules": [
@ -23,29 +19,6 @@
"minor",
"patch"
]
},
{
"matchDatasources": ["docker"],
"matchPackageNames": ["docker.io/pretix/standalone"],
"versioning": "regex:^(?<major>\\d+\\.\\d+)(?:\\.(?<minor>\\d+))$"
},
// Since Forgejo seems to clean up older tag versions, and thus older digests, disable digest pinning for our images.
{
"matchDatasources": ["docker"],
"matchPackageNames": ["git.hamburg.ccc.de/*"],
"pinDigests": false
}
],
"customManagers": [
// Custom manager using regex for letting Renovate find dependencies in inventory variables.
{
"customType": "regex",
"managerFilePatterns": [
"/^inventories/.*?_vars/.*?\\.ya?ml$/"
],
"matchStrings": [
"# renovate: datasource=(?<datasource>[a-zA-Z0-9-._]+?) depName=(?<depName>[^\\s]+?)(?: packageName=(?<packageName>[^\\s]+?))?(?: versioning=(?<versioning>[^\\s]+?))?\\s*.+?\\s*:\\s*[\"']?(?<currentValue>.+?)[\"']?\\s"
]
}
],
"docker-compose": {


@ -6,6 +6,3 @@ collections:
- name: community.sops
version: ">=2.2.4"
source: https://galaxy.ansible.com
- name: community.docker
version: ">=5.0.0"
source: https://galaxy.ansible.com


@ -3,7 +3,7 @@
services:
database:
image: docker.io/library/mariadb:11@sha256:ae6119716edac6998ae85508431b3d2e666530ddf4e94c61a10710caec9b0f71
image: docker.io/library/mariadb:11
environment:
- "MARIADB_DATABASE=wordpress"
- "MARIADB_ROOT_PASSWORD={{ secret__mariadb_root_password }}"
@ -17,7 +17,7 @@ services:
restart: unless-stopped
app:
image: docker.io/library/wordpress:6-php8.1@sha256:75f79f9c45a587b283e47fd21c6e51077d0c9dbbba529377faaa0c28d5b8f5a4
image: docker.io/library/wordpress:6-php8.1
environment:
- "WORDPRESS_DB_HOST=database"
- "WORDPRESS_DB_NAME=wordpress"


@ -2,7 +2,7 @@
services:
prometheus:
image: docker.io/prom/prometheus:v3.7.2@sha256:23031bfe0e74a13004252caaa74eccd0d62b6c6e7a04711d5b8bf5b7e113adc7
image: docker.io/prom/prometheus:v3.7.1
container_name: prometheus
command:
- '--config.file=/etc/prometheus/prometheus.yml'
@ -18,7 +18,7 @@ services:
- prom_data:/prometheus
alertmanager:
image: docker.io/prom/alertmanager:v0.28.1@sha256:27c475db5fb156cab31d5c18a4251ac7ed567746a2483ff264516437a39b15ba
image: docker.io/prom/alertmanager:v0.28.1
container_name: alertmanager
command:
- '--config.file=/etc/alertmanager/alertmanager.yaml'
@ -31,7 +31,7 @@ services:
- alertmanager_data:/alertmanager
grafana:
image: docker.io/grafana/grafana:12.2.1@sha256:35c41e0fd0295f5d0ee5db7e780cf33506abfaf47686196f825364889dee878b
image: docker.io/grafana/grafana:12.2.1
container_name: grafana
ports:
- 3000:3000
@ -45,7 +45,7 @@ services:
- graf_data:/var/lib/grafana
pve-exporter:
image: docker.io/prompve/prometheus-pve-exporter:3.5.5@sha256:79a5598906697b1a5a006d09f0200528a77c6ff1568faf018539ac65824454df
image: docker.io/prompve/prometheus-pve-exporter:3.5.5
container_name: pve-exporter
ports:
- 9221:9221
@ -58,7 +58,7 @@ services:
- /dev/null:/etc/prometheus/pve.yml
loki:
image: docker.io/grafana/loki:3.5.7@sha256:0eaee7bf39cc83aaef46914fb58f287d4f4c4be6ec96b86c2ed55719a75e49c8
image: docker.io/grafana/loki:3.5.7
container_name: loki
ports:
- 13100:3100
@ -69,7 +69,7 @@ services:
- loki_data:/var/loki
ntfy-alertmanager-ccchh-critical:
image: docker.io/xenrox/ntfy-alertmanager:0.5.0@sha256:5fea88db3bf0257d98c007ab0c4ef064c6d67d7b7ceead7d6956dfa0a5cb333b
image: docker.io/xenrox/ntfy-alertmanager:0.5.0
container_name: ntfy-alertmanager-ccchh-critical
volumes:
- ./configs/ntfy-alertmanager-ccchh-critical:/etc/ntfy-alertmanager/config
@ -78,7 +78,7 @@ services:
restart: unless-stopped
ntfy-alertmanager-fux-critical:
image: docker.io/xenrox/ntfy-alertmanager:0.5.0@sha256:5fea88db3bf0257d98c007ab0c4ef064c6d67d7b7ceead7d6956dfa0a5cb333b
image: docker.io/xenrox/ntfy-alertmanager:0.5.0
container_name: ntfy-alertmanager-fux-critical
volumes:
- ./configs/ntfy-alertmanager-fux-critical:/etc/ntfy-alertmanager/config
@ -87,7 +87,7 @@ services:
restart: unless-stopped
ntfy-alertmanager-ccchh:
image: docker.io/xenrox/ntfy-alertmanager:0.5.0@sha256:5fea88db3bf0257d98c007ab0c4ef064c6d67d7b7ceead7d6956dfa0a5cb333b
image: docker.io/xenrox/ntfy-alertmanager:0.5.0
container_name: ntfy-alertmanager-ccchh
volumes:
- ./configs/ntfy-alertmanager-ccchh:/etc/ntfy-alertmanager/config
@ -96,7 +96,7 @@ services:
restart: unless-stopped
ntfy-alertmanager-fux:
image: docker.io/xenrox/ntfy-alertmanager:0.5.0@sha256:5fea88db3bf0257d98c007ab0c4ef064c6d67d7b7ceead7d6956dfa0a5cb333b
image: docker.io/xenrox/ntfy-alertmanager:0.5.0
container_name: ntfy-alertmanager-fux
volumes:
- ./configs/ntfy-alertmanager-fux:/etc/ntfy-alertmanager/config


@ -46,7 +46,7 @@ services:
- "8080:8080"
db:
image: docker.io/library/postgres:15.14@sha256:424e79b81868f5fc5cf515eaeac69d288692ebcca7db86d98f91b50d4bce64bb
image: docker.io/library/postgres:15.14
restart: unless-stopped
networks:
- keycloak


@ -1,7 +1,7 @@
services:
mailman-core:
restart: unless-stopped
image: docker.io/maxking/mailman-core:0.5@sha256:cb8e412bb18d74480f996da68f46e92473b6103995e71bc5aeba139b255cc3d2 # Use a specific version tag (tag latest is not published)
image: docker.io/maxking/mailman-core:0.5 # Use a specific version tag (tag latest is not published)
container_name: mailman-core
hostname: mailman-core
volumes:
@ -25,7 +25,7 @@ services:
mailman-web:
restart: unless-stopped
image: docker.io/maxking/mailman-web:0.5@sha256:014726db85586fb53541f66f6ce964bf07e939791cfd5ffc796cd6d243696a18 # Use a specific version tag (tag latest is not published)
image: docker.io/maxking/mailman-web:0.5 # Use a specific version tag (tag latest is not published)
container_name: mailman-web
hostname: mailman-web
depends_on:
@ -56,7 +56,7 @@ services:
- POSTGRES_DB=mailmandb
- POSTGRES_USER=mailman
- POSTGRES_PASSWORD=wvQjbMRnwFuxGEPz
image: docker.io/library/postgres:12-alpine@sha256:7c8f4870583184ebadf7f17a6513620aac5f365a7938dc6a6911c1d5df2f481a
image: docker.io/library/postgres:12-alpine
volumes:
- /opt/mailman/database:/var/lib/postgresql/data
networks:


@ -1,7 +1,7 @@
---
services:
ntfy:
image: docker.io/binwiederhier/ntfy:v2.14.0@sha256:5a051798d14138c3ecb12c038652558ab6a077e1aceeb867c151cbf5fa8451ef
image: docker.io/binwiederhier/ntfy:v2.14.0
container_name: ntfy
command:
- serve


@ -4,7 +4,7 @@
services:
onlyoffice:
image: docker.io/onlyoffice/documentserver:9.1.0@sha256:34b92f4a67bfd939bd6b75893e8217556e3b977f81e49472f7e28737b741ba1d
image: docker.io/onlyoffice/documentserver:9.1.0
restart: unless-stopped
volumes:
- "./onlyoffice/DocumentServer/logs:/var/log/onlyoffice"


@ -3,7 +3,7 @@
services:
database:
image: docker.io/library/postgres:15-alpine@sha256:64583b3cb4f2010277bdd9749456de78e5c36f8956466ba14b0b96922e510950
image: docker.io/library/postgres:15-alpine
environment:
- "POSTGRES_USER=hedgedoc"
- "POSTGRES_PASSWORD={{ secret__hedgedoc_db_password }}"
@ -13,7 +13,7 @@ services:
restart: unless-stopped
app:
image: quay.io/hedgedoc/hedgedoc:1.10.3@sha256:ca58fd73ecf05c89559b384fb7a1519c18c8cbba5c21a0018674ed820b9bdb73
image: quay.io/hedgedoc/hedgedoc:1.10.3
environment:
- "CMD_DB_URL=postgres://hedgedoc:{{ secret__hedgedoc_db_password }}@database:5432/hedgedoc"
- "CMD_DOMAIN=pad.hamburg.ccc.de"


@ -3,7 +3,7 @@
services:
database:
image: docker.io/library/postgres:15-alpine@sha256:64583b3cb4f2010277bdd9749456de78e5c36f8956466ba14b0b96922e510950
image: docker.io/library/postgres:15-alpine
environment:
- "POSTGRES_USER=pretalx"
- "POSTGRES_PASSWORD={{ secret__pretalx_db_password }}"
@ -15,7 +15,7 @@ services:
- pretalx_net
redis:
image: docker.io/library/redis:8.2.2@sha256:4521b581dbddea6e7d81f8fe95ede93f5648aaa66a9dacd581611bf6fe7527bd
image: docker.io/library/redis:8.2.2
restart: unless-stopped
volumes:
- redis:/data
@ -23,7 +23,7 @@ services:
- pretalx_net
static:
image: docker.io/library/nginx:1.29.3@sha256:f547e3d0d5d02f7009737b284abc87d808e4252b42dceea361811e9fc606287f
image: docker.io/library/nginx:1.29.2
restart: unless-stopped
volumes:
- public:/usr/share/nginx/html
@ -33,7 +33,7 @@ services:
- pretalx_net
pretalx:
image: docker.io/pretalx/standalone:v2025.1.0@sha256:fb2d15f11bcae8bb15430084ed81a150cfdf7c79705450583b51e352ba486e8e
image: docker.io/pretalx/standalone:v2025.1.0
entrypoint: gunicorn
command:
- "pretalx.wsgi"
@ -78,7 +78,7 @@ services:
- pretalx_net
celery:
image: docker.io/pretalx/standalone:v2025.1.0@sha256:fb2d15f11bcae8bb15430084ed81a150cfdf7c79705450583b51e352ba486e8e
image: docker.io/pretalx/standalone:v2025.1.0
command:
- taskworker
restart: unless-stopped


@ -29,7 +29,6 @@ map $host $upstream_acme_challenge_host {
wiki.hamburg.ccc.de 172.31.17.146:31820;
www.hamburg.ccc.de 172.31.17.151:31820;
tickets.hamburg.ccc.de 172.31.17.148:31820;
sunders.hamburg.ccc.de 172.31.17.170:31820;
zammad.hamburg.ccc.de 172.31.17.152:31820;
eh03.easterhegg.eu 172.31.17.151:31820;
eh05.easterhegg.eu 172.31.17.151:31820;


@ -43,7 +43,6 @@ stream {
staging.hamburg.ccc.de 172.31.17.151:8443;
spaceapi.hamburg.ccc.de 172.31.17.151:8443;
tickets.hamburg.ccc.de 172.31.17.148:8443;
sunders.hamburg.ccc.de 172.31.17.170:8443;
zammad.hamburg.ccc.de 172.31.17.152:8443;
c3cat.de 172.31.17.151:8443;
www.c3cat.de 172.31.17.151:8443;


@ -1,57 +0,0 @@
# Source:
# https://git.hamburg.ccc.de/CCCHH/sunders/src/branch/main/docker-compose.yml
services:
db:
image: mariadb:12.0.2
command: --max_allowed_packet=3250585600
environment:
MYSQL_ROOT_PASSWORD: "{{ secret__sunders_db_root_password }}"
MYSQL_DATABASE: camera
MYSQL_USER: camera
MYSQL_PASSWORD: "{{ secret__sunders_db_camera_password }}"
volumes:
- mariadb:/var/lib/mysql
healthcheck:
test: ["CMD", "mariadb-admin", "ping", "-h", "localhost", "-uroot", "-p{{ secret__sunders_db_root_password }}"]
interval: 10s
timeout: 5s
start_period: 30s
retries: 5
web:
image: git.hamburg.ccc.de/ccchh/sunders/web:latest
environment:
MYSQL_HOST: db
MYSQL_DB: camera
CAMERA_SELECT_USER: camera_select
CAMERA_SELECT_USER_PASSWORD: "{{ secret__sunders_db_camera_select_password }}"
DEFAULT_ZOOM: 12
DEFAULT_LAT: 0
DEFAULT_LON: 0
DEFAULT_LANGUAGE: en
IMPRESSUM_URL: https://hamburg.ccc.de/imprint/
ports:
- "8080:80"
depends_on:
data_handler:
condition: service_started
data_handler:
image: git.hamburg.ccc.de/ccchh/sunders/data_handler:latest
environment:
MYSQL_HOST: db
MYSQL_DB: camera
MYSQL_USER: root
MYSQL_PASSWORD: "{{ secret__sunders_db_root_password }}"
CAMERA_USER: camera
CAMERA_USER_PASSWORD: "{{ secret__sunders_db_camera_password }}"
CAMERA_SELECT_USER: camera_select
CAMERA_SELECT_USER_PASSWORD: "{{ secret__sunders_db_camera_select_password }}"
depends_on:
db:
condition: service_healthy
restart: true
volumes:
mariadb:


@ -1,42 +0,0 @@
# partly generated 2022-01-08, Mozilla Guideline v5.6, nginx 1.17.7, OpenSSL 1.1.1k, intermediate configuration
# https://ssl-config.mozilla.org/#server=nginx&version=1.17.7&config=intermediate&openssl=1.1.1k&guideline=5.6
server {
# Listen on a custom port for the proxy protocol.
listen 8443 ssl http2 proxy_protocol;
# Make use of the ngx_http_realip_module to set the $remote_addr and
# $remote_port to the client address and client port, when using proxy
# protocol.
# First set our proxy protocol proxy as trusted.
set_real_ip_from 172.31.17.140;
# Then tell the realip_module to get the addresses from the proxy protocol
# header.
real_ip_header proxy_protocol;
server_name sunders.hamburg.ccc.de;
ssl_certificate /etc/letsencrypt/live/sunders.hamburg.ccc.de/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/sunders.hamburg.ccc.de/privkey.pem;
# verify chain of trust of OCSP response using Root CA and Intermediate certs
ssl_trusted_certificate /etc/letsencrypt/live/sunders.hamburg.ccc.de/chain.pem;
# HSTS (ngx_http_headers_module is required) (63072000 seconds)
add_header Strict-Transport-Security "max-age=63072000" always;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Port 443;
# This is https in any case.
proxy_set_header X-Forwarded-Proto https;
# Hide the X-Forwarded header.
proxy_hide_header X-Forwarded;
# Assume we are the only Reverse Proxy (well, using Proxy Protocol, but that
# is transparent).
# Also provide "_hidden" for by, since it's not relevant.
proxy_set_header Forwarded "for=$remote_addr;proto=https;host=$host;by=_hidden";
location / {
proxy_pass http://127.0.0.1:8080/;
}
}


@ -1,7 +1,7 @@
---
services:
database:
image: docker.io/library/postgres:15-alpine@sha256:64583b3cb4f2010277bdd9749456de78e5c36f8956466ba14b0b96922e510950
image: docker.io/library/postgres:15-alpine
environment:
- "POSTGRES_USER=pretix"
- "POSTGRES_PASSWORD={{ secret__pretix_db_password }}"
@ -13,7 +13,7 @@ services:
restart: unless-stopped
redis:
image: docker.io/library/redis:7.4.6@sha256:a9cc41d6d01da2aa26c219e4f99ecbeead955a7b656c1c499cce8922311b2514
image: docker.io/library/redis:7.4.6
ports:
- "6379:6379"
volumes:
@ -25,7 +25,7 @@ services:
backend:
pretix:
image: docker.io/pretix/standalone:2024.8@sha256:110bac37efa5f736227f158f38e421ed738d03dccc274dfb415b258ab0f75cfe
image: docker.io/pretix/standalone:2024.8
command: ["all"]
ports:
- "8345:80"


@ -1,4 +0,0 @@
POSTGRES_PASS={{ secret__zammad_db_password }}
POSTGRES_VERSION=15-alpine
REDIS_VERSION=7-alpine
NGINX_SERVER_SCHEME=https


@ -1,149 +0,0 @@
---
version: "3.8"
# Taken from: https://github.com/zammad/zammad-docker-compose/blob/master/docker-compose.yml
# Version: v14.1.1
# Update from new tag by replacing all content.
# Configuration should be done in the .env.j2.
x-shared:
zammad-service: &zammad-service
environment: &zammad-environment
MEMCACHE_SERVERS: ${MEMCACHE_SERVERS:-zammad-memcached:11211}
POSTGRESQL_DB: ${POSTGRES_DB:-zammad_production}
POSTGRESQL_HOST: ${POSTGRES_HOST:-zammad-postgresql}
POSTGRESQL_USER: ${POSTGRES_USER:-zammad}
POSTGRESQL_PASS: ${POSTGRES_PASS:-zammad}
POSTGRESQL_PORT: ${POSTGRES_PORT:-5432}
POSTGRESQL_OPTIONS: ${POSTGRESQL_OPTIONS:-?pool=50}
POSTGRESQL_DB_CREATE:
REDIS_URL: ${REDIS_URL:-redis://zammad-redis:6379}
S3_URL:
# Backup settings
BACKUP_DIR: "${BACKUP_DIR:-/var/tmp/zammad}"
BACKUP_TIME: "${BACKUP_TIME:-03:00}"
HOLD_DAYS: "${HOLD_DAYS:-10}"
TZ: "${TZ:-Europe/Berlin}"
# Allow passing in these variables via .env:
AUTOWIZARD_JSON:
AUTOWIZARD_RELATIVE_PATH:
ELASTICSEARCH_ENABLED:
ELASTICSEARCH_SCHEMA:
ELASTICSEARCH_HOST:
ELASTICSEARCH_PORT:
ELASTICSEARCH_USER:
ELASTICSEARCH_PASS:
ELASTICSEARCH_NAMESPACE:
ELASTICSEARCH_REINDEX:
NGINX_PORT:
NGINX_CLIENT_MAX_BODY_SIZE:
NGINX_SERVER_NAME:
NGINX_SERVER_SCHEME:
RAILS_TRUSTED_PROXIES:
ZAMMAD_HTTP_TYPE:
ZAMMAD_FQDN:
ZAMMAD_WEB_CONCURRENCY:
ZAMMAD_PROCESS_SESSIONS_JOBS_WORKERS:
ZAMMAD_PROCESS_SCHEDULED_JOBS_WORKERS:
ZAMMAD_PROCESS_DELAYED_JOBS_WORKERS:
# ZAMMAD_SESSION_JOBS_CONCURRENT is deprecated, please use ZAMMAD_PROCESS_SESSIONS_JOBS_WORKERS instead.
ZAMMAD_SESSION_JOBS_CONCURRENT:
# Variables used by the nginx-proxy container for reverse proxy creation
# for docs refer to https://github.com/nginx-proxy/nginx-proxy
VIRTUAL_HOST:
VIRTUAL_PORT:
# Variables used by acme-companion for retrieval of LetsEncrypt certificate
# for docs refer to https://github.com/nginx-proxy/acme-companion
LETSENCRYPT_HOST:
LETSENCRYPT_EMAIL:
image: ${IMAGE_REPO:-ghcr.io/zammad/zammad}:${VERSION:-6.5.2}
restart: ${RESTART:-always}
volumes:
- zammad-storage:/opt/zammad/storage
depends_on:
- zammad-memcached
- zammad-postgresql
- zammad-redis
services:
zammad-backup:
<<: *zammad-service
command: ["zammad-backup"]
volumes:
- zammad-backup:/var/tmp/zammad
- zammad-storage:/opt/zammad/storage:ro
user: 0:0
zammad-elasticsearch:
image: elasticsearch:${ELASTICSEARCH_VERSION:-8.19.4}
restart: ${RESTART:-always}
volumes:
- elasticsearch-data:/usr/share/elasticsearch/data
environment:
discovery.type: single-node
xpack.security.enabled: 'false'
ES_JAVA_OPTS: ${ELASTICSEARCH_JAVA_OPTS:--Xms1g -Xmx1g}
zammad-init:
<<: *zammad-service
command: ["zammad-init"]
depends_on:
- zammad-postgresql
restart: on-failure
user: 0:0
zammad-memcached:
command: memcached -m 256M
image: memcached:${MEMCACHE_VERSION:-1.6.39-alpine}
restart: ${RESTART:-always}
zammad-nginx:
<<: *zammad-service
command: ["zammad-nginx"]
expose:
- "${NGINX_PORT:-8080}"
ports:
- "${NGINX_EXPOSE_PORT:-8080}:${NGINX_PORT:-8080}"
depends_on:
- zammad-railsserver
zammad-postgresql:
environment:
POSTGRES_DB: ${POSTGRES_DB:-zammad_production}
POSTGRES_USER: ${POSTGRES_USER:-zammad}
POSTGRES_PASSWORD: ${POSTGRES_PASS:-zammad}
image: postgres:${POSTGRES_VERSION:-17.6-alpine}
restart: ${RESTART:-always}
volumes:
- postgresql-data:/var/lib/postgresql/data
zammad-railsserver:
<<: *zammad-service
command: ["zammad-railsserver"]
zammad-redis:
image: redis:${REDIS_VERSION:-7.4.5-alpine}
restart: ${RESTART:-always}
volumes:
- redis-data:/data
zammad-scheduler:
<<: *zammad-service
command: ["zammad-scheduler"]
zammad-websocket:
<<: *zammad-service
command: ["zammad-websocket"]
volumes:
elasticsearch-data:
driver: local
postgresql-data:
driver: local
redis-data:
driver: local
zammad-backup:
driver: local
zammad-storage:
driver: local


@ -0,0 +1,162 @@
---
{#
https://github.com/zammad/zammad-docker-compose
Docker Compose does not allow defining variables in the compose file (only in .env files), so we use Jinja variables instead
see https://github.com/zammad/zammad-docker-compose/blob/master/.env
#}
{%- set ELASTICSEARCH_VERSION = "8.19.4" | quote -%}
{%- set IMAGE_REPO = "ghcr.io/zammad/zammad" | quote -%}
{%- set MEMCACHE_SERVERS = "zammad-memcached:11211" | quote -%}
{%- set MEMCACHE_VERSION = "1.6-alpine" | quote -%}
{%- set POSTGRES_DB = "zammad_production" | quote -%}
{%- set POSTGRES_HOST = "zammad-postgresql" | quote -%}
{%- set POSTGRES_USER = "zammad" | quote -%}
{%- set POSTGRES_PASS = secret__zammad_db_password | quote -%}
{%- set POSTGRES_PORT = "5432" | quote -%}
{%- set POSTGRES_VERSION = "15-alpine" | quote -%}
{%- set REDIS_URL = "redis://zammad-redis:6379" | quote -%}
{%- set REDIS_VERSION = "7-alpine" | quote -%}
{%- set RESTART = "always" | quote -%}
{%- set VERSION = "6" | quote -%}
x-shared:
zammad-service: &zammad-service
environment: &zammad-environment
MEMCACHE_SERVERS: {{ MEMCACHE_SERVERS }}
POSTGRESQL_DB: {{ POSTGRES_DB }}
POSTGRESQL_HOST: {{ POSTGRES_HOST }}
POSTGRESQL_USER: {{ POSTGRES_USER }}
POSTGRESQL_PASS: {{ POSTGRES_PASS }}
POSTGRESQL_PORT: {{ POSTGRES_PORT }}
REDIS_URL: {{ REDIS_URL }}
# Allow passing in these variables via .env:
AUTOWIZARD_JSON:
AUTOWIZARD_RELATIVE_PATH:
ELASTICSEARCH_ENABLED:
ELASTICSEARCH_HOST:
ELASTICSEARCH_PORT:
ELASTICSEARCH_SCHEMA:
ELASTICSEARCH_NAMESPACE:
ELASTICSEARCH_REINDEX:
ELASTICSEARCH_SSL_VERIFY:
NGINX_PORT:
NGINX_SERVER_NAME:
NGINX_SERVER_SCHEME: https
POSTGRESQL_DB_CREATE:
POSTGRESQL_OPTIONS:
RAILS_TRUSTED_PROXIES:
ZAMMAD_WEB_CONCURRENCY:
ZAMMAD_SESSION_JOBS:
ZAMMAD_PROCESS_SCHEDULED:
ZAMMAD_PROCESS_DELAYED_JOBS_WORKERS:
image: {{ IMAGE_REPO }}:{{ VERSION }}
restart: {{ RESTART }}
volumes:
- zammad-storage:/opt/zammad/storage
- zammad-var:/opt/zammad/var
depends_on:
- zammad-memcached
- zammad-postgresql
- zammad-redis
services:
zammad-backup:
command: ["zammad-backup"]
depends_on:
- zammad-railsserver
- zammad-postgresql
entrypoint: /usr/local/bin/backup.sh
environment:
<<: *zammad-environment
BACKUP_TIME: "03:00"
HOLD_DAYS: "10"
TZ: Europe/Berlin
image: postgres:{{ POSTGRES_VERSION }}
restart: {{ RESTART }}
volumes:
- zammad-backup:/var/tmp/zammad
- zammad-storage:/opt/zammad/storage:ro
- zammad-var:/opt/zammad/var:ro
- ./scripts/backup.sh:/usr/local/bin/backup.sh:ro
zammad-elasticsearch:
image: elasticsearch:{{ ELASTICSEARCH_VERSION }}
restart: {{ RESTART }}
volumes:
- elasticsearch-data:/usr/share/elasticsearch/data
environment:
discovery.type: single-node
xpack.security.enabled: 'false'
ES_JAVA_OPTS: ${ELASTICSEARCH_JAVA_OPTS:--Xms1g -Xmx1g}
zammad-init:
<<: *zammad-service
command: ["zammad-init"]
depends_on:
- zammad-postgresql
restart: on-failure
user: 0:0
volumes:
- zammad-storage:/opt/zammad/storage
- zammad-var:/opt/zammad/var
zammad-memcached:
command: memcached -m 256M
image: memcached:{{ MEMCACHE_VERSION }}
restart: {{ RESTART }}
zammad-nginx:
<<: *zammad-service
command: ["zammad-nginx"]
expose:
- "8080"
ports:
- "8080:8080"
depends_on:
- zammad-railsserver
volumes:
- zammad-var:/opt/zammad/var:ro # required for the zammad-ready check file
zammad-postgresql:
environment:
POSTGRES_DB: {{ POSTGRES_DB }}
POSTGRES_USER: {{ POSTGRES_USER }}
POSTGRES_PASSWORD: {{ POSTGRES_PASS }}
image: postgres:{{ POSTGRES_VERSION }}
restart: {{ RESTART }}
volumes:
- postgresql-data:/var/lib/postgresql/data
zammad-railsserver:
<<: *zammad-service
command: ["zammad-railsserver"]
zammad-redis:
image: redis:{{ REDIS_VERSION }}
restart: {{ RESTART }}
volumes:
- redis-data:/data
zammad-scheduler:
<<: *zammad-service
command: ["zammad-scheduler"]
volumes:
- /ansible_docker_compose/zammad-scheduler-database.yml:/opt/zammad/config/database.yml # workaround for connection pool issue
zammad-websocket:
<<: *zammad-service
command: ["zammad-websocket"]
volumes:
elasticsearch-data:
driver: local
postgresql-data:
driver: local
redis-data:
driver: local
zammad-backup:
driver: local
zammad-storage:
driver: local
zammad-var:
driver: local


@ -1,7 +1,7 @@
services:
# https://github.com/richardg867/WaybackProxy
waybackproxy:
image: cttynul/waybackproxy:latest@sha256:e001d5b1d746522cd1ab2728092173c0d96f08086cbd3e49cdf1e298b8add22e
image: cttynul/waybackproxy:latest
environment:
DATE: 19990101
DATE_TOLERANCE: 730


@ -17,15 +17,7 @@ HostKey /etc/ssh/ssh_host_ed25519_key
HostKey /etc/ssh/ssh_host_rsa_key
HostKey /etc/ssh/ssh_host_ecdsa_key
{% if ansible_facts["distribution"] == "Debian" and ansible_facts["distribution_major_version"] == "13" %}
KexAlgorithms mlkem768x25519-sha256,curve25519-sha256@libssh.org,ecdh-sha2-nistp521,ecdh-sha2-nistp384,ecdh-sha2-nistp256,diffie-hellman-group-exchange-sha256
{% elif ansible_facts["distribution"] == "Debian" and ansible_facts["distribution_major_version"] == "12" %}
KexAlgorithms sntrup761x25519-sha512,curve25519-sha256@libssh.org,ecdh-sha2-nistp521,ecdh-sha2-nistp384,ecdh-sha2-nistp256,diffie-hellman-group-exchange-sha256
{% else %}
KexAlgorithms curve25519-sha256@libssh.org,ecdh-sha2-nistp521,ecdh-sha2-nistp384,ecdh-sha2-nistp256,diffie-hellman-group-exchange-sha256
{% endif %}
Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr
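For illustration, on a Debian 13 host the template above renders only its first branch, so the resulting sshd_config contains:

KexAlgorithms mlkem768x25519-sha256,curve25519-sha256@libssh.org,ecdh-sha2-nistp521,ecdh-sha2-nistp384,ecdh-sha2-nistp256,diffie-hellman-group-exchange-sha256
Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr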


@ -1,24 +1,23 @@
# Role `docker_compose`
A role for deploying a Docker-Compose-based application.
It deploys the given Compose file, an optional `.env` file, as well as configuration files to the specified hosts and makes sure all services are up-to-date and running.
The Compose file gets deployed to `/ansible_docker_compose/compose.yaml`, the `.env` file to `/ansible_docker_compose/.env` and the configuration files get deployed into the `/ansible_docker_compose/configs/` directory.
It deploys the given Compose file as well as configuration files to the specified hosts and makes sure all services are up-to-date and running.
The Compose file gets deployed to `/ansible_docker_compose/compose.yaml` and the configuration files get deployed into the `/ansible_docker_compose/configs/` directory.
A use case for the deployment of the additional configuration files is Compose's top-level element `configs` in conjunction with the `configs` option for services.
## Supported Distributions
Should work on Debian-based distributions.
The following distributions are supported:
- Debian 11
## Required Arguments
- `docker_compose__compose_file_content`: The content to deploy to the Compose file at `/ansible_docker_compose/compose.yaml`.
For the required arguments look at the [`argument_specs.yaml`](./meta/argument_specs.yaml).
## Optional Arguments
## `hosts`
- `docker_compose__env_file_content`: The content to deploy to the `.env` file at `/ansible_docker_compose/.env`.
- `docker_compose__configuration_files`: A list of configuration files to deploy to the `/ansible_docker_compose/configs/` directory.
- `docker_compose__configuration_files.*.name`: The name of the configuration file.
- `docker_compose__configuration_files.*.content`: The content to deploy to the configuration file.
The `hosts` for this role need to be the machines for which you want to make sure the given Compose file is deployed and all of its services are up-to-date and running.
## Links & Resources


@ -1 +1,2 @@
docker_compose__configuration_files: [ ]
docker_compose__restart_cmd: ""


@ -1,11 +1,13 @@
- name: docker compose down
community.docker.docker_compose_v2:
project_src: /ansible_docker_compose
state: absent
ansible.builtin.command:
cmd: /usr/bin/docker compose down
chdir: /ansible_docker_compose
become: true
- name: docker compose restart
community.docker.docker_compose_v2:
project_src: /ansible_docker_compose
state: restarted
changed_when: true # This is always changed.
- name: docker compose reload script
ansible.builtin.command:
cmd: /usr/bin/docker compose {{ docker_compose__restart_cmd }}
chdir: /ansible_docker_compose
become: true
changed_when: true # Mark this as always changed (for now?).
when: docker_compose__restart_cmd != ""
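To illustrate the new reload handler: with the yate group vars from earlier in this compare (`docker_compose__restart_cmd: "exec yate sh -c 'kill -1 1'"`), the templated command becomes:

/usr/bin/docker compose exec yate sh -c 'kill -1 1'

i.e. it sends SIGHUP to PID 1 of the running yate container (the conventional reload signal) instead of doing a full restart.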


@ -2,20 +2,25 @@ argument_specs:
main:
options:
docker_compose__compose_file_content:
description: >-
The content of the Compose file at
`/ansible_docker_compose/compose.yaml`.
type: str
required: true
docker_compose__env_file_content:
type: str
required: false
docker_compose__configuration_files:
description: >-
A list of configuration files to be deployed in the
`/ansible_docker_compose/configs/` directory.
type: list
elements: dict
required: false
default: [ ]
options:
name:
description: The name of the configuration file.
type: str
required: true
content:
description: The content of the configuration file.
type: str
required: true
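A minimal sketch of host vars satisfying this spec (the `example` resource paths and the config file name are illustrative, modeled on the pretalx and zammad vars above):

docker_compose__compose_file_content: "{{ lookup('ansible.builtin.template', 'resources/example/docker_compose/compose.yaml.j2') }}"
docker_compose__configuration_files:
  - name: example.conf
    content: "{{ lookup('ansible.builtin.file', 'resources/example/example.conf') }}"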


@ -1,3 +1,10 @@
---
dependencies:
- role: distribution_check
vars:
distribution_check__distribution_support_spec:
- name: Debian
major_versions:
- 11
- 12
- role: docker


@ -17,17 +17,6 @@
become: true
notify: docker compose down
- name: deploy the .env file
ansible.builtin.copy:
content: "{{ docker_compose__env_file_content }}"
dest: /ansible_docker_compose/.env
mode: "0644"
owner: root
group: root
become: true
when: docker_compose__env_file_content is defined
notify: docker compose down
- name: make sure the `/ansible_docker_compose/configs` directory exists
ansible.builtin.file:
path: /ansible_docker_compose/configs
@ -59,7 +48,7 @@
state: absent
become: true
loop: "{{ docker_compose__config_files_to_remove.files }}"
notify: docker compose restart
# notify: docker compose down
- name: make sure all given configuration files are deployed
ansible.builtin.copy:
@ -70,19 +59,45 @@
group: root
become: true
loop: "{{ docker_compose__configuration_files }}"
notify: docker compose restart
# notify: docker compose down
notify: docker compose reload script
- name: Flush handlers to make "docker compose down" and "docker compose restart" handlers run now
- name: Flush handlers to make "docker compose down" handler run now
ansible.builtin.meta: flush_handlers
- name: docker compose up
community.docker.docker_compose_v2:
project_src: /ansible_docker_compose
state: present
build: always
pull: always
remove_orphans: true
- name: docker compose ps --format json before docker compose up
ansible.builtin.command:
cmd: /usr/bin/docker compose ps --format json
chdir: /ansible_docker_compose
become: true
changed_when: false
register: docker_compose__ps_json_before_up
- name: docker compose up --detach --pull always --build
ansible.builtin.command:
cmd: /usr/bin/docker compose up --detach --pull always --build --remove-orphans
chdir: /ansible_docker_compose
become: true
changed_when: false
# The changed status of this task is meant to be determined by the
# "potentially report changed" task together with the "docker compose ps
# --format json [...]" tasks.
- name: docker compose ps --format json after docker compose up
ansible.builtin.command:
cmd: /usr/bin/docker compose ps --format json
chdir: /ansible_docker_compose
become: true
changed_when: false
register: docker_compose__ps_json_after_up
# Doesn't work anymore. Dunno why.
# TODO: Fix
# - name: potentially report changed
# ansible.builtin.debug:
# msg: "If this reports changed, then the docker compose containers changed."
# changed_when: (docker_compose__ps_json_before_up.stdout | from_json | community.general.json_query('[].ID') | sort)
# != (docker_compose__ps_json_after_up.stdout | from_json | community.general.json_query('[].ID') | sort)
- name: Make sure anacron is installed
become: true
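A plausible, unconfirmed explanation for the commented-out "potentially report changed" task breaking: newer Docker Compose releases print `ps --format json` as one JSON object per line instead of a single array, so parsing the whole stdout with `from_json` fails. A hedged sketch of a condition that would handle the line-based format:

changed_when: (docker_compose__ps_json_before_up.stdout_lines | map('from_json') | map(attribute='ID') | sort)
  != (docker_compose__ps_json_after_up.stdout_lines | map('from_json') | map(attribute='ID') | sort)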


@ -0,0 +1,8 @@
---
dependencies:
- role: distribution_check
vars:
distribution_check__distribution_support_spec:
- name: Debian
major_versions:
- "11"


@ -7,7 +7,11 @@
- python3
- python3-pip
- python3-setuptools
- python3-poetry
- name: Ensure python poetry is installed
become: true
ansible.builtin.pip:
name: poetry
- name: Ensure foobazdmx user exists
become: true


@ -14,3 +14,11 @@
loop:
- "netbox.service"
- "netbox-rq.service"
- name: Ensure netbox housekeeping timer is set up and up-to-date
ansible.builtin.systemd_service:
daemon_reload: true
name: "netbox-housekeeping.timer"
enabled: true
state: restarted
become: true


@ -108,3 +108,17 @@
- "netbox.service"
- "netbox-rq.service"
notify: Ensure netbox systemd services are set up and up-to-date
- name: Ensure provided housekeeping systemd service and timer are copied
ansible.builtin.copy:
remote_src: true
src: "/opt/netbox/contrib/{{ item }}"
dest: "/etc/systemd/system/{{ item }}"
mode: "0644"
owner: root
group: root
become: true
loop:
- "netbox-housekeeping.service"
- "netbox-housekeeping.timer"
notify: Ensure netbox housekeeping timer is set up and up-to-date


@ -34,7 +34,7 @@ services:
OVERWRITEPROTOCOL: "https"
db:
image: docker.io/library/postgres:{{ nextcloud__postgres_version }}
image: postgres:{{ nextcloud__postgres_version }}
restart: unless-stopped
#ports:
# - 127.0.0.1:5432:5432
@ -48,7 +48,7 @@ services:
POSTGRES_PASSWORD: "{{ nextcloud__postgres_password }}"
redis:
image: docker.io/library/redis:alpine
image: redis:alpine
restart: unless-stopped
networks:
- nextcloud

roles/ola/meta/main.yaml (new file)

@ -0,0 +1,8 @@
---
dependencies:
- role: distribution_check
vars:
distribution_check__distribution_support_spec:
- name: Debian
major_versions:
- "11"


@ -6,8 +6,5 @@ Wants=network-online.target
[Service]
Type=oneshot
ExecStart=/usr/bin/docker run --rm \
--pull=always \
-v "/etc/renovate/config.js:/usr/src/app/config.js" \
--mount "type=volume,src=renovate,dst=/tmp/renovate" \
--env "RENOVATE_BASE_DIR=/tmp/renovate" \
docker.io/renovate/renovate:latest
renovate/renovate