Compare commits

...

23 commits

Author SHA1 Message Date
35502e53ff Update docker.io/library/redis Docker tag to v8
2026-01-07 17:16:48 +00:00
fbd3ea5496
base_config: disable cloud-init ssh module to avoid hostkey regeneration
The ssh module only needs to run once on first boot anyway, but it apparently re-runs on every change to the Proxmox cloud-init config, which leads to annoying "random" host key changes, so disable it.
2026-01-07 18:09:48 +01:00
80ddb2efc9
router: enable a DHCP server for the v4-NAT network as well
As the hosts don't really need a static v4, just do DHCP.
2026-01-07 17:25:27 +01:00
a328e92971 Should be compatible with trixie/13
2026-01-03 14:03:26 +01:00
25db54b8ad Make sure pip is installed 2026-01-03 14:02:56 +01:00
944c8cde82
onlyoffice(host): move to new network and hostname
2025-12-17 03:34:39 +01:00
366456eff8
keycloak(host): move to new network and hostname
Also just listen on port 8443 for keycloak-admin proxy protocol.
2025-12-16 21:50:40 +01:00
1ca71a053e
pad(host): move to new network and hostname
2025-12-16 21:12:21 +01:00
b9add5bda3
cloud(host): set correct new proxy protocol reverse proxy ip 2025-12-16 20:59:15 +01:00
570600fce3
eh22-wiki(host): move to new network and hostname
2025-12-16 20:58:05 +01:00
5a476f2103
cloud(host): move to new network and hostname
2025-12-16 20:47:44 +01:00
b72dee0d6d
wiki(host): actually have nginx listen on v6
2025-12-16 19:52:24 +01:00
8b94a49f5e
wiki(host): move to new network and internal hostname
2025-12-16 19:23:33 +01:00
5f98dca56c
router(host): expose public v6 networks
Also prepare for exposing public v4 networks later.
2025-12-16 19:03:36 +01:00
66ee44366b public-reverse-proxy: New IP of wiki VM 2025-12-14 15:39:03 +01:00
183b91b9f2
router(host): add nftables config for basic router functionality
2025-12-13 22:07:38 +01:00
d0618e3820
nftables(role): introduce role for deploying nftables 2025-12-13 22:07:37 +01:00
a9e394da06
router(host): add systemd-networkd-based network config 2025-12-13 22:07:37 +01:00
d6ba70523c
systemd_networkd(role): introduce role for deploying systemd-networkd config 2025-12-13 22:07:35 +01:00
766aa125c4
router(host): introduce router 2025-12-13 22:07:07 +01:00
c39cb0e390
we don't need to set a specific alloy version
2025-12-06 22:11:53 +01:00
df3710f019
grafana: set alloy to version v1.11.3
1.12.0 is buggy
2025-12-02 22:55:29 +01:00
0eaaf9227c Update all stable non-major dependencies
2025-11-19 13:30:39 +00:00
48 changed files with 382 additions and 54 deletions

View file

@@ -24,7 +24,7 @@ jobs:
# work in our environmnet.
# Rather manually setup python (pip) before instead.
- name: Run ansible-lint
uses: https://github.com/ansible/ansible-lint@v25.9.2
uses: https://github.com/ansible/ansible-lint@v25.11.0
with:
setup_python: "false"
requirements_file: "requirements.yml"

View file

@@ -1,11 +1,11 @@
# renovate: datasource=docker depName=git.hamburg.ccc.de/ccchh/oci-images/nextcloud
nextcloud__version: 32
# renovate: datasource=docker depName=docker.io/library/postgres
nextcloud__postgres_version: 15.14
nextcloud__postgres_version: 15.15
nextcloud__fqdn: cloud.hamburg.ccc.de
nextcloud__data_dir: /data/nextcloud
nextcloud__extra_configuration: "{{ lookup('ansible.builtin.template', 'resources/chaosknoten/cloud/nextcloud/extra_configuration.config.php.j2') }}"
nextcloud__use_custom_new_user_skeleton: true
nextcloud__custom_new_user_skeleton_directory: "resources/chaosknoten/cloud/nextcloud/new_user_skeleton_directory/"
nextcloud__proxy_protocol_reverse_proxy_ip: 172.31.17.140
nextcloud__proxy_protocol_reverse_proxy_ip: "2a00:14b0:4200:3000:125::1"
nextcloud__certbot_acme_account_email_address: le-admin@hamburg.ccc.de

View file

@@ -53,7 +53,6 @@ nginx__configurations:
- name: metrics.hamburg.ccc.de
content: "{{ lookup('ansible.builtin.file', 'resources/chaosknoten/grafana/nginx/metrics.hamburg.ccc.de.conf') }}"
alloy_config: |
prometheus.remote_write "default" {
endpoint {

View file

@@ -1,5 +1,5 @@
# renovate: datasource=github-releases depName=netbox packageName=netbox-community/netbox
netbox__version: "v4.4.5"
netbox__version: "v4.4.6"
netbox__config: "{{ lookup('ansible.builtin.template', 'resources/chaosknoten/netbox/netbox/configuration.py.j2') }}"
netbox__custom_pipeline_oidc_group_and_role_mapping: true

View file

@@ -0,0 +1,2 @@
systemd_networkd__config_dir: 'resources/chaosknoten/router/systemd_networkd/'
nftables__config: "{{ lookup('ansible.builtin.file', 'resources/chaosknoten/router/nftables/nftables.conf') }}"

View file

@@ -7,13 +7,13 @@ all:
chaosknoten:
ansible_host: chaosknoten.hamburg.ccc.de
cloud:
ansible_host: cloud-intern.hamburg.ccc.de
ansible_host: cloud.hosts.hamburg.ccc.de
ansible_user: chaos
ansible_ssh_common_args: -J ssh://chaos@public-reverse-proxy.hamburg.ccc.de
ansible_ssh_common_args: -J ssh://chaos@router.hamburg.ccc.de
eh22-wiki:
ansible_host: eh22-wiki-intern.hamburg.ccc.de
ansible_host: eh22-wiki.hosts.hamburg.ccc.de
ansible_user: chaos
ansible_ssh_common_args: -J ssh://chaos@public-reverse-proxy.hamburg.ccc.de
ansible_ssh_common_args: -J ssh://chaos@router.hamburg.ccc.de
grafana:
ansible_host: grafana-intern.hamburg.ccc.de
ansible_user: chaos
@@ -23,9 +23,9 @@ all:
ansible_user: chaos
ansible_ssh_common_args: -J ssh://chaos@public-reverse-proxy.hamburg.ccc.de
keycloak:
ansible_host: keycloak-intern.hamburg.ccc.de
ansible_host: keycloak.hosts.hamburg.ccc.de
ansible_user: chaos
ansible_ssh_common_args: -J ssh://chaos@public-reverse-proxy.hamburg.ccc.de
ansible_ssh_common_args: -J ssh://chaos@router.hamburg.ccc.de
lists:
ansible_host: lists.hamburg.ccc.de
ansible_user: chaos
@@ -37,13 +37,13 @@ all:
ansible_user: chaos
ansible_ssh_common_args: -J ssh://chaos@public-reverse-proxy.hamburg.ccc.de
onlyoffice:
ansible_host: onlyoffice-intern.hamburg.ccc.de
ansible_host: onlyoffice.hosts.hamburg.ccc.de
ansible_user: chaos
ansible_ssh_common_args: -J ssh://chaos@public-reverse-proxy.hamburg.ccc.de
ansible_ssh_common_args: -J ssh://chaos@router.hamburg.ccc.de
pad:
ansible_host: pad-intern.hamburg.ccc.de
ansible_host: pad.hosts.hamburg.ccc.de
ansible_user: chaos
ansible_ssh_common_args: -J ssh://chaos@public-reverse-proxy.hamburg.ccc.de
ansible_ssh_common_args: -J ssh://chaos@router.hamburg.ccc.de
pretalx:
ansible_host: pretalx-intern.hamburg.ccc.de
ansible_user: chaos
@@ -51,10 +51,13 @@ all:
public-reverse-proxy:
ansible_host: public-reverse-proxy.hamburg.ccc.de
ansible_user: chaos
wiki:
ansible_host: wiki-intern.hamburg.ccc.de
router:
ansible_host: router.hamburg.ccc.de
ansible_user: chaos
ansible_ssh_common_args: -J ssh://chaos@public-reverse-proxy.hamburg.ccc.de
wiki:
ansible_host: wiki.hosts.hamburg.ccc.de
ansible_user: chaos
ansible_ssh_common_args: -J ssh://chaos@router.hamburg.ccc.de
zammad:
ansible_host: zammad-intern.hamburg.ccc.de
ansible_user: chaos
@@ -88,12 +91,19 @@ base_config_hosts:
pad:
pretalx:
public-reverse-proxy:
router:
tickets:
wiki:
zammad:
ntfy:
sunders:
renovate:
systemd_networkd_hosts:
hosts:
router:
nftables_hosts:
hosts:
router:
docker_compose_hosts:
hosts:
ccchoir:
@@ -173,6 +183,7 @@ infrastructure_authorized_keys_hosts:
pad:
pretalx:
public-reverse-proxy:
router:
wiki:
zammad:
ntfy:

View file

@@ -4,6 +4,16 @@
roles:
- base_config
- name: Ensure systemd-networkd config deployment on systemd_networkd_hosts
hosts: systemd_networkd_hosts
roles:
- systemd_networkd
- name: Ensure nftables deployment on nftables_hosts
hosts: nftables_hosts
roles:
- nftables
- name: Ensure deployment of infrastructure authorized keys
hosts: infrastructure_authorized_keys_hosts
roles:

View file

@@ -3,11 +3,12 @@
server {
# Listen on a custom port for the proxy protocol.
listen 8443 ssl http2 proxy_protocol;
listen [::]:8443 ssl http2 proxy_protocol;
# Make use of the ngx_http_realip_module to set the $remote_addr and
# $remote_port to the client address and client port, when using proxy
# protocol.
# First set our proxy protocol proxy as trusted.
set_real_ip_from 172.31.17.140;
set_real_ip_from 2a00:14b0:4200:3000:125::1;
# Then tell the realip_module to get the addreses from the proxy protocol
# header.
real_ip_header proxy_protocol;

View file

@@ -2,7 +2,7 @@
services:
prometheus:
image: docker.io/prom/prometheus:v3.7.2
image: docker.io/prom/prometheus:v3.7.3
container_name: prometheus
command:
- '--config.file=/etc/prometheus/prometheus.yml'
@@ -19,7 +19,7 @@ services:
- prom_data:/prometheus
alertmanager:
image: docker.io/prom/alertmanager:v0.28.1
image: docker.io/prom/alertmanager:v0.29.0
container_name: alertmanager
command:
- '--config.file=/etc/alertmanager/alertmanager.yaml'
@@ -32,7 +32,7 @@ services:
- alertmanager_data:/alertmanager
grafana:
image: docker.io/grafana/grafana:12.2.1
image: docker.io/grafana/grafana:12.3.0
container_name: grafana
ports:
- 3000:3000
@@ -59,7 +59,7 @@ services:
- /dev/null:/etc/prometheus/pve.yml
loki:
image: docker.io/grafana/loki:3.5.7
image: docker.io/grafana/loki:3.6.0
container_name: loki
ports:
- 13100:3100

View file

@@ -46,7 +46,7 @@ services:
- "8080:8080"
db:
image: docker.io/library/postgres:15.14
image: docker.io/library/postgres:15.15
restart: unless-stopped
networks:
- keycloak

View file

@@ -4,11 +4,12 @@
server {
# Listen on a custom port for the proxy protocol.
listen 8443 ssl http2 proxy_protocol;
listen [::]:8443 ssl http2 proxy_protocol;
# Make use of the ngx_http_realip_module to set the $remote_addr and
# $remote_port to the client address and client port, when using proxy
# protocol.
# First set our proxy protocol proxy as trusted.
set_real_ip_from 172.31.17.140;
set_real_ip_from 2a00:14b0:4200:3000:125::1;
# Then tell the realip_module to get the addreses from the proxy protocol
# header.
real_ip_header proxy_protocol;

View file

@@ -4,11 +4,12 @@
server {
# Listen on a custom port for the proxy protocol.
listen 8443 ssl http2 proxy_protocol;
listen [::]:8443 ssl http2 proxy_protocol;
# Make use of the ngx_http_realip_module to set the $remote_addr and
# $remote_port to the client address and client port, when using proxy
# protocol.
# First set our proxy protocol proxy as trusted.
set_real_ip_from 172.31.17.140;
set_real_ip_from 2a00:14b0:4200:3000:125::1;
# Then tell the realip_module to get the addreses from the proxy protocol
# header.
real_ip_header proxy_protocol;

View file

@@ -7,12 +7,13 @@ server {
##listen [::]:443 ssl http2;
# Listen on a custom port for the proxy protocol.
listen 8444 ssl http2 proxy_protocol;
listen 8443 ssl http2 proxy_protocol;
listen [::]:8443 ssl http2 proxy_protocol;
# Make use of the ngx_http_realip_module to set the $remote_addr and
# $remote_port to the client address and client port, when using proxy
# protocol.
# First set our proxy protocol proxy as trusted.
set_real_ip_from 172.31.17.140;
set_real_ip_from 2a00:14b0:4200:3000:125::1;
# Then tell the realip_module to get the addreses from the proxy protocol
# header.
real_ip_header proxy_protocol;

View file

@@ -1,7 +1,7 @@
---
services:
ntfy:
image: docker.io/binwiederhier/ntfy:v2.14.0
image: docker.io/binwiederhier/ntfy:v2.15.0
container_name: ntfy
command:
- serve

View file

@@ -3,11 +3,13 @@
server {
# Listen on a custom port for the proxy protocol.
listen 8443 ssl http2 proxy_protocol;
listen [::]:8443 ssl http2 proxy_protocol;
# Make use of the ngx_http_realip_module to set the $remote_addr and
# $remote_port to the client address and client port, when using proxy
# protocol.
# First set our proxy protocol proxy as trusted.
set_real_ip_from 172.31.17.140;
set_real_ip_from 2a00:14b0:4200:3000:125::1;
# Then tell the realip_module to get the addreses from the proxy protocol
# header.
real_ip_header proxy_protocol;

View file

@@ -3,11 +3,12 @@
server {
# Listen on a custom port for the proxy protocol.
listen 8443 ssl http2 proxy_protocol;
listen [::]:8443 ssl http2 proxy_protocol;
# Make use of the ngx_http_realip_module to set the $remote_addr and
# $remote_port to the client address and client port, when using proxy
# protocol.
# First set our proxy protocol proxy as trusted.
set_real_ip_from 172.31.17.140;
set_real_ip_from 2a00:14b0:4200:3000:125::1;
# Then tell the realip_module to get the addreses from the proxy protocol
# header.
real_ip_header proxy_protocol;

View file

@@ -15,7 +15,7 @@ services:
- pretalx_net
redis:
image: docker.io/library/redis:8.2.2
image: docker.io/library/redis:8.4.0
restart: unless-stopped
volumes:
- redis:/data

View file

@@ -6,27 +6,27 @@ map $host $upstream_acme_challenge_host {
staging.c3cat.de 172.31.17.151:31820;
ccchoir.de ccchoir-intern.hamburg.ccc.de:31820;
www.ccchoir.de ccchoir-intern.hamburg.ccc.de:31820;
cloud.hamburg.ccc.de 172.31.17.143:31820;
cloud.hamburg.ccc.de cloud.hosts.hamburg.ccc.de:31820;
element.hamburg.ccc.de 172.31.17.151:31820;
git.hamburg.ccc.de 172.31.17.154:31820;
grafana.hamburg.ccc.de 172.31.17.145:31820;
hackertours.hamburg.ccc.de 172.31.17.151:31820;
staging.hackertours.hamburg.ccc.de 172.31.17.151:31820;
hamburg.ccc.de 172.31.17.151:31820;
id.hamburg.ccc.de 172.31.17.144:31820;
invite.hamburg.ccc.de 172.31.17.144:31820;
keycloak-admin.hamburg.ccc.de 172.31.17.144:31820;
id.hamburg.ccc.de keycloak.hosts.hamburg.ccc.de:31820;
invite.hamburg.ccc.de keycloak.hosts.hamburg.ccc.de:31820;
keycloak-admin.hamburg.ccc.de keycloak.hosts.hamburg.ccc.de:31820;
matrix.hamburg.ccc.de 172.31.17.150:31820;
mas.hamburg.ccc.de 172.31.17.150:31820;
element-admin.hamburg.ccc.de 172.31.17.151:31820;
netbox.hamburg.ccc.de 172.31.17.167:31820;
onlyoffice.hamburg.ccc.de 172.31.17.147:31820;
pad.hamburg.ccc.de 172.31.17.141:31820;
onlyoffice.hamburg.ccc.de onlyoffice.hosts.hamburg.ccc.de:31820;
pad.hamburg.ccc.de pad.hosts.hamburg.ccc.de:31820;
pretalx.hamburg.ccc.de 172.31.17.157:31820;
spaceapi.hamburg.ccc.de 172.31.17.151:31820;
staging.hamburg.ccc.de 172.31.17.151:31820;
wiki.ccchh.net 172.31.17.146:31820;
wiki.hamburg.ccc.de 172.31.17.146:31820;
wiki.ccchh.net wiki.hosts.hamburg.ccc.de:31820;
wiki.hamburg.ccc.de wiki.hosts.hamburg.ccc.de:31820;
www.hamburg.ccc.de 172.31.17.151:31820;
tickets.hamburg.ccc.de 172.31.17.148:31820;
sunders.hamburg.ccc.de 172.31.17.170:31820;
@@ -38,7 +38,7 @@ map $host $upstream_acme_challenge_host {
eh11.easterhegg.eu 172.31.17.151:31820;
eh20.easterhegg.eu 172.31.17.151:31820;
www.eh20.easterhegg.eu 172.31.17.151:31820;
eh22.easterhegg.eu 172.31.17.165:31820;
eh22.easterhegg.eu eh22-wiki.hosts.hamburg.ccc.de:31820;
easterheggxxxx.hamburg.ccc.de 172.31.17.151:31820;
eh2003.hamburg.ccc.de 172.31.17.151:31820;
www.eh2003.hamburg.ccc.de 172.31.17.151:31820;

View file

@@ -20,16 +20,16 @@ stream {
map $ssl_preread_server_name $address {
ccchoir.de ccchoir-intern.hamburg.ccc.de:8443;
www.ccchoir.de ccchoir-intern.hamburg.ccc.de:8443;
cloud.hamburg.ccc.de cloud-intern.hamburg.ccc.de:8443;
pad.hamburg.ccc.de pad-intern.hamburg.ccc.de:8443;
cloud.hamburg.ccc.de cloud.hosts.hamburg.ccc.de:8443;
pad.hamburg.ccc.de pad.hosts.hamburg.ccc.de:8443;
pretalx.hamburg.ccc.de pretalx-intern.hamburg.ccc.de:8443;
id.hamburg.ccc.de 172.31.17.144:8443;
invite.hamburg.ccc.de 172.31.17.144:8443;
keycloak-admin.hamburg.ccc.de 172.31.17.144:8444;
id.hamburg.ccc.de keycloak.hosts.hamburg.ccc.de:8443;
invite.hamburg.ccc.de keycloak.hosts.hamburg.ccc.de:8443;
keycloak-admin.hamburg.ccc.de keycloak.hosts.hamburg.ccc.de:8443;
grafana.hamburg.ccc.de 172.31.17.145:8443;
wiki.ccchh.net 172.31.17.146:8443;
wiki.hamburg.ccc.de 172.31.17.146:8443;
onlyoffice.hamburg.ccc.de 172.31.17.147:8443;
wiki.ccchh.net wiki.hosts.hamburg.ccc.de:8443;
wiki.hamburg.ccc.de wiki.hosts.hamburg.ccc.de:8443;
onlyoffice.hamburg.ccc.de onlyoffice.hosts.hamburg.ccc.de:8443;
hackertours.hamburg.ccc.de 172.31.17.151:8443;
staging.hackertours.hamburg.ccc.de 172.31.17.151:8443;
netbox.hamburg.ccc.de 172.31.17.167:8443;
@@ -56,7 +56,7 @@ stream {
eh11.easterhegg.eu 172.31.17.151:8443;
eh20.easterhegg.eu 172.31.17.151:8443;
www.eh20.easterhegg.eu 172.31.17.151:8443;
eh22.easterhegg.eu 172.31.17.165:8443;
eh22.easterhegg.eu eh22-wiki.hosts.hamburg.ccc.de:8443;
easterheggxxxx.hamburg.ccc.de 172.31.17.151:8443;
eh2003.hamburg.ccc.de 172.31.17.151:8443;
www.eh2003.hamburg.ccc.de 172.31.17.151:8443;

View file

@@ -0,0 +1,79 @@
#!/usr/sbin/nft -f
## Variables
# Interfaces
define if_net1_v4_wan = "net1"
define if_net2_v6_wan = "net2"
define if_net0_2_v4_nat = "net0.2"
define if_net0_3_ci_runner = "net0.3"
# Interface Groups
define wan_ifs = { $if_net1_v4_wan,
$if_net2_v6_wan }
define lan_ifs = { $if_net0_2_v4_nat,
$if_net0_3_ci_runner }
# define v4_exposed_ifs = { }
define v6_exposed_ifs = { $if_net0_2_v4_nat }
## Rules
table inet reverse-path-forwarding {
chain rpf-filter {
type filter hook prerouting priority mangle + 10; policy drop;
# Only allow packets if their source address is routed via their incoming interface.
# https://github.com/NixOS/nixpkgs/blob/d9d87c51960050e89c79e4025082ed965e770d68/nixos/modules/services/networking/firewall-nftables.nix#L100
fib saddr . mark . iif oif exists accept
}
}
table inet host {
chain input {
type filter hook input priority filter; policy drop;
iifname "lo" accept comment "allow loopback"
ct state invalid drop
ct state established,related accept
ip protocol icmp accept
ip6 nexthdr icmpv6 accept
# Allow SSH access.
tcp dport 22 accept comment "allow ssh access"
# Allow DHCP server access.
iifname { $if_net0_2_v4_nat, $if_net0_3_ci_runner } udp dport 67 accept comment "allow dhcp server access"
}
}
table ip v4nat {
chain prerouting {
type nat hook prerouting priority dstnat; policy accept;
}
chain postrouting {
type nat hook postrouting priority srcnat; policy accept;
oifname $if_net1_v4_wan masquerade
}
}
table inet forward {
chain forward {
type filter hook forward priority filter; policy drop;
ct state invalid drop
ct state established,related accept
# Allow internet access.
meta nfproto ipv6 iifname $lan_ifs oifname $if_net2_v6_wan accept comment "allow v6 internet access"
meta nfproto ipv4 iifname $lan_ifs oifname $if_net1_v4_wan accept comment "allow v4 internet access"
# Allow access to exposed networks from internet.
# meta nfproto ipv4 oifname $v4_exposed_ifs accept comment "allow v4 exposed network access"
meta nfproto ipv6 oifname $v6_exposed_ifs accept comment "allow v6 exposed network access"
}
}

View file

@@ -0,0 +1,6 @@
[Match]
MACAddress=BC:24:11:54:11:15
Type=ether
[Link]
Name=net0

View file

@@ -0,0 +1,6 @@
[Match]
MACAddress=BC:24:11:9A:FB:34
Type=ether
[Link]
Name=net1

View file

@@ -0,0 +1,6 @@
[Match]
MACAddress=BC:24:11:AE:C7:04
Type=ether
[Link]
Name=net2

View file

@@ -0,0 +1,7 @@
[NetDev]
Name=net0.2
Kind=vlan
[VLAN]
Id=2

View file

@@ -0,0 +1,7 @@
[NetDev]
Name=net0.3
Kind=vlan
[VLAN]
Id=3

View file

@@ -0,0 +1,12 @@
[Match]
Name=net0
[Link]
RequiredForOnline=no
[Network]
VLAN=net0.2
VLAN=net0.3
LinkLocalAddressing=no

View file

@@ -0,0 +1,14 @@
[Match]
Name=net1
[Network]
DNS=212.12.50.158
IPForward=ipv4
IPv6AcceptRA=no
[Address]
Address=212.12.48.123/24
[Route]
Gateway=212.12.48.55

View file

@@ -0,0 +1,14 @@
[Match]
Name=net2
[Network]
#DNS=212.12.50.158
IPForward=ipv6
IPv6AcceptRA=no
[Address]
Address=2a00:14b0:4200:3500::130:2/112
[Route]
Gateway=2a00:14b0:4200:3500::130:1

View file

@@ -0,0 +1,29 @@
[Match]
Name=net0.2
Type=vlan
[Link]
RequiredForOnline=no
[Network]
Description=v4-NAT
# Masquerading done in nftables (nftables.conf).
IPv6SendRA=yes
DHCPServer=true
[DHCPServer]
PoolOffset=100
PoolSize=150
[Address]
Address=10.32.2.1/24
[IPv6SendRA]
UplinkInterface=net2
[IPv6Prefix]
Prefix=2a00:14b0:42:102::/64
Assign=true
Token=static:::1
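
Note: with Address=10.32.2.1/24, PoolOffset=100 and PoolSize=150, the DHCP pool here should cover roughly 10.32.2.100 through 10.32.2.249 (PoolOffset counts from the start of the subnet); the ci-runners network below uses the same layout in 10.32.3.0/24.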

View file

@@ -0,0 +1,29 @@
[Match]
Name=net0.3
Type=vlan
[Link]
RequiredForOnline=no
[Network]
Description=ci-runners
# Masquerading done in nftables (nftables.conf).
IPv6SendRA=yes
DHCPServer=true
[DHCPServer]
PoolOffset=100
PoolSize=150
[Address]
Address=10.32.3.1/24
[IPv6SendRA]
UplinkInterface=net2
[IPv6Prefix]
Prefix=2a00:14b0:42:103::/64
Assign=true
Token=static:::1

View file

@@ -13,7 +13,7 @@ services:
restart: unless-stopped
redis:
image: docker.io/library/redis:7.4.6
image: docker.io/library/redis:8.4.0
ports:
- "6379:6379"
volumes:

View file

@@ -3,11 +3,12 @@
server {
# Listen on a custom port for the proxy protocol.
listen 8443 ssl http2 proxy_protocol;
listen [::]:8443 ssl http2 proxy_protocol;
# Make use of the ngx_http_realip_module to set the $remote_addr and
# $remote_port to the client address and client port, when using proxy
# protocol.
# First set our proxy protocol proxy as trusted.
set_real_ip_from 172.31.17.140;
set_real_ip_from 2a00:14b0:4200:3000:125::1;
# Then tell the realip_module to get the addreses from the proxy protocol
# header.
real_ip_header proxy_protocol;
@@ -21,6 +22,6 @@ server {
# HSTS (ngx_http_headers_module is required) (63072000 seconds)
add_header Strict-Transport-Security "max-age=63072000" always;
return 302 https://wiki.hamburg.ccc.de$request_uri;
}

View file

@@ -3,11 +3,12 @@
server {
# Listen on a custom port for the proxy protocol.
listen 8443 ssl http2 proxy_protocol;
listen [::]:8443 ssl http2 proxy_protocol;
# Make use of the ngx_http_realip_module to set the $remote_addr and
# $remote_port to the client address and client port, when using proxy
# protocol.
# First set our proxy protocol proxy as trusted.
set_real_ip_from 172.31.17.140;
set_real_ip_from 2a00:14b0:4200:3000:125::1;
# Then tell the realip_module to get the addreses from the proxy protocol
# header.
real_ip_header proxy_protocol;

View file

@@ -3,6 +3,7 @@
- name: ensure apt dependencies are installed
ansible.builtin.apt:
name:
- python3-pip
- virtualenv
- git
state: present

View file

@@ -0,0 +1,13 @@
# Ensure the ssh module is disabled, so a cloud-init config change doesn't regenerate the host keys for no reason.
- name: check if cloud-init config file exists
ansible.builtin.stat:
path: /etc/cloud/cloud.cfg
register: base_config__stat_cloud_cfg
- name: ensure the cloud-init ssh module is disabled
ansible.builtin.replace:
path: /etc/cloud/cloud.cfg
regexp: " - ssh$"
replace: " #- ssh"
become: true
when: base_config__stat_cloud_cfg.stat.exists
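
For context, cloud-init's /etc/cloud/cloud.cfg lists ssh under cloud_init_modules; after the replace above that entry ends up commented out, roughly like this (abridged sketch of the resulting file, other module names omitted):

cloud_init_modules:
 # ... other modules left untouched ...
 #- ssh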

View file

@@ -7,3 +7,4 @@ dependencies:
major_versions:
- 11
- 12
- 13

View file

@@ -7,3 +7,4 @@ dependencies:
major_versions:
- 11
- 12
- 13

View file

@@ -7,3 +7,4 @@ dependencies:
major_versions:
- 11
- 12
- 13

View file

@@ -4,6 +4,7 @@
server {
# Listen on a custom port for the proxy protocol.
listen 8443 ssl http2 proxy_protocol;
listen [::]:8443 ssl http2 proxy_protocol;
# Make use of the ngx_http_realip_module to set the $remote_addr and
# $remote_port to the client address and client port, when using proxy
# protocol.

roles/nftables/README.md
View file

@@ -0,0 +1,11 @@
# Role `nftables`
Deploys nftables.
## Support Distributions
Should work on Debian-based distributions.
## Required Arguments
- `nftables__config`: nftables configuration to deploy.

View file

@@ -0,0 +1,5 @@
- name: Restart nftables service
ansible.builtin.systemd_service:
name: nftables
state: restarted
become: true

View file

@@ -0,0 +1,6 @@
argument_specs:
main:
options:
nftables__config:
type: str
required: true

View file

@@ -0,0 +1,15 @@
- name: ensure nftables is installed
ansible.builtin.apt:
name: nftables
state: present
become: true
- name: deploy nftables configuration
ansible.builtin.copy:
content: "{{ nftables__config }}"
dest: "/etc/nftables.conf"
mode: "0644"
owner: root
group: root
become: true
notify: Restart nftables service
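
If stricter safety is wanted, the copy task above could additionally have nft check the candidate file before it replaces the live config, so the restart handler never sees a broken ruleset; a sketch of that variant:

- name: deploy nftables configuration
  ansible.builtin.copy:
    content: "{{ nftables__config }}"
    dest: /etc/nftables.conf
    mode: "0644"
    owner: root
    group: root
    # Run nft in check mode against the temporary file before it is moved into place.
    validate: "nft --check --file %s"
  become: true
  notify: Restart nftables service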

View file

@@ -7,3 +7,4 @@ dependencies:
major_versions:
- "11"
- "12"
- "13"

View file

@@ -7,3 +7,4 @@ dependencies:
major_versions:
- "11"
- "12"
- "13"

View file

@@ -0,0 +1,11 @@
# Role `systemd_networkd`
Deploys the given systemd-networkd configuration files.
## Support Distributions
Should work on Debian-based distributions.
## Required Arguments
- `systemd_networkd__config_dir`: Directory with systemd-networkd configs to deploy.

View file

@@ -0,0 +1,6 @@
argument_specs:
main:
options:
systemd_networkd__config_dir:
type: path
required: true

View file

@@ -0,0 +1,14 @@
- name: ensure rsync is installed
ansible.builtin.apt:
name: rsync
state: present
become: true
- name: synchronize systemd-networkd configs
ansible.posix.synchronize:
src: "{{ systemd_networkd__config_dir }}"
dest: "/etc/systemd/network"
archive: false
recursive: true
delete: true
become: true
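
Unlike the nftables role, no handler is shown here for picking up changed configs, so synced .network files only take effect after a reboot or manual reload. A handler in the same style as the nftables one might look like the following sketch, triggered via notify from the synchronize task:

- name: Restart systemd-networkd
  ansible.builtin.systemd_service:
    name: systemd-networkd
    state: restarted
  become: true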