reorganize (config) files and templates into one "resources" dir

This groups the files and templates for each host together and therefore
makes it easier to see all the (config) files for a host.

Also clean up the incorrect, unused docker_compose config for mumble and
the unused engelsystem configs.
june committed 2024-12-05 22:18:27 +01:00
commit d0a28589c6
Signed by: june
SSH key fingerprint: SHA256:o9EAq4Y9N9K0pBQeBTqhSDrND5E7oB+60ZNx0U1yPe0
83 changed files with 62 additions and 121 deletions
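The resulting layout looks roughly like the following (illustrative only; this diff view does not show file paths, so the exact directory names are an assumption based on the commit message):

    resources/
    └── <host>/
        ├── docker-compose.yaml (Jinja2 template)
        ├── nginx site config(s)
        └── other (config) files and templates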

@@ -0,0 +1,45 @@
---
# see https://hub.docker.com/_/wordpress
services:
database:
image: docker.io/library/mariadb:11
environment:
- "MARIADB_DATABASE=wordpress"
- "MARIADB_ROOT_PASSWORD={{ lookup("community.general.passwordstore", "noc/vm-secrets/chaosknoten/ccchoir/DB_ROOT_PASSWORD", create=false, missing="error") }}"
- "MARIADB_PASSWORD={{ lookup("community.general.passwordstore", "noc/vm-secrets/chaosknoten/ccchoir/DB_PASSWORD", create=false, missing="error") }}"
- "MARIADB_USER=wordpress"
- "MARIADB_AUTO_UPGRADE=yes"
volumes:
- database:/var/lib/mysql
networks:
backend:
restart: unless-stopped
app:
image: docker.io/library/wordpress:6-php8.1
environment:
- "WORDPRESS_DB_HOST=database"
- "WORDPRESS_DB_NAME=wordpress"
- "WORDPRESS_DB_USER=wordpress"
- "WORDPRESS_TABLE_PREFIX=wp_"
- "WORDPRESS_DB_PASSWORD={{ lookup("community.general.passwordstore", "noc/vm-secrets/chaosknoten/ccchoir/DB_PASSWORD", create=false, missing="error") }}"
volumes:
- wordpress:/var/www/html/wp-content
ports:
- "127.0.0.1:3000:80"
networks:
backend:
frontend:
restart: unless-stopped
depends_on:
- database
volumes:
database: {}
wordpress: {}
networks:
backend:
internal: true
frontend:
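The passwordstore lookups above are resolved by Ansible when the template is rendered. A quick way to confirm a path resolves is a one-off debug task like the following sketch (it assumes a pass store configured on the control node and prints the secret, so use it for debugging only):

    - name: Check that a passwordstore path resolves
      ansible.builtin.debug:
        msg: "{{ lookup('community.general.passwordstore', 'noc/vm-secrets/chaosknoten/ccchoir/DB_PASSWORD', create=false, missing='error') }}"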

@@ -0,0 +1,83 @@
# partly generated 2022-01-08, Mozilla Guideline v5.6, nginx 1.17.7, OpenSSL 1.1.1k, intermediate configuration
# https://ssl-config.mozilla.org/#server=nginx&version=1.17.7&config=intermediate&openssl=1.1.1k&guideline=5.6
server {
# Listen on a custom port for the proxy protocol.
listen 8443 ssl http2 proxy_protocol;
# Make use of the ngx_http_realip_module to set the $remote_addr and
# $remote_port to the client address and client port, when using proxy
# protocol.
# First set our proxy protocol proxy as trusted.
set_real_ip_from 172.31.17.140;
# Then tell the realip_module to get the addresses from the proxy protocol
# header.
real_ip_header proxy_protocol;
server_name ccchoir.de;
ssl_certificate /etc/letsencrypt/live/ccchoir.de/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/ccchoir.de/privkey.pem;
# verify chain of trust of OCSP response using Root CA and Intermediate certs
ssl_trusted_certificate /etc/letsencrypt/live/ccchoir.de/chain.pem;
# HSTS (ngx_http_headers_module is required) (63072000 seconds)
add_header Strict-Transport-Security "max-age=63072000" always;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Port 443;
# This is https in any case.
proxy_set_header X-Forwarded-Proto https;
# Hide the X-Forwarded header.
proxy_hide_header X-Forwarded;
# Assume we are the only Reverse Proxy (well using Proxy Protocol, but that
# is transparent).
# Also provide "_hidden" for by, since it's not relevant.
proxy_set_header Forwarded "for=$remote_addr;proto=https;host=$host;by=_hidden";
location / {
proxy_pass http://127.0.0.1:3000/;
}
}
server {
# Listen on a custom port for the proxy protocol.
listen 8443 ssl http2 proxy_protocol;
# Make use of the ngx_http_realip_module to set the $remote_addr and
# $remote_port to the client address and client port, when using proxy
# protocol.
# First set our proxy protocol proxy as trusted.
set_real_ip_from 172.31.17.140;
# Then tell the realip_module to get the addresses from the proxy protocol
# header.
real_ip_header proxy_protocol;
server_name www.ccchoir.de;
ssl_certificate /etc/letsencrypt/live/www.ccchoir.de/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/www.ccchoir.de/privkey.pem;
# verify chain of trust of OCSP response using Root CA and Intermediate certs
ssl_trusted_certificate /etc/letsencrypt/live/www.ccchoir.de/chain.pem;
# HSTS (ngx_http_headers_module is required) (63072000 seconds)
add_header Strict-Transport-Security "max-age=63072000" always;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Port 443;
# This is https in any case.
proxy_set_header X-Forwarded-Proto https;
# Hide the X-Forwarded header.
proxy_hide_header X-Forwarded;
# Assume we are the only Reverse Proxy (well using Proxy Protocol, but that
# is transparent).
# Also provide "_hidden" for by, since it's not relevant.
proxy_set_header Forwarded "for=$remote_addr;proto=https;host=$host;by=_hidden";
location / {
proxy_pass http://127.0.0.1:3000/;
}
}

@@ -0,0 +1,98 @@
<?php
$CONFIG = array (
'memcache.local' => '\\OC\\Memcache\\APCu',
'apps_paths' =>
array (
0 =>
array (
'path' => '/var/www/html/apps',
'url' => '/apps',
'writable' => false,
),
1 =>
array (
'path' => '/var/www/html/custom_apps',
'url' => '/custom_apps',
'writable' => true,
),
),
'instanceid' => 'oc9uqhr7buka',
'passwordsalt' => 'SK2vmQeTEHrkkwx9K+hC1WX33lPJDs',
'secret' => '3dBt5THD2ehg0yWdVDAvMmsY8yLtrfk/gE560lkMqYqgh6lu',
'trusted_domains' =>
array (
0 => 'cloud.hamburg.ccc.de',
),
'datadirectory' => '/var/www/html/data',
'dbtype' => 'mysql',
'version' => '25.0.9.2',
'overwrite.cli.url' => 'https://cloud.hamburg.ccc.de',
'dbname' => 'nextcloud',
'dbhost' => 'database',
'dbport' => '',
'dbtableprefix' => 'oc_',
'mysql.utf8mb4' => true,
'dbuser' => 'nextcloud',
'dbpassword' => 'TdBLMQQeKbz1zab3sySUsGxo3',
'installed' => true,
// Some Nextcloud options that might make sense here
'allow_user_to_change_display_name' => false,
'lost_password_link' => 'disabled',
// URL of provider. All other URLs are auto-discovered from .well-known
'oidc_login_provider_url' => 'https://id.ccchh.net/realms/ccchh',
// Client ID and secret registered with the provider
'oidc_login_client_id' => 'cloud',
'oidc_login_client_secret' => '{{ lookup("community.general.passwordstore", "noc/vm-secrets/chaosknoten/cloud/kc-client-secret", create=false, missing="error") }}',
// Automatically redirect the login page to the provider
'oidc_login_auto_redirect' => true,
// Redirect to this page after logging out the user
//'oidc_login_logout_url' => 'https://openid.example.com/thankyou',
// If set to true the user will be redirected to the
// logout endpoint of the OIDC provider after logout
// in Nextcloud. After successful logout the OIDC
// provider will redirect back to 'oidc_login_logout_url' (MUST be set).
'oidc_login_end_session_redirect' => true,
// Quota to assign if no quota is specified in the OIDC response (bytes)
//
// NOTE: If you want to allow NextCloud to manage quotas, omit this option. Do not set it to
// zero or -1 or ''.
'oidc_login_default_quota' => '1000000000',
// Login button text
'oidc_login_button_text' => 'Log in via id.ccchh.net',
// Hide the NextCloud password change form.
'oidc_login_hide_password_form' => false,
// Use ID Token instead of UserInfo
'oidc_login_use_id_token' => false,
'oidc_login_attributes' => array (
'id' => 'preferred_username',
'name' => 'name',
'mail' => 'email',
'quota' => 'ownCloudQuota',
'home' => 'homeDirectory',
'ldap_uid' => 'uid',
'groups' => 'ownCloudGroups',
'login_filter' => 'realm_access_roles',
'photoURL' => 'picture',
'is_admin' => 'ownCloudAdmin',
),
// Default group to add users to (optional, defaults to nothing)
//'oidc_login_default_group' => 'oidc',
'oidc_login_filter_allowed_values' => null,
// Set OpenID Connect scope
'oidc_login_scope' => 'openid profile',
// The `id` attribute in `oidc_login_attributes` must return the
// "Internal Username" (see expert settings in LDAP integration)
'oidc_login_proxy_ldap' => false,
// Fallback to direct login if login from OIDC fails
// Note that no error message will be displayed if enabled
'oidc_login_disable_registration' => false,
//'oidc_login_redir_fallback' => false,
// If you get your groups from the oidc_login_attributes, you might want
// to create them if they are not already existing, Default is `false`.
'oidc_create_groups' => true,
// Enable use of WebDAV via OIDC bearer token.
'oidc_login_webdav_enabled' => true,
// Enable authentication with user/password for DAV clients that do not
// support token authentication (e.g. DAVx⁵)
'oidc_login_password_authentication' => false,
);
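Only oidc_login_provider_url is configured explicitly above; assuming the provider is a standard Keycloak realm, the plugin auto-discovers the remaining endpoints from:

    https://id.ccchh.net/realms/ccchh/.well-known/openid-configuration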

@@ -0,0 +1,17 @@
<?php
$CONFIG = array (
'default_phone_region' => 'DE',
'hide_login_form' => true,
'mail_smtpmode' => 'smtp',
'mail_smtphost' => 'cow.hamburg.ccc.de',
'mail_smtpport' => 465,
'mail_smtpsecure' => 'ssl',
'mail_smtpauth' => true,
'mail_smtpauthtype' => 'LOGIN',
'mail_smtpname' => 'no-reply@cloud.hamburg.ccc.de',
'mail_from_address' => 'no-reply',
'mail_domain' => 'cloud.hamburg.ccc.de',
'mail_smtppassword' => '{{ lookup("community.general.passwordstore", "noc/vm-secrets/chaosknoten/cloud/smtp_password", create=false, missing="error") }}',
'mail_smtpdebug' => true,
'maintenance_window_start' => 1,
);

Binary file not shown (image added, 9.7 KiB).

Binary file not shown (image added, 1,007 KiB).

@@ -0,0 +1,10 @@
# CCCHH Nextcloud
Welcome to the CCCHH Nextcloud instance.
Here you can store and share files, manage appointments and much more.
More info:
- <https://wiki.ccchh.net/infrastructure:services:cloud>
- <https://docs.nextcloud.com/server/latest/user_manual/de/>

@@ -0,0 +1,40 @@
# Links & References:
# - https://prometheus.io/docs/alerting/latest/configuration/
# - https://github.com/prometheus/alertmanager/blob/48a99764a1fc9279fc828de83e7a03ae2219abc7/doc/examples/simple.yml
route:
group_by: ["alertname", "site", "type", "hypervisor"]
group_wait: 30s
group_interval: 5m
repeat_interval: 3h
receiver: ccchh-infrastructure-alerts
{# Disable these for now, but might be interesting in the future.
# Inhibition rules allow to mute a set of alerts given that another alert is
# firing.
# We use this to mute any warning-level notifications if the same alert is
# already critical.
inhibit_rules:
- source_matchers: [severity="critical"]
target_matchers: [severity="warning"]
# Apply inhibition if the alertname is the same.
# CAUTION:
# If all label names listed in `equal` are missing
# from both the source and target alerts,
# the inhibition rule will apply!
equal: [alertname, cluster, service] #}
templates:
- "/etc/alertmanager/templates/*.tmpl"
receivers:
- name: "ccchh-infrastructure-alerts"
telegram_configs:
- send_resolved: true
bot_token: {{ lookup("community.general.passwordstore", "noc/vm-secrets/chaosknoten/grafana/alertmanager_telegram_bot_token", create=false, missing="error") }}
chat_id: -1002434372415
parse_mode: HTML
message: {{ "'{{ template \"alert-message.telegram.ccchh\" . }}'" }}
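The nested quoting on the message line exists so that Ansible's Jinja pass emits the Go template reference literally instead of trying to evaluate it. After templating, Alertmanager should see:

    message: '{{ template "alert-message.telegram.ccchh" . }}'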

@@ -0,0 +1,35 @@
{{/*
Links & Resources
- https://prometheus.io/blog/2016/03/03/custom-alertmanager-templates/
- https://prometheus.io/docs/alerting/latest/notifications/
- https://gist.github.com/jidckii/5ac5f8f20368b56de72af70222509b7b
*/}}
{{ define "alert-item.telegram.ccchh.internal" }}
<b>[{{ .Labels.alertname }}] {{ .Labels.nodename }}</b>
{{- if .Annotations.summary }}
<i>Summary</i>: {{ .Annotations.summary }}
{{- end }}
{{- if .Annotations.description }}
<i>Description</i>: {{ .Annotations.description }}
{{- end }}
<i>Labels</i>:
{{ range .Labels.SortedPairs -}}
• <i>{{ .Name }}</i>: <code>{{ .Value }}</code>
{{ end }}
{{- end }}
{{ define "alert-message.telegram.ccchh" }}
{{- if .Alerts.Firing }}
<u>🔥{{ len .Alerts.Firing }} Alert(s) Firing 🔥</u>
{{ range .Alerts.Firing -}}
{{ template "alert-item.telegram.ccchh.internal" . }}
{{- end }}
{{- end }}
{{- if .Alerts.Resolved }}
<u>✅{{ len .Alerts.Resolved }} Alert(s) Resolved ✅</u>
{{ range .Alerts.Resolved -}}
{{ template "alert-item.telegram.ccchh.internal" . }}
{{- end }}
{{- end }}
{{- end }}
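For a single firing alert, this template renders roughly as follows (hypothetical alert labels; HTML as sent to Telegram):

    <u>🔥1 Alert(s) Firing 🔥</u>
    <b>[HostOutOfMemory] ccchoir</b>
    <i>Summary</i>: Host out of memory (instance ccchoir-intern.hamburg.ccc.de:9100)
    <i>Labels</i>:
    • <i>alertname</i>: <code>HostOutOfMemory</code>
    • <i>severity</i>: <code>warning</code>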

@@ -0,0 +1,61 @@
---
services:
prometheus:
image: prom/prometheus
container_name: prometheus
command:
- '--config.file=/etc/prometheus/prometheus.yml'
ports:
- 9090:9090
restart: unless-stopped
volumes:
- ./configs/prometheus.yml:/etc/prometheus/prometheus.yml
- ./configs/prometheus_alerts.rules.yaml:/etc/prometheus/rules/alerts.rules.yaml
- prom_data:/prometheus
alertmanager:
image: prom/alertmanager
container_name: alertmanager
command:
- '--config.file=/etc/alertmanager/alertmanager.yaml'
ports:
- 9093:9093
restart: unless-stopped
volumes:
- ./configs/alertmanager.yaml:/etc/alertmanager/alertmanager.yaml
- ./configs/alertmanager_alert_templates.tmpl:/etc/alertmanager/templates/alert_templates.tmpl
- alertmanager_data:/alertmanager
grafana:
image: grafana/grafana
container_name: grafana
ports:
- 3000:3000
restart: unless-stopped
environment:
- GF_SECURITY_ADMIN_USER=admin
- "GF_SECURITY_ADMIN_PASSWORD={{ lookup("community.general.passwordstore", "noc/vm-secrets/chaosknoten/grafana/GF_SECURITY_ADMIN_PASSWORD", create=false, missing="error") }}"
volumes:
- ./configs/grafana.ini:/etc/grafana/grafana.ini
- ./configs/grafana-datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml
- graf_data:/var/lib/grafana
pve-exporter:
image: prompve/prometheus-pve-exporter
container_name: pve-exporter
ports:
- 9221:9221
restart: unless-stopped
environment:
- PVE_USER=grafana@pve
- "PVE_PASSWORD={{ lookup("community.general.passwordstore", "noc/vm-secrets/chaosknoten/grafana/prometheus-exporter", create=false, missing="error") }}"
- PVE_VERIFY_SSL=false
volumes:
- /dev/null:/etc/prometheus/pve.yml
volumes:
graf_data: {}
prom_data: {}
alertmanager_data: {}

@@ -0,0 +1,9 @@
apiVersion: 1
datasources:
- name: Prometheus
type: prometheus
url: http://prometheus:9090
isDefault: true
access: proxy
editable: true

File diff suppressed because it is too large.

@@ -0,0 +1,25 @@
[server]
root_url = https://grafana.hamburg.ccc.de
[auth]
disable_login_form = true
# https://grafana.com/docs/grafana/latest/setup-grafana/configure-security/configure-authentication/keycloak/
[auth.generic_oauth]
enabled = true
auto_login = true
name = id.hamburg.ccc.de
allow_sign_up = true
client_id = grafana
client_secret = {{ lookup("community.general.passwordstore", "noc/vm-secrets/chaosknoten/grafana/KEYCLOAK_SECRET", create=false, missing="error") }}
scopes = openid email profile offline_access roles
email_attribute_path = email
login_attribute_path = username
name_attribute_path = full_name
auth_url = https://id.hamburg.ccc.de/realms/ccchh/protocol/openid-connect/auth
token_url = https://id.hamburg.ccc.de/realms/ccchh/protocol/openid-connect/token
api_url = https://id.hamburg.ccc.de/realms/ccchh/protocol/openid-connect/userinfo
signout_redirect_url = https://id.hamburg.ccc.de/realms/ccchh/protocol/openid-connect/logout
role_attribute_path = "contains(roles[*], 'grafanaadmin') && 'GrafanaAdmin' || contains(roles[*], 'admin') && 'Admin' || contains(roles[*], 'editor') && 'Editor' || 'Viewer'"
allow_assign_grafana_admin = true
use_refresh_token = true
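role_attribute_path is a JMESPath expression evaluated against the userinfo response. With a roles claim, the expression above maps (illustrative):

    # roles contains "grafanaadmin" -> GrafanaAdmin (server admin)
    # roles contains "admin"        -> Admin
    # roles contains "editor"       -> Editor
    # otherwise                     -> Viewer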

@@ -0,0 +1,114 @@
global:
scrape_interval: 15s
scrape_timeout: 10s
evaluation_interval: 15s
alerting:
alertmanagers:
- scheme: http
timeout: 10s
static_configs:
- targets:
- "alertmanager:9093"
rule_files:
- "/etc/prometheus/rules/*.rules.yaml"
scrape_configs:
- job_name: prometheus
honor_timestamps: true
metrics_path: /metrics
scheme: http
static_configs:
- targets:
- localhost:9090
- job_name: alertmanager
honor_timestamps: true
metrics_path: /metrics
scheme: http
static_configs:
- targets:
- alertmanager:9093
- job_name: c3lingo
honor_timestamps: true
scrape_interval: 5s
scrape_timeout: 1s
metrics_path: /mumblestats/metrics
scheme: https
static_configs:
- targets:
- mumble.c3lingo.org:443
- job_name: mumble
honor_timestamps: true
scrape_interval: 5s
scrape_timeout: 1s
metrics_path: /metrics
scheme: https
static_configs:
- targets:
- mumble.hamburg.ccc.de:443
- job_name: opnsense-ccchh
honor_timestamps: true
metrics_path: /metrics
scheme: http
static_configs:
- targets:
- 185.161.129.132:9100
- job_name: jitsi
honor_timestamps: true
scrape_interval: 5s
scrape_timeout: 1s
metrics_path: /metrics
scheme: http
static_configs:
- targets:
- jitsi.hamburg.ccc.de:9888 # Jitsi Video Bridge
- job_name: 'pve'
static_configs:
- targets:
- 212.12.48.126 # chaosknoten
metrics_path: /pve
params:
module: [ default ]
cluster: [ '1' ]
node: [ '1' ]
relabel_configs:
- source_labels: [ __address__ ]
target_label: __param_target
- source_labels: [ __param_target ]
target_label: instance
- target_label: __address__
replacement: pve-exporter:9221
- job_name: hosts
static_configs:
# Wieske Chaosknoten VMs
- labels:
site: wieske
type: virtual_machine
hypervisor: chaosknoten
targets:
- netbox-intern.hamburg.ccc.de:9100
- matrix-intern.hamburg.ccc.de:9100
- public-web-static-intern.hamburg.ccc.de:9100
- git-intern.hamburg.ccc.de:9100
- forgejo-actions-runner-intern.hamburg.ccc.de:9100
- eh22-wiki-intern.hamburg.ccc.de:9100
- nix-box-june-intern.hamburg.ccc.de:9100
- mjolnir-intern.hamburg.ccc.de:9100
- woodpecker-intern.hamburg.ccc.de:9100
- penpot-intern.hamburg.ccc.de:9100
- jitsi.hamburg.ccc.de:9100
- onlyoffice-intern.hamburg.ccc.de:9100
- ccchoir-intern.hamburg.ccc.de:9100
- tickets-intern.hamburg.ccc.de:9100
- keycloak-intern.hamburg.ccc.de:9100
- pad-intern.hamburg.ccc.de:9100
- wiki-intern.hamburg.ccc.de:9100
- zammad-intern.hamburg.ccc.de:9100
- pretalx-intern.hamburg.ccc.de:9100
- labels:
site: wieske
type: physical_machine
targets:
- chaosknoten.hamburg.ccc.de:9100
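The pve job's relabel_configs implement the usual exporter-proxy pattern: each listed target becomes a ?target= query parameter and the scrape itself goes to the exporter, so the effective request is roughly

    GET http://pve-exporter:9221/pve?module=default&cluster=1&node=1&target=212.12.48.126

while instance="212.12.48.126" is kept on the resulting series.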

@@ -0,0 +1,583 @@
# Links & Resources:
# - https://samber.github.io/awesome-prometheus-alerts/rules
groups:
- name: node-exporter
rules:
- alert: HostOutOfMemory
expr: (node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100 < 10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
severity: warning
annotations:
summary: Host out of memory (instance {{ $labels.instance }})
description: "Node memory is filling up (< 10% left)\n VALUE = {{ $value }}"
- alert: HostMemoryUnderMemoryPressure
expr: (rate(node_vmstat_pgmajfault[1m]) > 1000) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
severity: warning
annotations:
summary: Host memory under memory pressure (instance {{ $labels.instance }})
description: "The node is under heavy memory pressure. High rate of major page faults\n VALUE = {{ $value }}"
# You may want to increase the alert manager 'repeat_interval' for this type of alert to daily or weekly
- alert: HostMemoryIsUnderutilized
expr: (100 - (avg_over_time(node_memory_MemAvailable_bytes[30m]) / node_memory_MemTotal_bytes * 100) < 10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 1w
labels:
severity: info
annotations:
summary: Host Memory is underutilized (instance {{ $labels.instance }})
description: "Node memory is < 10% for 1 week. Consider reducing memory space. (instance {{ $labels.instance }})\n VALUE = {{ $value }}"
- alert: HostUnusualNetworkThroughputIn
expr: (sum by (instance) (rate(node_network_receive_bytes_total[2m])) / 1024 / 1024 > 100) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 5m
labels:
severity: warning
annotations:
summary: Host unusual network throughput in (instance {{ $labels.instance }})
description: "Host network interfaces are probably receiving too much data (> 100 MB/s)\n VALUE = {{ $value }}"
- alert: HostUnusualNetworkThroughputOut
expr: (sum by (instance) (rate(node_network_transmit_bytes_total[2m])) / 1024 / 1024 > 100) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 5m
labels:
severity: warning
annotations:
summary: Host unusual network throughput out (instance {{ $labels.instance }})
description: "Host network interfaces are probably sending too much data (> 100 MB/s)\n VALUE = {{ $value }}"
# Have different disk read and write rate alerts for VMs and physical machines.
- alert: VirtualHostUnusualDiskReadRate
expr: (sum by (instance) (rate(node_disk_read_bytes_total[2m])) / 1024 / 1024 > 50) * on(instance) group_left (nodename) node_uname_info{type="virtual_machine", nodename=~".+", nodename!="forgejo-actions-runner", nodename!="woodpecker"}
for: 5m
labels:
severity: warning
annotations:
summary: Virtual host unusual disk read rate (instance {{ $labels.instance }})
description: "Disk is probably reading too much data (> 50 MB/s)\n VALUE = {{ $value }}"
- alert: VirtualHostUnusualDiskWriteRate
expr: (sum by (instance) (rate(node_disk_written_bytes_total[2m])) / 1024 / 1024 > 50) * on(instance) group_left (nodename) node_uname_info{type="virtual_machine", nodename=~".+", nodename!="forgejo-actions-runner", nodename!="woodpecker"}
for: 2m
labels:
severity: warning
annotations:
summary: Virtual host unusual disk write rate (instance {{ $labels.instance }})
description: "Disk is probably writing too much data (> 50 MB/s)\n VALUE = {{ $value }}"
# Some VMs are expected to have high read/write rates, e.g. CI servers.
- alert: VirtualHostUnusualDiskReadRate
expr: (sum by (instance) (rate(node_disk_read_bytes_total[2m])) / 1024 / 1024 > 50) * on(instance) group_left (nodename) node_uname_info{type="virtual_machine", nodename=~"forgejo-actions-runner|woodpecker"}
for: 10m
labels:
severity: warning
annotations:
summary: Virtual host unusual disk read rate for 10 min (instance {{ $labels.instance }})
description: "Disk is probably reading too much data (> 50 MB/s)\n VALUE = {{ $value }}"
- alert: VirtualHostUnusualDiskWriteRate
expr: (sum by (instance) (rate(node_disk_written_bytes_total[2m])) / 1024 / 1024 > 50) * on(instance) group_left (nodename) node_uname_info{type="virtual_machine", nodename=~"forgejo-actions-runner|woodpecker"}
for: 4m
labels:
severity: warning
annotations:
summary: Virtual host unusual disk write rate for 4 min (instance {{ $labels.instance }})
description: "Disk is probably writing too much data (> 50 MB/s)\n VALUE = {{ $value }}"
- alert: PhysicalHostUnusualDiskReadRate
expr: (sum by (instance) (rate(node_disk_read_bytes_total[2m])) / 1024 / 1024 > 100) * on(instance) group_left (nodename) node_uname_info{type="physical_machine", nodename=~".+"}
for: 20m
labels:
severity: warning
annotations:
summary: Physical host unusual disk read rate (instance {{ $labels.instance }})
description: "Disk is probably reading too much data (> 100 MB/s)\n VALUE = {{ $value }}"
- alert: PhysicalHostUnusualDiskWriteRate
expr: (sum by (instance) (rate(node_disk_written_bytes_total[2m])) / 1024 / 1024 > 100) * on(instance) group_left (nodename) node_uname_info{type="physical_machine", nodename=~".+"}
for: 15m
labels:
severity: warning
annotations:
summary: Physical host unusual disk write rate (instance {{ $labels.instance }})
description: "Disk is probably writing too much data (> 100 MB/s)\n VALUE = {{ $value }}"
# Please add ignored mountpoints in node_exporter parameters like
# "--collector.filesystem.ignored-mount-points=^/(sys|proc|dev|run)($|/)".
# Same rule using "node_filesystem_free_bytes" will fire when disk fills for non-root users.
- alert: HostOutOfDiskSpace
expr: ((node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
severity: warning
annotations:
summary: Host out of disk space (instance {{ $labels.instance }})
description: "Disk is almost full (< 10% left)\n VALUE = {{ $value }}"
# Please add ignored mountpoints in node_exporter parameters like
# "--collector.filesystem.ignored-mount-points=^/(sys|proc|dev|run)($|/)".
# Same rule using "node_filesystem_free_bytes" will fire when disk fills for non-root users.
- alert: HostDiskWillFillIn24Hours
expr: ((node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance, device, mountpoint) predict_linear(node_filesystem_avail_bytes{fstype!~"tmpfs"}[1h], 24 * 3600) < 0 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
severity: warning
annotations:
summary: Host disk will fill in 24 hours (instance {{ $labels.instance }})
description: "Filesystem is predicted to run out of space within the next 24 hours at current write rate\n VALUE = {{ $value }}"
- alert: HostOutOfInodes
expr: (node_filesystem_files_free{fstype!="msdosfs"} / node_filesystem_files{fstype!="msdosfs"} * 100 < 10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
severity: warning
annotations:
summary: Host out of inodes (instance {{ $labels.instance }})
description: "Disk is almost running out of available inodes (< 10% left)\n VALUE = {{ $value }}"
- alert: HostInodesWillFillIn24Hours
expr: (node_filesystem_files_free{fstype!="msdosfs"} / node_filesystem_files{fstype!="msdosfs"} * 100 < 10 and predict_linear(node_filesystem_files_free{fstype!="msdosfs"}[1h], 24 * 3600) < 0 and ON (instance, device, mountpoint) node_filesystem_readonly{fstype!="msdosfs"} == 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
severity: warning
annotations:
summary: Host inodes will fill in 24 hours (instance {{ $labels.instance }})
description: "Filesystem is predicted to run out of inodes within the next 24 hours at current write rate\n VALUE = {{ $value }}"
- alert: HostFilesystemDeviceError
expr: node_filesystem_device_error == 1
for: 2m
labels:
severity: critical
annotations:
summary: Host filesystem device error (instance {{ $labels.instance }})
description: "{{ $labels.instance }}: Device error with the {{ $labels.mountpoint }} filesystem\n VALUE = {{ $value }}"
- alert: HostUnusualDiskReadLatency
expr: (rate(node_disk_read_time_seconds_total[1m]) / rate(node_disk_reads_completed_total[1m]) > 0.1 and rate(node_disk_reads_completed_total[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
severity: warning
annotations:
summary: Host unusual disk read latency (instance {{ $labels.instance }})
description: "Disk latency is growing (read operations > 100ms)\n VALUE = {{ $value }}"
- alert: HostUnusualDiskWriteLatency
expr: (rate(node_disk_write_time_seconds_total[1m]) / rate(node_disk_writes_completed_total[1m]) > 0.1 and rate(node_disk_writes_completed_total[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
severity: warning
annotations:
summary: Host unusual disk write latency (instance {{ $labels.instance }})
description: "Disk latency is growing (write operations > 100ms)\n VALUE = {{ $value }}"
- alert: HostHighCpuLoad
expr: (sum by (instance) (avg by (mode, instance) (rate(node_cpu_seconds_total{mode!="idle"}[2m]))) > 0.8) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 10m
labels:
severity: warning
annotations:
summary: Host high CPU load (instance {{ $labels.instance }})
description: "CPU load is > 80%\n VALUE = {{ $value }}"
# We might want to introduce that later, tho maybe excluding hosts with one core, if possible and only for VMs?
# # You may want to increase the alert manager 'repeat_interval' for this type of alert to daily or weekly
# - alert: HostCpuIsUnderutilized
# expr: (100 - (rate(node_cpu_seconds_total{mode="idle"}[30m]) * 100) < 20) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
# for: 1w
# labels:
# severity: info
# annotations:
# summary: Host CPU is underutilized (instance {{ $labels.instance }})
# description: "CPU load is < 20% for 1 week. Consider reducing the number of CPUs.\n VALUE = {{ $value }}"
- alert: HostCpuStealNoisyNeighbor
expr: (avg by(instance) (rate(node_cpu_seconds_total{mode="steal"}[5m])) * 100 > 10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 0m
labels:
severity: warning
annotations:
summary: Host CPU steal noisy neighbor (instance {{ $labels.instance }})
description: "CPU steal is > 10%. A noisy neighbor is killing VM performances or a spot instance may be out of credit.\n VALUE = {{ $value }}"
- alert: HostCpuHighIowait
expr: (avg by (instance) (rate(node_cpu_seconds_total{mode="iowait"}[5m])) * 100 > 10) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 0m
labels:
severity: warning
annotations:
summary: Host CPU high iowait (instance {{ $labels.instance }})
description: "CPU iowait > 10%. A high iowait means that you are disk or network bound.\n VALUE = {{ $value }}"
# Have different disk IO alerts for VMs and physical machines and for physical machines different ones for hard and other disks.
- alert: PhysicalHostUnusualHardDiskIo
expr: (rate(node_disk_io_time_seconds_total{device=~"s.+"}[1m]) > 0.75) * on(instance) group_left (nodename) node_uname_info{type="physical_machine", nodename=~".+"}
for: 5m
labels:
severity: warning
annotations:
summary: Physical host unusual hard disk IO (instance {{ $labels.instance }})
description: "Time spent in IO is too high on {{ $labels.instance }}. Check storage for issues.\n VALUE = {{ $value }}"
- alert: PhysicalHostUnusualOtherDiskIo
expr: (rate(node_disk_io_time_seconds_total{device!~"s.+"}[1m]) > 0.5) * on(instance) group_left (nodename) node_uname_info{type="physical_machine", nodename=~".+"}
for: 5m
labels:
severity: warning
annotations:
summary: Physical host unusual other (non-hard) disk IO (instance {{ $labels.instance }})
description: "Time spent in IO is too high on {{ $labels.instance }}. Check storage for issues.\n VALUE = {{ $value }}"
- alert: VirtualHostUnusualDiskIo
expr: (rate(node_disk_io_time_seconds_total[1m]) > 0.5) * on(instance) group_left (nodename) node_uname_info{type="virtual_machine", nodename=~".+"}
for: 5m
labels:
severity: warning
annotations:
summary: Virtual host unusual disk IO (instance {{ $labels.instance }})
description: "Time spent in IO is too high on {{ $labels.instance }}. Check storage for issues.\n VALUE = {{ $value }}"
# # x2 context switches is an arbitrary number.
# # The alert threshold depends on the nature of the application.
# # Please read: https://github.com/samber/awesome-prometheus-alerts/issues/58
# - alert: HostContextSwitchingHigh
# expr: (rate(node_context_switches_total[15m])/count without(mode,cpu) (node_cpu_seconds_total{mode="idle"})) / (rate(node_context_switches_total[1d])/count without(mode,cpu) (node_cpu_seconds_total{mode="idle"})) > 2
# for: 0m
# labels:
# severity: warning
# annotations:
# summary: Host context switching high (instance {{ $labels.instance }})
# description: "Context switching is growing on the node (twice the daily average during the last 15m)\n VALUE = {{ $value }}"
- alert: HostSwapIsFillingUp
expr: ((1 - (node_memory_SwapFree_bytes / node_memory_SwapTotal_bytes)) * 100 > 80) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
severity: warning
annotations:
summary: Host swap is filling up (instance {{ $labels.instance }})
description: "Swap is filling up (>80%)\n VALUE = {{ $value }}"
- alert: HostSystemdServiceCrashed
expr: (node_systemd_unit_state{state="failed"} == 1) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 0m
labels:
severity: warning
annotations:
summary: Host systemd service crashed (instance {{ $labels.instance }})
description: "systemd service crashed\n VALUE = {{ $value }}"
- alert: HostPhysicalComponentTooHot
expr: ((node_hwmon_temp_celsius * ignoring(label) group_left(instance, job, node, sensor) node_hwmon_sensor_label{label!="tctl"} > 75)) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 5m
labels:
severity: warning
annotations:
summary: Host physical component too hot (instance {{ $labels.instance }})
description: "Physical hardware component too hot\n VALUE = {{ $value }}"
- alert: HostNodeOvertemperatureAlarm
expr: ((node_hwmon_temp_crit_alarm_celsius == 1) or (node_hwmon_temp_alarm == 1)) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 0m
labels:
severity: critical
annotations:
summary: Host node overtemperature alarm (instance {{ $labels.instance }})
description: "Physical node temperature alarm triggered\n VALUE = {{ $value }}"
- alert: HostRaidArrayGotInactive
expr: (node_md_state{state="inactive"} > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 0m
labels:
severity: critical
annotations:
summary: Host RAID array got inactive (instance {{ $labels.instance }})
description: "RAID array {{ $labels.device }} is in a degraded state due to one or more disk failures. The number of spare drives is insufficient to fix the issue automatically.\n VALUE = {{ $value }}"
- alert: HostRaidDiskFailure
expr: (node_md_disks{state="failed"} > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
severity: warning
annotations:
summary: Host RAID disk failure (instance {{ $labels.instance }})
description: "At least one device in RAID array on {{ $labels.instance }} failed. Array {{ $labels.md_device }} needs attention and possibly a disk swap\n VALUE = {{ $value }}"
- alert: HostKernelVersionDeviations
expr: (count(sum(label_replace(node_uname_info, "kernel", "$1", "release", "([0-9]+.[0-9]+.[0-9]+).*")) by (kernel)) > 1) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 6h
labels:
severity: warning
annotations:
summary: Host kernel version deviations (instance {{ $labels.instance }})
description: "Different kernel versions are running\n VALUE = {{ $value }}"
- alert: HostOomKillDetected
expr: (increase(node_vmstat_oom_kill[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 0m
labels:
severity: warning
annotations:
summary: Host OOM kill detected (instance {{ $labels.instance }})
description: "OOM kill detected\n VALUE = {{ $value }}"
- alert: HostEdacCorrectableErrorsDetected
expr: (increase(node_edac_correctable_errors_total[1m]) > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 0m
labels:
severity: info
annotations:
summary: Host EDAC Correctable Errors detected (instance {{ $labels.instance }})
description: "Host {{ $labels.instance }} has had {{ printf \"%.0f\" $value }} correctable memory errors reported by EDAC in the last 5 minutes.\n VALUE = {{ $value }}"
- alert: HostEdacUncorrectableErrorsDetected
expr: (node_edac_uncorrectable_errors_total > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 0m
labels:
severity: warning
annotations:
summary: Host EDAC Uncorrectable Errors detected (instance {{ $labels.instance }})
description: "Host {{ $labels.instance }} has had {{ printf \"%.0f\" $value }} uncorrectable memory errors reported by EDAC in the last 5 minutes.\n VALUE = {{ $value }}"
- alert: HostNetworkReceiveErrors
expr: (rate(node_network_receive_errs_total[2m]) / rate(node_network_receive_packets_total[2m]) > 0.01) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
severity: warning
annotations:
summary: Host Network Receive Errors (instance {{ $labels.instance }})
description: "Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf \"%.0f\" $value }} receive errors in the last two minutes.\n VALUE = {{ $value }}"
- alert: HostNetworkTransmitErrors
expr: (rate(node_network_transmit_errs_total[2m]) / rate(node_network_transmit_packets_total[2m]) > 0.01) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
severity: warning
annotations:
summary: Host Network Transmit Errors (instance {{ $labels.instance }})
description: "Host {{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf \"%.0f\" $value }} transmit errors in the last two minutes.\n VALUE = {{ $value }}"
- alert: HostNetworkBondDegraded
expr: ((node_bonding_active - node_bonding_slaves) != 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
severity: warning
annotations:
summary: Host Network Bond Degraded (instance {{ $labels.instance }})
description: "Bond \"{{ $labels.device }}\" degraded on \"{{ $labels.instance }}\".\n VALUE = {{ $value }}"
- alert: HostConntrackLimit
expr: (node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 5m
labels:
severity: warning
annotations:
summary: Host conntrack limit (instance {{ $labels.instance }})
description: "The number of conntrack is approaching limit\n VALUE = {{ $value }}"
- alert: HostClockSkew
expr: ((node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 10m
labels:
severity: warning
annotations:
summary: Host clock skew (instance {{ $labels.instance }})
description: "Clock skew detected. Clock is out of sync. Ensure NTP is configured correctly on this host.\n VALUE = {{ $value }}"
- alert: HostClockNotSynchronising
expr: (min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 2m
labels:
severity: warning
annotations:
summary: Host clock not synchronising (instance {{ $labels.instance }})
description: "Clock not synchronising. Ensure NTP is configured on this host.\n VALUE = {{ $value }}"
- alert: HostRequiresReboot
expr: (node_reboot_required > 0) * on(instance) group_left (nodename) node_uname_info{nodename=~".+"}
for: 4h
labels:
severity: info
annotations:
summary: Host requires reboot (instance {{ $labels.instance }})
description: "{{ $labels.instance }} requires a reboot.\n VALUE = {{ $value }}"
- name: prometheus
rules:
- alert: PrometheusJobMissing
expr: absent(up{job="prometheus"})
for: 0m
labels:
severity: warning
annotations:
summary: Prometheus job missing (instance {{ $labels.instance }})
description: "A Prometheus job has disappeared\n VALUE = {{ $value }}"
- alert: PrometheusTargetMissing
expr: up == 0
for: 0m
labels:
severity: critical
annotations:
summary: Prometheus target missing (instance {{ $labels.instance }})
description: "A Prometheus target has disappeared. An exporter might be crashed.\n VALUE = {{ $value }}"
- alert: PrometheusAllTargetsMissing
expr: sum by (job) (up) == 0
for: 0m
labels:
severity: critical
annotations:
summary: Prometheus all targets missing (instance {{ $labels.instance }})
description: "A Prometheus job does not have living target anymore.\n VALUE = {{ $value }}"
- alert: PrometheusConfigurationReloadFailure
expr: prometheus_config_last_reload_successful != 1
for: 0m
labels:
severity: warning
annotations:
summary: Prometheus configuration reload failure (instance {{ $labels.instance }})
description: "Prometheus configuration reload error\n VALUE = {{ $value }}"
- alert: PrometheusTooManyRestarts
expr: changes(process_start_time_seconds{job=~"prometheus|pushgateway|alertmanager"}[15m]) > 2
for: 0m
labels:
severity: warning
annotations:
summary: Prometheus too many restarts (instance {{ $labels.instance }})
description: "Prometheus has restarted more than twice in the last 15 minutes. It might be crashlooping.\n VALUE = {{ $value }}"
- alert: PrometheusAlertmanagerJobMissing
expr: absent(up{job="alertmanager"})
for: 0m
labels:
severity: warning
annotations:
summary: Prometheus AlertManager job missing (instance {{ $labels.instance }})
description: "A Prometheus AlertManager job has disappeared\n VALUE = {{ $value }}"
- alert: PrometheusAlertmanagerConfigurationReloadFailure
expr: alertmanager_config_last_reload_successful != 1
for: 0m
labels:
severity: warning
annotations:
summary: Prometheus AlertManager configuration reload failure (instance {{ $labels.instance }})
description: "AlertManager configuration reload error\n VALUE = {{ $value }}"
- alert: PrometheusAlertmanagerConfigNotSynced
expr: count(count_values("config_hash", alertmanager_config_hash)) > 1
for: 0m
labels:
severity: warning
annotations:
summary: Prometheus AlertManager config not synced (instance {{ $labels.instance }})
description: "Configurations of AlertManager cluster instances are out of sync\n VALUE = {{ $value }}"
# For testing.
# - alert: PrometheusAlertmanagerE2eDeadManSwitch
# expr: vector(1)
# for: 0m
# labels:
# severity: critical
# annotations:
# summary: Prometheus AlertManager E2E dead man switch (instance {{ $labels.instance }})
# description: "Prometheus DeadManSwitch is an always-firing alert. It's used as an end-to-end test of Prometheus through the Alertmanager.\n VALUE = {{ $value }}"
- alert: PrometheusNotConnectedToAlertmanager
expr: prometheus_notifications_alertmanagers_discovered < 1
for: 0m
labels:
severity: critical
annotations:
summary: Prometheus not connected to alertmanager (instance {{ $labels.instance }})
description: "Prometheus cannot connect the alertmanager\n VALUE = {{ $value }}"
- alert: PrometheusRuleEvaluationFailures
expr: increase(prometheus_rule_evaluation_failures_total[3m]) > 0
for: 0m
labels:
severity: critical
annotations:
summary: Prometheus rule evaluation failures (instance {{ $labels.instance }})
description: "Prometheus encountered {{ $value }} rule evaluation failures, leading to potentially ignored alerts.\n VALUE = {{ $value }}"
- alert: PrometheusTemplateTextExpansionFailures
expr: increase(prometheus_template_text_expansion_failures_total[3m]) > 0
for: 0m
labels:
severity: critical
annotations:
summary: Prometheus template text expansion failures (instance {{ $labels.instance }})
description: "Prometheus encountered {{ $value }} template text expansion failures\n VALUE = {{ $value }}"
- alert: PrometheusRuleEvaluationSlow
expr: prometheus_rule_group_last_duration_seconds > prometheus_rule_group_interval_seconds
for: 5m
labels:
severity: warning
annotations:
summary: Prometheus rule evaluation slow (instance {{ $labels.instance }})
description: "Prometheus rule evaluation took more time than the scheduled interval. It indicates a slower storage backend access or too complex query.\n VALUE = {{ $value }}"
- alert: PrometheusNotificationsBacklog
expr: min_over_time(prometheus_notifications_queue_length[10m]) > 0
for: 0m
labels:
severity: warning
annotations:
summary: Prometheus notifications backlog (instance {{ $labels.instance }})
description: "The Prometheus notification queue has not been empty for 10 minutes\n VALUE = {{ $value }}"
- alert: PrometheusAlertmanagerNotificationFailing
expr: rate(alertmanager_notifications_failed_total[1m]) > 0
for: 0m
labels:
severity: critical
annotations:
summary: Prometheus AlertManager notification failing (instance {{ $labels.instance }})
description: "Alertmanager is failing sending notifications\n VALUE = {{ $value }}"
- alert: PrometheusTargetEmpty
expr: prometheus_sd_discovered_targets == 0
for: 0m
labels:
severity: critical
annotations:
summary: Prometheus target empty (instance {{ $labels.instance }})
description: "Prometheus has no target in service discovery\n VALUE = {{ $value }}"
- alert: PrometheusTargetScrapingSlow
expr: prometheus_target_interval_length_seconds{quantile="0.9"} / on (interval, instance, job) prometheus_target_interval_length_seconds{quantile="0.5"} > 1.05
for: 5m
labels:
severity: warning
annotations:
summary: Prometheus target scraping slow (instance {{ $labels.instance }})
description: "Prometheus is scraping exporters slowly since it exceeded the requested interval time. Your Prometheus server is under-provisioned.\n VALUE = {{ $value }}"
- alert: PrometheusLargeScrape
expr: increase(prometheus_target_scrapes_exceeded_sample_limit_total[10m]) > 10
for: 5m
labels:
severity: warning
annotations:
summary: Prometheus large scrape (instance {{ $labels.instance }})
description: "Prometheus has many scrapes that exceed the sample limit\n VALUE = {{ $value }}"
- alert: PrometheusTargetScrapeDuplicate
expr: increase(prometheus_target_scrapes_sample_duplicate_timestamp_total[5m]) > 0
for: 0m
labels:
severity: warning
annotations:
summary: Prometheus target scrape duplicate (instance {{ $labels.instance }})
description: "Prometheus has many samples rejected due to duplicate timestamps but different values\n VALUE = {{ $value }}"
- alert: PrometheusTsdbCheckpointCreationFailures
expr: increase(prometheus_tsdb_checkpoint_creations_failed_total[1m]) > 0
for: 0m
labels:
severity: critical
annotations:
summary: Prometheus TSDB checkpoint creation failures (instance {{ $labels.instance }})
description: "Prometheus encountered {{ $value }} checkpoint creation failures\n VALUE = {{ $value }}"
- alert: PrometheusTsdbCheckpointDeletionFailures
expr: increase(prometheus_tsdb_checkpoint_deletions_failed_total[1m]) > 0
for: 0m
labels:
severity: critical
annotations:
summary: Prometheus TSDB checkpoint deletion failures (instance {{ $labels.instance }})
description: "Prometheus encountered {{ $value }} checkpoint deletion failures\n VALUE = {{ $value }}"
- alert: PrometheusTsdbCompactionsFailed
expr: increase(prometheus_tsdb_compactions_failed_total[1m]) > 0
for: 0m
labels:
severity: critical
annotations:
summary: Prometheus TSDB compactions failed (instance {{ $labels.instance }})
description: "Prometheus encountered {{ $value }} TSDB compactions failures\n VALUE = {{ $value }}"
- alert: PrometheusTsdbHeadTruncationsFailed
expr: increase(prometheus_tsdb_head_truncations_failed_total[1m]) > 0
for: 0m
labels:
severity: critical
annotations:
summary: Prometheus TSDB head truncations failed (instance {{ $labels.instance }})
description: "Prometheus encountered {{ $value }} TSDB head truncation failures\n VALUE = {{ $value }}"
- alert: PrometheusTsdbReloadFailures
expr: increase(prometheus_tsdb_reloads_failures_total[1m]) > 0
for: 0m
labels:
severity: critical
annotations:
summary: Prometheus TSDB reload failures (instance {{ $labels.instance }})
description: "Prometheus encountered {{ $value }} TSDB reload failures\n VALUE = {{ $value }}"
- alert: PrometheusTsdbWalCorruptions
expr: increase(prometheus_tsdb_wal_corruptions_total[1m]) > 0
for: 0m
labels:
severity: critical
annotations:
summary: Prometheus TSDB WAL corruptions (instance {{ $labels.instance }})
description: "Prometheus encountered {{ $value }} TSDB WAL corruptions\n VALUE = {{ $value }}"
- alert: PrometheusTsdbWalTruncationsFailed
expr: increase(prometheus_tsdb_wal_truncations_failed_total[1m]) > 0
for: 0m
labels:
severity: critical
annotations:
summary: Prometheus TSDB WAL truncations failed (instance {{ $labels.instance }})
description: "Prometheus encountered {{ $value }} TSDB WAL truncation failures\n VALUE = {{ $value }}"
- alert: PrometheusTimeseriesCardinality
expr: label_replace(count by(__name__) ({__name__=~".+"}), "name", "$1", "__name__", "(.+)") > 10000
for: 0m
labels:
severity: warning
annotations:
summary: Prometheus timeseries cardinality (instance {{ $labels.instance }})
description: "The \"{{ $labels.name }}\" timeseries cardinality is getting very high: {{ $value }}\n VALUE = {{ $value }}"

@@ -0,0 +1,43 @@
# partly generated 2022-01-08, Mozilla Guideline v5.6, nginx 1.17.7, OpenSSL 1.1.1k, intermediate configuration
# https://ssl-config.mozilla.org/#server=nginx&version=1.17.7&config=intermediate&openssl=1.1.1k&guideline=5.6
server {
# Listen on a custom port for the proxy protocol.
listen 8443 ssl http2 proxy_protocol;
# Make use of the ngx_http_realip_module to set the $remote_addr and
# $remote_port to the client address and client port, when using proxy
# protocol.
# First set our proxy protocol proxy as trusted.
set_real_ip_from 172.31.17.140;
# Then tell the realip_module to get the addresses from the proxy protocol
# header.
real_ip_header proxy_protocol;
server_name grafana.hamburg.ccc.de;
ssl_certificate /etc/letsencrypt/live/grafana.hamburg.ccc.de/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/grafana.hamburg.ccc.de/privkey.pem;
# verify chain of trust of OCSP response using Root CA and Intermediate certs
ssl_trusted_certificate /etc/letsencrypt/live/grafana.hamburg.ccc.de/chain.pem;
# HSTS (ngx_http_headers_module is required) (63072000 seconds)
add_header Strict-Transport-Security "max-age=63072000" always;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Port 443;
# This is https in any case.
proxy_set_header X-Forwarded-Proto https;
# Hide the X-Forwarded header.
proxy_hide_header X-Forwarded;
# Assume we are the only Reverse Proxy (well using Proxy Protocol, but that
# is transparent).
# Also provide "_hidden" for by, since it's not relevant.
proxy_set_header Forwarded "for=$remote_addr;proto=https;host=$host;by=_hidden";
location / {
proxy_pass http://127.0.0.1:3000/;
}
}

@@ -0,0 +1,124 @@
## Secrets:
#
# Secrets should be provided via the relevant `x_secrets.env` files to the
# containers. Options to be set are documented by commented out environment
# variables.
#
## Links & Resources:
#
# https://www.keycloak.org/
# https://www.keycloak.org/documentation
# https://www.keycloak.org/getting-started/getting-started-docker
# https://www.keycloak.org/server/configuration
# https://www.keycloak.org/server/containers
# https://www.keycloak.org/server/configuration-production
# https://www.keycloak.org/server/db
# https://hub.docker.com/_/postgres
# https://github.com/docker-library/docs/blob/master/postgres/README.md
# https://www.keycloak.org/server/hostname
# https://www.keycloak.org/server/reverseproxy
# https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Forwarded
# https://www.keycloak.org/server/all-config
services:
keycloak:
image: git.hamburg.ccc.de/ccchh/oci-images/keycloak:26.0
pull_policy: always
restart: unless-stopped
command: start --optimized
depends_on:
- db
networks:
- keycloak
environment:
KEYCLOAK_ADMIN: admin
KEYCLOAK_ADMIN_PASSWORD: "{{ lookup("community.general.passwordstore", "noc/vm-secrets/chaosknoten/keycloak/KEYCLOAK_ADMIN_PASSWORD", create=false, missing="error") }}"
KC_DB: postgres
KC_DB_URL_HOST: db
KC_DB_USERNAME: keycloak
KC_DB_PASSWORD: "{{ lookup("community.general.passwordstore", "noc/vm-secrets/chaosknoten/keycloak/KC_DB_PASSWORD", create=false, missing="error") }}"
KC_HOSTNAME: https://id.hamburg.ccc.de
KC_HOSTNAME_BACKCHANNEL_DYNAMIC: "false"
KC_HOSTNAME_ADMIN: https://keycloak-admin.hamburg.ccc.de
KC_PROXY_HEADERS: xforwarded
KC_HTTP_ENABLED: "true"
ports:
- "8080:8080"
db:
image: postgres:15.2
restart: unless-stopped
networks:
- keycloak
volumes:
- "./database:/var/lib/postgresql/data"
environment:
POSTGRES_USER: keycloak
POSTGRES_PASSWORD: "{{ lookup("community.general.passwordstore", "noc/vm-secrets/chaosknoten/keycloak/POSTGRES_PASSWORD", create=false, missing="error") }}"
POSTGRES_DB: keycloak
id-invite-web:
image: git.hamburg.ccc.de/ccchh/id-invite/id-invite:latest
command: web
restart: unless-stopped
networks:
- web
- email
- keycloak
ports:
- 3000:3000
environment:
- "APP_EMAIL_BASE_URI=http://id-invite-email:3000"
- "APP_KEYCLOAK_BASE_URI=http://id-invite-keycloak:3000"
- "BOTTLE_HOST=0.0.0.0"
- "BOTTLE_URL_SCHEME=https"
- "IDINVITE_INVITE_REQUIRES_GROUP=id_invite"
- "IDINVITE_URL=https://invite.hamburg.ccc.de"
- "IDINVITE_KEYCLOAK_NAME=CCCHH ID"
- "IDINVITE_VALID_HOURS=50"
- "IDINVITE_SECRET={{ lookup("community.general.passwordstore", "noc/vm-secrets/chaosknoten/keycloak/IDINVITE_TOKEN_SECRET", create=false, missing="error") }}"
- "IDINVITE_DISCOVERY_URL=https://id.hamburg.ccc.de/realms/ccchh/.well-known/openid-configuration"
- "IDINVITE_CLIENT_ID=id-invite"
- "IDINVITE_CLIENT_SECRET={{ lookup("community.general.passwordstore", "noc/vm-secrets/chaosknoten/keycloak/IDINVITE_CLIENT_SECRET", create=false, missing="error") }}"
- "MAIL_FROM=no-reply@hamburg.ccc.de"
- "BOTTLE_HOST=0.0.0.0"
id-invite-email:
image: git.hamburg.ccc.de/ccchh/id-invite/id-invite:latest
command: email
restart: unless-stopped
networks:
- email
- web
environment:
- "BOTTLE_HOST=0.0.0.0"
- "IDINVITE_KEYCLOAK_NAME=CCCHH ID"
- "MAIL_FROM=no-reply@id.hamburg.ccc.de"
- "SMTP_HOSTNAME=cow.hamburg.ccc.de"
- "SMTP_USERNAME=no-reply@id.hamburg.ccc.de"
- "SMTP_PASSWORD={{ lookup("community.general.passwordstore", "noc/vm-secrets/chaosknoten/keycloak/NO_REPLY_SMTP", create=false, missing="error") }}"
id-invite-keycloak:
image: git.hamburg.ccc.de/ccchh/id-invite/id-invite:latest
command: keycloak
restart: unless-stopped
networks:
- keycloak
environment:
- "BOTTLE_HOST=0.0.0.0"
- "IDINVITE_CLIENT_ID=id-invite"
- "IDINVITE_CLIENT_SECRET={{ lookup("community.general.passwordstore", "noc/vm-secrets/chaosknoten/keycloak/IDINVITE_CLIENT_SECRET", create=false, missing="error") }}"
- "KEYCLOAK_API_URL=http://keycloak:8080"
- "KEYCLOAK_API_USERNAME=id-invite"
- "KEYCLOAK_API_PASSWORD={{ lookup("community.general.passwordstore", "noc/vm-secrets/chaosknoten/keycloak/IDINVITE_ADMIN_PASSWORD", create=false, missing="error") }}"
- "KEYCLOAK_API_REALM=ccchh"
- 'KEYCLOAK_GROUPS=["user"]'
networks:
keycloak:
external: false
web:
email:
external: false

@@ -0,0 +1,69 @@
# partly generated 2022-01-08, Mozilla Guideline v5.6, nginx 1.17.7, OpenSSL 1.1.1k, intermediate configuration
# https://ssl-config.mozilla.org/#server=nginx&version=1.17.7&config=intermediate&openssl=1.1.1k&guideline=5.6
# Also see: https://www.keycloak.org/server/reverseproxy
server {
# Listen on a custom port for the proxy protocol.
listen 8443 ssl http2 proxy_protocol;
# Make use of the ngx_http_realip_module to set the $remote_addr and
# $remote_port to the client address and client port, when using proxy
# protocol.
# First set our proxy protocol proxy as trusted.
set_real_ip_from 172.31.17.140;
# Then tell the realip_module to get the addresses from the proxy protocol
# header.
real_ip_header proxy_protocol;
server_name id.hamburg.ccc.de;
ssl_certificate /etc/letsencrypt/live/id.hamburg.ccc.de/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/id.hamburg.ccc.de/privkey.pem;
# verify chain of trust of OCSP response using Root CA and Intermediate certs
ssl_trusted_certificate /etc/letsencrypt/live/id.hamburg.ccc.de/chain.pem;
# HSTS (ngx_http_headers_module is required) (63072000 seconds)
add_header Strict-Transport-Security "max-age=63072000" always;
# To avoid occasional 502s when logging in through PVE, use bigger buffer sizes.
# The error seemed to occur after logging in and out and in. Maybe related
# to Keycloak logout settings, but probably not.
# See:
# https://stackoverflow.com/questions/56126864/why-do-i-get-502-when-trying-to-authenticate
# https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffer_size
proxy_buffer_size 128k;
proxy_buffers 8 128k;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Port 443;
# This is https in any case.
proxy_set_header X-Forwarded-Proto https;
# Hide the X-Forwarded header.
proxy_hide_header X-Forwarded;
# Assume we are the only Reverse Proxy (well using Proxy Protocol, but that
# is transparent).
# Also provide "_hidden" for by, since it's not relevant.
proxy_set_header Forwarded "for=$remote_addr;proto=https;host=$host;by=_hidden";
# Redirect a user opening any location not configured below on id.hamburg.ccc.de to the account management page.
location ^~ / {
return 307 https://id.hamburg.ccc.de/realms/ccchh/account/;
}
location /js/ {
proxy_pass http://127.0.0.1:8080/js/;
}
location /realms/ {
proxy_pass http://127.0.0.1:8080/realms/;
}
location /resources/ {
proxy_pass http://127.0.0.1:8080/resources/;
}
location /robots.txt {
proxy_pass http://127.0.0.1:8080/robots.txt;
}
}

@@ -0,0 +1,54 @@
# partly generated 2022-01-08, Mozilla Guideline v5.6, nginx 1.17.7, OpenSSL 1.1.1k, intermediate configuration
# https://ssl-config.mozilla.org/#server=nginx&version=1.17.7&config=intermediate&openssl=1.1.1k&guideline=5.6
# Also see: https://www.keycloak.org/server/reverseproxy
server {
# Listen on a custom port for the proxy protocol.
listen 8443 ssl http2 proxy_protocol;
# Make use of the ngx_http_realip_module to set the $remote_addr and
# $remote_port to the client address and client port, when using proxy
# protocol.
# First set our proxy protocol proxy as trusted.
set_real_ip_from 172.31.17.140;
# Then tell the realip_module to get the addresses from the proxy protocol
# header.
real_ip_header proxy_protocol;
server_name invite.hamburg.ccc.de;
ssl_certificate /etc/letsencrypt/live/invite.hamburg.ccc.de/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/invite.hamburg.ccc.de/privkey.pem;
# verify chain of trust of OCSP response using Root CA and Intermediate certs
ssl_trusted_certificate /etc/letsencrypt/live/invite.hamburg.ccc.de/chain.pem;
# HSTS (ngx_http_headers_module is required) (63072000 seconds)
add_header Strict-Transport-Security "max-age=63072000" always;
# To avoid occasional 502s when logging in through PVE, use bigger buffer sizes.
# The error seemed to occur after logging in, out and back in. Maybe related
# to Keycloak logout settings, but probably not.
# See:
# https://stackoverflow.com/questions/56126864/why-do-i-get-502-when-trying-to-authenticate
# https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffer_size
proxy_buffer_size 128k;
proxy_buffers 8 128k;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Port 443;
# This is https in any case.
proxy_set_header X-Forwarded-Proto https;
# Hide the X-Forwarded header.
proxy_hide_header X-Forwarded;
# Assume we are the only reverse proxy (well, one using the PROXY protocol,
# but that is transparent).
# Also provide "_hidden" for by, since it's not relevant.
proxy_set_header Forwarded "for=$remote_addr;proto=https;host=$host;by=_hidden";
# Proxy all requests on invite.hamburg.ccc.de to the invite app.
location / {
proxy_pass http://127.0.0.1:3000/;
}
}

@@ -0,0 +1,73 @@
# partly generated 2022-01-08, Mozilla Guideline v5.6, nginx 1.17.7, OpenSSL 1.1.1k, intermediate configuration
# https://ssl-config.mozilla.org/#server=nginx&version=1.17.7&config=intermediate&openssl=1.1.1k&guideline=5.6
# Also see: https://www.keycloak.org/server/reverseproxy
server {
# Disable this for now.
#listen 443 ssl http2;
##listen [::]:443 ssl http2;
# Listen on a custom port for the proxy protocol.
listen 8444 ssl http2 proxy_protocol;
# Make use of the ngx_http_realip_module to set the $remote_addr and
# $remote_port to the client address and client port, when using proxy
# protocol.
# First set our proxy protocol proxy as trusted.
set_real_ip_from 172.31.17.140;
# Then tell the realip_module to get the addresses from the proxy protocol
# header.
real_ip_header proxy_protocol;
server_name keycloak-admin.hamburg.ccc.de;
ssl_certificate /etc/letsencrypt/live/keycloak-admin.hamburg.ccc.de/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/keycloak-admin.hamburg.ccc.de/privkey.pem;
# verify chain of trust of OCSP response using Root CA and Intermediate certs
ssl_trusted_certificate /etc/letsencrypt/live/keycloak-admin.hamburg.ccc.de/chain.pem;
# HSTS (ngx_http_headers_module is required) (63072000 seconds)
add_header Strict-Transport-Security "max-age=63072000" always;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Port 443;
# This is https in any case.
proxy_set_header X-Forwarded-Proto https;
# Hide the X-Forwarded header.
proxy_hide_header X-Forwarded;
# Assume we are the only reverse proxy (well, one using the PROXY protocol,
# but that is transparent).
# Also provide "_hidden" for by, since it's not relevant.
proxy_set_header Forwarded "for=$remote_addr;proto=https;host=$host;by=_hidden";
allow 185.161.129.132/32; # z9
allow 2a07:c480:0:100::/56; # z9
allow 213.240.180.39/32; # stbe home
allow 2a01:170:118b::1/64; # stbe home
deny all;
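# The allow/deny rules are checked in order until the first match, so anything
# not coming from the networks above is rejected before any location is served.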
location ^~ / {
return 307 https://keycloak-admin.hamburg.ccc.de/admin/master/console/;
}
location /js/ {
proxy_pass http://127.0.0.1:8080/js/;
}
location /realms/ {
proxy_pass http://127.0.0.1:8080/realms/;
}
location /resources/ {
proxy_pass http://127.0.0.1:8080/resources/;
}
location /robots.txt {
proxy_pass http://127.0.0.1:8080/robots.txt;
}
location /admin/ {
proxy_pass http://127.0.0.1:8080/admin/;
}
}

@@ -0,0 +1,72 @@
services:
mailman-core:
restart: unless-stopped
image: maxking/mailman-core:0.5 # Use a specific version tag (tag latest is not published)
container_name: mailman-core
hostname: mailman-core
volumes:
- /opt/mailman/core:/opt/mailman/
stop_grace_period: 30s
links:
- database:database
depends_on:
- database
environment:
- DATABASE_URL=postgresql://mailman:wvQjbMRnwFuxGEPz@database/mailmandb
- DATABASE_TYPE=postgres
- DATABASE_CLASS=mailman.database.postgresql.PostgreSQLDatabase
- HYPERKITTY_API_KEY=ITfRjushI6FP0TLMnRpZxlfB2e17DN86
- MTA=postfix
ports:
- "127.0.0.1:8001:8001" # API
- "127.0.0.1:8024:8024" # LMTP - incoming emails
networks:
mailman:
mailman-web:
restart: unless-stopped
image: maxking/mailman-web:0.5 # Use a specific version tag (tag latest is not published)
container_name: mailman-web
hostname: mailman-web
depends_on:
- database
links:
- mailman-core:mailman-core
- database:database
volumes:
- /opt/mailman/web:/opt/mailman-web-data
environment:
- DATABASE_TYPE=postgres
- DATABASE_URL=postgresql://mailman:wvQjbMRnwFuxGEPz@database/mailmandb
- "DJANGO_ALLOWED_HOSTS=lists.hamburg.ccc.de,lists.c3lingo.org"
- HYPERKITTY_API_KEY=ITfRjushI6FP0TLMnRpZxlfB2e17DN86
- SERVE_FROM_DOMAIN=lists.hamburg.ccc.de
- SECRET_KEY=ugfknEYBaFVc62R1jlIjnkizQaqr7tSt
- MAILMAN_ADMIN_USER=ccchh-admin
- MAILMAN_ADMIN_EMAIL=tony@cowtest.hamburg.ccc.de
ports:
- "127.0.0.1:8000:8000" # HTTP
- "127.0.0.1:8080:8080" # uwsgi
networks:
mailman:
database:
restart: unless-stopped
environment:
- POSTGRES_DB=mailmandb
- POSTGRES_USER=mailman
- POSTGRES_PASSWORD=wvQjbMRnwFuxGEPz
image: postgres:12-alpine
volumes:
- /opt/mailman/database:/var/lib/postgresql/data
networks:
mailman:
networks:
mailman:
driver: bridge
ipam:
driver: default
config:
-
subnet: 172.19.199.0/24

@@ -0,0 +1,26 @@
server {
root /var/www/html;
server_name lists.c3lingo.org; # managed by Certbot
listen [::]:443 ssl; # managed by Certbot
listen 443 ssl; # managed by Certbot
ssl_certificate /etc/letsencrypt/live/lists.c3lingo.org/fullchain.pem; # managed by Certbot
ssl_certificate_key /etc/letsencrypt/live/lists.c3lingo.org/privkey.pem; # managed by Certbot
# verify chain of trust of OCSP response using Root CA and Intermediate certs
ssl_trusted_certificate /etc/letsencrypt/live/lists.c3lingo.org/chain.pem;
# HSTS (ngx_http_headers_module is required) (63072000 seconds)
add_header Strict-Transport-Security "max-age=63072000" always;
location /static {
alias /opt/mailman/web/static;
autoindex off;
}
location / {
uwsgi_pass localhost:8080;
include uwsgi_params;
uwsgi_read_timeout 300;
}
}

@@ -0,0 +1,26 @@
server {
root /var/www/html;
server_name lists.hamburg.ccc.de; # managed by Certbot
listen [::]:443 ssl; # managed by Certbot
listen 443 ssl; # managed by Certbot
ssl_certificate /etc/letsencrypt/live/lists.hamburg.ccc.de/fullchain.pem; # managed by Certbot
ssl_certificate_key /etc/letsencrypt/live/lists.hamburg.ccc.de/privkey.pem; # managed by Certbot
# verify chain of trust of OCSP response using Root CA and Intermediate certs
ssl_trusted_certificate /etc/letsencrypt/live/lists.hamburg.ccc.de/chain.pem;
# HSTS (ngx_http_headers_module is required) (63072000 seconds)
add_header Strict-Transport-Security "max-age=63072000" always;
location /static {
alias /opt/mailman/web/static;
autoindex off;
}
location / {
uwsgi_pass localhost:8080;
include uwsgi_params;
uwsgi_read_timeout 300;
}
}

@@ -0,0 +1,28 @@
server {
root /var/www/html;
server_name mumble.hamburg.ccc.de; # managed by Certbot
listen [::]:443 ssl ipv6only=on; # managed by Certbot
listen 443 ssl; # managed by Certbot
ssl_certificate /etc/letsencrypt/live/mumble.hamburg.ccc.de/fullchain.pem; # managed by Certbot
ssl_certificate_key /etc/letsencrypt/live/mumble.hamburg.ccc.de/privkey.pem; # managed by Certbot
# verify chain of trust of OCSP response using Root CA and Intermediate certs
ssl_trusted_certificate /etc/letsencrypt/live/mumble.hamburg.ccc.de/chain.pem;
# HSTS (ngx_http_headers_module is required) (63072000 seconds)
add_header Strict-Transport-Security "max-age=63072000" always;
location / {
return 302 https://wiki.hamburg.ccc.de/infrastructure:services:mumble;
}
location /metrics {
proxy_pass http://127.0.0.1:9123/;
}
}

@@ -0,0 +1,17 @@
## Links & Resources
#
# https://helpcenter.onlyoffice.com/installation/docs-community-install-docker.aspx
services:
onlyoffice:
image: onlyoffice/documentserver:latest
restart: unless-stopped
volumes:
- "./onlyoffice/DocumentServer/logs:/var/log/onlyoffice"
- "./onlyoffice/DocumentServer/data:/var/www/onlyoffice/Data"
- "./onlyoffice/DocumentServer/lib:/var/lib/onlyoffice"
- "./onlyoffice/DocumentServer/db:/var/lib/postgresql"
ports:
- "8080:80"
environment:
JWT_SECRET: {{ lookup("community.general.passwordstore", "noc/vm-secrets/chaosknoten/onlyoffice/JWT_SECRET", create=false, missing="error") }}
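# Note: the same JWT secret has to be configured in every client integration
# (e.g. a Nextcloud ONLYOFFICE app), since the document server validates
# requests against it.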

@@ -0,0 +1,37 @@
# partly generated 2022-01-08, Mozilla Guideline v5.6, nginx 1.17.7, OpenSSL 1.1.1k, intermediate configuration
# https://ssl-config.mozilla.org/#server=nginx&version=1.17.7&config=intermediate&openssl=1.1.1k&guideline=5.6
server {
# Listen on a custom port for the proxy protocol.
listen 8443 ssl http2 proxy_protocol;
# Make use of the ngx_http_realip_module to set the $remote_addr and
# $remote_port to the client address and client port, when using proxy
# protocol.
# First set our proxy protocol proxy as trusted.
set_real_ip_from 172.31.17.140;
# Then tell the realip_module to get the addresses from the proxy protocol
# header.
real_ip_header proxy_protocol;
server_name onlyoffice.hamburg.ccc.de;
ssl_certificate /etc/letsencrypt/live/onlyoffice.hamburg.ccc.de/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/onlyoffice.hamburg.ccc.de/privkey.pem;
# verify chain of trust of OCSP response using Root CA and Intermediate certs
ssl_trusted_certificate /etc/letsencrypt/live/onlyoffice.hamburg.ccc.de/chain.pem;
# replace with the IP address of your resolver
resolver 1.1.1.1;
location / {
proxy_set_header Host $host;
# This is https in any case.
proxy_set_header X-Forwarded-Proto https;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# HSTS (ngx_http_headers_module is required) (63072000 seconds)
add_header Strict-Transport-Security "max-age=63072000" always;
proxy_pass http://127.0.0.1:8080/;
}
}

@@ -0,0 +1,67 @@
---
# see https://github.com/hedgedoc/container/blob/master/docker-compose.yml
services:
database:
image: docker.io/library/postgres:15-alpine
environment:
- "POSTGRES_USER=hedgedoc"
- "POSTGRES_PASSWORD={{ lookup("community.general.passwordstore", "noc/vm-secrets/chaosknoten/pad/DB_PASSWORD", create=false, missing="error") }}"
- "POSTGRES_DB=hedgedoc"
volumes:
- database:/var/lib/postgresql/data
restart: unless-stopped
app:
#image: quay.io/hedgedoc/hedgedoc:1.9.9
image: quay.io/hedgedoc/hedgedoc:latest
environment:
- "CMD_DB_URL=postgres://hedgedoc:{{ lookup("community.general.passwordstore", "noc/vm-secrets/chaosknoten/pad/DB_PASSWORD", create=false, missing="error") }}@database:5432/hedgedoc"
- "CMD_DOMAIN=pad.hamburg.ccc.de"
- "CMD_PROTOCOL_USESSL=true"
- "CMD_HSTS_ENABLE=false"
- "CMD_URL_ADDPORT=false"
- "CMD_ALLOW_FREEURL=true"
- "CMD_ALLOW_EMAIL_REGISTER=false"
- "CMD_ALLOW_ANONYMOUS=false"
- "CMD_ALLOW_ANONYMOUS_EDITS=true"
- "CMD_ALLOW_ANONYMOUS_VIEWS=true"
- "CMD_DEFAULT_PERMISSION=limited"
- "CMD_EMAIL=false"
- "CMD_OAUTH2_USER_PROFILE_URL=https://id.hamburg.ccc.de/realms/ccchh/protocol/openid-connect/userinfo"
- "CMD_OAUTH2_USER_PROFILE_USERNAME_ATTR=preferred_username"
- "CMD_OAUTH2_USER_PROFILE_DISPLAY_NAME_ATTR=name"
- "CMD_OAUTH2_USER_PROFILE_EMAIL_ATTR=email"
- "CMD_OAUTH2_TOKEN_URL=https://id.hamburg.ccc.de/realms/ccchh/protocol/openid-connect/token"
- "CMD_OAUTH2_AUTHORIZATION_URL=https://id.hamburg.ccc.de/realms/ccchh/protocol/openid-connect/auth"
- "CMD_OAUTH2_CLIENT_ID=pad"
- "CMD_OAUTH2_CLIENT_SECRET={{ lookup("community.general.passwordstore", "noc/vm-secrets/chaosknoten/pad/KC_SECRET", create=false, missing="error") }}"
- "CMD_OAUTH2_PROVIDERNAME=Keycloak"
- "CMD_OAUTH2_SCOPE=openid email profile"
volumes:
- uploads:/hedgedoc/public/uploads
ports:
- "127.0.0.1:3000:3000"
restart: unless-stopped
depends_on:
- database
hedgedoc-expire:
image: git.hamburg.ccc.de/ccchh/hedgedoc-expire/hedgedoc-expire:latest
# command: "emailcheck"
command: "cron"
environment:
- "POSTGRES_HOSTNAME=database"
- "POSTGRES_USERNAME=hedgedoc"
- "POSTGRES_PASSWORD={{ lookup("community.general.passwordstore", "noc/vm-secrets/chaosknoten/pad/DB_PASSWORD", create=false, missing="error") }}"
- "SMTP_FROM=pad@hamburg.ccc.de"
- "SMTP_HOSTNAME=cow.hamburg.ccc.de"
- "SMTP_USERNAME=pad@hamburg.ccc.de"
- "SMTP_PASSWORD={{ lookup("community.general.passwordstore", "noc/vm-secrets/chaosknoten/pad/smtp_password", create=false, missing="error") }}"
- "URL=https://pad.hamburg.ccc.de"
depends_on:
- database
volumes:
database: {}
uploads: {}

@@ -0,0 +1,42 @@
# partly generated 2022-01-08, Mozilla Guideline v5.6, nginx 1.17.7, OpenSSL 1.1.1k, intermediate configuration
# https://ssl-config.mozilla.org/#server=nginx&version=1.17.7&config=intermediate&openssl=1.1.1k&guideline=5.6
server {
# Listen on a custom port for the proxy protocol.
listen 8443 ssl http2 proxy_protocol;
# Make use of the ngx_http_realip_module to set the $remote_addr and
# $remote_port to the client address and client port, when using proxy
# protocol.
# First set our proxy protocol proxy as trusted.
set_real_ip_from 172.31.17.140;
# Then tell the realip_module to get the addresses from the proxy protocol
# header.
real_ip_header proxy_protocol;
server_name pad.hamburg.ccc.de;
ssl_certificate /etc/letsencrypt/live/pad.hamburg.ccc.de/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/pad.hamburg.ccc.de/privkey.pem;
# verify chain of trust of OCSP response using Root CA and Intermediate certs
ssl_trusted_certificate /etc/letsencrypt/live/pad.hamburg.ccc.de/chain.pem;
# HSTS (ngx_http_headers_module is required) (63072000 seconds)
add_header Strict-Transport-Security "max-age=63072000" always;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Port 443;
# This is https in any case.
proxy_set_header X-Forwarded-Proto https;
# Hide the X-Forwarded header.
proxy_hide_header X-Forwarded;
# Assume we are the only reverse proxy (well, one using the PROXY protocol,
# but that is transparent).
# Also provide "_hidden" for by, since it's not relevant.
proxy_set_header Forwarded "for=$remote_addr;proto=https;host=$host;by=_hidden";
location / {
proxy_pass http://127.0.0.1:3000/;
}
}

@@ -0,0 +1,106 @@
---
# see https://github.com/pretalx/pretalx-docker/blob/main/docker-compose.yml
services:
database:
image: docker.io/library/postgres:15-alpine
environment:
- "POSTGRES_USER=pretalx"
- "POSTGRES_PASSWORD={{ lookup("community.general.passwordstore", "noc/vm-secrets/chaosknoten/pretalx/DB_PASSWORD", create=false, missing="error") }}"
- "POSTGRES_DB=pretalx"
volumes:
- database:/var/lib/postgresql/data
restart: unless-stopped
redis:
image: redis:latest
restart: unless-stopped
volumes:
- redis:/data
static:
image: docker.io/library/nginx
restart: unless-stopped
volumes:
- public:/usr/share/nginx/html
ports:
- 8081:80
pretalx:
image: pretalx/standalone:latest
entrypoint: gunicorn
command:
- "pretalx.wsgi"
- "--name"
- "pretalx"
- "--workers"
- "4"
- "--max-requests"
- "1200"
- "--max-requests-jitter"
- "50"
- "--log-level=info"
- "--bind=0.0.0.0:8080"
ports:
- 8080:8080
restart: unless-stopped
environment:
PRETALX_DATA_DIR: /data
PRETALX_FILESYSTEM_MEDIA: /public/media
PRETALX_FILESYSTEM_STATIC: /public/static
PRETALX_SITE_URL: https://pretalx.hamburg.ccc.de
PRETALX_DB_TYPE: postgresql
PRETALX_DB_NAME: pretalx
PRETALX_DB_USER: pretalx
PRETALX_DB_PASS: "{{ lookup("community.general.passwordstore", "noc/vm-secrets/chaosknoten/pretalx/DB_PASSWORD", create=false, missing="error") }}"
PRETALX_DB_HOST: database
PRETALX_MAIL_FROM: "pretalx@hamburg.ccc.de"
PRETALX_MAIL_HOST: "cow-intern.hamburg.ccc.de"
PRETALX_CELERY_BACKEND: redis://redis/1
PRETALX_CELERY_BROKER: redis://redis/2
PRETALX_REDIS: redis://redis/3
PRETALX_REDIS_SESSIONS: "True"
# PRETALX_LOGGING_EMAIL: noc@hamburg.ccc.de
PRETALX_LANGUAGE_CODE: de
PRETALX_TIME_ZONE: Europe/Berlin
volumes:
- pretalx:/data
- public:/public
celery:
image: pretalx/standalone:latest
command:
- taskworker
restart: unless-stopped
environment:
PRETALX_DATA_DIR: /data
PRETALX_FILESYSTEM_MEDIA: /public/media
PRETALX_FILESYSTEM_STATIC: /public/static
PRETALX_SITE_URL: https://pretalx.hamburg.ccc.de
PRETALX_DB_TYPE: postgresql
PRETALX_DB_NAME: pretalx
PRETALX_DB_USER: pretalx
PRETALX_DB_PASS: "{{ lookup("community.general.passwordstore", "noc/vm-secrets/chaosknoten/pretalx/DB_PASSWORD", create=false, missing="error") }}"
PRETALX_DB_HOST: database
PRETALX_MAIL_FROM: "pretalx@hamburg.ccc.de"
PRETALX_MAIL_HOST: "cow.hamburg.ccc.de"
PRETALX_MAIL_PORT: 587
PRETALX_MAIL_USER: pretalx@hamburg.ccc.de
PRETALX_MAIL_PASSWORD: "{{ lookup("community.general.passwordstore", "noc/vm-secrets/chaosknoten/pretalx/PRETALX_MAIL_PASSWORD", create=false, missing="error") }}"
PRETALX_MAIL_TLS: "true"
PRETALX_CELERY_BACKEND: redis://redis/1
PRETALX_CELERY_BROKER: redis://redis/2
PRETALX_REDIS: redis://redis/3
PRETALX_REDIS_SESSIONS: "True"
# PRETALX_LOGGING_EMAIL: noc@hamburg.ccc.de
PRETALX_LANGUAGE_CODE: de
PRETALX_TIME_ZONE: Europe/Berlin
volumes:
- pretalx:/data
- public:/public
volumes:
database: {}
redis: {}
pretalx: {}
public: {}

@@ -0,0 +1,50 @@
# partly generated 2022-01-08, Mozilla Guideline v5.6, nginx 1.17.7, OpenSSL 1.1.1k, intermediate configuration
# https://ssl-config.mozilla.org/#server=nginx&version=1.17.7&config=intermediate&openssl=1.1.1k&guideline=5.6
server {
# Listen on a custom port for the proxy protocol.
listen 8443 ssl http2 proxy_protocol;
# Make use of the ngx_http_realip_module to set the $remote_addr and
# $remote_port to the client address and client port, when using proxy
# protocol.
# First set our proxy protocol proxy as trusted.
set_real_ip_from 172.31.17.140;
# Then tell the realip_module to get the addresses from the proxy protocol
# header.
real_ip_header proxy_protocol;
server_name pretalx.hamburg.ccc.de;
ssl_certificate /etc/letsencrypt/live/pretalx.hamburg.ccc.de/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/pretalx.hamburg.ccc.de/privkey.pem;
# verify chain of trust of OCSP response using Root CA and Intermediate certs
ssl_trusted_certificate /etc/letsencrypt/live/pretalx.hamburg.ccc.de/chain.pem;
# HSTS (ngx_http_headers_module is required) (63072000 seconds)
add_header Strict-Transport-Security "max-age=63072000" always;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Port 443;
# This is https in any case.
proxy_set_header X-Forwarded-Proto https;
# Hide the X-Forwarded header.
proxy_hide_header X-Forwarded;
# Assume we are the only reverse proxy (well, one using the PROXY protocol,
# but that is transparent).
# Also provide "_hidden" for by, since it's not relevant.
proxy_set_header Forwarded "for=$remote_addr;proto=https;host=$host;by=_hidden";
location /media {
proxy_pass http://127.0.0.1:8081/media/;
}
location /static {
proxy_pass http://127.0.0.1:8081/static/;
}
location / {
proxy_pass http://127.0.0.1:8080/;
}
}

@@ -0,0 +1,94 @@
# Keep this sorted alphabetically, please!
map $host $upstream_acme_challenge_host {
branding-resources.hamburg.ccc.de 172.31.17.151:31820;
c3cat.de 172.31.17.151:31820;
www.c3cat.de 172.31.17.151:31820;
staging.c3cat.de 172.31.17.151:31820;
ccchoir.de ccchoir-intern.hamburg.ccc.de:31820;
www.ccchoir.de ccchoir-intern.hamburg.ccc.de:31820;
cloud.hamburg.ccc.de 172.31.17.143:31820;
element.hamburg.ccc.de 172.31.17.151:31820;
git.hamburg.ccc.de 172.31.17.154:31820;
grafana.hamburg.ccc.de 172.31.17.145:31820;
hackertours.hamburg.ccc.de 172.31.17.151:31820;
staging.hackertours.hamburg.ccc.de 172.31.17.151:31820;
hamburg.ccc.de 172.31.17.151:31820;
id.hamburg.ccc.de 172.31.17.144:31820;
invite.hamburg.ccc.de 172.31.17.144:31820;
keycloak-admin.hamburg.ccc.de 172.31.17.144:31820;
matrix.hamburg.ccc.de 172.31.17.150:31820;
netbox.hamburg.ccc.de 172.31.17.149:31820;
onlyoffice.hamburg.ccc.de 172.31.17.147:31820;
pad.hamburg.ccc.de 172.31.17.141:31820;
pretalx.hamburg.ccc.de 172.31.17.157:31820;
spaceapi.hamburg.ccc.de 172.31.17.151:31820;
staging.hamburg.ccc.de 172.31.17.151:31820;
wiki.ccchh.net 172.31.17.146:31820;
wiki.hamburg.ccc.de 172.31.17.146:31820;
www.hamburg.ccc.de 172.31.17.151:31820;
tickets.hamburg.ccc.de 172.31.17.148:31820;
zammad.hamburg.ccc.de 172.31.17.152:31820;
eh03.easterhegg.eu 172.31.17.151:31820;
eh05.easterhegg.eu 172.31.17.151:31820;
eh07.easterhegg.eu 172.31.17.151:31820;
eh09.easterhegg.eu 172.31.17.151:31820;
eh11.easterhegg.eu 172.31.17.151:31820;
eh20.easterhegg.eu 172.31.17.151:31820;
www.eh20.easterhegg.eu 172.31.17.151:31820;
eh22.easterhegg.eu 172.31.17.159:31820;
easterheggxxxx.hamburg.ccc.de 172.31.17.151:31820;
eh2003.hamburg.ccc.de 172.31.17.151:31820;
www.eh2003.hamburg.ccc.de 172.31.17.151:31820;
easterhegg2003.hamburg.ccc.de 172.31.17.151:31820;
www.easterhegg2003.hamburg.ccc.de 172.31.17.151:31820;
eh2005.hamburg.ccc.de 172.31.17.151:31820;
www.eh2005.hamburg.ccc.de 172.31.17.151:31820;
easterhegg2005.hamburg.ccc.de 172.31.17.151:31820;
www.easterhegg2005.hamburg.ccc.de 172.31.17.151:31820;
eh2007.hamburg.ccc.de 172.31.17.151:31820;
www.eh2007.hamburg.ccc.de 172.31.17.151:31820;
eh07.hamburg.ccc.de 172.31.17.151:31820;
www.eh07.hamburg.ccc.de 172.31.17.151:31820;
easterhegg2007.hamburg.ccc.de 172.31.17.151:31820;
www.easterhegg2007.hamburg.ccc.de 172.31.17.151:31820;
eh2009.hamburg.ccc.de 172.31.17.151:31820;
www.eh2009.hamburg.ccc.de 172.31.17.151:31820;
eh09.hamburg.ccc.de 172.31.17.151:31820;
www.eh09.hamburg.ccc.de 172.31.17.151:31820;
easterhegg2009.hamburg.ccc.de 172.31.17.151:31820;
www.easterhegg2009.hamburg.ccc.de 172.31.17.151:31820;
eh2011.hamburg.ccc.de 172.31.17.151:31820;
www.eh2011.hamburg.ccc.de 172.31.17.151:31820;
eh11.hamburg.ccc.de 172.31.17.151:31820;
www.eh11.hamburg.ccc.de 172.31.17.151:31820;
easterhegg2011.hamburg.ccc.de 172.31.17.151:31820;
www.easterhegg2011.hamburg.ccc.de 172.31.17.151:31820;
eh20.hamburg.ccc.de 172.31.17.151:31820;
hacker.tours 172.31.17.151:31820;
staging.hacker.tours 172.31.17.151:31820;
woodpecker.hamburg.ccc.de 172.31.17.160:31820;
design.hamburg.ccc.de 172.31.17.162:31820;
hydra.hamburg.ccc.de 172.31.17.163:31820;
default "";
}
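# Illustrative only: to route ACME challenges for a newly added host, add a map
# entry pointing at that VM's challenge responder, e.g. (hypothetical host and
# address):
#   new-service.hamburg.ccc.de 172.31.17.200:31820;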
server {
listen 80 default_server;
resolver 212.12.50.158 192.76.134.90;
location /.well-known/acme-challenge/ {
proxy_pass http://$upstream_acme_challenge_host;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# This is http in any case.
proxy_set_header X-Forwarded-Proto http;
}
# Better safe than sorry.
# Don't do a permanent redirect to avoid ACME challenge pain (even though 443
# should still work).
location / {
return 307 https://$host$request_uri;
}
}

@@ -0,0 +1,128 @@
# This config is based on the standard `nginx.conf` shipping with the stable
# nginx package from the NGINX mirrors as of 2023-01.
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log notice;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
# Listen on port 443 as a reverse proxy and use PROXY Protocol for the
# upstreams.
stream {
resolver 212.12.50.158 192.76.134.90;
map $ssl_preread_server_name $address {
ccchoir.de ccchoir-intern.hamburg.ccc.de:8443;
www.ccchoir.de ccchoir-intern.hamburg.ccc.de:8443;
cloud.hamburg.ccc.de cloud-intern.hamburg.ccc.de:8443;
pad.hamburg.ccc.de pad-intern.hamburg.ccc.de:8443;
pretalx.hamburg.ccc.de pretalx-intern.hamburg.ccc.de:8443;
id.hamburg.ccc.de 172.31.17.144:8443;
invite.hamburg.ccc.de 172.31.17.144:8443;
keycloak-admin.hamburg.ccc.de 172.31.17.144:8444;
grafana.hamburg.ccc.de 172.31.17.145:8443;
wiki.ccchh.net 172.31.17.146:8443;
wiki.hamburg.ccc.de 172.31.17.146:8443;
onlyoffice.hamburg.ccc.de 172.31.17.147:8443;
hackertours.hamburg.ccc.de 172.31.17.151:8443;
staging.hackertours.hamburg.ccc.de 172.31.17.151:8443;
netbox.hamburg.ccc.de 172.31.17.149:8443;
matrix.hamburg.ccc.de 172.31.17.150:8443;
element.hamburg.ccc.de 172.31.17.151:8443;
branding-resources.hamburg.ccc.de 172.31.17.151:8443;
www.hamburg.ccc.de 172.31.17.151:8443;
hamburg.ccc.de 172.31.17.151:8443;
staging.hamburg.ccc.de 172.31.17.151:8443;
spaceapi.hamburg.ccc.de 172.31.17.151:8443;
tickets.hamburg.ccc.de 172.31.17.148:8443;
zammad.hamburg.ccc.de 172.31.17.152:8443;
c3cat.de 172.31.17.151:8443;
www.c3cat.de 172.31.17.151:8443;
staging.c3cat.de 172.31.17.151:8443;
git.hamburg.ccc.de 172.31.17.154:8443;
eh03.easterhegg.eu 172.31.17.151:8443;
eh05.easterhegg.eu 172.31.17.151:8443;
eh07.easterhegg.eu 172.31.17.151:8443;
eh09.easterhegg.eu 172.31.17.151:8443;
eh11.easterhegg.eu 172.31.17.151:8443;
eh20.easterhegg.eu 172.31.17.151:8443;
www.eh20.easterhegg.eu 172.31.17.151:8443;
eh22.easterhegg.eu 172.31.17.159:8443;
easterheggxxxx.hamburg.ccc.de 172.31.17.151:8443;
eh2003.hamburg.ccc.de 172.31.17.151:8443;
www.eh2003.hamburg.ccc.de 172.31.17.151:8443;
easterhegg2003.hamburg.ccc.de 172.31.17.151:8443;
www.easterhegg2003.hamburg.ccc.de 172.31.17.151:8443;
eh2005.hamburg.ccc.de 172.31.17.151:8443;
www.eh2005.hamburg.ccc.de 172.31.17.151:8443;
easterhegg2005.hamburg.ccc.de 172.31.17.151:8443;
www.easterhegg2005.hamburg.ccc.de 172.31.17.151:8443;
eh2007.hamburg.ccc.de 172.31.17.151:8443;
www.eh2007.hamburg.ccc.de 172.31.17.151:8443;
eh07.hamburg.ccc.de 172.31.17.151:8443;
www.eh07.hamburg.ccc.de 172.31.17.151:8443;
easterhegg2007.hamburg.ccc.de 172.31.17.151:8443;
www.easterhegg2007.hamburg.ccc.de 172.31.17.151:8443;
eh2009.hamburg.ccc.de 172.31.17.151:8443;
www.eh2009.hamburg.ccc.de 172.31.17.151:8443;
eh09.hamburg.ccc.de 172.31.17.151:8443;
www.eh09.hamburg.ccc.de 172.31.17.151:8443;
easterhegg2009.hamburg.ccc.de 172.31.17.151:8443;
www.easterhegg2009.hamburg.ccc.de 172.31.17.151:8443;
eh2011.hamburg.ccc.de 172.31.17.151:8443;
www.eh2011.hamburg.ccc.de 172.31.17.151:8443;
eh11.hamburg.ccc.de 172.31.17.151:8443;
www.eh11.hamburg.ccc.de 172.31.17.151:8443;
easterhegg2011.hamburg.ccc.de 172.31.17.151:8443;
www.easterhegg2011.hamburg.ccc.de 172.31.17.151:8443;
eh20.hamburg.ccc.de 172.31.17.151:8443;
hacker.tours 172.31.17.151:8443;
staging.hacker.tours 172.31.17.151:8443;
woodpecker.hamburg.ccc.de 172.31.17.160:8443;
design.hamburg.ccc.de 172.31.17.162:8443;
hydra.hamburg.ccc.de 172.31.17.163:8443;
}
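# Sketch of the contract with the upstreams above (illustrative, not part of
# this file): each target terminates TLS itself and accepts the PROXY protocol
# header on its custom port, e.g.:
#   server {
#       listen 8443 ssl http2 proxy_protocol;
#       set_real_ip_from 172.31.17.140;
#       real_ip_header proxy_protocol;
#       ...
#   }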
server {
listen 0.0.0.0:443;
listen [::]:443;
proxy_pass $address;
ssl_preread on;
proxy_protocol on;
}
server {
listen 0.0.0.0:8448;
listen [::]:8448;
proxy_pass 172.31.17.150:8448;
ssl_preread on;
proxy_protocol on;
}
}
# Still have the default http block, so the `acme_challenge.conf` works.
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
#tcp_nopush on;
keepalive_timeout 65;
#gzip on;
include /etc/nginx/conf.d/*.conf;
}

@@ -0,0 +1,48 @@
---
services:
database:
image: docker.io/library/postgres:15-alpine
environment:
- "POSTGRES_USER=pretix"
- "POSTGRES_PASSWORD={{ lookup("community.general.passwordstore", "noc/vm-secrets/chaosknoten/tickets/DB_PASSWORD", create=false, missing="error") }}"
- "POSTGRES_DB=pretix"
volumes:
- database:/var/lib/postgresql/data
networks:
backend:
restart: unless-stopped
redis:
image: docker.io/library/redis:7
ports:
- "6379:6379"
volumes:
- redis:/rdata
# run redis-server, save a snapshot every 60 seconds if there has been at least 1 write
command: ["redis-server", "--save", "60", "1"]
restart: unless-stopped
networks:
backend:
pretix:
image: docker.io/pretix/standalone:2024.8
command: ["all"]
ports:
- "8345:80"
volumes:
- ./configs/pretix.cfg:/etc/pretix/pretix.cfg
- pretix:/data
restart: unless-stopped
networks:
backend:
frontend:
volumes:
database: {}
pretix: {}
redis: {}
networks:
backend:
internal: true
frontend:

@@ -0,0 +1,26 @@
[pretix]
instance_name=CCCHH Tickets
url=https://tickets.hamburg.ccc.de
currency=EUR
datadir=/data
trust_x_forwarded_for=on
trust_x_forwarded_proto=on
[database]
backend=postgresql
name=pretix
user=pretix
password={{ lookup("community.general.passwordstore", "noc/vm-secrets/chaosknoten/tickets/DB_PASSWORD", create=false, missing="error") }}
host=database
[mail]
from=tickets@hamburg.ccc.de
host=cow-intern.hamburg.ccc.de
[redis]
location=redis://redis/0
sessions=true
[celery]
backend=redis://redis/0
broker=redis://redis/1

@@ -0,0 +1,48 @@
# partly generated 2022-01-08, Mozilla Guideline v5.6, nginx 1.17.7, OpenSSL 1.1.1k, intermediate configuration
# https://ssl-config.mozilla.org/#server=nginx&version=1.17.7&config=intermediate&openssl=1.1.1k&guideline=5.6
server {
# Listen on a custom port for the proxy protocol.
listen 8443 ssl http2 proxy_protocol;
# Make use of the ngx_http_realip_module to set the $remote_addr and
# $remote_port to the client address and client port, when using proxy
# protocol.
# First set our proxy protocol proxy as trusted.
set_real_ip_from 172.31.17.140;
# Then tell the realip_module to get the addresses from the proxy protocol
# header.
real_ip_header proxy_protocol;
server_name tickets.hamburg.ccc.de;
ssl_certificate /etc/letsencrypt/live/tickets.hamburg.ccc.de/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/tickets.hamburg.ccc.de/privkey.pem;
# verify chain of trust of OCSP response using Root CA and Intermediate certs
ssl_trusted_certificate /etc/letsencrypt/live/tickets.hamburg.ccc.de/chain.pem;
# HSTS (ngx_http_headers_module is required) (63072000 seconds)
add_header Strict-Transport-Security "max-age=63072000" always;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Port 443;
# This is https in any case.
proxy_set_header X-Forwarded-Proto https;
# Hide the X-Forwarded header.
proxy_hide_header X-Forwarded;
# Assume we are the only reverse proxy (well, one using the PROXY protocol,
# but that is transparent).
# Also provide "_hidden" for by, since it's not relevant.
proxy_set_header Forwarded "for=$remote_addr;proto=https;host=$host;by=_hidden";
location = / {
#return 302 https://wiki.hamburg.ccc.de/infrastructure:service-overview#tickets_pretix;
return 302 https://tickets.hamburg.ccc.de/hackertours/38c3/;
}
location / {
proxy_pass http://127.0.0.1:8345/;
}
}

@@ -0,0 +1,26 @@
# partly generated 2022-01-08, Mozilla Guideline v5.6, nginx 1.17.7, OpenSSL 1.1.1k, intermediate configuration
# https://ssl-config.mozilla.org/#server=nginx&version=1.17.7&config=intermediate&openssl=1.1.1k&guideline=5.6
server {
# Listen on a custom port for the proxy protocol.
listen 8443 ssl http2 proxy_protocol;
# Make use of the ngx_http_realip_module to set the $remote_addr and
# $remote_port to the client address and client port, when using proxy
# protocol.
# First set our proxy protocol proxy as trusted.
set_real_ip_from 172.31.17.140;
# Then tell the realip_module to get the addresses from the proxy protocol
# header.
real_ip_header proxy_protocol;
server_name wiki.ccchh.net;
ssl_certificate /etc/letsencrypt/live/wiki.ccchh.net/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/wiki.ccchh.net/privkey.pem;
# verify chain of trust of OCSP response using Root CA and Intermediate certs
ssl_trusted_certificate /etc/letsencrypt/live/wiki.ccchh.net/chain.pem;
# HSTS (ngx_http_headers_module is required) (63072000 seconds)
add_header Strict-Transport-Security "max-age=63072000" always;
return 302 https://wiki.hamburg.ccc.de$request_uri;
}

@@ -0,0 +1,85 @@
# partly generated 2022-01-08, Mozilla Guideline v5.6, nginx 1.17.7, OpenSSL 1.1.1k, intermediate configuration
# https://ssl-config.mozilla.org/#server=nginx&version=1.17.7&config=intermediate&openssl=1.1.1k&guideline=5.6
server {
# Listen on a custom port for the proxy protocol.
listen 8443 ssl http2 proxy_protocol;
# Make use of the ngx_http_realip_module to set the $remote_addr and
# $remote_port to the client address and client port, when using proxy
# protocol.
# First set our proxy protocol proxy as trusted.
set_real_ip_from 172.31.17.140;
# Then tell the realip_module to get the addresses from the proxy protocol
# header.
real_ip_header proxy_protocol;
server_name wiki.hamburg.ccc.de;
ssl_certificate /etc/letsencrypt/live/wiki.hamburg.ccc.de/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/wiki.hamburg.ccc.de/privkey.pem;
# verify chain of trust of OCSP response using Root CA and Intermediate certs
ssl_trusted_certificate /etc/letsencrypt/live/wiki.hamburg.ccc.de/chain.pem;
# HSTS (ngx_http_headers_module is required) (63072000 seconds)
add_header Strict-Transport-Security "max-age=63072000" always;
# Maximum file upload size is 20MB - change accordingly if needed
# See: https://www.dokuwiki.org/faq:uploadsize
client_max_body_size 20M;
client_body_buffer_size 128k;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# This is https in any case.
proxy_set_header X-Forwarded-Proto https;
root /var/www/dokuwiki;
index doku.php;
#Remember to comment the below out when you're installing, and uncomment it when done.
location ~ /(conf/|bin/|inc/|vendor/|install.php) { deny all; }
#Support for X-Accel-Redirect
location ~ ^/data/ { internal ; }
location ~ ^/lib.*\.(js|css|gif|png|ico|jpg|jpeg)$ {
expires 365d;
}
location / { try_files $uri $uri/ @dokuwiki; }
location @dokuwiki {
# Rewrites "doku.php/" out of the URLs if you set the userewrite setting to .htaccess in the DokuWiki config page
rewrite ^/_media/(.*) /lib/exe/fetch.php?media=$1 last;
rewrite ^/_detail/(.*) /lib/exe/detail.php?media=$1 last;
rewrite ^/_export/([^/]+)/(.*) /doku.php?do=export_$1&id=$2 last;
rewrite ^/(.*) /doku.php?id=$1&$args last;
}
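# Illustrative example of the rewrites above: a request for /_media/logo.png
# (hypothetical file) is internally rewritten to
# /lib/exe/fetch.php?media=logo.png.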
location ~ \.php$ {
try_files $uri $uri/ /doku.php;
include fastcgi_params;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
fastcgi_param REDIRECT_STATUS 200;
fastcgi_pass unix:/var/run/php/php-fpm-dokuwiki.sock;
}
### Wiki-Migration redirects:
# Redirect MediaWiki's Main_Page.
location = /Main_Page {
return 302 https://$host;
}
location /ChaosVPN {
return 302 https://oldwiki.hamburg.ccc.de$request_uri;
}
location ~ /EH(07|09|11) {
return 302 https://oldwiki.hamburg.ccc.de$request_uri;
}
location /Easter {
return 302 https://oldwiki.hamburg.ccc.de$request_uri;
}
}

@@ -0,0 +1,158 @@
---
{#
https://github.com/zammad/zammad-docker-compose
Docker Compose does not allow defining variables in the compose file (only in .env files), so we use Jinja variables instead
see https://github.com/zammad/zammad-docker-compose/blob/master/.env
#}
{%- set ELASTICSEARCH_VERSION = "8" | quote -%}
{%- set IMAGE_REPO = "ghcr.io/zammad/zammad" | quote -%}
{%- set MEMCACHE_SERVERS = "zammad-memcached:11211" | quote -%}
{%- set MEMCACHE_VERSION = "1.6-alpine" | quote -%}
{%- set POSTGRES_DB = "zammad_production" | quote -%}
{%- set POSTGRES_HOST = "zammad-postgresql" | quote -%}
{%- set POSTGRES_USER = "zammad" | quote -%}
{%- set POSTGRES_PASS = lookup("community.general.passwordstore", "noc/vm-secrets/chaosknoten/zammad/DB_PASSWORD", create=false, missing="error") | quote -%}
{%- set POSTGRES_PORT = "5432" | quote -%}
{%- set POSTGRES_VERSION = "15-alpine" | quote -%}
{%- set REDIS_URL = "redis://zammad-redis:6379" | quote -%}
{%- set REDIS_VERSION = "7-alpine" | quote -%}
{%- set RESTART = "always" | quote -%}
{%- set VERSION = "6" | quote -%}
x-shared:
zammad-service: &zammad-service
environment: &zammad-environment
MEMCACHE_SERVERS: {{ MEMCACHE_SERVERS }}
POSTGRESQL_DB: {{ POSTGRES_DB }}
POSTGRESQL_HOST: {{ POSTGRES_HOST }}
POSTGRESQL_USER: {{ POSTGRES_USER }}
POSTGRESQL_PASS: {{ POSTGRES_PASS }}
POSTGRESQL_PORT: {{ POSTGRES_PORT }}
REDIS_URL: {{ REDIS_URL }}
# Allow passing in these variables via .env:
AUTOWIZARD_JSON:
AUTOWIZARD_RELATIVE_PATH:
ELASTICSEARCH_ENABLED:
ELASTICSEARCH_HOST:
ELASTICSEARCH_PORT:
ELASTICSEARCH_SCHEMA:
ELASTICSEARCH_NAMESPACE:
ELASTICSEARCH_REINDEX:
ELASTICSEARCH_SSL_VERIFY:
NGINX_PORT:
NGINX_SERVER_NAME:
NGINX_SERVER_SCHEME: https
POSTGRESQL_DB_CREATE:
POSTGRESQL_OPTIONS:
RAILS_TRUSTED_PROXIES:
ZAMMAD_WEB_CONCURRENCY:
ZAMMAD_SESSION_JOBS:
ZAMMAD_PROCESS_SCHEDULED:
ZAMMAD_PROCESS_DELAYED_JOBS_WORKERS:
image: {{ IMAGE_REPO }}:{{ VERSION }}
restart: {{ RESTART }}
volumes:
- zammad-storage:/opt/zammad/storage
- zammad-var:/opt/zammad/var
depends_on:
- zammad-memcached
- zammad-postgresql
- zammad-redis
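# Illustrative note on the YAML merge key used by the services below: e.g.
#   zammad-railsserver:
#     <<: *zammad-service
#     command: ["zammad-railsserver"]
# pulls in the whole shared block above (environment, image, restart, volumes,
# depends_on) and only adds or overrides its own keys.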
services:
zammad-backup:
command: ["zammad-backup"]
depends_on:
- zammad-railsserver
- zammad-postgresql
entrypoint: /usr/local/bin/backup.sh
environment:
<<: *zammad-environment
BACKUP_TIME: "03:00"
HOLD_DAYS: "10"
TZ: Europe/Berlin
image: postgres:{{ POSTGRES_VERSION }}
restart: {{ RESTART }}
volumes:
- zammad-backup:/var/tmp/zammad
- zammad-storage:/opt/zammad/storage:ro
- zammad-var:/opt/zammad/var:ro
- ./scripts/backup.sh:/usr/local/bin/backup.sh:ro
zammad-elasticsearch:
image: bitnami/elasticsearch:{{ ELASTICSEARCH_VERSION }}
restart: {{ RESTART }}
volumes:
- elasticsearch-data:/bitnami/elasticsearch/data
zammad-init:
<<: *zammad-service
command: ["zammad-init"]
depends_on:
- zammad-postgresql
restart: on-failure
user: 0:0
volumes:
- zammad-storage:/opt/zammad/storage
- zammad-var:/opt/zammad/var
zammad-memcached:
command: memcached -m 256M
image: memcached:{{ MEMCACHE_VERSION }}
restart: {{ RESTART }}
zammad-nginx:
<<: *zammad-service
command: ["zammad-nginx"]
expose:
- "8080"
ports:
- "8080:8080"
depends_on:
- zammad-railsserver
volumes:
- zammad-var:/opt/zammad/var:ro # required for the zammad-ready check file
zammad-postgresql:
environment:
POSTGRES_DB: {{ POSTGRES_DB }}
POSTGRES_USER: {{ POSTGRES_USER }}
POSTGRES_PASSWORD: {{ POSTGRES_PASS }}
image: postgres:{{ POSTGRES_VERSION }}
restart: {{ RESTART }}
volumes:
- postgresql-data:/var/lib/postgresql/data
zammad-railsserver:
<<: *zammad-service
command: ["zammad-railsserver"]
zammad-redis:
image: redis:{{ REDIS_VERSION }}
restart: {{ RESTART }}
volumes:
- redis-data:/data
zammad-scheduler:
<<: *zammad-service
command: ["zammad-scheduler"]
volumes:
- /ansible_docker_compose/zammad-scheduler-database.yml:/opt/zammad/config/database.yml # workaround for connection pool issue
zammad-websocket:
<<: *zammad-service
command: ["zammad-websocket"]
volumes:
elasticsearch-data:
driver: local
postgresql-data:
driver: local
redis-data:
driver: local
zammad-backup:
driver: local
zammad-storage:
driver: local
zammad-var:
driver: local

@@ -0,0 +1,51 @@
# partly generated 2022-01-08, Mozilla Guideline v5.6, nginx 1.17.7, OpenSSL 1.1.1k, intermediate configuration
# https://ssl-config.mozilla.org/#server=nginx&version=1.17.7&config=intermediate&openssl=1.1.1k&guideline=5.6
server {
# Listen on a custom port for the proxy protocol.
listen 8443 ssl http2 proxy_protocol;
# Make use of the ngx_http_realip_module to set the $remote_addr and
# $remote_port to the client address and client port, when using proxy
# protocol.
# First set our proxy protocol proxy as trusted.
set_real_ip_from 172.31.17.140;
# Then tell the realip_module to get the addresses from the proxy protocol
# header.
real_ip_header proxy_protocol;
server_name zammad.hamburg.ccc.de;
ssl_certificate /etc/letsencrypt/live/zammad.hamburg.ccc.de/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/zammad.hamburg.ccc.de/privkey.pem;
# verify chain of trust of OCSP response using Root CA and Intermediate certs
ssl_trusted_certificate /etc/letsencrypt/live/zammad.hamburg.ccc.de/chain.pem;
# HSTS (ngx_http_headers_module is required) (63072000 seconds)
add_header Strict-Transport-Security "max-age=63072000" always;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Port 443;
# This is https in any case.
proxy_set_header X-Forwarded-Proto https;
# Hide the X-Forwarded header.
proxy_hide_header X-Forwarded;
# Assume we are the only reverse proxy (well, one using the PROXY protocol,
# but that is transparent).
# Also provide "_hidden" for by, since it's not relevant.
proxy_set_header Forwarded "for=$remote_addr;proto=https;host=$host;by=_hidden";
proxy_read_timeout 86400;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
proxy_set_header CLIENT_IP $remote_addr;
location ~/(ticket/zoom/.*) {
return 302 https://zammad.hamburg.ccc.de/#$1;
}
location / {
proxy_pass http://127.0.0.1:8080/;
}
}