Compare commits

..

1 commit

Author SHA1 Message Date
614eebadba WIP router(host): initial config 2025-02-23 18:49:19 +01:00
22 changed files with 25 additions and 321 deletions

View file

@@ -1,6 +0,0 @@
-# Used in deploy_hypervisor playbook.
-hypervisor__template_vm_config:
-  - name: STORAGE
-    value: nvme0
-  - name: BRIDGE
-    value: vmbr4

View file

@@ -12,28 +12,15 @@ docker_compose__configuration_files:
     content: "{{ lookup('ansible.builtin.file', 'resources/chaosknoten/grafana/docker_compose/prometheus_alerts.rules.yaml') }}"
   - name: alertmanager_alert_templates.tmpl
     content: "{{ lookup('ansible.builtin.file', 'resources/chaosknoten/grafana/docker_compose/alertmanager_alert_templates.tmpl') }}"
-  - name: loki.yaml
-    content: "{{ lookup('ansible.builtin.file', 'resources/chaosknoten/grafana/docker_compose/loki.yaml') }}"
 certbot__version_spec: ""
 certbot__acme_account_email_address: le-admin@hamburg.ccc.de
 certbot__certificate_domains:
   - "grafana.hamburg.ccc.de"
-  - "loki.hamburg.ccc.de"
 certbot__new_cert_commands:
   - "systemctl reload nginx.service"
 nginx__version_spec: ""
-nginx__deploy_redirect_conf: false
-nginx__deploy_htpasswds: true
-nginx__htpasswds:
-  - name: loki
-    content: "{{ lookup('ansible.builtin.template', 'resources/chaosknoten/grafana/nginx/loki.htpasswd.j2') }}"
 nginx__configurations:
-  - name: redirectv6
-    content: "{{ lookup('ansible.builtin.file', 'resources/chaosknoten/grafana/nginx/redirect.conf') }}"
   - name: grafana.hamburg.ccc.de
     content: "{{ lookup('ansible.builtin.file', 'resources/chaosknoten/grafana/nginx/grafana.hamburg.ccc.de.conf') }}"
-  - name: loki.hamburg.ccc.de
-    content: "{{ lookup('ansible.builtin.file', 'resources/chaosknoten/grafana/nginx/loki.hamburg.ccc.de.conf') }}"

View file

@@ -55,6 +55,9 @@ all:
     public-reverse-proxy:
       ansible_host: public-reverse-proxy.hamburg.ccc.de
       ansible_user: chaos
+    router:
+      ansible_host: router.hamburg.ccc.de
+      ansible_user: chaos
     wiki:
       ansible_host: wiki-intern.hamburg.ccc.de
       ansible_user: chaos
@@ -81,6 +84,7 @@ base_config_hosts:
     pad:
     pretalx:
     public-reverse-proxy:
+    router:
     tickets:
     wiki:
     zammad:
@@ -161,6 +165,7 @@ infrastructure_authorized_keys_hosts:
     pad:
     pretalx:
     public-reverse-proxy:
+    router:
     wiki:
     zammad:
 wiki_hosts:
@@ -171,9 +176,3 @@ netbox_hosts:
   hosts:
     eh22-netbox:
     netbox:
-proxmox_vm_template_hosts:
-  hosts:
-    chaosknoten:
-ansible_pull_hosts:
-  hosts:
-    netbox:
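
With the router host added to the inventory, a quick reachability check could look like this (a sketch; the inventory file path is assumed, since the compare view does not show file names):

ansible -i hosts.yaml router -m ansible.builtin.ping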

View file

@@ -6,11 +6,6 @@ all:
     authoritative-dns:
       ansible_host: authoritative-dns.z9.ccchh.net
       ansible_user: chaos
-    thinkcccore0:
-      ansible_host: thinkcccore0.z9.ccchh.net
-hypervisors:
-  hosts:
-    thinkcccore0:
 nginx_hosts:
   hosts:
     light:
@@ -24,6 +19,3 @@ infrastructure_authorized_keys_hosts:
   hosts:
     light:
     authoritative-dns:
-proxmox_vm_template_hosts:
-  hosts:
-    thinkcccore0:

View file

@@ -1,61 +0,0 @@
-- name: Ensure the VM template generation is set up
-  hosts: proxmox_vm_template_hosts
-  tasks:
-    - name: Ensure dependencies are present
-      ansible.builtin.apt:
-        name:
-          - git
-          - libguestfs-tools
-      become: true
-
-    - name: Ensure /usr/local/{lib,sbin} exist
-      ansible.builtin.file:
-        path: "{{ item }}"
-        state: directory
-        owner: root
-        group: root
-        mode: "0755"
-      become: true
-      loop:
-        - "/usr/local/lib/"
-        - "/usr/local/sbin/"
-
-    - name: Ensure the pve-template-vm repo is present
-      ansible.builtin.git:
-        repo: https://git.hamburg.ccc.de/CCCHH/pve-template-vm.git
-        dest: /usr/local/lib/pve-template-vm
-        version: main
-        force: true
-        depth: 1
-        single_branch: true
-        track_submodules: true
-      become: true
-
-    # /usr/local/sbin as the script uses qm, which is also found in /usr/sbin.
-    - name: Ensure symlink to build-proxmox-template exists in /usr/local/sbin
-      ansible.builtin.file:
-        src: /usr/local/lib/pve-template-vm/build-proxmox-template
-        dest: /usr/local/sbin/build-proxmox-template
-        state: link
-        owner: root
-        group: root
-        mode: '0755'
-      become: true
-
-    # This sets up a cron job running /usr/local/sbin/build-proxmox-template using the env vars defined in hypervisor__template_vm_config.
-    - name: Ensure cron job is present for building a fresh VM template every week on Friday 04:00
-      ansible.builtin.cron:
-        name: "ansible build proxmox template"
-        cron_file: ansible_build_proxmox_template
-        minute: 0
-        hour: 4
-        weekday: 5
-        user: root
-        job: "{% if hypervisor__template_vm_config is defined and hypervisor__template_vm_config | length > 0 %}\
-          /usr/bin/env \
-          {% for item in hypervisor__template_vm_config | default([]) %}\
-          {{ item.name }}=\"{{ item.value }}\" \
-          {% endfor %}\
-          {% endif %}\
-          /usr/local/sbin/build-proxmox-template"
-      become: true
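
For reference, given the hypervisor__template_vm_config host vars removed in the first file above (STORAGE=nvme0, BRIDGE=vmbr4), the Jinja2 job template would have rendered the cron entry roughly as follows (a reconstructed sketch, not copied from a host; Jinja2 whitespace handling may differ slightly):

# /etc/cron.d/ansible_build_proxmox_template
0 4 * * 5 root /usr/bin/env STORAGE="nvme0" BRIDGE="vmbr4" /usr/local/sbin/build-proxmox-template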

View file

@@ -20,25 +20,16 @@ Links & Resources
 {{ define "alert-message.telegram.ccchh" }}
 {{- if .Alerts.Firing }}
 <u>🔥{{ len .Alerts.Firing }} Alert(/s) Firing 🔥</u>
-{{- if le (len .Alerts.Firing) 6 }}
-{{- range .Alerts.Firing }}
-{{ template "alert-item.telegram.ccchh.internal" . }}
-{{- end }}
-{{- else }}
-There are too many alerts firing at once
-{{- end }}
-{{- end }}
-{{- end }}
-{{- if .Alerts.Resolved }}
-<u>✅{{ len .Alerts.Resolved }} Alert(/s) Resolved ✅</u>
-{{- if le (len .Alerts.Resolved) 6 }}
-{{- range .Alerts.Resolved }}
-{{ template "alert-item.telegram.ccchh.internal" . }}
-{{- end }}
-{{- else }}
-There are too many resolved alerts to list
-{{- end }}
-{{- end }}
+{{ range .Alerts.Firing -}}
+{{ template "alert-item.telegram.ccchh.internal" . }}
+{{- end }}
+{{- end }}
+{{- if .Alerts.Resolved }}
+<u>✅{{ len .Alerts.Resolved }} Alert(/s) Resolved ✅</u>
+{{ range .Alerts.Resolved -}}
+{{ template "alert-item.telegram.ccchh.internal" . }}
+{{- end }}
+{{- end }}
 {{- end }}
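
Net effect of this rewrite: the six-alert cap and the "There are too many …" fallback are gone, so every firing and resolved alert is rendered unconditionally. With two hypothetical firing alerts, the new template yields roughly this (the per-alert lines come from the alert-item.telegram.ccchh.internal sub-template, which lies outside this hunk):

<u>🔥2 Alert(/s) Firing 🔥</u>
<alert item 1>
<alert item 2>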

View file

@@ -55,19 +55,7 @@ services:
       - /dev/null:/etc/prometheus/pve.yml
-  loki:
-    image: grafana/loki:3
-    container_name: loki
-    ports:
-      - 13100:3100
-      - 19099:9099
-    restart: unless-stopped
-    volumes:
-      - ./configs/loki.yaml:/etc/loki/local-config.yaml
-      - loki_data:/var/loki
 volumes:
   graf_data: {}
   prom_data: {}
   alertmanager_data: {}
-  loki_data: {}

View file

@@ -7,15 +7,3 @@ datasources:
     isDefault: true
     access: proxy
     editable: true
-  - name: Loki
-    type: loki
-    url: http://loki:3100
-    access: proxy
-    editable: true
-    jsonData:
-      timeout: 60
-      maxLines: 3000
-      httpHeaderName1: "X-Scope-OrgID"
-    secureJsonData:
-      httpHeaderValue1: "chaos"

View file

@@ -1,52 +0,0 @@
-auth_enabled: true
-
-server:
-  http_listen_port: 3100
-  grpc_listen_port: 9099
-  log_level: warn
-
-limits_config:
-  retention_period: 14d
-
-common:
-  instance_addr: 127.0.0.1
-  path_prefix: /var/loki
-  storage:
-    filesystem:
-      chunks_directory: /var/loki/chunks
-      rules_directory: /var/loki/rules
-  replication_factor: 1
-  ring:
-    kvstore:
-      store: inmemory
-
-storage_config:
-  filesystem:
-    directory: /var/loki/chunks
-  index_queries_cache_config:
-    embedded_cache:
-      enabled: true
-      max_size_mb: 80
-      ttl: 30m
-
-schema_config:
-  configs:
-    - from: 2025-04-28
-      store: tsdb
-      object_store: filesystem
-      schema: v13
-      index:
-        prefix: index_
-        period: 24h
-
-chunk_store_config:
-  chunk_cache_config:
-    embedded_cache:
-      enabled: true
-      max_size_mb: 80
-      ttl: 30m
-  write_dedupe_cache_config:
-    embedded_cache:
-      enabled: true
-      max_size_mb: 80
-      ttl: 30m
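
Because auth_enabled: true switches Loki to multi-tenant mode, every API request must carry an X-Scope-OrgID header; the Grafana datasource above sent the tenant "chaos". A minimal sketch against the container port published in the compose file (13100 → 3100):

# readiness probe, no tenant header required
curl -s http://127.0.0.1:13100/ready
# list label names as tenant "chaos"
curl -s -H 'X-Scope-OrgID: chaos' http://127.0.0.1:13100/loki/api/v1/labels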

View file

@@ -1,71 +0,0 @@
-server {
-    # Wieske
-    allow 172.31.17.128/25;
-    allow 212.12.51.128/28;
-    allow 2a00:14b0:42:100::/56;
-    # Z9
-    allow 2a07:c480:0:100::/56;
-    allow 2a07:c481:1::/48;
-    deny all;
-
-    listen [2a00:14b0:4200:3380:0000:5a5f:1dbc:6a39]:9099 ssl http2;
-    listen 172.31.17.145:9099 ssl http2;
-
-    server_name loki.hamburg.ccc.de;
-
-    ssl_certificate /etc/letsencrypt/live/loki.hamburg.ccc.de/fullchain.pem;
-    ssl_certificate_key /etc/letsencrypt/live/loki.hamburg.ccc.de/privkey.pem;
-
-    auth_basic "loki";
-    auth_basic_user_file loki.htpasswd;
-
-    location / {
-        proxy_set_header Host $host;
-        proxy_set_header X-Forwarded-Host $host;
-        proxy_set_header X-Real-IP $remote_addr;
-        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
-        proxy_set_header X-Forwarded-Port 9099;
-        # This is https in any case.
-        proxy_set_header X-Forwarded-Proto https;
-        proxy_set_header X-Scope-OrgID $remote_user;
-        grpc_pass grpc://localhost:19009;
-    }
-}
-
-server {
-    # Wieske
-    allow 172.31.17.128/25;
-    allow 212.12.51.128/28;
-    allow 2a00:14b0:42:100::/56;
-    # Z9
-    allow 2a07:c480:0:100::/56;
-    allow 2a07:c481:1::/48;
-    deny all;
-
-    listen [2a00:14b0:4200:3380:0000:5a5f:1dbc:6a39]:3100 ssl http2;
-    listen 172.31.17.145:3100 ssl http2;
-
-    server_name loki.hamburg.ccc.de;
-    ssl_certificate /etc/letsencrypt/live/loki.hamburg.ccc.de/fullchain.pem;
-    ssl_certificate_key /etc/letsencrypt/live/loki.hamburg.ccc.de/privkey.pem;
-    # verify chain of trust of OCSP response using Root CA and Intermediate certs
-    ssl_trusted_certificate /etc/letsencrypt/live/loki.hamburg.ccc.de/chain.pem;
-
-    # HSTS (ngx_http_headers_module is required) (63072000 seconds)
-    add_header Strict-Transport-Security "max-age=63072000" always;
-
-    auth_basic "loki";
-    auth_basic_user_file loki.htpasswd;
-
-    location / {
-        proxy_set_header Host $host;
-        proxy_set_header X-Forwarded-Host $host;
-        proxy_set_header X-Real-IP $remote_addr;
-        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
-        proxy_set_header X-Forwarded-Port 3100;
-        # This is https in any case.
-        proxy_set_header X-Forwarded-Proto https;
-        proxy_set_header X-Scope-OrgID $remote_user;
-        proxy_pass http://127.0.0.1:13100;
-    }
-}
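
Putting the pieces together: this vhost terminated TLS, enforced basic auth, and passed the authenticated username on as the Loki tenant (X-Scope-OrgID: $remote_user). An external query, assuming the chaos user from the htpasswd template in the next file, would have looked roughly like:

curl -u 'chaos:<password>' 'https://loki.hamburg.ccc.de:3100/loki/api/v1/labels'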

View file

@@ -1 +0,0 @@
-chaos:{{ lookup("community.general.passwordstore", "noc/vm-secrets/chaosknoten/grafana/loki_chaos_basic_auth", create=false, missing="error") }}
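
The passwordstore entry referenced here presumably holds an htpasswd-compatible password hash rather than a plaintext password. A value of that shape can be produced with Apache's htpasswd tool, e.g. (hypothetical password; nginx accepts the crypt(3) schemes its libcrypt supports):

htpasswd -nbB chaos 'hypothetical-password'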

View file

@@ -1,14 +0,0 @@
-# partly generated 2022-01-08, Mozilla Guideline v5.6, nginx 1.17.7, OpenSSL 1.1.1k, intermediate configuration
-# https://ssl-config.mozilla.org/#server=nginx&version=1.17.7&config=intermediate&openssl=1.1.1k&guideline=5.6
-
-server {
-    listen 80 default_server;
-    listen [::]:80 default_server;
-
-    location / {
-        return 301 https://$host$request_uri;
-    }
-
-    location /.well-known/acme-challenge/ {
-        proxy_pass http://127.0.0.1:31820/.well-known/acme-challenge/;
-    }
-}
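
The ACME location above forwards challenges to 127.0.0.1:31820, which implies an ACME client listening there in standalone mode. With certbot, that would look roughly like this (an assumption for illustration; the actual invocation is managed by the certbot role, which this compare does not show):

certbot certonly --standalone --http-01-port 31820 -d grafana.hamburg.ccc.de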

View file

@@ -22,7 +22,7 @@
 services:
   keycloak:
-    image: git.hamburg.ccc.de/ccchh/oci-images/keycloak:26.1
+    image: git.hamburg.ccc.de/ccchh/oci-images/keycloak:26.0
     pull_policy: always
     restart: unless-stopped
     command: start --optimized
@@ -46,7 +46,7 @@ services:
       - "8080:8080"
   db:
-    image: postgres:15.12
+    image: postgres:15.2
     restart: unless-stopped
     networks:
       - keycloak

View file

@@ -43,7 +43,6 @@ server {
     allow 185.161.129.132/32; # z9
     allow 2a07:c480:0:100::/56; # z9
-    allow 2a07:c481:1::/48; # z9 new ipv6
     allow 213.240.180.39/32; # stbe home
     allow 2a01:170:118b::1/64; # stbe home
     deny all;

View file

@@ -53,7 +53,6 @@ services:
     restart: unless-stopped
     environment:
       PRETALX_DATA_DIR: /data
-      PRETALX_FILE_UPLOAD_LIMIT: 1000 # MB
       PRETALX_FILESYSTEM_MEDIA: /public/media
       PRETALX_FILESYSTEM_STATIC: /public/static
       PRETALX_SITE_URL: https://pretalx.hamburg.ccc.de

View file

@@ -71,7 +71,6 @@ map $host $upstream_acme_challenge_host {
     hydra.hamburg.ccc.de 172.31.17.163:31820;
     cfp.eh22.easterhegg.eu 172.31.17.157:31820;
     hub.eh22.easterhegg.eu eh22hub-intern.hamburg.ccc.de:31820;
-    hub-usercontent.eh22.easterhegg.eu eh22hub-intern.hamburg.ccc.de:31820;
     netbox.eh22.easterhegg.eu eh22-netbox-intern.hamburg.ccc.de:31820;
     default "";
 }

View file

@@ -89,7 +89,6 @@ stream {
         hydra.hamburg.ccc.de 172.31.17.163:8443;
         cfp.eh22.easterhegg.eu pretalx-intern.hamburg.ccc.de:8443;
         hub.eh22.easterhegg.eu eh22hub-intern.hamburg.ccc.de:8443;
-        hub-usercontent.eh22.easterhegg.eu eh22hub-intern.hamburg.ccc.de:8443;
         netbox.eh22.easterhegg.eu eh22-netbox-intern.hamburg.ccc.de:8443;
     }

View file

@@ -1,5 +1,3 @@
-- name: restart the ssh service
-  ansible.builtin.systemd:
-    name: ssh.service
-    state: restarted
+- name: reboot the system
   become: true
+  ansible.builtin.reboot:

View file

@@ -12,7 +12,8 @@
     group: root
     src: sshd_config.j2
   notify:
-    - restart the ssh service
+    # Reboot instead of just restarting the ssh service, since I don't know how Ansible reacts when it restarts the service it probably needs for the connection.
+    - reboot the system
 
 - name: deactivate short moduli
   ansible.builtin.shell:
@@ -31,4 +32,5 @@
   changed_when:
     - '"ansible-changed" in result.stdout'
   notify:
-    - restart the ssh service
+    # Reboot instead of just restarting the ssh service, since I don't know how Ansible reacts when it restarts the service it probably needs for the connection.
+    - reboot the system

View file

@@ -4,5 +4,3 @@ nginx__deploy_logging_conf: true
 nginx__configurations: [ ]
 nginx__use_custom_nginx_conf: false
 nginx__custom_nginx_conf: ""
-nginx__deploy_htpasswds: false
-nginx__htpasswds: [ ]

View file

@@ -34,19 +34,3 @@ argument_specs:
       type: str
       required: false
      default: ""
-    nginx__deploy_htpasswds:
-      type: bool
-      required: false
-      default: false
-    nginx__htpasswds:
-      type: list
-      elements: dict
-      required: false
-      default: [ ]
-      options:
-        name:
-          type: str
-          required: true
-        content:
-          type: str
-          required: true

View file

@@ -131,20 +131,6 @@
     label: "{{ item.name }}"
   notify: Restart nginx
 
-- name: Ensure all given htpasswd files are deployed
-  when: nginx__deploy_htpasswds
-  ansible.builtin.copy:
-    content: "{{ item.content }}"
-    dest: "/etc/nginx/{{ item.name }}.htpasswd"
-    mode: "0644"
-    owner: root
-    group: root
-  become: true
-  loop: "{{ nginx__htpasswds }}"
-  loop_control:
-    label: "{{ item.name }}"
-  notify: Restart nginx
-
 - name: Add names with suffixes from `nginx__configurations` to `nginx__config_files_to_exist` fact
   ansible.builtin.set_fact:
     nginx__config_files_to_exist: "{{ nginx__config_files_to_exist + [ item.name + '.conf' ] }}" # noqa: jinja[spacing]