Compare commits
1 commit
main...feature/ad
| Author | SHA1 | Date |
| --- | --- | --- |
| | 614eebadba | |
26 changed files with 25 additions and 475 deletions
collections
inventories
playbooks
resources/chaosknoten
  grafana
    docker_compose
    nginx
  keycloak
  pretalx/docker_compose
  public-reverse-proxy/nginx
roles
  deploy_ssh_server_config
  nginx
@@ -1,4 +1,3 @@
---
collections:
  - community.general
  - grafana.grafana.alloy

@@ -1,6 +0,0 @@
# Used in deploy_hypervisor playbook.
hypervisor__template_vm_config:
  - name: STORAGE
    value: nvme0
  - name: BRIDGE
    value: vmbr4

@@ -12,109 +12,15 @@ docker_compose__configuration_files:
    content: "{{ lookup('ansible.builtin.file', 'resources/chaosknoten/grafana/docker_compose/prometheus_alerts.rules.yaml') }}"
  - name: alertmanager_alert_templates.tmpl
    content: "{{ lookup('ansible.builtin.file', 'resources/chaosknoten/grafana/docker_compose/alertmanager_alert_templates.tmpl') }}"
  - name: loki.yaml
    content: "{{ lookup('ansible.builtin.file', 'resources/chaosknoten/grafana/docker_compose/loki.yaml') }}"

certbot__version_spec: ""
certbot__acme_account_email_address: le-admin@hamburg.ccc.de
certbot__certificate_domains:
  - "grafana.hamburg.ccc.de"
  - "loki.hamburg.ccc.de"
  - "metrics.hamburg.ccc.de"

certbot__new_cert_commands:
  - "systemctl reload nginx.service"

nginx__version_spec: ""
nginx__deploy_redirect_conf: false
nginx__deploy_htpasswds: true
nginx__htpasswds:
  - name: loki
    content: "{{ lookup('ansible.builtin.template', 'resources/chaosknoten/grafana/nginx/loki.htpasswd.j2') }}"
  - name: metrics
    content: "{{ lookup('ansible.builtin.template', 'resources/chaosknoten/grafana/nginx/metrics.htpasswd.j2') }}"
nginx__configurations:
  - name: redirectv6
    content: "{{ lookup('ansible.builtin.file', 'resources/chaosknoten/grafana/nginx/redirect.conf') }}"
  - name: grafana.hamburg.ccc.de
    content: "{{ lookup('ansible.builtin.file', 'resources/chaosknoten/grafana/nginx/grafana.hamburg.ccc.de.conf') }}"
  - name: loki.hamburg.ccc.de
    content: "{{ lookup('ansible.builtin.file', 'resources/chaosknoten/grafana/nginx/loki.hamburg.ccc.de.conf') }}"
  - name: metrics.hamburg.ccc.de
    content: "{{ lookup('ansible.builtin.file', 'resources/chaosknoten/grafana/nginx/metrics.hamburg.ccc.de.conf') }}"

alloy_config: |
  prometheus.remote_write "default" {
    endpoint {
      url = "https://metrics.hamburg.ccc.de/api/v1/write"
      basic_auth {
        username = "chaos"
        password = "{{ lookup('community.general.passwordstore', 'noc/vm-secrets/chaosknoten/grafana/metrics_chaos', create=false, missing='error') }}"
      }
    }
  }
  loki.write "default" {
    endpoint {
      url = "https://loki.hamburg.ccc.de/loki/api/v1/push"
      basic_auth {
        username = "chaos"
        password = "{{ lookup('community.general.passwordstore', 'noc/vm-secrets/chaosknoten/grafana/loki_chaos', create=false, missing='error') }}"
      }
    }
  }

  loki.relabel "journal" {
    forward_to = []

    rule {
      source_labels = ["__journal__systemd_unit"]
      target_label = "systemd_unit"
    }
    rule {
      source_labels = ["__journal__hostname"]
      target_label = "instance"
    }
    rule {
      source_labels = ["__journal__transport"]
      target_label = "systemd_transport"
    }
    rule {
      source_labels = ["__journal_syslog_identifier"]
      target_label = "syslog_identifier"
    }
    rule {
      source_labels = ["__journal_priority_keyword"]
      target_label = "level"
    }
  }

  loki.source.journal "read_journal" {
    forward_to = [loki.write.default.receiver]
    relabel_rules = loki.relabel.journal.rules
    format_as_json = true
    labels = {component = "loki.source.journal", host = "grafana", org = "ccchh"}
  }

  logging {
    level = "info"
  }
  prometheus.exporter.unix "local_system" { }

  prometheus.relabel "default" {
    forward_to = [prometheus.remote_write.default.receiver]
    rule {
      replacement = "org"
      target_label = "ccchh"
    }
    rule {
      target_label = "host"
      replacement = "grafana"
    }
  }

  prometheus.scrape "scrape_metrics" {
    targets = prometheus.exporter.unix.local_system.targets
    forward_to = [prometheus.relabel.default.receiver]
    scrape_interval = "15s"
  }

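A note on the two passwordstore lookups in the removed alloy_config: on the Ansible control node they resolve to the first line of the corresponding pass entry, so the basic-auth password Alloy used is simply what the following prints (illustrative; assumes access to the NOC password store):

    pass show noc/vm-secrets/chaosknoten/grafana/metrics_chaos | head -n 1
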
@@ -55,6 +55,9 @@ all:
    public-reverse-proxy:
      ansible_host: public-reverse-proxy.hamburg.ccc.de
      ansible_user: chaos
    router:
      ansible_host: router.hamburg.ccc.de
      ansible_user: chaos
    wiki:
      ansible_host: wiki-intern.hamburg.ccc.de
      ansible_user: chaos

@@ -81,6 +84,7 @@ base_config_hosts:
    pad:
    pretalx:
    public-reverse-proxy:
    router:
    tickets:
    wiki:
    zammad:

@@ -161,6 +165,7 @@ infrastructure_authorized_keys_hosts:
    pad:
    pretalx:
    public-reverse-proxy:
    router:
    wiki:
    zammad:
wiki_hosts:

@@ -171,12 +176,3 @@ netbox_hosts:
  hosts:
    eh22-netbox:
    netbox:
proxmox_vm_template_hosts:
  hosts:
    chaosknoten:
ansible_pull_hosts:
  hosts:
    netbox:
alloy_hosts:
  hosts:
    grafana:

@@ -6,11 +6,6 @@ all:
    authoritative-dns:
      ansible_host: authoritative-dns.z9.ccchh.net
      ansible_user: chaos
    thinkcccore0:
      ansible_host: thinkcccore0.z9.ccchh.net
hypervisors:
  hosts:
    thinkcccore0:
nginx_hosts:
  hosts:
    light:

@@ -24,6 +19,3 @@ infrastructure_authorized_keys_hosts:
  hosts:
    light:
    authoritative-dns:
proxmox_vm_template_hosts:
  hosts:
    thinkcccore0:

@@ -70,13 +70,5 @@
            - "o=Docker,n=${distro_codename}"
            - "o=nginx,n=${distro_codename}"

- name: Ensure Alloy is installed and Setup on alloy_hosts
  hosts: alloy_hosts
  become: true
  tasks:
    - name: Setup Alloy
      ansible.builtin.include_role:
        name: grafana.grafana.alloy

- name: Run ensure_eh22_styleguide_dir Playbook
  ansible.builtin.import_playbook: ensure_eh22_styleguide_dir.yaml

@@ -1,61 +0,0 @@
- name: Ensure the VM template generation is set up
  hosts: proxmox_vm_template_hosts
  tasks:
    - name: Ensure dependencies are present
      ansible.builtin.apt:
        name:
          - git
          - libguestfs-tools
      become: true

    - name: Ensure /usr/local/{lib,sbin} exist
      ansible.builtin.file:
        path: "{{ item }}"
        state: directory
        owner: root
        group: root
        mode: "0755"
      become: true
      loop:
        - "/usr/local/lib/"
        - "/usr/local/sbin/"

    - name: Ensure the pve-template-vm repo is present
      ansible.builtin.git:
        repo: https://git.hamburg.ccc.de/CCCHH/pve-template-vm.git
        dest: /usr/local/lib/pve-template-vm
        version: main
        force: true
        depth: 1
        single_branch: true
        track_submodules: true
      become: true

    # /usr/local/sbin as the script uses qm, which is also found in /usr/sbin.
    - name: Ensure symlink to build-proxmox-template exists in /usr/local/sbin
      ansible.builtin.file:
        src: /usr/local/lib/pve-template-vm/build-proxmox-template
        dest: /usr/local/sbin/build-proxmox-template
        state: link
        owner: root
        group: root
        mode: '0755'
      become: true

    # This sets up a cron job running /usr/local/sbin/build-proxmox-template using the env vars defined in hypervisor__template_vm_config.
    - name: Ensure cron job is present for building a fresh VM template every week on Friday 04:00
      ansible.builtin.cron:
        name: "ansible build proxmox template"
        cron_file: ansible_build_proxmox_template
        minute: 0
        hour: 4
        weekday: 5
        user: root
        job: "{% if hypervisor__template_vm_config is defined and hypervisor__template_vm_config | length > 0 %}\
          /usr/bin/env \
          {% for item in hypervisor__template_vm_config | default([]) %}\
          {{ item.name }}=\"{{ item.value }}\" \
          {% endfor %}\
          {% endif %}\
          /usr/local/sbin/build-proxmox-template"
      become: true

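For reference, with the hypervisor__template_vm_config values removed earlier in this diff (STORAGE=nvme0, BRIDGE=vmbr4), the Jinja2 job template above renders to roughly the following cron command (an illustration; exact whitespace depends on the template output):

    /usr/bin/env STORAGE="nvme0" BRIDGE="vmbr4" /usr/local/sbin/build-proxmox-template
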
@@ -20,25 +20,16 @@ Links & Resources


{{ define "alert-message.telegram.ccchh" }}
{{- if .Alerts.Firing }}
<u>🔥{{ len .Alerts.Firing }} Alert(/s) Firing 🔥</u>
{{- if le (len .Alerts.Firing) 6 }}
{{- range .Alerts.Firing }}
{{ template "alert-item.telegram.ccchh.internal" . }}
{{- end }}
{{- else }}
There are too many alerts firing at once
{{- end }}
{{- end }}

{{- if .Alerts.Resolved }}
<u>✅{{ len .Alerts.Resolved }} Alert(/s) Resolved ✅</u>
{{- if le (len .Alerts.Resolved) 6 }}
{{- range .Alerts.Resolved }}
{{ template "alert-item.telegram.ccchh.internal" . }}
{{- end }}
{{- else }}
There are too many resolved alerts to list
{{- end }}
{{- end }}
{{- if .Alerts.Firing }}
<u>🔥{{ len .Alerts.Firing }} Alert(/s) Firing 🔥</u>
{{ range .Alerts.Firing -}}
{{ template "alert-item.telegram.ccchh.internal" . }}
{{- end }}
{{- end }}
{{- if .Alerts.Resolved }}
<u>✅{{ len .Alerts.Resolved }} Alert(/s) Resolved ✅</u>
{{ range .Alerts.Resolved -}}
{{ template "alert-item.telegram.ccchh.internal" . }}
{{- end }}
{{- end }}
{{- end }}

@@ -6,7 +6,6 @@ services:
    container_name: prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--web.enable-remote-write-receiver'
    ports:
      - 9090:9090
    restart: unless-stopped

@@ -55,20 +54,8 @@ services:
    volumes:
      - /dev/null:/etc/prometheus/pve.yml

  loki:
    image: grafana/loki:3
    container_name: loki
    ports:
      - 13100:3100
      - 19099:9099
    restart: unless-stopped
    volumes:
      - ./configs/loki.yaml:/etc/loki/local-config.yaml
      - loki_data:/var/loki

volumes:
  graf_data: {}
  prom_data: {}
  alertmanager_data: {}
  loki_data: {}
  mimir_data: {}

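While the loki service above was still part of the Compose file, a quick local health check through its published port would have looked like this (illustrative, using Loki's built-in /ready endpoint; not part of the diff):

    docker compose up -d loki
    curl -s http://127.0.0.1:13100/ready
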
@@ -7,15 +7,3 @@ datasources:
    isDefault: true
    access: proxy
    editable: true
  - name: Loki
    type: loki
    url: http://loki:3100
    access: proxy
    editable: true
    jsonData:
      timeout: 60
      maxLines: 3000
      httpHeaderName1: "X-Scope-OrgID"
    secureJsonData:
      httpHeaderValue1: "chaos"

@@ -1,52 +0,0 @@
auth_enabled: true

server:
  http_listen_port: 3100
  grpc_listen_port: 9099
  log_level: warn

limits_config:
  retention_period: 14d

common:
  instance_addr: 127.0.0.1
  path_prefix: /var/loki
  storage:
    filesystem:
      chunks_directory: /var/loki/chunks
      rules_directory: /var/loki/rules
  replication_factor: 1
  ring:
    kvstore:
      store: inmemory

storage_config:
  filesystem:
    directory: /var/loki/chunks
  index_queries_cache_config:
    embedded_cache:
      enabled: true
      max_size_mb: 80
      ttl: 30m

schema_config:
  configs:
    - from: 2025-04-28
      store: tsdb
      object_store: filesystem
      schema: v13
      index:
        prefix: index_
        period: 24h

chunk_store_config:
  chunk_cache_config:
    embedded_cache:
      enabled: true
      max_size_mb: 80
      ttl: 30m
  write_dedupe_cache_config:
    embedded_cache:
      enabled: true
      max_size_mb: 80
      ttl: 30m

@@ -1,75 +0,0 @@
server {
    # Wieske
    allow 172.31.17.128/25;
    allow 212.12.51.128/28;
    allow 2a00:14b0:42:100::/56;
    allow 2a00:14b0:4200:3380::/64;
    # Z9
    allow 2a07:c480:0:100::/56;
    allow 2a07:c481:1::/48;

    deny all;

    listen [2a00:14b0:4200:3380:0000:5a5f:1dbc:6a39]:50051 ssl;
    listen 172.31.17.145:50051 ssl;
    http2 on;

    server_name loki.hamburg.ccc.de;

    ssl_certificate /etc/letsencrypt/live/loki.hamburg.ccc.de/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/loki.hamburg.ccc.de/privkey.pem;

    auth_basic "loki";
    auth_basic_user_file loki.htpasswd;
    location / {
        proxy_set_header Host $host;
        proxy_set_header X-Forwarded-Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Port 9099;
        # This is https in any case.
        proxy_set_header X-Forwarded-Proto https;
        proxy_set_header X-Scope-OrgID $remote_user;
        grpc_pass grpc://localhost:19099;
    }
}

server {
    # Wieske
    allow 172.31.17.128/25;
    allow 212.12.51.128/28;
    allow 2a00:14b0:42:100::/56;
    allow 2a00:14b0:4200:3380::/64;
    # Z9
    allow 2a07:c480:0:100::/56;
    allow 2a07:c481:1::/48;
    deny all;

    listen [2a00:14b0:4200:3380:0000:5a5f:1dbc:6a39]:443 ssl;
    listen 172.31.17.145:443 ssl;
    http2 on;

    server_name loki.hamburg.ccc.de;

    ssl_certificate /etc/letsencrypt/live/loki.hamburg.ccc.de/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/loki.hamburg.ccc.de/privkey.pem;
    # verify chain of trust of OCSP response using Root CA and Intermediate certs
    ssl_trusted_certificate /etc/letsencrypt/live/loki.hamburg.ccc.de/chain.pem;

    # HSTS (ngx_http_headers_module is required) (63072000 seconds)
    add_header Strict-Transport-Security "max-age=63072000" always;

    auth_basic "loki";
    auth_basic_user_file loki.htpasswd;

    location / {
        proxy_set_header Host $host;
        proxy_set_header X-Forwarded-Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        # This is https in any case.
        proxy_set_header X-Forwarded-Proto https;
        proxy_set_header X-Scope-OrgID $remote_user;
        proxy_pass http://127.0.0.1:13100;
    }
}

@@ -1 +0,0 @@
chaos:{{ lookup("community.general.passwordstore", "noc/vm-secrets/chaosknoten/grafana/loki_chaos_basic_auth", create=false, missing="error") }}

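Taken together, the removed vhost and htpasswd template make the basic-auth username double as the Loki tenant: nginx sets X-Scope-OrgID to $remote_user, so the chaos user reads and writes under the chaos tenant (matching httpHeaderValue1 in the Grafana datasource above). A rough end-to-end check against such a setup (illustrative; assumes the plaintext password from the loki_chaos passwordstore entry):

    curl -u "chaos:$(pass show noc/vm-secrets/chaosknoten/grafana/loki_chaos | head -n 1)" https://loki.hamburg.ccc.de/loki/api/v1/labels
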
@@ -1,55 +0,0 @@
server {
    # Wieske
    allow 172.31.17.128/25;
    allow 212.12.51.128/28;
    allow 2a00:14b0:42:100::/56;
    allow 2a00:14b0:4200:3380::/64;
    # Z9
    allow 2a07:c480:0:100::/56;
    allow 2a07:c481:1::/48;
    deny all;

    listen [2a00:14b0:4200:3380:0000:5a5f:1dbc:6a39]:443 ssl;
    listen 172.31.17.145:443 ssl;
    http2 on;

    server_name metrics.hamburg.ccc.de;

    client_body_buffer_size 32k;

    ssl_certificate /etc/letsencrypt/live/metrics.hamburg.ccc.de/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/metrics.hamburg.ccc.de/privkey.pem;
    # verify chain of trust of OCSP response using Root CA and Intermediate certs
    ssl_trusted_certificate /etc/letsencrypt/live/metrics.hamburg.ccc.de/chain.pem;

    # HSTS (ngx_http_headers_module is required) (63072000 seconds)
    add_header Strict-Transport-Security "max-age=63072000" always;

    auth_basic "metrics";
    auth_basic_user_file metrics.htpasswd;

    location /api/v1/write {
        proxy_set_header Host $host;
        proxy_set_header X-Forwarded-Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Port 3100;
        # This is https in any case.
        proxy_set_header X-Forwarded-Proto https;

        proxy_pass http://127.0.0.1:9090;
    }

    location /ready {
        rewrite ^ /-/ready break;

        proxy_set_header Host $host;
        proxy_set_header X-Forwarded-Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        # This is https in any case.
        proxy_set_header X-Forwarded-Proto https;

        proxy_pass http://127.0.0.1:9090;
    }
}

@@ -1 +0,0 @@
chaos:{{ lookup("community.general.passwordstore", "noc/vm-secrets/chaosknoten/grafana/metrics_chaos_basic_auth", create=false, missing="error") }}

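Both .htpasswd.j2 templates above render to a standard user:hash htpasswd line; a hash of that form can be generated with apache2-utils, for example (illustrative, not part of the diff):

    htpasswd -nbB chaos 'the-basic-auth-password'
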
@@ -1,14 +0,0 @@
# partly generated 2022-01-08, Mozilla Guideline v5.6, nginx 1.17.7, OpenSSL 1.1.1k, intermediate configuration
# https://ssl-config.mozilla.org/#server=nginx&version=1.17.7&config=intermediate&openssl=1.1.1k&guideline=5.6
server {
    listen 80 default_server;
    listen [::]:80 default_server;

    location / {
        return 301 https://$host$request_uri;
    }

    location /.well-known/acme-challenge/ {
        proxy_pass http://127.0.0.1:31820/.well-known/acme-challenge/;
    }
}

@@ -22,7 +22,7 @@

services:
  keycloak:
    image: git.hamburg.ccc.de/ccchh/oci-images/keycloak:26.1
    image: git.hamburg.ccc.de/ccchh/oci-images/keycloak:26.0
    pull_policy: always
    restart: unless-stopped
    command: start --optimized

@@ -46,7 +46,7 @@ services:
      - "8080:8080"

  db:
    image: postgres:15.12
    image: postgres:15.2
    restart: unless-stopped
    networks:
      - keycloak

@@ -43,7 +43,6 @@ server {

    allow 185.161.129.132/32; # z9
    allow 2a07:c480:0:100::/56; # z9
    allow 2a07:c481:1::/48; # z9 new ipv6
    allow 213.240.180.39/32; # stbe home
    allow 2a01:170:118b::1/64; # stbe home
    deny all;

@@ -53,7 +53,6 @@ services:
    restart: unless-stopped
    environment:
      PRETALX_DATA_DIR: /data
      PRETALX_FILE_UPLOAD_LIMIT: 1000 # MB
      PRETALX_FILESYSTEM_MEDIA: /public/media
      PRETALX_FILESYSTEM_STATIC: /public/static
      PRETALX_SITE_URL: https://pretalx.hamburg.ccc.de

@@ -71,7 +71,6 @@ map $host $upstream_acme_challenge_host {
    hydra.hamburg.ccc.de 172.31.17.163:31820;
    cfp.eh22.easterhegg.eu 172.31.17.157:31820;
    hub.eh22.easterhegg.eu eh22hub-intern.hamburg.ccc.de:31820;
    hub-usercontent.eh22.easterhegg.eu eh22hub-intern.hamburg.ccc.de:31820;
    netbox.eh22.easterhegg.eu eh22-netbox-intern.hamburg.ccc.de:31820;
    default "";
}

@@ -89,7 +89,6 @@ stream {
        hydra.hamburg.ccc.de 172.31.17.163:8443;
        cfp.eh22.easterhegg.eu pretalx-intern.hamburg.ccc.de:8443;
        hub.eh22.easterhegg.eu eh22hub-intern.hamburg.ccc.de:8443;
        hub-usercontent.eh22.easterhegg.eu eh22hub-intern.hamburg.ccc.de:8443;
        netbox.eh22.easterhegg.eu eh22-netbox-intern.hamburg.ccc.de:8443;
    }

@@ -1,5 +1,3 @@
- name: restart the ssh service
  ansible.builtin.systemd:
    name: ssh.service
    state: restarted
- name: reboot the system
  become: true
  ansible.builtin.reboot:

@@ -12,7 +12,8 @@
    group: root
    src: sshd_config.j2
  notify:
    - restart the ssh service
    # Reboot instead of just restarting the ssh service, since I don't know how Ansible reacts, when it restarts the service it probably needs for the connection.
    - reboot the system

- name: deactivate short moduli
  ansible.builtin.shell:

@@ -31,4 +32,5 @@
  changed_when:
    - '"ansible-changed" in result.stdout'
  notify:
    - restart the ssh service
    # Reboot instead of just restarting the ssh service, since I don't know how Ansible reacts, when it restarts the service it probably needs for the connection.
    - reboot the system

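Since the notify chain now reboots the host after sshd_config changes, it can be worth validating the rendered configuration on the target before the handler fires; a typical manual check (illustrative, not part of this role) is:

    sshd -t -f /etc/ssh/sshd_config
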
@@ -4,5 +4,3 @@ nginx__deploy_logging_conf: true
nginx__configurations: [ ]
nginx__use_custom_nginx_conf: false
nginx__custom_nginx_conf: ""
nginx__deploy_htpasswds: false
nginx__htpasswds: [ ]

@@ -34,19 +34,3 @@ argument_specs:
        type: str
        required: false
        default: ""
      nginx__deploy_htpasswds:
        type: bool
        required: false
        default: false
      nginx__htpasswds:
        type: list
        elements: dict
        required: false
        default: [ ]
        options:
          name:
            type: str
            required: true
          content:
            type: str
            required: true

@@ -131,20 +131,6 @@
    label: "{{ item.name }}"
  notify: Restart nginx

- name: Ensure all given htpasswd files are deployed
  when: nginx__deploy_htpasswds
  ansible.builtin.copy:
    content: "{{ item.content }}"
    dest: "/etc/nginx/{{ item.name }}.htpasswd"
    mode: "0644"
    owner: root
    group: root
  become: true
  loop: "{{ nginx__htpasswds }}"
  loop_control:
    label: "{{ item.name }}"
  notify: Restart nginx

- name: Add names with suffixes from `nginx__configurations` to `nginx__config_files_to_exist` fact
  ansible.builtin.set_fact:
    nginx__config_files_to_exist: "{{ nginx__config_files_to_exist + [ item.name + '.conf' ] }}" # noqa: jinja[spacing]