From 82d92d7c8faefc0b62fb539647b19dabe4d705e3 Mon Sep 17 00:00:00 2001 From: Claude Date: Mon, 6 Apr 2026 06:53:19 +0000 Subject: [PATCH 1/6] Add CLAUDE.md with project context and nix eval guidance https://claude.ai/code/session_01PwAXuaoJx7qD5FhVLsn7Sn --- CLAUDE.md | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 CLAUDE.md diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 0000000..706dc1c --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,30 @@ +# FredOS NixOS Configuration + +This is a NixOS flake-based configuration for multiple hosts: +- **FredOS-Gaming** — gaming desktop +- **FredOS-Mediaserver** — home media server +- **FredOS-Macbook** — MacBook laptop + +## Structure + +- `flake.nix` — flake inputs/outputs; all hosts use `nixpkgs` unstable +- `common.nix` — shared configuration across all hosts +- `hosts/` — per-host NixOS configuration modules +- `hosts/hardware/` — hardware-specific configuration +- `home-manager/` — Home Manager configuration (via NixOS module) +- `services/` — modular service definitions imported by hosts +- `settings/` — shared settings/variables + +## Code Evaluation + +Always validate Nix expressions with `nix eval` before committing. For example: + +```bash +# Evaluate a specific attribute to check for syntax/type errors +nix eval .#nixosConfigurations.FredOS-Gaming.config.system.stateVersion + +# Evaluate the full flake outputs to catch top-level errors +nix eval .#nixosConfigurations --apply builtins.attrNames +``` + +Use `nix flake check` for a broader check of the flake. From f493d09c50f780101d8e9216270c60700d0cebe7 Mon Sep 17 00:00:00 2001 From: Claude Date: Mon, 6 Apr 2026 07:00:50 +0000 Subject: [PATCH 2/6] Add CrowdSec setup readme for Docker-based deployment Documents API key generation, storage, bouncer registration, and useful cscli commands. 
https://claude.ai/code/session_01PwAXuaoJx7qD5FhVLsn7Sn --- services/crowdsec.md | 99 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 99 insertions(+) create mode 100644 services/crowdsec.md diff --git a/services/crowdsec.md b/services/crowdsec.md new file mode 100644 index 0000000..eb8ab76 --- /dev/null +++ b/services/crowdsec.md @@ -0,0 +1,99 @@ +# CrowdSec Setup + +CrowdSec runs as a Docker (OCI) container on FredOS-Mediaserver. The firewall +bouncer runs as a native NixOS service and talks to the containerised LAPI over +localhost:8080. + +## Why Docker? + +The `crowdsec` package in nixpkgs unstable is incomplete — the NixOS module +does not reliably set up the LAPI and hub collections. The official CrowdSec +Docker image is well maintained and always up to date. + +## Architecture + +``` +[journald / log sources] + | + [CrowdSec LAPI] ← Docker container (port 8080 on localhost) + | +[firewall-bouncer] ← Native NixOS service (nftables/iptables) +``` + +## Initial Setup (first deploy) + +After running `nixos-rebuild switch`, the CrowdSec container will be running +but the firewall bouncer has no API key yet. + +**1. Generate a bouncer API key:** + +```bash +docker exec crowdsec cscli bouncers add firewall-bouncer +``` + +Copy the key printed to stdout — it is only shown once. + +**2. Store the key on the machine:** + +```bash +sudo mkdir -p /var/lib/secrets +echo -n "PASTE_KEY_HERE" | sudo tee /var/lib/secrets/crowdsec-bouncer-key +sudo chmod 600 /var/lib/secrets/crowdsec-bouncer-key +sudo chown root:root /var/lib/secrets/crowdsec-bouncer-key +``` + +**3. Restart the bouncer:** + +```bash +sudo systemctl restart crowdsec-firewall-bouncer +sudo systemctl status crowdsec-firewall-bouncer +``` + +The key file at `/var/lib/secrets/crowdsec-bouncer-key` is not managed by Nix +and must be created manually on each new machine. It should never be committed +to git. + +## Re-registering the Bouncer + +If the bouncer loses its registration (e.g. 
after a container wipe): + +```bash +# Remove the old registration +docker exec crowdsec cscli bouncers delete firewall-bouncer + +# Re-add and capture the new key +docker exec crowdsec cscli bouncers add firewall-bouncer + +# Update the key file and restart +echo -n "NEW_KEY_HERE" | sudo tee /var/lib/secrets/crowdsec-bouncer-key +sudo systemctl restart crowdsec-firewall-bouncer +``` + +## Useful Commands + +```bash +# View active bouncers +docker exec crowdsec cscli bouncers list + +# View active decisions (bans) +docker exec crowdsec cscli decisions list + +# View alerts +docker exec crowdsec cscli alerts list + +# Install/update a collection +docker exec crowdsec cscli collections install crowdsecurity/sshd + +# View installed collections +docker exec crowdsec cscli collections list +``` + +## Persistent Data + +The container mounts the following host paths: + +| Host path | Container path | Purpose | +|----------------------------------|-------------------------|--------------------------| +| `/var/lib/crowdsec/data` | `/var/lib/crowdsec/data`| GeoIP DB, decisions, etc | +| `/var/lib/crowdsec/config` | `/etc/crowdsec` | Config, hub, bouncers | +| `/var/log/crowdsec` | `/var/log/crowdsec` | CrowdSec logs | From f5bb08d7dd12c9e73a74274edbd1105f4df38804 Mon Sep 17 00:00:00 2001 From: Claude Date: Mon, 6 Apr 2026 07:05:59 +0000 Subject: [PATCH 3/6] crowdsec: switch to Docker container with native firewall bouncer Replaces the incomplete nixpkgs NixOS module with the official CrowdSec Docker image for the LAPI, while keeping the firewall bouncer as a native systemd service. API key is read from /var/lib/secrets/crowdsec-bouncer-key at start time so it never enters the Nix store. 
https://claude.ai/code/session_01PwAXuaoJx7qD5FhVLsn7Sn --- services/crowdsec.nix | 125 ++++++++++++++++++++++++++++++++---------- 1 file changed, 97 insertions(+), 28 deletions(-) diff --git a/services/crowdsec.nix b/services/crowdsec.nix index 1feb160..4ec5510 100644 --- a/services/crowdsec.nix +++ b/services/crowdsec.nix @@ -1,43 +1,112 @@ { config, lib, pkgs, ... }: + +let + # Acquisition config is written to the host config dir before the container + # starts, so it persists across container restarts and reflects Nix config. + acquisYaml = '' + - source: journalctl + journalctl_filter: + - "-u" + - "sshd" + labels: + type: syslog + ''; + + # Generates /run/crowdsec-bouncer/config.yaml at service start, injecting the + # API key from /var/lib/secrets/crowdsec-bouncer-key without it ever entering + # the Nix store. See services/crowdsec.md for key setup instructions. + bouncerPreStart = pkgs.writeShellScript "crowdsec-bouncer-prestart" '' + set -euo pipefail + + KEY_FILE=/var/lib/secrets/crowdsec-bouncer-key + if [ ! -f "$KEY_FILE" ]; then + echo "ERROR: $KEY_FILE not found. See services/crowdsec.md for setup steps." 
>&2 + exit 1 + fi + + API_KEY=$(cat "$KEY_FILE") + + cat > /run/crowdsec-bouncer/config.yaml << EOF + mode: nftables + pid_dir: /run/crowdsec-bouncer/ + update_frequency: 10s + log_mode: stdout + log_level: info + api_url: http://127.0.0.1:8080 + api_key: $API_KEY + disable_ipv6: false + deny_action: DROP + deny_log: false + nftables: + ipv4: + enabled: true + set-only: false + table: crowdsec + chain: crowdsec-chain + ipv6: + enabled: true + set-only: false + table: crowdsec6 + chain: crowdsec-chain6 + EOF + ''; +in { config = lib.mkIf (config.networking.hostName == "FredOS-Mediaserver") { - services.crowdsec = { - enable = true; - autoUpdateService = true; - # Install detection collections on first boot - hub.collections = [ "crowdsecurity/linux" "crowdsecurity/sshd" ]; + virtualisation.docker.enable = true; + virtualisation.oci-containers.backend = "docker"; - settings = { - # Enable the Local API server (required for bouncer registration) - general.api.server.enable = true; - # Where the LAPI client credentials will be written on first boot - lapi.credentialsFile = "/var/lib/crowdsec/state/lapi-credentials.yaml"; - }; - - localConfig.acquisitions = [ - # SSH brute-force detection - { - source = "journalctl"; - journalctl_filter = [ "-u" "sshd" ]; - labels.type = "syslog"; - } + # CrowdSec LAPI runs as a Docker container. + # Collections are installed on first boot via the COLLECTIONS env var. + # Journals are mounted read-only so CrowdSec can run journalctl inside the container. 
+ virtualisation.oci-containers.containers.crowdsec = { + image = "crowdsecurity/crowdsec:latest"; + ports = [ "127.0.0.1:8080:8080" ]; + volumes = [ + "/var/lib/crowdsec/data:/var/lib/crowdsec/data" + "/var/lib/crowdsec/config:/etc/crowdsec" + "/var/log/journal:/var/log/journal:ro" + "/run/log/journal:/run/log/journal:ro" + "/etc/machine-id:/etc/machine-id:ro" ]; + environment = { + COLLECTIONS = "crowdsecurity/linux crowdsecurity/sshd"; + }; }; - # The bouncer-register service uses raw cscli (no -c flag), so it looks for - # config at /etc/crowdsec/config.yaml. Symlink the Nix-generated config there. + # Write acquisition config into the host config dir before the container starts. + systemd.services.docker-crowdsec.preStart = '' + mkdir -p /var/lib/crowdsec/config/acquis.d + cat > /var/lib/crowdsec/config/acquis.d/nixos.yaml << 'ACQUIS' + ${acquisYaml} + ACQUIS + ''; + systemd.tmpfiles.rules = [ - "L+ /etc/crowdsec/config.yaml - - - - ${(pkgs.formats.yaml { }).generate "crowdsec.yaml" config.services.crowdsec.settings.general}" + "d /var/lib/crowdsec/data 0750 root root -" + "d /var/lib/crowdsec/config 0750 root root -" + "d /var/lib/secrets 0700 root root -" ]; - # Ensure /var/lib/crowdsec exists before crowdsec starts (race with tmpfiles-resetup) - systemd.services.crowdsec.after = [ "systemd-tmpfiles-resetup.service" ]; + # Firewall bouncer runs natively. API key is injected at start time from + # /var/lib/secrets/crowdsec-bouncer-key — see services/crowdsec.md. 
+ systemd.services.crowdsec-firewall-bouncer = { + description = "CrowdSec nftables firewall bouncer"; + after = [ "network.target" "docker-crowdsec.service" ]; + wants = [ "docker-crowdsec.service" ]; + wantedBy = [ "multi-user.target" ]; - # Firewall bouncer — auto-registers to local CrowdSec LAPI - services.crowdsec-firewall-bouncer = { - enable = true; - settings.api_url = "http://127.0.0.1:8080"; + serviceConfig = { + Type = "simple"; + RuntimeDirectory = "crowdsec-bouncer"; + ExecStartPre = bouncerPreStart; + ExecStart = "${pkgs.crowdsec-firewall-bouncer}/bin/cs-firewall-bouncer -c /run/crowdsec-bouncer/config.yaml"; + Restart = "on-failure"; + RestartSec = "5s"; + AmbientCapabilities = [ "CAP_NET_ADMIN" "CAP_NET_RAW" ]; + CapabilityBoundingSet = [ "CAP_NET_ADMIN" "CAP_NET_RAW" ]; + }; }; }; } From 16363dc887adca5daa5dfae838f9b85e48d9a4f5 Mon Sep 17 00:00:00 2001 From: Claude Date: Mon, 6 Apr 2026 08:21:23 +0000 Subject: [PATCH 4/6] fail2ban: add jails for SSH, nginx proxy manager, and Jellyfin Replaces bare enable flag with a dedicated service module covering: - SSH brute force via journald - Nginx Proxy Manager auth failures via Docker log files - Jellyfin auth failures via journald Includes incremental ban times (up to 1 week) and LAN ignore rules. 
https://claude.ai/code/session_01PwAXuaoJx7qD5FhVLsn7Sn --- common.nix | 1 + hosts/FredOS-Mediaserver.nix | 2 - services/fail2ban.nix | 75 ++++++++++++++++++++++++++++++++++++ 3 files changed, 76 insertions(+), 2 deletions(-) create mode 100644 services/fail2ban.nix diff --git a/common.nix b/common.nix index 416895e..b484cd0 100644 --- a/common.nix +++ b/common.nix @@ -30,6 +30,7 @@ ./services/bazarr.nix ./services/cloudflare-ddns.nix ./services/crowdsec.nix + ./services/fail2ban.nix ]; ### Make build time quicker diff --git a/hosts/FredOS-Mediaserver.nix b/hosts/FredOS-Mediaserver.nix index a27c18a..72ed92a 100644 --- a/hosts/FredOS-Mediaserver.nix +++ b/hosts/FredOS-Mediaserver.nix @@ -18,8 +18,6 @@ yt-dlp ]; - services.fail2ban.enable = true; - # Enable Docker virtualisation.docker.enable = true; diff --git a/services/fail2ban.nix b/services/fail2ban.nix new file mode 100644 index 0000000..c2d3057 --- /dev/null +++ b/services/fail2ban.nix @@ -0,0 +1,75 @@ +{ config, lib, pkgs, ... }: +{ + config = lib.mkIf (config.networking.hostName == "FredOS-Mediaserver") { + + services.fail2ban = { + enable = true; + + # Default ban settings (overridable per jail) + maxretry = 5; + bantime = "1h"; + + # Progressively longer bans for repeat offenders, up to 1 week + bantime-increment = { + enable = true; + multiplier = "1 2 4 8 16 32 64"; + maxtime = "168h"; + overalljails = true; + }; + + # Never ban local network traffic + ignoreIP = [ + "127.0.0.1/8" + "::1" + "192.168.0.0/16" + "10.0.0.0/8" + ]; + + jails = { + + # SSH brute force — uses built-in sshd filter via journald + sshd = { + settings = { + enabled = true; + filter = "sshd"; + maxretry = 5; + bantime = "1h"; + }; + }; + + # Nginx Proxy Manager — watches Docker-mounted log files + # Catches repeated 401/403 responses (auth failures, bad requests) + nginx-proxy-manager = { + settings = { + enabled = true; + filter = "nginx-http-auth"; + logpath = "/home/fred/docker/nginx-proxy-manager/data/logs/*.log"; + maxretry = 
10;
+          bantime = "1h";
+        };
+      };
+
+      # Jellyfin auth failures — uses journald backend
+      jellyfin = {
+        settings = {
+          enabled = true;
+          backend = "systemd";
+          journalmatch = "_SYSTEMD_UNIT=jellyfin.service";
+          maxretry = 5;
+          bantime = "2h";
+        };
+      };
+
+    };
+  };
+
+  # Custom Jellyfin filter — matches failed auth log lines from the journal
+  environment.etc."fail2ban/filter.d/jellyfin.conf".text = ''
+    [Definition]
+    failregex = ^.*Authentication request for .* has been denied \(IP: "<HOST>"\).*$
+                ^.*Error processing request from remote IP Address <HOST>.*$
+    ignoreregex =
+  '';
+
+  };
+}

From 4935d42e48aa1fdcdca224fa4da Mon Sep 17 00:00:00 2001
From: Claude
Date: Mon, 6 Apr 2026 08:24:18 +0000
Subject: [PATCH 5/6] fail2ban: add jails for Sonarr, Radarr, Prowlarr, Bazarr, qBittorrent

All services with openFirewall = true are now covered. The *arr suite
shares a single filter since they use the same logging codebase.

Review fix: every custom failregex (arr-apps, bazarr, qbittorrent here,
plus the jellyfin filter from the previous commit) now carries the
mandatory <HOST> capture tag. fail2ban refuses to load a filter whose
failregex has no host group, so without this none of these jails would
ever start. The bazarr patterns are best-effort placements of <HOST> —
verify against real Bazarr log lines.

https://claude.ai/code/session_01PwAXuaoJx7qD5FhVLsn7Sn
---
 services/fail2ban.nix | 87 ++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 82 insertions(+), 5 deletions(-)

diff --git a/services/fail2ban.nix b/services/fail2ban.nix
index c2d3057..56fc95f 100644
--- a/services/fail2ban.nix
+++ b/services/fail2ban.nix
@@ -27,7 +27,7 @@
     jails = {
 
-      # SSH brute force — uses built-in sshd filter via journald
+      # SSH brute force — built-in sshd filter via journald
       sshd = {
         settings = {
           enabled = true;
@@ -37,8 +37,7 @@
         };
       };
 
-      # Nginx Proxy Manager — watches Docker-mounted log files
-      # Catches repeated 401/403 responses (auth failures, bad requests)
+      # Nginx Proxy Manager — watches Docker-mounted log files for 401/403s
       nginx-proxy-manager = {
         settings = {
           enabled = true;
@@ -49,7 +48,7 @@
         };
       };
 
-      # Jellyfin auth failures — uses journald backend
+      # Jellyfin auth failures — journald
       jellyfin = {
         settings = {
           enabled = true;
@@ -60,10 +59,88 @@
         };
       };
 
+      # Sonarr — log files at dataDir/logs/
+      sonarr = {
+        settings = {
+          enabled = true;
+          filter = "arr-apps";
+          logpath = "/var/lib/sonarr/logs/*.txt";
+          maxretry = 5;
+          bantime = "1h";
+        };
+      };
+
+      # Radarr — log files at dataDir/logs/
+      radarr = {
+        settings = {
+          enabled = true;
+          filter = "arr-apps";
+          logpath = "/var/lib/radarr/logs/*.txt";
+          maxretry = 5;
+          bantime = "1h";
+        };
+      };
+
+      # Prowlarr — log files at dataDir/logs/
+      prowlarr = {
+        settings = {
+          enabled = true;
+          filter = "arr-apps";
+          logpath = "/var/lib/prowlarr/logs/*.txt";
+          maxretry = 5;
+          bantime = "1h";
+        };
+      };
+
+      # Bazarr — log files at dataDir/log/
+      bazarr = {
+        settings = {
+          enabled = true;
+          filter = "bazarr";
+          logpath = "/var/lib/bazarr/log/*.txt";
+          maxretry = 5;
+          bantime = "1h";
+        };
+      };
+
+      # qBittorrent-nox — watches journald for web UI login failures
+      qbittorrent = {
+        settings = {
+          enabled = true;
+          filter = "qbittorrent";
+          backend = "systemd";
+          journalmatch = "_SYSTEMD_UNIT=qbittorrent-nox.service";
+          maxretry = 5;
+          bantime = "1h";
+        };
+      };
+
     };
   };
 
-  # Custom Jellyfin filter — matches failed auth log lines from the journal
+  # Shared filter for Sonarr, Radarr, Prowlarr — they all use the same *arr codebase
+  environment.etc."fail2ban/filter.d/arr-apps.conf".text = ''
+    [Definition]
+    failregex = .*Auth-Failure ip <HOST>.*
+    ignoreregex =
+  '';
+
+  # Bazarr (Python/Flask) auth failure filter
+  environment.etc."fail2ban/filter.d/bazarr.conf".text = ''
+    [Definition]
+    failregex = .*<HOST>.*login attempt.*
+                .*<HOST>.*unauthorized.*
+    ignoreregex =
+  '';
+
+  # qBittorrent web UI login failure filter
+  environment.etc."fail2ban/filter.d/qbittorrent.conf".text = ''
+    [Definition]
+    failregex = .*WebAPI login failure.*remote IP: <HOST>
+    ignoreregex =
+  '';
+
+  # Jellyfin filter
   environment.etc."fail2ban/filter.d/jellyfin.conf".text = ''
     [Definition]
     failregex = ^.*Authentication request for .* has been denied \(IP: "<HOST>"\).*$

From 6b432f3bc6409b Mon Sep 17 00:00:00 2001
From: Claude
Date: Mon, 6 Apr 2026 08:28:08 +0000
Subject: [PATCH 6/6]
=?UTF-8?q?Remove=20CrowdSec=20=E2=80=94=20replaced=20?= =?UTF-8?q?by=20fail2ban?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit https://claude.ai/code/session_01PwAXuaoJx7qD5FhVLsn7Sn --- common.nix | 1 - services/crowdsec.md | 99 ------------------------------------- services/crowdsec.nix | 112 ------------------------------------------ 3 files changed, 212 deletions(-) delete mode 100644 services/crowdsec.md delete mode 100644 services/crowdsec.nix diff --git a/common.nix b/common.nix index b484cd0..42d089d 100644 --- a/common.nix +++ b/common.nix @@ -29,7 +29,6 @@ ./services/jellyfin.nix ./services/bazarr.nix ./services/cloudflare-ddns.nix - ./services/crowdsec.nix ./services/fail2ban.nix ]; diff --git a/services/crowdsec.md b/services/crowdsec.md deleted file mode 100644 index eb8ab76..0000000 --- a/services/crowdsec.md +++ /dev/null @@ -1,99 +0,0 @@ -# CrowdSec Setup - -CrowdSec runs as a Docker (OCI) container on FredOS-Mediaserver. The firewall -bouncer runs as a native NixOS service and talks to the containerised LAPI over -localhost:8080. - -## Why Docker? - -The `crowdsec` package in nixpkgs unstable is incomplete — the NixOS module -does not reliably set up the LAPI and hub collections. The official CrowdSec -Docker image is well maintained and always up to date. - -## Architecture - -``` -[journald / log sources] - | - [CrowdSec LAPI] ← Docker container (port 8080 on localhost) - | -[firewall-bouncer] ← Native NixOS service (nftables/iptables) -``` - -## Initial Setup (first deploy) - -After running `nixos-rebuild switch`, the CrowdSec container will be running -but the firewall bouncer has no API key yet. - -**1. Generate a bouncer API key:** - -```bash -docker exec crowdsec cscli bouncers add firewall-bouncer -``` - -Copy the key printed to stdout — it is only shown once. - -**2. 
Store the key on the machine:** - -```bash -sudo mkdir -p /var/lib/secrets -echo -n "PASTE_KEY_HERE" | sudo tee /var/lib/secrets/crowdsec-bouncer-key -sudo chmod 600 /var/lib/secrets/crowdsec-bouncer-key -sudo chown root:root /var/lib/secrets/crowdsec-bouncer-key -``` - -**3. Restart the bouncer:** - -```bash -sudo systemctl restart crowdsec-firewall-bouncer -sudo systemctl status crowdsec-firewall-bouncer -``` - -The key file at `/var/lib/secrets/crowdsec-bouncer-key` is not managed by Nix -and must be created manually on each new machine. It should never be committed -to git. - -## Re-registering the Bouncer - -If the bouncer loses its registration (e.g. after a container wipe): - -```bash -# Remove the old registration -docker exec crowdsec cscli bouncers delete firewall-bouncer - -# Re-add and capture the new key -docker exec crowdsec cscli bouncers add firewall-bouncer - -# Update the key file and restart -echo -n "NEW_KEY_HERE" | sudo tee /var/lib/secrets/crowdsec-bouncer-key -sudo systemctl restart crowdsec-firewall-bouncer -``` - -## Useful Commands - -```bash -# View active bouncers -docker exec crowdsec cscli bouncers list - -# View active decisions (bans) -docker exec crowdsec cscli decisions list - -# View alerts -docker exec crowdsec cscli alerts list - -# Install/update a collection -docker exec crowdsec cscli collections install crowdsecurity/sshd - -# View installed collections -docker exec crowdsec cscli collections list -``` - -## Persistent Data - -The container mounts the following host paths: - -| Host path | Container path | Purpose | -|----------------------------------|-------------------------|--------------------------| -| `/var/lib/crowdsec/data` | `/var/lib/crowdsec/data`| GeoIP DB, decisions, etc | -| `/var/lib/crowdsec/config` | `/etc/crowdsec` | Config, hub, bouncers | -| `/var/log/crowdsec` | `/var/log/crowdsec` | CrowdSec logs | diff --git a/services/crowdsec.nix b/services/crowdsec.nix deleted file mode 100644 index 
4ec5510..0000000 --- a/services/crowdsec.nix +++ /dev/null @@ -1,112 +0,0 @@ -{ config, lib, pkgs, ... }: - -let - # Acquisition config is written to the host config dir before the container - # starts, so it persists across container restarts and reflects Nix config. - acquisYaml = '' - - source: journalctl - journalctl_filter: - - "-u" - - "sshd" - labels: - type: syslog - ''; - - # Generates /run/crowdsec-bouncer/config.yaml at service start, injecting the - # API key from /var/lib/secrets/crowdsec-bouncer-key without it ever entering - # the Nix store. See services/crowdsec.md for key setup instructions. - bouncerPreStart = pkgs.writeShellScript "crowdsec-bouncer-prestart" '' - set -euo pipefail - - KEY_FILE=/var/lib/secrets/crowdsec-bouncer-key - if [ ! -f "$KEY_FILE" ]; then - echo "ERROR: $KEY_FILE not found. See services/crowdsec.md for setup steps." >&2 - exit 1 - fi - - API_KEY=$(cat "$KEY_FILE") - - cat > /run/crowdsec-bouncer/config.yaml << EOF - mode: nftables - pid_dir: /run/crowdsec-bouncer/ - update_frequency: 10s - log_mode: stdout - log_level: info - api_url: http://127.0.0.1:8080 - api_key: $API_KEY - disable_ipv6: false - deny_action: DROP - deny_log: false - nftables: - ipv4: - enabled: true - set-only: false - table: crowdsec - chain: crowdsec-chain - ipv6: - enabled: true - set-only: false - table: crowdsec6 - chain: crowdsec-chain6 - EOF - ''; -in -{ - config = lib.mkIf (config.networking.hostName == "FredOS-Mediaserver") { - - virtualisation.docker.enable = true; - virtualisation.oci-containers.backend = "docker"; - - # CrowdSec LAPI runs as a Docker container. - # Collections are installed on first boot via the COLLECTIONS env var. - # Journals are mounted read-only so CrowdSec can run journalctl inside the container. 
- virtualisation.oci-containers.containers.crowdsec = { - image = "crowdsecurity/crowdsec:latest"; - ports = [ "127.0.0.1:8080:8080" ]; - volumes = [ - "/var/lib/crowdsec/data:/var/lib/crowdsec/data" - "/var/lib/crowdsec/config:/etc/crowdsec" - "/var/log/journal:/var/log/journal:ro" - "/run/log/journal:/run/log/journal:ro" - "/etc/machine-id:/etc/machine-id:ro" - ]; - environment = { - COLLECTIONS = "crowdsecurity/linux crowdsecurity/sshd"; - }; - }; - - # Write acquisition config into the host config dir before the container starts. - systemd.services.docker-crowdsec.preStart = '' - mkdir -p /var/lib/crowdsec/config/acquis.d - cat > /var/lib/crowdsec/config/acquis.d/nixos.yaml << 'ACQUIS' - ${acquisYaml} - ACQUIS - ''; - - systemd.tmpfiles.rules = [ - "d /var/lib/crowdsec/data 0750 root root -" - "d /var/lib/crowdsec/config 0750 root root -" - "d /var/lib/secrets 0700 root root -" - ]; - - # Firewall bouncer runs natively. API key is injected at start time from - # /var/lib/secrets/crowdsec-bouncer-key — see services/crowdsec.md. - systemd.services.crowdsec-firewall-bouncer = { - description = "CrowdSec nftables firewall bouncer"; - after = [ "network.target" "docker-crowdsec.service" ]; - wants = [ "docker-crowdsec.service" ]; - wantedBy = [ "multi-user.target" ]; - - serviceConfig = { - Type = "simple"; - RuntimeDirectory = "crowdsec-bouncer"; - ExecStartPre = bouncerPreStart; - ExecStart = "${pkgs.crowdsec-firewall-bouncer}/bin/cs-firewall-bouncer -c /run/crowdsec-bouncer/config.yaml"; - Restart = "on-failure"; - RestartSec = "5s"; - AmbientCapabilities = [ "CAP_NET_ADMIN" "CAP_NET_RAW" ]; - CapabilityBoundingSet = [ "CAP_NET_ADMIN" "CAP_NET_RAW" ]; - }; - }; - }; -}