# NixOS host configuration for "kanata": ZFS-on-root with impermanence
# ("erase your darlings"), sops-nix secrets, Tailscale, nginx/ACME-fronted
# services, and two WireGuard exit-node containers.
#
# NOTE(review): `input` is unused in this module and looks like a typo for
# `inputs` — confirm what the flake passes via specialArgs before renaming.
{ config, lib, input, pkgs, ... }:

let
  # SSH public keys for sefidel; aggregated below so more maintainers can be
  # appended without touching every consumer.
  sefidelKeys = [
    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILN14b5Fu+StHeMXq4ClyLG4G+/vCAfS7adxceEFria/ openpgp:0x1D5BCD11"
  ];
  maintainerKeys = [ ] ++ sefidelKeys;

  # Builds "user@domain" while keeping the literal address out of the source:
  # poorObfuscation "example.com" "user" => "user@example.com"
  poorObfuscation = y: x: "${x}@${y}";
in
{
  imports = [ ];

  # Deployment target (colmena-style).
  deployment = {
    targetHost = "kanata.bee-polaris.ts.net";
    targetUser = "root";
  };

  boot.loader.systemd-boot.enable = true;
  boot.loader.efi.canTouchEfiVariables = true;
  boot.supportedFilesystems = [ "zfs" ];

  # hostId is required by ZFS to identify the pool's owner machine.
  networking.hostId = "31cc5527";
  networking.hostName = "kanata";

  # Erase your darlings: roll the root dataset back to the blank snapshot on
  # every boot; anything worth keeping lives under /persist.
  boot.initrd.postDeviceCommands = lib.mkAfter ''
    zfs rollback -r rpool/local/root@blank
  '';

  # r8169 NIC driver is needed both in the normal system and in the initrd so
  # the network is up early enough for remote unlock.
  boot.kernelModules = [ "r8169" ];
  boot.initrd.kernelModules = [ "r8169" ];

  boot.initrd.network.enable = true;
  boot.initrd.network.ssh = {
    enable = true;
    # Using the same port as the actual SSH daemon will cause the clients to
    # throw errors related to host key mismatch.
    port = 2222;
    hostKeys = [
      # XXX: This has to be manually generated during NixOS install.
      # The files are then copied to initrd secrets during activation.
      "/persist/initrd/ssh_host_rsa_key"
      "/persist/initrd/ssh_host_ed25519_key"
    ];
    authorizedKeys = maintainerKeys;
  };

  # Drop a profile into the initrd shell so that an interactive SSH login
  # prompts for the ZFS encryption key and resumes boot.
  # FIX: the original read `cat < /root/.profile`, which *read* the file and
  # executed the snippet immediately; the dangling EOF terminator shows a
  # heredoc writing the profile was intended.
  boot.initrd.network.postCommands = ''
    cat << EOF > /root/.profile
    if pgrep -x "zfs" > /dev/null
    then
      zfs load-key -a
      killall zfs
    else
      echo "ZFS is not running -- this could be a sign of failure."
    fi
    EOF
  '';

  modules.tailscale-initrd = {
    enable = true;
    # XXX: This has to be manually generated during NixOS install.
    # The files are then copied to initrd secrets during activation.
    tailscaleStatePath = "/persist/initrd/tailscale-initrd.state";
  };

  services.openssh.enable = true;
  users.users.root.openssh.authorizedKeys.keys = maintainerKeys;

  sops.defaultSopsFile = ./secrets/secrets.yaml;

  powerManagement.cpuFreqGovernor = "ondemand";

  # Secrets are owned by the service user that needs to read them.
  sops.secrets.zfs-smol-key = { };
  sops.secrets.nextcloud-admin-pass = { owner = "nextcloud"; };
  sops.secrets.acme-credentials = { owner = "acme"; };
  sops.secrets.grafana-admin-pass = { owner = "grafana"; };
  sops.secrets.cf-kanata-credentials = { owner = "cloudflared"; };
  sops.secrets.nitter-account-jsonl = { };
  # sops.secrets.interlink-private-key = { };
  sops.secrets.interlink-wg-config = { };
  sops.secrets.proton-private-key = { };
  sops.secrets.attic-credentials = { };
  # TODO: insecure?
  sops.secrets.invidious-hmac = { mode = "0444"; };
  sops.secrets.transmission-extra-config = { owner = "transmission"; };
  sops.secrets.paperless-superuser-password = { owner = "paperless"; };

  # Forwarding is required for NAT-ing the containers' traffic.
  boot.kernel.sysctl."net.ipv4.ip_forward" = 1;
  boot.kernel.sysctl."net.ipv6.conf.all.forwarding" = 1;

  networking.firewall.enable = true;
  networking.nat = {
    enable = true;
    internalInterfaces = [ "ve-+" ];
    externalInterface = "enp3s0";
    # Lazy IPv6 connectivity for the container
    enableIPv6 = true;
  };

  services.nscd = {
    enable = true;
    config = ''
      # We basically use nscd as a proxy for forwarding nss requests to appropriate
      # nss modules, as we run nscd with LD_LIBRARY_PATH set to the directory
      # containing all such modules.
      # Note that we can not use `enable-cache no` as this will actually cause nscd
      # to just reject the nss requests it receives, which then causes glibc to
      # fallback to trying to handle the request by itself. Which won't work as glibc
      # is not aware of the path in which the nss modules live. As a workaround, we
      # have `enable-cache yes` with an explicit ttl of 0.
      server-user             nscd

      enable-cache            passwd          yes
      positive-time-to-live   passwd          0
      negative-time-to-live   passwd          0
      shared                  passwd          yes

      enable-cache            group           yes
      positive-time-to-live   group           0
      negative-time-to-live   group           0
      shared                  group           yes

      enable-cache            netgroup        yes
      positive-time-to-live   netgroup        0
      negative-time-to-live   netgroup        0
      shared                  netgroup        yes

      enable-cache            hosts           yes
      # Invidious spams DNS queries to PubSubHubBub
      positive-time-to-live   hosts           3600
      negative-time-to-live   hosts           0
      shared                  hosts           yes

      enable-cache            services        yes
      positive-time-to-live   services        0
      negative-time-to-live   services        0
      shared                  services        yes
    '';
  };

  services.tailscale = {
    enable = true;
    useRoutingFeatures = "both";
    openFirewall = true;
  };

  services.nginx.enable = true;

  services.cloudflared = {
    enable = true;
    tunnels."bf6dcc14-d315-41c7-b798-3fe0e0e968eb" = {
      default = "http_status:404";
    };
  };

  networking.firewall.allowedTCPPorts = [ 80 443 ];

  # Site-local option modules (custom namespace, not upstream NixOS options).
  modules = {
    # FIX: `persistence.directories` and `persistence = { … }` previously
    # defined the same attribute path twice; merged into one set.
    persistence = {
      enable = true;
      storagePath = "/persist";
      setupSshHostKeys = true;
      directories = [
        "/var/lib/nixos"
        "/var/lib/tailscale"
        "/var/lib/nixos-containers"
      ];
    };

    # NOTE: This module only populates route entries,
    # each service needs to be enabled individually.
    expose = {
      enable = true;
      routes = {
        "dns.kanata.network".to = "http://localhost:4000";
        "metrics.kanata.network".to = "http://localhost:4001";
        "nitter.kanata.network".to = "http://localhost:4002";
        "invidious.kanata.network".to = "http://localhost:4003";
        "hydra.kanata.network".to = "http://localhost:4004";
        "cache.kanata.network".to = "http://localhost:4005";
        "torrent.kanata.network".to = "http://localhost:4006";
        "paperless.kanata.network".to = "http://localhost:4007";
        "change.labs.kanata.network".to = "http://localhost:4901";

        # Nginx pre-configured routes
        "nextcloud.kanata.network" = {
          to = "http://localhost:80";
          configureNginx = false;
        };
      };
      ssl = {
        enable = true;
        acmeHost = "kanata.network";
      };
      tailscaleIp = "100.93.1.1"; # kanata @ core
      cloudflareUUID = "bf6dcc14-d315-41c7-b798-3fe0e0e968eb";
      secrets.cloudflare-credentials = config.sops.secrets.cf-kanata-credentials.path;
    };

    services.nginx.enable = true;

    services.acme = {
      enable = true;
      email = poorObfuscation "sefidel.net" "postmaster";
      certs = {
        "kanata.network" = {
          subDomains = [
            "*.labs"
            "cache"
            "dns"
            "hydra"
            "invidious"
            "jellyfin"
            "metrics"
            "nextcloud"
            "nitter"
            "paperless"
            "torrent"
          ];
        };
      };
      secrets.acme-credentials = config.sops.secrets.acme-credentials.path;
    };

    services.metrics = {
      enable = true;
      realHost = "metrics.kanata.network";
      secrets.adminPassword = config.sops.secrets.grafana-admin-pass.path;
    };

    services.postgresql.enable = true;

    services.blocky = {
      enable = true;
      realHost = "dns.kanata.network";
    };

    services.nextcloud = rec {
      enable = true;
      ssl = {
        enable = true;
        acmeHost = domain; # rec: refers to `domain` below
      };
      domain = "kanata.network";
      realHost = "nextcloud.kanata.network";
      secrets.admin-pass = config.sops.secrets.nextcloud-admin-pass.path;
    };

    services.nitter = {
      enable = true;
      title = "Kanata Nitter";
      domain = "kanata.network";
      realHost = "nitter.kanata.network";
      secrets.nitter-guest-accounts = config.sops.secrets.nitter-account-jsonl.path;
    };

    services.invidious = {
      enable = true;
      domain = "kanata.network";
      realHost = "invidious.kanata.network";
      secrets.invidious-hmac-key = config.sops.secrets.invidious-hmac.path;
    };

    services.hydra = {
      enable = true;
      baseURL = "https://hydra.kanata.network";
    };

    services.atticd = {
      enable = true;
      hosts = [ "cache.kanata.network" ];
      baseURL = "https://cache.kanata.network/";
      storagePath = "/smol/archive/attic";
      watchStore = true;
      secrets.attic-credentials = config.sops.secrets.attic-credentials.path;
    };

    services.transmission = {
      enable = true;
      home = "/smol/sandbox/torrent";
      secrets.transmission-extra-config = config.sops.secrets.transmission-extra-config.path;
    };

    services.paperless = {
      enable = true;
      realHost = "paperless.kanata.network";
      secrets.paperless-superuser-password = config.sops.secrets.paperless-superuser-password.path;
    };
  };

  # WireGuard exit-node container (interlink provider).
  containers.v-interlink = {
    autoStart = true;
    enableTun = true;
    # Tailscale authkeys expire after 90 days, which means if a system
    # restarts, there's a high chance that the key will be invalid.
    # Therefore, we use classic authentication with non-ephemeral storage.
    ephemeral = false;
    privateNetwork = true;
    hostAddress = "172.16.1.1";
    localAddress = "172.16.1.2";
    # bindMounts."/run/secrets/interlink-private-key".hostPath = config.sops.secrets.interlink-private-key.path;
    bindMounts."/run/secrets/interlink-wg-config".hostPath = config.sops.secrets.interlink-wg-config.path;
    config = { config, pkgs, lib, ... }: {
      services.tailscale = {
        enable = true;
        useRoutingFeatures = "both";
        extraUpFlags = [ "--advertise-exit-node=true" ];
      };
      networking.firewall.allowedUDPPorts = [ 51820 ];
      networking.wg-quick.interfaces.wg0 = {
        autostart = true;
        # Full interface definition lives in the (secret) config file; the
        # redacted inline equivalent is kept below for reference.
        configFile = "/run/secrets/interlink-wg-config";
        # address = [ "***.***.***.***/32" ];
        # listenPort = 51820;
        #
        # mtu = 1350;
        #
        # privateKeyFile = "/run/secrets/interlink-private-key";
        #
        # peers = [{
        #   publicKey = "*******************************************";
        #   # Exclude 100.64.0.0/10
        #   allowedIPs = [
        #     "0.0.0.0/0"
        #     # "0.0.0.0/2"
        #     # "64.0.0.0/3"
        #     # "96.0.0.0/6"
        #     # "100.0.0.0/10"
        #     # "100.128.0.0/9"
        #     # "101.0.0.0/8"
        #     # "102.0.0.0/7"
        #     # "104.0.0.0/5"
        #     # "112.0.0.0/4"
        #     # "128.0.0.0/1"
        #   ];
        #   persistentKeepalive = 120;
        #   endpoint = "***.***.***.***:51840";
        # }];
      };
      system.stateVersion = "24.05";
    };
  };

  networking.firewall.allowedUDPPorts = [ 51820 ];

  # WireGuard exit-node container (Proton, JP#43).
  containers.v-proton-jp43 = {
    autoStart = true;
    enableTun = true;
    # Tailscale authkeys expire after 90 days, which means if a system
    # restarts, there's a high chance that the key will be invalid.
    # Therefore, we use classic authentication with non-ephemeral storage.
    ephemeral = false;
    privateNetwork = true;
    hostAddress = "172.16.1.3";
    localAddress = "172.16.1.4";
    bindMounts."/run/secrets/proton-private-key".hostPath = config.sops.secrets.proton-private-key.path;
    config = { config, pkgs, lib, ... }: {
      services.tailscale = {
        enable = true;
        useRoutingFeatures = "both";
        extraUpFlags = [ "--advertise-exit-node=true" ];
      };
      networking.firewall.allowedUDPPorts = [ 51820 ];
      networking.wg-quick.interfaces.wg0 = {
        autostart = true;
        address = [ "10.2.0.2/32" ];
        listenPort = 51820;
        privateKeyFile = "/run/secrets/proton-private-key";
        peers = [{
          publicKey = "7FslkahrdLwGbv4QSX5Cft5CtQLmBUlpWC382SSF7Hw=";
          # Exclude 100.64.0.0/10
          allowedIPs = [
            "0.0.0.0/0"
            # "0.0.0.0/2"
            # "64.0.0.0/3"
            # "96.0.0.0/6"
            # "100.0.0.0/10"
            # "100.128.0.0/9"
            # "101.0.0.0/8"
            # "102.0.0.0/7"
            # "104.0.0.0/5"
            # "112.0.0.0/4"
            # "128.0.0.0/1"
          ];
          endpoint = "103.125.235.19:51820";
        }];
      };
      system.stateVersion = "24.05";
    };
  };

  # This option defines the first version of NixOS you have installed on this particular machine,
  # and is used to maintain compatibility with application data (e.g. databases) created on older NixOS versions.
  #
  # Most users should NEVER change this value after the initial install, for any reason,
  # even if you've upgraded your system to a new NixOS release.
  #
  # This value does NOT affect the Nixpkgs version your packages and OS are pulled from,
  # so changing it will NOT upgrade your system - see https://nixos.org/manual/nixos/stable/#sec-upgrading for how
  # to actually do that.
  #
  # This value being lower than the current NixOS release does NOT mean your system is
  # out of date, out of support, or vulnerable.
  #
  # Do NOT change this value unless you have manually inspected all the changes it would make to your configuration,
  # and migrated your data accordingly.
  #
  # For more information, see `man configuration.nix` or https://nixos.org/manual/nixos/stable/options#opt-system.stateVersion .
  system.stateVersion = "24.05"; # Did you read the comment?
}