# NixOS module for the c3d2 skyflake cluster members.
# Currently only the inter-member firewall rules are active; the whole
# skyflake / nomad / ceph / collectd configuration below is commented out.
{ config, hostRegistry, lib, pkgs, ssh-public-keys, zentralwerk, ... }:

let
  inherit (config.networking) hostName;

  # All cluster members. hydra does *not* use this module because it only
  # runs a nomad server but no client and no microvms.
  servers = [ "server9" "server10" "hydra" ];
  # Members that run MicroVM workloads (referenced by the disabled
  # skyflake config below).
  microvmServers = [ "server9" "server10" ];
  # Members tagged as big-storage nodes (referenced below).
  storageServers = [ "server9" ];

  # First zentralwerk net ("cluster" before "serv") that has an IPv4
  # entry for `server`; null when the server appears in neither net.
  serverNet = server:
    lib.findFirst
      (net: zentralwerk.lib.config.site.net.${net}.hosts4 ? ${server})
      null
      [ "cluster" "serv" ];

  # Base directory for the skyflake deployment users' home directories.
  skyflakeHome = "/var/lib/skyflake/home";

in {
  # Open firewall between cluster members: accept everything coming from
  # every other server's IPv4 address and from all of its IPv6 addresses.
  networking.firewall.extraCommands = lib.concatMapStrings (server:
    let
      netConfig = zentralwerk.lib.config.site.net.${serverNet server};
    in
    lib.optionalString (server != hostName) ''
      iptables -A nixos-fw --source ${netConfig.hosts4.${server}} -j ACCEPT
      ${lib.concatMapStrings (hosts6: ''
        ip6tables -A nixos-fw --source ${hosts6.${server}} -j ACCEPT
      '') (builtins.attrValues netConfig.hosts6)}
    ''
  ) servers;

  # Cluster configuration (disabled)
  # skyflake = {
  #   # debug = true;
  #   nodes = builtins.listToAttrs (
  #     map (name: {
  #       inherit name;
  #       value.address = hostRegistry.${name}.ip4;
  #     }) servers
  #   );
  #   # Extra nomad configuration
  #   nomad = {
  #     datacenter = "c3d2";
  #     inherit servers;
  #     # run tasks only on these:
  #     client.enable = builtins.elem hostName microvmServers;
  #     client.meta = lib.optionalAttrs (builtins.elem hostName storageServers) {
  #       "c3d2.storage" = "big";
  #     };
  #   };
  #   # The user that runs skyflake MicroVMs
  #   microvmUid = 997;
  #   users = {
  #     # Deployment user for hosts in this flake
  #     c3d2 = {
  #       uid = 1001;
  #       sshKeys = config.users.users.root.openssh.authorizedKeys.keys;
  #       home = "${skyflakeHome}/c3d2";
  #     };
  #     # Deployment user for neighbour Andreas Lippmann
  #     luulaatsch = {
  #       uid = 1003;
  #       home = "${skyflakeHome}/luulaatsch";
  #       sshKeys = [
  #         "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDVipwWSzxgUgm0fMeTbOSCXDNkbUUp+k1tvWaJcBA1M+LTM21rhP3tQQP24smPx1b2V/by6vHakhsj5pIMLzSSW7he/mAnOtwHj2dCEk6VaMvozAfS6n777g8ujZg0N9ekrqoNLh0uNchs3DYFZWXAnKtfEYjzIdXqVgXMI4uDXM6ObEh+YR3iGaZSYKd6S5nufzJOR6EiYbv/z6ThRq16f+HqetVdBamqmpNK9NcWCtjMcww7nZTyUvTbd2AyrpTAXSnVg30fza428Kpc5ZdzDJFjn0B3MVnP2LavIo8VH3Y5sjpp0aMDKfbz6dPA/df/pR6LVMsxx0eCEXeGqo9sFdabu2AwIPi79QFANr3qJ/nZo2LR3o2LvcOLO06xVRFKReGTsy6WWTNOTAL03sdiIbqnlmkjXu5nb4hzxoHBgA6LZGMx3knoVcUMBlpVaxOoT2tzXzpYszsKpGSN1EBqUlDXzen3EkHg7TpjjTJSH7Q0AeeSCJsaoAepa6wcZeJlBWSRiJdqIn7YBmCBdjV1nNPVBYE5mm2x88ha9e4NbyFhjQvZ/BiLyKCFVyDs6lIvhV5QfAPWOkyPwbLXtXg6g1OvmuM6mGDhtEfZKvreeqmTKjB+x7OpG847JKxmU8eeYax63/WgRssbOfTpicjsdoKlFST3WpZHGT7mWzmB/w== andreas@luulaatschs-MBP.pub.zentralwerk.org"
  #       ] ++ ssh-public-keys.astro;
  #     };
  #     # Deployment user for marenz
  #     marenz = {
  #       uid = 1004;
  #       sshKeys = config.users.users.root.openssh.authorizedKeys.keys ++ ssh-public-keys.marenz;
  #       home = "${skyflakeHome}/marenz";
  #     };
  #   };
  #   deploy.customizationModule = ./customization;
  #   # Ceph storage cluster configuration
  #   storage.ceph = rec {
  #     package = pkgs.ceph_17_2;
  #     fsid = "a06b1061-ef09-46d6-a15f-2f8ce4d7d1bf";
  #     mons = [ "server9" "server10" ];
  #     mgrs = mons;
  #     mdss = mons;
  #     rbdPools.microvms = {
  #       params = { size = 2; class = "ssd"; };
  #     };
  #     rbdPools.microvms-hdd = {
  #       params = { size = 2; class = "hdd"; };
  #     };
  #     cephfs.home.mountPoint = skyflakeHome;
  #     # Legacy: migration to rbd
  #     cephfs.skyflake.mountPoint = "/storage/cephfs";
  #     monKeyring = config.sops.secrets."ceph/monKeyring".path;
  #     adminKeyring = config.sops.secrets."ceph/adminKeyring".path;
  #   };
  # };

  # Ceph keyrings
  # sops.secrets = {
  #   "ceph/monKeyring" = {
  #     owner = "ceph";
  #     sopsFile = ./ceph.yaml;
  #   };
  #   "ceph/adminKeyring" = {
  #     owner = "ceph";
  #     sopsFile = ./ceph.yaml;
  #   };
  # };

  # Collectd monitoring for ceph
  # NOTE(review): the bare "#" lines inside the plugin config below look like
  # collectd <Daemon ...> section tags that were lost — restore them before
  # re-enabling this block.
  # services.collectd.plugins.ceph = ''
  #   ConvertSpecialMetricTypes true
  #   ${lib.concatMapStrings (hostName: ''
  #
  #     SocketPath "/var/run/ceph/ceph-mon.${hostName}.asok"
  #
  #   '') config.services.ceph.mon.daemons}
  #   ${lib.concatMapStrings (hostName: ''
  #
  #     SocketPath "/var/run/ceph/ceph-mgr.${hostName}.asok"
  #
  #   '') config.services.ceph.mgr.daemons}
  #   ${lib.concatMapStrings (hostName: ''
  #
  #     SocketPath "/var/run/ceph/ceph-mds.${hostName}.asok"
  #
  #   '') config.services.ceph.mds.daemons}
  #   ${lib.concatMapStrings (id: ''
  #
  #     SocketPath "/var/run/ceph/ceph-osd.${id}.asok"
  #
  #   '') config.services.ceph.osd.daemons}
  # '';

  # HACK: let collectd access ceph sockets
  # systemd.services.collectd.serviceConfig.User = lib.mkForce "ceph";
}