Only disable ceph config, not half the cluster

Sandro - 2023-11-13 02:54:07 +01:00
parent 3d2a1d9fe5
commit 27930ce873
Signed by: sandro
GPG Key ID: 3AF5A43A3EECC2E5
3 changed files with 111 additions and 111 deletions
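The previous workaround had commented out the shared cluster module imports per host, which disabled far more than Ceph. This commit restores those imports and instead comments out only the skyflake/Ceph settings inside the cluster module; the `skyflake.nixosModules.default` import itself stays disabled, so any remaining `skyflake.*` definition would fail evaluation. A minimal sketch of the resulting per-host pattern (the host and baseline names are illustrative, not from this repo):

# Hypothetical flake.nix excerpt: keep the shared modules imported,
# and disable features inside them rather than here.
{
  nixosConfigurations.example = nixpkgs.lib.nixosSystem {
    system = "x86_64-linux";
    modules = [
      self.nixosModules.cluster-options # re-enabled by this commit
      # skyflake.nixosModules.default   # still off: declares the skyflake.* options
      ./hosts/example
    ];
  };
}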

View File

@@ -384,7 +384,7 @@
     gnunet = nixosSystem' {
       modules = [
-        # self.nixosModules.cluster-options
+        self.nixosModules.cluster-options
         self.nixosModules.microvm
         ./hosts/gnunet
       ];
@@ -413,7 +413,7 @@
     hydra = nixosSystem' {
       modules = [
-        # self.nixosModules.cluster
+        self.nixosModules.cluster
         # skyflake.nixosModules.default
         ./hosts/hydra
       ];
@@ -493,7 +493,7 @@
           nixpkgs.overlays = [ secrets.overlays.mucbot ];
         }
         ./hosts/mucbot
-        # self.nixosModules.cluster-options
+        self.nixosModules.cluster-options
         self.nixosModules.microvm
       ];
     };
@@ -537,7 +537,7 @@
     owncast = nixosSystem' {
       modules = [
-        # self.nixosModules.cluster-options
+        self.nixosModules.cluster-options
         self.nixosModules.microvm
         ./hosts/owncast
       ];
@@ -642,7 +642,7 @@
         }
         heliwatch.nixosModules.heliwatch
         self.nixosModules.microvm
-        # self.nixosModules.cluster-options
+        self.nixosModules.cluster-options
       ];
     };
@@ -650,7 +650,7 @@
       modules = [
         ./hosts/server8
         self.nixosModules.cluster-network
-        # self.nixosModules.cluster
+        self.nixosModules.cluster
         # skyflake.nixosModules.default
         { _module.args = { inherit self; }; }
       ];
@@ -661,7 +661,7 @@
         ./hosts/server9
         self.nixosModules.microvm-host
         self.nixosModules.cluster-network
-        # self.nixosModules.cluster
+        self.nixosModules.cluster
         # skyflake.nixosModules.default
         { _module.args = { inherit self; }; }
       ];
@@ -672,7 +672,7 @@
         ./hosts/server10
         self.nixosModules.microvm-host
         self.nixosModules.cluster-network
-        # self.nixosModules.cluster
+        self.nixosModules.cluster
         # skyflake.nixosModules.default
         { _module.args = { inherit self; }; }
       ];
@@ -688,7 +688,7 @@
     sshlog = nixosSystem' {
       modules = [
-        # self.nixosModules.cluster-options
+        self.nixosModules.cluster-options
         self.nixosModules.microvm
         sshlogd.nixosModule
         ./hosts/sshlog
@@ -697,7 +697,7 @@
     stream = nixosSystem' {
       modules = [
-        # self.nixosModules.cluster-options
+        self.nixosModules.cluster-options
         self.nixosModules.microvm
         ./hosts/stream
       ];
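The `nixosSystem'` helper used throughout is this flake's private wrapper around `nixpkgs.lib.nixosSystem`; its definition is outside this diff. A purely hypothetical sketch of such a wrapper, only to make the host entries above easier to read (the shared baseline module is an assumption, not this repo's code):

# Hypothetical helper in a let-binding of flake.nix: a thin wrapper
# that appends modules shared by every host to each system.
nixosSystem' = args: nixpkgs.lib.nixosSystem (args // {
  system = args.system or "x86_64-linux";
  modules = (args.modules or [ ]) ++ [
    ./modules/common.nix # assumed shared baseline
  ];
});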

View File

@@ -36,111 +36,111 @@ in {
     servers;
   # Cluster configuration
-  skyflake = {
-    # debug = true;
-    nodes = builtins.listToAttrs (
-      map (name: {
-        inherit name;
-        value.address = hostRegistry.${name}.ip4;
-      }) servers
-    );
-    # Extra nomad configuration
-    nomad = {
-      datacenter = "c3d2";
-      inherit servers;
-      # run tasks only on these:
-      client.enable = builtins.elem hostName microvmServers;
-      client.meta = lib.optionalAttrs (builtins.elem hostName storageServers) {
-        "c3d2.storage" = "big";
-      };
-    };
-    # The user that runs skyflake MicroVMs
-    microvmUid = 997;
+  # skyflake = {
+  #   # debug = true;
+  #   nodes = builtins.listToAttrs (
+  #     map (name: {
+  #       inherit name;
+  #       value.address = hostRegistry.${name}.ip4;
+  #     }) servers
+  #   );
+  #   # Extra nomad configuration
+  #   nomad = {
+  #     datacenter = "c3d2";
+  #     inherit servers;
+  #     # run tasks only on these:
+  #     client.enable = builtins.elem hostName microvmServers;
+  #     client.meta = lib.optionalAttrs (builtins.elem hostName storageServers) {
+  #       "c3d2.storage" = "big";
+  #     };
+  #   };
+  #   # The user that runs skyflake MicroVMs
+  #   microvmUid = 997;
-    users = {
-      # Deployment user for hosts in this flake
-      c3d2 = {
-        uid = 1001;
-        sshKeys = config.users.users.root.openssh.authorizedKeys.keys;
-        home = "${skyflakeHome}/c3d2";
-      };
-      # Deployment user for neighbour Andreas Lippmann <andreaslippmann@web.de>
-      luulaatsch = {
-        uid = 1003;
-        home = "${skyflakeHome}/luulaatsch";
-        sshKeys = [
-          "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDVipwWSzxgUgm0fMeTbOSCXDNkbUUp+k1tvWaJcBA1M+LTM21rhP3tQQP24smPx1b2V/by6vHakhsj5pIMLzSSW7he/mAnOtwHj2dCEk6VaMvozAfS6n777g8ujZg0N9ekrqoNLh0uNchs3DYFZWXAnKtfEYjzIdXqVgXMI4uDXM6ObEh+YR3iGaZSYKd6S5nufzJOR6EiYbv/z6ThRq16f+HqetVdBamqmpNK9NcWCtjMcww7nZTyUvTbd2AyrpTAXSnVg30fza428Kpc5ZdzDJFjn0B3MVnP2LavIo8VH3Y5sjpp0aMDKfbz6dPA/df/pR6LVMsxx0eCEXeGqo9sFdabu2AwIPi79QFANr3qJ/nZo2LR3o2LvcOLO06xVRFKReGTsy6WWTNOTAL03sdiIbqnlmkjXu5nb4hzxoHBgA6LZGMx3knoVcUMBlpVaxOoT2tzXzpYszsKpGSN1EBqUlDXzen3EkHg7TpjjTJSH7Q0AeeSCJsaoAepa6wcZeJlBWSRiJdqIn7YBmCBdjV1nNPVBYE5mm2x88ha9e4NbyFhjQvZ/BiLyKCFVyDs6lIvhV5QfAPWOkyPwbLXtXg6g1OvmuM6mGDhtEfZKvreeqmTKjB+x7OpG847JKxmU8eeYax63/WgRssbOfTpicjsdoKlFST3WpZHGT7mWzmB/w== andreas@luulaatschs-MBP.pub.zentralwerk.org"
-        ] ++ ssh-public-keys.astro;
-      };
-      # Deployment user for marenz
-      marenz = {
-        uid = 1004;
-        sshKeys = config.users.users.root.openssh.authorizedKeys.keys ++ ssh-public-keys.marenz;
-        home = "${skyflakeHome}/marenz";
-      };
-    };
+  #   users = {
+  #     # Deployment user for hosts in this flake
+  #     c3d2 = {
+  #       uid = 1001;
+  #       sshKeys = config.users.users.root.openssh.authorizedKeys.keys;
+  #       home = "${skyflakeHome}/c3d2";
+  #     };
+  #     # Deployment user for neighbour Andreas Lippmann <andreaslippmann@web.de>
+  #     luulaatsch = {
+  #       uid = 1003;
+  #       home = "${skyflakeHome}/luulaatsch";
+  #       sshKeys = [
+  #         "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDVipwWSzxgUgm0fMeTbOSCXDNkbUUp+k1tvWaJcBA1M+LTM21rhP3tQQP24smPx1b2V/by6vHakhsj5pIMLzSSW7he/mAnOtwHj2dCEk6VaMvozAfS6n777g8ujZg0N9ekrqoNLh0uNchs3DYFZWXAnKtfEYjzIdXqVgXMI4uDXM6ObEh+YR3iGaZSYKd6S5nufzJOR6EiYbv/z6ThRq16f+HqetVdBamqmpNK9NcWCtjMcww7nZTyUvTbd2AyrpTAXSnVg30fza428Kpc5ZdzDJFjn0B3MVnP2LavIo8VH3Y5sjpp0aMDKfbz6dPA/df/pR6LVMsxx0eCEXeGqo9sFdabu2AwIPi79QFANr3qJ/nZo2LR3o2LvcOLO06xVRFKReGTsy6WWTNOTAL03sdiIbqnlmkjXu5nb4hzxoHBgA6LZGMx3knoVcUMBlpVaxOoT2tzXzpYszsKpGSN1EBqUlDXzen3EkHg7TpjjTJSH7Q0AeeSCJsaoAepa6wcZeJlBWSRiJdqIn7YBmCBdjV1nNPVBYE5mm2x88ha9e4NbyFhjQvZ/BiLyKCFVyDs6lIvhV5QfAPWOkyPwbLXtXg6g1OvmuM6mGDhtEfZKvreeqmTKjB+x7OpG847JKxmU8eeYax63/WgRssbOfTpicjsdoKlFST3WpZHGT7mWzmB/w== andreas@luulaatschs-MBP.pub.zentralwerk.org"
+  #       ] ++ ssh-public-keys.astro;
+  #     };
+  #     # Deployment user for marenz
+  #     marenz = {
+  #       uid = 1004;
+  #       sshKeys = config.users.users.root.openssh.authorizedKeys.keys ++ ssh-public-keys.marenz;
+  #       home = "${skyflakeHome}/marenz";
+  #     };
+  #   };
-    deploy.customizationModule = ./customization;
+  #   deploy.customizationModule = ./customization;
-    # Ceph storage cluster configuration
-    storage.ceph = rec {
-      package = pkgs.ceph_17_2;
-      fsid = "a06b1061-ef09-46d6-a15f-2f8ce4d7d1bf";
-      mons = [ "server9" "server10" ];
-      mgrs = mons;
-      mdss = mons;
-      rbdPools.microvms = {
-        params = { size = 2; class = "ssd"; };
-      };
-      rbdPools.microvms-hdd = {
-        params = { size = 2; class = "hdd"; };
-      };
-      cephfs.home.mountPoint = skyflakeHome;
-      # Legacy: migration to rbd
-      cephfs.skyflake.mountPoint = "/storage/cephfs";
+  #   # Ceph storage cluster configuration
+  #   storage.ceph = rec {
+  #     package = pkgs.ceph_17_2;
+  #     fsid = "a06b1061-ef09-46d6-a15f-2f8ce4d7d1bf";
+  #     mons = [ "server9" "server10" ];
+  #     mgrs = mons;
+  #     mdss = mons;
+  #     rbdPools.microvms = {
+  #       params = { size = 2; class = "ssd"; };
+  #     };
+  #     rbdPools.microvms-hdd = {
+  #       params = { size = 2; class = "hdd"; };
+  #     };
+  #     cephfs.home.mountPoint = skyflakeHome;
+  #     # Legacy: migration to rbd
+  #     cephfs.skyflake.mountPoint = "/storage/cephfs";
-      monKeyring = config.sops.secrets."ceph/monKeyring".path;
-      adminKeyring = config.sops.secrets."ceph/adminKeyring".path;
-    };
-  };
+  #     monKeyring = config.sops.secrets."ceph/monKeyring".path;
+  #     adminKeyring = config.sops.secrets."ceph/adminKeyring".path;
+  #   };
+  # };
   # Ceph keyrings
-  sops.secrets = {
-    "ceph/monKeyring" = {
-      owner = "ceph";
-      sopsFile = ./ceph.yaml;
-    };
-    "ceph/adminKeyring" = {
-      owner = "ceph";
-      sopsFile = ./ceph.yaml;
-    };
-  };
+  # sops.secrets = {
+  #   "ceph/monKeyring" = {
+  #     owner = "ceph";
+  #     sopsFile = ./ceph.yaml;
+  #   };
+  #   "ceph/adminKeyring" = {
+  #     owner = "ceph";
+  #     sopsFile = ./ceph.yaml;
+  #   };
+  # };
   # Collectd monitoring for ceph
-  services.collectd.plugins.ceph = ''
-    ConvertSpecialMetricTypes true
+  # services.collectd.plugins.ceph = ''
+  #   ConvertSpecialMetricTypes true
-    ${lib.concatMapStrings (hostName: ''
-      <Daemon "mon.${hostName}">
-        SocketPath "/var/run/ceph/ceph-mon.${hostName}.asok"
-      </Daemon>
-    '') config.services.ceph.mon.daemons}
-    ${lib.concatMapStrings (hostName: ''
-      <Daemon "mgr.${hostName}">
-        SocketPath "/var/run/ceph/ceph-mgr.${hostName}.asok"
-      </Daemon>
-    '') config.services.ceph.mgr.daemons}
-    ${lib.concatMapStrings (hostName: ''
-      <Daemon "mds.${hostName}">
-        SocketPath "/var/run/ceph/ceph-mds.${hostName}.asok"
-      </Daemon>
-    '') config.services.ceph.mds.daemons}
-    ${lib.concatMapStrings (id: ''
-      <Daemon "osd.${id}">
-        SocketPath "/var/run/ceph/ceph-osd.${id}.asok"
-      </Daemon>
-    '') config.services.ceph.osd.daemons}
-  '';
+  #   ${lib.concatMapStrings (hostName: ''
+  #     <Daemon "mon.${hostName}">
+  #       SocketPath "/var/run/ceph/ceph-mon.${hostName}.asok"
+  #     </Daemon>
+  #   '') config.services.ceph.mon.daemons}
+  #   ${lib.concatMapStrings (hostName: ''
+  #     <Daemon "mgr.${hostName}">
+  #       SocketPath "/var/run/ceph/ceph-mgr.${hostName}.asok"
+  #     </Daemon>
+  #   '') config.services.ceph.mgr.daemons}
+  #   ${lib.concatMapStrings (hostName: ''
+  #     <Daemon "mds.${hostName}">
+  #       SocketPath "/var/run/ceph/ceph-mds.${hostName}.asok"
+  #     </Daemon>
+  #   '') config.services.ceph.mds.daemons}
+  #   ${lib.concatMapStrings (id: ''
+  #     <Daemon "osd.${id}">
+  #       SocketPath "/var/run/ceph/ceph-osd.${id}.asok"
+  #     </Daemon>
+  #   '') config.services.ceph.osd.daemons}
+  # '';
   # HACK: let collectd access ceph sockets
-  systemd.services.collectd.serviceConfig.User = lib.mkForce "ceph";
+  # systemd.services.collectd.serviceConfig.User = lib.mkForce "ceph";
 }
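Note why commenting out, rather than `lib.mkIf`, is the right tool for most of this file: `mkIf` only guards option values, but every definition still has to match a declared option, and with `skyflake.nixosModules.default` left un-imported the `skyflake.*` options are not declared at all, so even a `mkIf false` block defining them would fail evaluation. The parts whose options are always declared (sops-nix secrets, collectd, systemd) could be gated behind a flag instead; a sketch under the assumption of a hypothetical `c3d2.cluster.enableCeph` option, not something this repo defines:

{ config, lib, ... }:
{
  # Hypothetical toggle for the Ceph-related pieces of this module.
  options.c3d2.cluster.enableCeph = lib.mkEnableOption "the Ceph storage cluster";

  config = lib.mkIf config.c3d2.cluster.enableCeph {
    # Ceph keyrings are only provisioned while Ceph is enabled.
    sops.secrets."ceph/monKeyring" = {
      owner = "ceph";
      sopsFile = ./ceph.yaml;
    };
    sops.secrets."ceph/adminKeyring" = {
      owner = "ceph";
      sopsFile = ./ceph.yaml;
    };
    # HACK: let collectd access ceph sockets
    systemd.services.collectd.serviceConfig.User = lib.mkForce "ceph";
  };
}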

View File

@@ -9,10 +9,10 @@
   config = {
     assertions = [
-      {
-        assertion = config.skyflake.storage.ceph.package != 17;
-        message = "Please pin ceph to major version 17!";
-      }
+      # {
+      #   assertion = config.skyflake.storage.ceph.package != 17;
+      #   message = "Please pin ceph to major version 17!";
+      # }
     ];
     microvm = {
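If this assertion is revived once skyflake is re-imported, note that it compares the package derivation itself against the integer 17, which is always true; a version pin is usually checked against the derivation's version string instead. A sketch using nixpkgs' `lib.versions`, assuming `package` holds a ceph derivation:

{ config, lib, ... }:
{
  assertions = [
    {
      # Compare the major component of e.g. "17.2.5", not the derivation.
      assertion = lib.versions.major config.skyflake.storage.ceph.package.version == "17";
      message = "Please pin ceph to major version 17!";
    }
  ];
}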