Disable skyflake and ceph

Sandro - 2023-11-13 00:13:40 +01:00
parent 2d88feff21
commit 1523e78c29
Signed by: sandro
GPG Key ID: 3AF5A43A3EECC2E5
4 changed files with 105 additions and 105 deletions
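The change follows one pattern throughout: every skyflake and cluster module import, and every Ceph/Nomad block, is commented out rather than deleted, so the cluster setup can be restored later by uncommenting. The same toggle could also be expressed with a flag; a minimal sketch, assuming a hypothetical `withCluster` boolean that is not part of this commit:

    let
      # hypothetical switch; this commit simply comments the imports out instead
      withCluster = false;
    in {
      hydra = nixosSystem' {
        modules = [
          ./hosts/hydra
        ] ++ lib.optionals withCluster [
          self.nixosModules.cluster
          skyflake.nixosModules.default
        ];
      };
    }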

View File

@@ -384,7 +384,7 @@
 gnunet = nixosSystem' {
   modules = [
-    self.nixosModules.cluster-options
+    # self.nixosModules.cluster-options
     self.nixosModules.microvm
     ./hosts/gnunet
   ];
@@ -413,8 +413,8 @@
 hydra = nixosSystem' {
   modules = [
-    self.nixosModules.cluster
-    skyflake.nixosModules.default
+    # self.nixosModules.cluster
+    # skyflake.nixosModules.default
     ./hosts/hydra
   ];
 };
@@ -493,7 +493,7 @@
     nixpkgs.overlays = [ secrets.overlays.mucbot ];
   }
   ./hosts/mucbot
-  self.nixosModules.cluster-options
+  # self.nixosModules.cluster-options
   self.nixosModules.microvm
 ];
 };
@ -537,7 +537,7 @@
owncast = nixosSystem' {
modules = [
self.nixosModules.cluster-options
# self.nixosModules.cluster-options
self.nixosModules.microvm
./hosts/owncast
];
@@ -642,16 +642,16 @@
   }
   heliwatch.nixosModules.heliwatch
   self.nixosModules.microvm
-  self.nixosModules.cluster-options
+  # self.nixosModules.cluster-options
 ];
 };
 server8 = nixosSystem' {
   modules = [
     ./hosts/server8
-    self.nixosModules.cluster-network
-    self.nixosModules.cluster
-    skyflake.nixosModules.default
+    # self.nixosModules.cluster-network
+    # self.nixosModules.cluster
+    # skyflake.nixosModules.default
     { _module.args = { inherit self; }; }
   ];
 };
@@ -660,9 +660,9 @@
 modules = [
   ./hosts/server9
   self.nixosModules.microvm-host
-  self.nixosModules.cluster-network
-  self.nixosModules.cluster
-  skyflake.nixosModules.default
+  # self.nixosModules.cluster-network
+  # self.nixosModules.cluster
+  # skyflake.nixosModules.default
   { _module.args = { inherit self; }; }
 ];
 };
@@ -671,9 +671,9 @@
 modules = [
   ./hosts/server10
   self.nixosModules.microvm-host
-  self.nixosModules.cluster-network
-  self.nixosModules.cluster
-  skyflake.nixosModules.default
+  # self.nixosModules.cluster-network
+  # self.nixosModules.cluster
+  # skyflake.nixosModules.default
   { _module.args = { inherit self; }; }
 ];
 };
@ -688,7 +688,7 @@
sshlog = nixosSystem' {
modules = [
self.nixosModules.cluster-options
# self.nixosModules.cluster-options
self.nixosModules.microvm
sshlogd.nixosModule
./hosts/sshlog
@@ -697,7 +697,7 @@
 stream = nixosSystem' {
   modules = [
-    self.nixosModules.cluster-options
+    # self.nixosModules.cluster-options
     self.nixosModules.microvm
     ./hosts/stream
   ];

View File

@@ -34,22 +34,22 @@
   hostId = "10101010";
 };
-services = {
-  ceph = {
-    mds.package = pkgs.ceph_17_2;
-    mgr.package = pkgs.ceph_17_2;
-    mon.package = pkgs.ceph_17_2;
-    osd.package = pkgs.ceph_17_2;
-    rgw.package = pkgs.ceph_17_2;
-  };
+# services = {
+#   ceph = {
+#     mds.package = pkgs.ceph_17_2;
+#     mgr.package = pkgs.ceph_17_2;
+#     mon.package = pkgs.ceph_17_2;
+#     osd.package = pkgs.ceph_17_2;
+#     rgw.package = pkgs.ceph_17_2;
+#   };
-  # reserve resources for legacy MicroVMs
-  nomad.settings.client.reserved = {
-    cpu = 4200;
-    # see /sys/fs/cgroup/system.slice/system-microvm.slice/memory.current
-    memory = 28 * 1024;
-  };
-};
+#   # reserve resources for legacy MicroVMs
+#   nomad.settings.client.reserved = {
+#     cpu = 4200;
+#     # see /sys/fs/cgroup/system.slice/system-microvm.slice/memory.current
+#     memory = 28 * 1024;
+#   };
+# };
 simd.arch = "ivybridge";
@@ -59,7 +59,7 @@
     mode = "444";
     path = "/etc/machine-id";
   };
-  secrets."ceph/osd.4/keyfile" = { };
+  # secrets."ceph/osd.4/keyfile" = { };
 };
 # static list of microvms from other sources
@@ -72,18 +72,18 @@
   "dresden-zone"
 ];
-skyflake = {
-  nomad.client.meta."c3d2.cpuSpeed" = "4";
-  storage.ceph.osds = [{
-    id = 4;
-    fsid = "21ff9a57-c8d1-4cfa-8e01-c09ae0c2f0e3";
-    path = "/dev/zvol/server10/ceph-osd.4";
-    keyfile = config.sops.secrets."ceph/osd.4/keyfile".path;
-    deviceClass = "ssd";
-  }];
-  # TODO: remove
-  storage.ceph.package = lib.mkForce pkgs.ceph_17_2;
-};
+# skyflake = {
+#   nomad.client.meta."c3d2.cpuSpeed" = "4";
+#   storage.ceph.osds = [{
+#     id = 4;
+#     fsid = "21ff9a57-c8d1-4cfa-8e01-c09ae0c2f0e3";
+#     path = "/dev/zvol/server10/ceph-osd.4";
+#     keyfile = config.sops.secrets."ceph/osd.4/keyfile".path;
+#     deviceClass = "ssd";
+#   }];
+#   # TODO: remove
+#   storage.ceph.package = lib.mkForce pkgs.ceph_17_2;
+# };
 system.stateVersion = "21.11"; # Did you read the comment?
 }
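For reference, the disabled OSD block above wired its keyfile through sops-nix: declaring a secret under `sops.secrets` makes the decrypted file's runtime path available as `config.sops.secrets.<name>.path`. A minimal sketch of that wiring, reusing the values from the block above:

    { config, ... }: {
      # sops-nix decrypts this secret at activation time
      sops.secrets."ceph/osd.4/keyfile" = { };
      skyflake.storage.ceph.osds = [{
        id = 4;
        fsid = "21ff9a57-c8d1-4cfa-8e01-c09ae0c2f0e3";
        path = "/dev/zvol/server10/ceph-osd.4";
        # runtime path of the decrypted keyfile
        keyfile = config.sops.secrets."ceph/osd.4/keyfile".path;
        deviceClass = "ssd";
      }];
    }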

View File

@@ -33,21 +33,21 @@
   hostId = "08080808";
 };
-services.ceph = {
-  mds.package = pkgs.ceph_17_2;
-  mgr.package = pkgs.ceph_17_2;
-  mon.package = pkgs.ceph_17_2;
-  osd.package = pkgs.ceph_17_2;
-  rgw.package = pkgs.ceph_17_2;
-};
+# services.ceph = {
+#   mds.package = pkgs.ceph_17_2;
+#   mgr.package = pkgs.ceph_17_2;
+#   mon.package = pkgs.ceph_17_2;
+#   osd.package = pkgs.ceph_17_2;
+#   rgw.package = pkgs.ceph_17_2;
+# };
 simd.arch = "westmere";
 sops = {
   defaultSopsFile = ./secrets.yaml;
   secrets = {
-    "ceph/osd.1/keyfile" = { };
-    "ceph/osd.2/keyfile" = { };
+    # "ceph/osd.1/keyfile" = { };
+    # "ceph/osd.2/keyfile" = { };
     "machine-id" = {
       mode = "444";
       path = "/etc/machine-id";
@@ -55,25 +55,25 @@
   };
 };
-skyflake = {
-  nomad.client.meta."c3d2.cpuSpeed" = "3";
-  storage.ceph.osds = [{
-    id = 1;
-    fsid = "4b196252-efb6-4ad2-9e9b-cc3fcd664a3a";
-    path = "/dev/zvol/server8_root/ceph-osd.1";
-    keyfile = config.sops.secrets."ceph/osd.1/keyfile".path;
-    deviceClass = "ssd";
-  }
-  {
-    id = 2;
-    fsid = "b860ec59-3314-4fd1-be45-35a46fd8c059";
-    path = "/dev/zvol/server8_hdd/ceph-osd.2";
-    keyfile = config.sops.secrets."ceph/osd.2/keyfile".path;
-    deviceClass = "hdd";
-  }];
-  # TODO: remove
-  storage.ceph.package = lib.mkForce pkgs.ceph_17_2;
-};
+# skyflake = {
+#   nomad.client.meta."c3d2.cpuSpeed" = "3";
+#   storage.ceph.osds = [{
+#     id = 1;
+#     fsid = "4b196252-efb6-4ad2-9e9b-cc3fcd664a3a";
+#     path = "/dev/zvol/server8_root/ceph-osd.1";
+#     keyfile = config.sops.secrets."ceph/osd.1/keyfile".path;
+#     deviceClass = "ssd";
+#   }
+#   {
+#     id = 2;
+#     fsid = "b860ec59-3314-4fd1-be45-35a46fd8c059";
+#     path = "/dev/zvol/server8_hdd/ceph-osd.2";
+#     keyfile = config.sops.secrets."ceph/osd.2/keyfile".path;
+#     deviceClass = "hdd";
+#   }];
+#   # TODO: remove
+#   storage.ceph.package = lib.mkForce pkgs.ceph_17_2;
+# };
 system.stateVersion = "22.11";
 }

View File

@@ -36,13 +36,13 @@
 security.polkit.enable = true;
 services = {
-  ceph = {
-    mds.package = pkgs.ceph_17_2;
-    mgr.package = pkgs.ceph_17_2;
-    mon.package = pkgs.ceph_17_2;
-    osd.package = pkgs.ceph_17_2;
-    rgw.package = pkgs.ceph_17_2;
-  };
+  # ceph = {
+  #   mds.package = pkgs.ceph_17_2;
+  #   mgr.package = pkgs.ceph_17_2;
+  #   mon.package = pkgs.ceph_17_2;
+  #   osd.package = pkgs.ceph_17_2;
+  #   rgw.package = pkgs.ceph_17_2;
+  # };
   nginx = {
     enable = true;
@ -61,10 +61,10 @@
};
# reserve resources for libvirt VMs
nomad.settings.client.reserved = {
cpu = 2300;
memory = 16 * 1024;
};
# nomad.settings.client.reserved = {
# cpu = 2300;
# memory = 16 * 1024;
# };
restic.server = {
enable = true;
@ -82,8 +82,8 @@
mode = "444";
path = "/etc/machine-id";
};
"ceph/osd.3/keyfile" = { };
"ceph/osd.7/keyfile" = { };
# "ceph/osd.3/keyfile" = { };
# "ceph/osd.7/keyfile" = { };
"restic/htpasswd" = {
owner = config.systemd.services.restic-rest-server.serviceConfig.User;
path = "/var/lib/restic/.htpasswd";
@@ -91,25 +91,25 @@
   };
 };
-skyflake = {
-  nomad.client.meta."c3d2.cpuSpeed" = "3";
-  storage.ceph.osds = [{
-    id = 3;
-    fsid = "54d56ab8-fc43-4e16-886d-3c82dcc1f8fe";
-    path = "/dev/zvol/tank/ceph-osd.3";
-    keyfile = config.sops.secrets."ceph/osd.3/keyfile".path;
-    deviceClass = "hdd";
-  }
-  {
-    id = 7;
-    fsid = "a5450c3b-2e20-450b-a17a-d7938ee9d262";
-    path = "/dev/disk/by-id/wwn-0x600300570140a0c02c39f0863bd3c53e";
-    keyfile = config.sops.secrets."ceph/osd.7/keyfile".path;
-    deviceClass = "ssd";
-  }];
-  # TODO: remove
-  storage.ceph.package = lib.mkForce pkgs.ceph_17_2;
-};
+# skyflake = {
+#   nomad.client.meta."c3d2.cpuSpeed" = "3";
+#   storage.ceph.osds = [{
+#     id = 3;
+#     fsid = "54d56ab8-fc43-4e16-886d-3c82dcc1f8fe";
+#     path = "/dev/zvol/tank/ceph-osd.3";
+#     keyfile = config.sops.secrets."ceph/osd.3/keyfile".path;
+#     deviceClass = "hdd";
+#   }
+#   {
+#     id = 7;
+#     fsid = "a5450c3b-2e20-450b-a17a-d7938ee9d262";
+#     path = "/dev/disk/by-id/wwn-0x600300570140a0c02c39f0863bd3c53e";
+#     keyfile = config.sops.secrets."ceph/osd.7/keyfile".path;
+#     deviceClass = "ssd";
+#   }];
+#   # TODO: remove
+#   storage.ceph.package = lib.mkForce pkgs.ceph_17_2;
+# };
 system.stateVersion = "21.11";