disko: support multiple disks

This commit is contained in:
Sandro - 2023-05-19 01:57:15 +02:00
parent 31d255b388
commit 7e3a98d68f
Signed by: sandro
GPG Key ID: 3AF5A43A3EECC2E5
2 changed files with 166 additions and 145 deletions

View File

@ -1,4 +1,4 @@
{ config, pkgs, ... }:
{ pkgs, ... }:
{
imports = [ ./hardware-configuration.nix ];
@ -9,12 +9,12 @@
autoUpdate = true;
};
disko = {
enableCeph = false;
enableLuks = false;
name = config.networking.hostName;
rootDisk = "/dev/sda";
};
disko.disks = [ {
device = "/dev/sda";
name = "";
withCeph = false;
withLuks = false;
} ];
nix.settings = {
cores = 4;

View File

@ -4,72 +4,91 @@ let
cfg = config.disko;
in
{
options.disko = {
name = lib.mkOption {
type = lib.types.str;
example = "chaos";
description = "Machine name used in eg zpool name.";
};
options.disko.disks = lib.mkOption {
description = lib.mdDoc "Disk names to format.";
type = with lib.types; listOf (submodule (_: {
options = {
device = lib.mkOption {
type = lib.types.str;
default = null;
example = "/dev/sda";
description = "Path of the disk.";
};
rootDisk = lib.mkOption {
type = with lib.types; nullOr str;
default = null;
example = "/dev/sda";
description = "Path of the root disk.";
};
name = lib.mkOption {
type = lib.types.str;
default = null;
example = "ssd0";
description = "Name of the disk.";
};
enableCeph = lib.mkOption {
type = lib.types.bool;
default = true;
description = "Whether to include a ceph on the root disk.";
};
withBoot = lib.mkOption {
type = lib.types.bool;
default = true;
description = "Whether to include a boot partition.";
};
enableLuks = lib.mkOption {
type = lib.types.bool;
default = true;
description = "Whether to encrypt the root disk.";
};
withCeph = lib.mkOption {
type = lib.types.bool;
default = true;
description = "Whether to include a ceph partition.";
};
enableZfs = lib.mkOption {
type = lib.types.bool;
default = true;
description = "Whether to include a zfs on the root disk.";
};
withLuks = lib.mkOption {
type = lib.types.bool;
default = true;
description = "Whether to encrypt the partitions.";
};
withZfs = lib.mkOption {
type = lib.types.bool;
default = true;
description = "Whether to include a zfs partition.";
};
};
}));
default = [ ];
};
config = {
assertions = [
{
assertion = cfg.enableCeph || cfg.enableZfs;
message = "Must enable ceph or zfs!";
}
{
assertion = cfg.enableCeph -> cfg.enableLuks;
message = "Ceph requires Luks!";
}
];
assertions = map
(disk: [
{
assertion = disk.withCeph || disk.withZfs;
message = "Must enable ceph or zfs!";
}
{
assertion = disk.withCeph -> disk.withLuks;
message = "Ceph requires Luks!";
}
])
(lib.attrNames cfg.disks);
disko = {
devices =
let
rootSize = 200;
zfs = {
size = if (!cfg.enableCeph) then "100%FREE" else "${toString rootSize}GiB";
content = {
pool = cfg.name;
type = "zfs";
devices = lib.mkIf (cfg.disks != [ ]) (lib.head (map
(disk:
let
diskName = if disk.name != "" then "-${disk.name}" else "";
luksName = "crypt-${config.networking.hostName}${diskName}";
rootSize = 200;
vgName = "lvm-${config.networking.hostName}${diskName}";
zfs = {
size = if (!disk.withCeph) then "100%FREE" else "${toString rootSize}GiB";
content = {
pool = disk.name;
type = "zfs";
};
};
};
in
lib.mkIf (cfg.rootDisk != "")
{
disk.${cfg.rootDisk} = {
device = cfg.rootDisk;
zfsName = "${config.networking.hostName}${diskName}";
in
({
disk.${disk.device} = {
inherit (disk) device;
type = "disk";
content = {
type = "table";
format = "gpt";
partitions = lib.optional cfg.enableZfs
partitions = lib.optional disk.withZfs
{
name = "ESP";
start = "1MiB";
@ -83,107 +102,109 @@ in
} ++ [
{
name = "root";
start = if cfg.enableZfs then "512MiB" else "1MiB";
start = if disk.withZfs then "512MiB" else "1MiB";
end = "100%";
part-type = "primary";
content = lib.optionalAttrs cfg.enableLuks
content = lib.optionalAttrs disk.withLuks
{
type = "luks";
name = "crypt-${cfg.name}";
name = luksName;
# TODO: add password, otherwise prompt opens
keyFile = "/$PWD/keyFile";
content = {
type = "lvm_pv";
vg = "lvm-${cfg.name}";
vg = vgName;
};
} // lib.optionalAttrs (!cfg.enableLuks) zfs.content;
} // lib.optionalAttrs (!disk.withLuks) zfs.content;
}
];
};
};
} // lib.optionalAttrs cfg.enableLuks {
lvm_vg."lvm-${cfg.name}" = {
type = "lvm_vg";
lvs = lib.optionalAttrs cfg.enableCeph
{
# the header is 3650 bytes long and subtract an additional 446 bytes for alignment
# error messages:
# Volume group "lvm-chaos" has insufficient free space (51195 extents): 51200 required.
# Size is not a multiple of 512. Try using 40057405440 or 40057405952.
ceph.size =
let
# convert GiB to bytes
rootSizeMiB = rootSize * 1024 * 1024 * 1024;
# convert back to MiB and align to 4 MiB in the process
roundToMiB = "/1024/1024/4*4";
# subtract 512 MiB for /boot and 20 MiB for luks+header+other
bootOther = "-512-20";
in
"$((($(lsblk /dev/sda --noheadings --nodeps --output SIZE --bytes)-${toString rootSizeMiB})${roundToMiB}${bootOther}))MiB";
} // lib.optionalAttrs cfg.enableZfs { inherit zfs; };
};
} // {
zpool."${cfg.name}" = {
type = "zpool";
mountpoint = null;
mountRoot = "/mnt";
rootFsOptions.acltype = "posixacl";
options = {
ashift = "12";
autotrim = "on";
} // lib.optionalAttrs disk.withLuks {
lvm_vg.${vgName} = {
type = "lvm_vg";
lvs = lib.optionalAttrs disk.withCeph
{
# TODO: delete old code if this works
ceph.size = "100%FREE";
# the header is 3650 bytes long and subtract an additional 446 bytes for alignment
# error messages:
# Volume group "lvm-chaos" has insufficient free space (51195 extents): 51200 required.
# Size is not a multiple of 512. Try using 40057405440 or 40057405952.
# ceph.size =
# let
# # convert GiB to bytes
# rootSizeMiB = rootSize * 1024 * 1024 * 1024;
# # convert back to MiB and align to 4 MiB in the process
# roundToMiB = "/1024/1024/4*4";
# # subtract 512 MiB for /boot and 20 MiB for luks+header+other
# bootOther = "-512-20";
# in
# "$((($(lsblk ${disk.device} --noheadings --nodeps --output SIZE --bytes)-${toString rootSizeMiB})${roundToMiB}${bootOther}))MiB";
} // lib.optionalAttrs disk.withZfs { inherit zfs; };
};
datasets =
let
dataset = mountpoint: {
options = {
canmount = "on";
compression = "zstd";
dnodesize = "auto";
normalization = "formD";
xattr = "sa";
inherit mountpoint;
};
type = "zfs_fs";
};
in
{
"data" = dataset "/";
"data/etc" = dataset "/etc";
"data/home" = dataset "/home";
"data/var" = dataset "/var";
# used by services.postgresqlBackup and later by restic
"data/var/backup" = dataset "/var/backup";
"data/var/lib" = dataset "/var/lib";
"data/var/log" = dataset "/var/log";
"nixos" = {
options = {
canmount = "off";
mountpoint = "none";
};
type = "zfs_fs";
};
"nixos/nix" = dataset "/nix";
"nixos/nix/store" = {
options = {
atime = "off";
canmount = "on";
mountpoint = "/nix/store";
};
type = "zfs_fs";
};
"nixos/nix/var" = dataset "/nix/var";
"reserved" = {
# zfs uses copy on write and requires some free space to delete files when the disk is completely filled
options = {
canmount = "off";
mountpoint = "none";
reservation = "5GiB";
};
type = "zfs_fs";
};
} // {
zpool.${zfsName} = {
type = "zpool";
mountpoint = null;
rootFsOptions.acltype = "posixacl";
options = {
ashift = "12";
autotrim = "on";
};
};
};
datasets =
let
dataset = mountpoint: {
options = {
canmount = "on";
compression = "zstd";
dnodesize = "auto";
normalization = "formD";
xattr = "sa";
inherit mountpoint;
};
type = "zfs_fs";
};
in
{
"data" = dataset "/";
"data/etc" = dataset "/etc";
"data/home" = dataset "/home";
"data/var" = dataset "/var";
# used by services.postgresqlBackup and later by restic
"data/var/backup" = dataset "/var/backup";
"data/var/lib" = dataset "/var/lib";
"data/var/log" = dataset "/var/log";
"nixos" = {
options = {
canmount = "off";
mountpoint = "none";
};
type = "zfs_fs";
};
"nixos/nix" = dataset "/nix";
"nixos/nix/store" = {
options = {
atime = "off";
canmount = "on";
mountpoint = "/nix/store";
};
type = "zfs_fs";
};
"nixos/nix/var" = dataset "/nix/var";
"reserved" = {
# zfs uses copy on write and requires some free space to delete files when the disk is completely filled
options = {
canmount = "off";
mountpoint = "none";
reservation = "5GiB";
};
type = "zfs_fs";
};
};
};
}))
cfg.disks));
# we use our own hardware-configuration.nix
enableConfig = false;
};