nix-config/modules/disko.nix

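# Declarative disk layout for this host, built on top of the disko module.
# A disk entry describes a GPT-partitioned device with an optional ESP,
# optional LUKS + LVM (for ceph) and a ZFS pool named after the host.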
{ config, lib, ... }:

let
  cfg = config.disko;
in
{
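  # One submodule per physical disk; the with* flags choose which parts of
  # the layout (boot, ceph, LUKS, zfs) are created for that disk.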
  options.disko.disks = lib.mkOption {
    description = lib.mdDoc "Disks to format.";
    type = with lib.types; nullOr (listOf (submodule (_: {
      options = {
        device = lib.mkOption {
          type = lib.types.str;
          default = null;
          example = "/dev/sda";
          description = "Path of the disk.";
        };

        name = lib.mkOption {
          type = lib.types.str;
          default = null;
          example = "ssd0";
          description = "Name of the disk.";
        };

        withBoot = lib.mkOption {
          type = lib.types.bool;
          default = true;
          description = "Whether to include a boot partition.";
        };

        withCeph = lib.mkOption {
          type = lib.types.bool;
          default = true;
          description = "Whether to include a ceph partition.";
        };

        withLuks = lib.mkOption {
          type = lib.types.bool;
          default = true;
          description = "Whether to encrypt the partitions.";
        };

        withZfs = lib.mkOption {
          type = lib.types.bool;
          default = true;
          description = "Whether to include a zfs partition.";
        };
      };
    })));
    default = null;
  };

  config = {
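    # Every disk must end up with at least one filesystem (ceph or zfs),
    # and ceph only exists on the LUKS-encrypted LVM volume group.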
    assertions = lib.concatMap
      (disk: [
        {
          assertion = disk.withCeph || disk.withZfs;
          message = "Must enable ceph or zfs!";
        }
        {
          assertion = disk.withCeph -> disk.withLuks;
          message = "Ceph requires Luks!";
        }
      ])
      (if cfg.disks != null then cfg.disks else [ ]);

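    # Generated disko layout; note that only the first entry of cfg.disks is
    # used (lib.head below).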
    disko = {
      devices = lib.mkIf (cfg.disks != null) (lib.head (map
        (disk:
          let
            diskName = if disk.name != "" then "-${disk.name}" else "";
            luksName = "crypt-${config.networking.hostName}${diskName}";
            rootSize = 200; # size in GiB of the zfs volume when it lives inside LVM
            vgName = "lvm-${config.networking.hostName}${diskName}";
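            # Content description for the ZFS member: used directly on the
            # partition when LUKS is off, or as an LV inside the volume group.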
            zfs = {
              size = if (!disk.withCeph) then "100%FREE" else "${toString rootSize}GiB";
              content = {
                pool = zfsName;
                type = "zfs";
              };
            };
            zfsName = "${config.networking.hostName}${diskName}";
          in
          {
            disk.${disk.device} = {
              inherit (disk) device;
              type = "disk";
              content = {
                type = "table";
                format = "gpt";
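                # Optional 512 MiB ESP mounted at /boot, followed by a single
                # "root" partition: a LUKS container holding an LVM PV, or a
                # plain ZFS pool member when LUKS is disabled.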
                partitions = lib.optional disk.withZfs
                  {
                    name = "ESP";
                    start = "1MiB";
                    end = "512MiB";
                    bootable = true;
                    content = {
                      type = "filesystem";
                      format = "vfat";
                      mountpoint = "/boot";
                    };
                  } ++ [
                    {
                      name = "root";
                      start = if disk.withZfs then "512MiB" else "1MiB";
                      end = "100%";
                      part-type = "primary";
                      content = lib.optionalAttrs disk.withLuks
                        {
                          type = "luks";
                          name = luksName;
                          # TODO: add password, otherwise prompt opens
                          keyFile = "/$PWD/keyFile";
                          content = {
                            type = "lvm_pv";
                            vg = vgName;
                          };
                        } // lib.optionalAttrs (!disk.withLuks) zfs.content;
                    }
                  ];
              };
            };
          } // lib.optionalAttrs disk.withLuks {
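            # The encrypted PV becomes a volume group carrying the ceph LV and,
            # if requested, the ZFS LV.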
            lvm_vg.${vgName} = {
              type = "lvm_vg";
              lvs = lib.optionalAttrs disk.withCeph
                {
                  # TODO: delete old code if this works
                  ceph.size = "100%FREE";
                  # the header is 3650 bytes long; subtract an additional 446 bytes for alignment
                  # error messages:
                  #   Volume group "lvm-chaos" has insufficient free space (51195 extents): 51200 required.
                  #   Size is not a multiple of 512. Try using 40057405440 or 40057405952.
                  # ceph.size =
                  #   let
                  #     # convert GiB to bytes
                  #     rootSizeBytes = rootSize * 1024 * 1024 * 1024;
                  #     # convert back to MiB, aligning to 4 MiB in the process
                  #     roundToMiB = "/1024/1024/4*4";
                  #     # subtract 512 MiB for /boot and 20 MiB for luks+header+other
                  #     bootOther = "-512-20";
                  #   in
                  #   "$((($(lsblk ${disk.device} --noheadings --nodeps --output SIZE --bytes)-${toString rootSizeBytes})${roundToMiB}${bootOther}))MiB";
                } // lib.optionalAttrs disk.withZfs { inherit zfs; };
            };
          } // {
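            # ZFS pool named after the host (and disk); the pool itself is not
            # mounted, the datasets below define their own mountpoints.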
            zpool.${zfsName} = {
              type = "zpool";
              mountpoint = null;
              rootFsOptions.acltype = "posixacl";
              options = {
                ashift = "12";
                autotrim = "on";
              };
              datasets =
                let
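                  # helper: a mountable zfs_fs dataset with the shared defaults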
                  dataset = mountpoint: {
                    options = {
                      canmount = "on";
                      compression = "zstd";
                      dnodesize = "auto";
                      normalization = "formD";
                      xattr = "sa";
                      inherit mountpoint;
                    };
                    type = "zfs_fs";
                  };
                in
                {
                  "data" = dataset "/";
                  "data/etc" = dataset "/etc";
                  "data/home" = dataset "/home";
                  "data/var" = dataset "/var";
                  # used by services.postgresqlBackup and later by restic
                  "data/var/backup" = dataset "/var/backup";
                  "data/var/lib" = dataset "/var/lib";
                  "data/var/log" = dataset "/var/log";
                  "nixos" = {
                    options = {
                      canmount = "off";
                      mountpoint = "none";
                    };
                    type = "zfs_fs";
                  };
"nixos/nix" = dataset "/nix";
"nixos/nix/store" = {
options = {
atime = "off";
canmount = "on";
mountpoint = "/nix/store";
};
type = "zfs_fs";
2023-05-10 20:34:18 +02:00
};
2023-05-19 01:57:15 +02:00
"nixos/nix/var" = dataset "/nix/var";
"reserved" = {
# zfs uses copy on write and requires some free space to delete files when the disk is completely filled
options = {
canmount = "off";
mountpoint = "none";
reservation = "5GiB";
};
type = "zfs_fs";
2023-05-10 20:34:18 +02:00
};
2023-05-08 23:33:12 +02:00
};
            };
          })
        cfg.disks));

      # we use our own hardware-configuration.nix
      enableConfig = false;
    };
  };
}