2
0

assimilate nixos modules

This commit is contained in:
Ehmry - 2020-10-23 22:39:33 +02:00
parent c765ba3b02
commit 3d5b687b4b
19 changed files with 1092 additions and 867 deletions

View File

@ -65,9 +65,7 @@
forAllCrossSystems ({ system, localSystem, crossSystem }: forAllCrossSystems ({ system, localSystem, crossSystem }:
nixpkgs.lib // (import ./lib { nixpkgs.lib // (import ./lib {
inherit system localSystem crossSystem; inherit system localSystem crossSystem;
localPackages = nixpkgs.legacyPackages.${localSystem}; pkgs = self.legacyPackages.${system};
genodepkgs = self;
nixpkgs = nixpkgsFor.${system};
})); }));
legacyPackages = legacyPackages =
@ -140,11 +138,11 @@
checks = checks =
# Checks for continuous testing # Checks for continuous testing
let tests = import ./tests; let tests = import ./tests;
in in with (forAllCrossSystems ({ system, localSystem, crossSystem }:
with (forAllCrossSystems ({ system, localSystem, crossSystem }:
tests { tests {
flake = self; flake = self;
inherit system localSystem crossSystem; inherit system localSystem crossSystem;
pkgs = self.legacyPackages.${system};
} // { } // {
ports = nixpkgsFor.${localSystem}.symlinkJoin { ports = nixpkgsFor.${localSystem}.symlinkJoin {
name = "ports"; name = "ports";

View File

@ -1,9 +1,11 @@
{ system, localSystem, crossSystem, genodepkgs, nixpkgs, localPackages }: { system, localSystem, crossSystem, pkgs }:
let let
nixpkgs = pkgs;
localPackages = pkgs.buildPackages.buildPackages;
thisSystem = builtins.getAttr system; thisSystem = builtins.getAttr system;
inherit (nixpkgs) buildPackages; inherit (nixpkgs) buildPackages;
testPkgs = thisSystem genodepkgs.packages; testPkgs = pkgs.genodePackages;
dhallCachePrelude = '' dhallCachePrelude = ''
export XDG_CACHE_HOME=$NIX_BUILD_TOP export XDG_CACHE_HOME=$NIX_BUILD_TOP

View File

@ -0,0 +1,27 @@
# NixOS module: boot configuration for the Genode base-hw kernel on
# x86_64 PC hardware.
#
# Fix: removed the unused `localPackages = pkgs.buildPackages;` binding
# that was declared but never referenced in this module.
{ config, pkgs, lib, ... }:
with lib;
let
  # Image-building helpers (hwImage, novaImage, ...) from ../lib,
  # instantiated for the configured cross-compilation triple.
  utils = import ../lib {
    inherit (config.nixpkgs) system localSystem crossSystem;
    inherit pkgs;
  };
in {
  genode.core = {
    prefix = "hw-pc-";
    supportedSystems = [ "x86_64-genode" ];
    # Core components: the base-hw PC kernel and the RTC driver.
    basePackages = with pkgs.genodePackages; [ base-hw-pc rtc_drv ];
  };
  genode.boot = {
    # The bootloader loads the ELF image assembled below.
    initrd = "${config.genode.boot.image}/image.elf";
    # hwImage arguments: two platform addresses (presumably the kernel's
    # virtual base and the RAM base) — TODO confirm against ../lib.
    image = utils.hwImage "0xffffffc000000000" "0x00200000"
      pkgs.genodePackages.base-hw-pc config.system.name { }
      config.genode.boot.config;
  };
}

View File

@ -0,0 +1,27 @@
# NixOS module: boot configuration for the Genode base-hw kernel on the
# QEMU aarch64 "virt" machine.
#
# Fix: removed the unused `localPackages = pkgs.buildPackages;` binding
# that was declared but never referenced in this module.
{ config, pkgs, lib, ... }:
with lib;
let
  # Image-building helpers (hwImage, ...) from ../lib, instantiated for
  # the configured cross-compilation triple.
  utils = import ../lib {
    inherit (config.nixpkgs) system localSystem crossSystem;
    inherit pkgs;
  };
in {
  genode.core = {
    prefix = "hw-virt_qemu";
    supportedSystems = [ "aarch64-genode" ];
    # Core components: the virt_qemu kernel and a dummy RTC (the QEMU
    # virt machine has no PC-style RTC hardware).
    basePackages = with pkgs.genodePackages; [ base-hw-virt_qemu rtc-dummy ];
  };
  genode.boot = {
    # The bootloader loads the ELF image assembled below.
    initrd = "${config.genode.boot.image}/image.elf";
    # hwImage arguments: two platform addresses (presumably the kernel's
    # virtual base and the RAM base) — TODO confirm against ../lib.
    image = utils.hwImage "0xffffffc000000000" "0x40000000"
      pkgs.genodePackages.base-hw-virt_qemu config.system.name { }
      config.genode.boot.config;
  };
}

View File

@ -0,0 +1,123 @@
# NixOS module declaring the genode.core / genode.boot option interface
# and assembling the Dhall boot configuration from package manifests.
{ config, pkgs, lib, ... }:
with lib;
# Manifest/tarball generation below runs on the build host.
let localPackages = pkgs.buildPackages;
in {
options.genode = {
core = {
# Filename prefix identifying the kernel/platform flavor of this core.
prefix = mkOption {
type = types.str;
example = "hw-pc-";
};
# Systems this core can boot; enforced by the assertion in `config`.
supportedSystems = mkOption {
type = types.listOf types.str;
example = [ "i686-genode" "x86_64-genode" ];
};
# Base component binaries bundled into every boot image for this core.
basePackages = mkOption { type = types.listOf types.package; };
};
boot = {
# Kernel handed to the emulator/bootloader; defaults to the bender
# chain-loader.
kernel = mkOption {
type = types.path;
default = "${pkgs.genodePackages.bender}/bender";
};
initrd = mkOption {
type = types.str;
default = "${pkgs.genodePackages.bender}/bender";
description = "Path to an image or a command-line arguments";
};
config = mkOption {
type = types.str;
description = ''
Dhall boot configuration. See
https://git.sr.ht/~ehmry/dhall-genode/tree/master/Boot/package.dhall
'';
};
image = mkOption {
type = types.path;
description =
"Boot image containing the base component binaries and configuration.";
};
romModules = mkOption {
type = types.attrsOf types.path;
description = "Attr set of initial ROM modules";
};
};
};
config = {
# Refuse to evaluate when the selected core does not support the
# target system.
assertions = [{
assertion = builtins.any (s: s == config.nixpkgs.system)
config.genode.core.supportedSystems;
message = "invalid Genode core for this system";
}];
genode.boot.config = let
# addManifest: annotate a derivation with a `manifest` attribute — a
# Dhall list mapping each file name under $out (and the "lib" output,
# when present) to its store path.
addManifest = drv:
drv // {
manifest =
localPackages.runCommand "${drv.name}.dhall" { inherit drv; } ''
set -eu
echo -n '[' >> $out
find $drv/ -type f -printf ',{mapKey= "%f",mapValue="%p"}' >> $out
${if builtins.elem "lib" drv.outputs then
''
find ${drv.lib}/ -type f -printf ',{mapKey= "%f",mapValue="%p"}' >> $out''
else
""}
echo -n ']' >> $out
'';
};
# mergeManifests: fold per-package manifests into a single Dhall list
# keyed by package name; aborts evaluation when a package was not run
# through addManifest.
mergeManifests = inputs:
localPackages.writeTextFile {
name = "manifest.dhall";
text = with builtins;
let
f = head: input:
if hasAttr "manifest" input then
''
${head}, { mapKey = "${
lib.getName input
}", mapValue = ${input.manifest} }''
else
abort "${input.pname} does not have a manifest";
in (foldl' f "[" inputs) + "]";
};
# Manifest of everything packed into the store tarball.
storeManifest =
mergeManifests (map addManifest config.genode.init.inputs);
# Tarball of the init inputs' store closure, served to the VFS as
# "store.tar" (see store-wrapper.dhall).
storeTarball = localPackages.runCommand "store" { } ''
mkdir -p $out
tar cf "$out/store.tar" --absolute-names ${
toString config.genode.init.inputs
}
'';
# Manifest of boot-time ROM modules: core packages, the store
# tarball, and the init/vfs/cached_fs_rom components.
manifest = mergeManifests (map addManifest
(config.genode.core.basePackages ++ [ storeTarball ]
++ (with pkgs.genodePackages; [ init vfs cached_fs_rom ])));
# Final value: a Dhall application of store-wrapper.dhall to the init
# config, the tarball size, and both manifests.
in ''
${./store-wrapper.dhall}
(${config.genode.init.config})
$(stat --format '%s' ${storeTarball}/store.tar)
${storeManifest} ${manifest}
'';
};
}

View File

@ -11,7 +11,8 @@ with lib;
config = mkOption { config = mkOption {
description = "Dhall configuration of this init instance"; description = "Dhall configuration of this init instance";
type = types.either types.str types.path; type = types.nullOr types.str;
default = null;
}; };
inputs = mkOption { inputs = mkOption {
@ -19,6 +20,15 @@ with lib;
type = types.listOf types.package; type = types.listOf types.package;
}; };
children = mkOption {
type = let
childOptions = { name, ... }: {
name = mkOption { type = types.str; };
dhallAttrs = mkOption { type = types.str; };
};
in types.attrsOf (types.submodule childOptions);
};
subinits = mkOption { subinits = mkOption {
type = types.attrsOf (types.submodule ({ config, options, name, ... }: { type = types.attrsOf (types.submodule ({ config, options, name, ... }: {
options = { options = {

View File

@ -0,0 +1,51 @@
# NixOS module: per-interface Nic service configuration for Genode.
{ config, pkgs, lib, ... }:
with lib; {
  options.genode = {
    hardware = {
      nic = mkOption {
        default = { };
        example = { eth0.driver = "virtio"; };
        description = "The configuration for each Nic service.";
        # NOTE(review): the submodule declares mkOption at the top level
        # rather than under `options` — confirm the module system accepts
        # this form.
        type = let
          nicSubmodule = { name, ... }: {
            name = mkOption {
              example = "eth0";
              type = types.str;
              description = "Name of the Nic service.";
            };
            driver = mkOption { type = types.enum [ "ipxe" "virtio" ]; };
            ipStack = mkOption {
              type = types.enum [ "lwip" "lxip" ];
              default = "lwip";
            };
          };
        in types.attrsOf (types.submodule nicSubmodule);
      };
    };
  };
  config = {
    # Register one "<iface>-nic" and one "<iface>-sockets" init child per
    # configured network interface, each with an (as yet) empty definition.
    genode.init.children = let
      childrenFor = suffix:
        mapAttrsToList (ifName: _: nameValuePair (ifName + suffix) { })
        config.networking.interfaces;
    in builtins.listToAttrs (childrenFor "-nic" ++ childrenFor "-sockets");
  };
}

26
nixos-modules/nova.nix Normal file
View File

@ -0,0 +1,26 @@
# NixOS module: boot configuration for the Genode NOVA microhypervisor
# on x86_64.
#
# Fix: removed the unused `localPackages = pkgs.buildPackages;` binding
# that was declared but never referenced in this module.
{ config, pkgs, lib, ... }:
with lib;
let
  # Image-building helpers (novaImage, ...) from ../lib, instantiated
  # for the configured cross-compilation triple.
  utils = import ../lib {
    inherit (config.nixpkgs) system localSystem crossSystem;
    inherit pkgs;
  };
in {
  genode.core = {
    prefix = "nova-";
    supportedSystems = [ "x86_64-genode" ];
    basePackages = with pkgs.genodePackages; [ base-nova rtc_drv ];
  };
  genode.boot = {
    # Multiboot chain as a single quoted argument: the NOVA hypervisor
    # with its command-line flags, followed by the boot image.
    initrd =
      "'${pkgs.genodePackages.NOVA}/hypervisor-x86_64 arg=iommu novpid serial,${config.genode.boot.image}/image.elf'";
    image = utils.novaImage config.system.name { } config.genode.boot.config;
  };
}

View File

@ -1,20 +1,11 @@
# This module creates a virtual machine from the NixOS configuration.
# Building the `config.system.build.vm' attribute gives you a command
# that starts a KVM/QEMU VM running the NixOS configuration defined in
# `config'. The Nix store is shared read-only with the host, which
# makes (re)building VMs very efficient. However, it also means you
# can't reconfigure the guest inside the guest - you need to rebuild
# the VM in the host. On the other hand, the root filesystem is a
# read/writable disk image persistent across VM reboots.
{ config, lib, pkgs, ... }: { config, lib, pkgs, ... }:
with lib; with lib;
with import ../../lib/qemu-flags.nix { inherit pkgs; }; with import ../tests/lib/qemu-flags.nix { inherit pkgs; };
let let
qemu = config.system.build.qemu or pkgs.qemu_test; qemu = config.system.build.qemu;
cfg = config.virtualisation; cfg = config.virtualisation;
@ -52,7 +43,8 @@ let
}; };
driveCmdline = idx: { file, driveExtraOpts, deviceExtraOpts, ... }: driveCmdline = idx:
{ file, driveExtraOpts, deviceExtraOpts, ... }:
let let
drvId = "drive${toString idx}"; drvId = "drive${toString idx}";
mkKeyValue = generators.mkKeyValueDefault { } "="; mkKeyValue = generators.mkKeyValueDefault { } "=";
@ -63,20 +55,15 @@ let
"if" = "none"; "if" = "none";
inherit file; inherit file;
}); });
deviceOpts = mkOpts (deviceExtraOpts // { deviceOpts = mkOpts (deviceExtraOpts // { drive = drvId; });
drive = drvId; device = if cfg.qemu.diskInterface == "scsi" then
});
device =
if cfg.qemu.diskInterface == "scsi" then
"-device lsi53c895a -device scsi-hd,${deviceOpts}" "-device lsi53c895a -device scsi-hd,${deviceOpts}"
else else
"-device virtio-blk-pci,${deviceOpts}"; "-device virtio-blk-pci,${deviceOpts}";
in in "-drive ${driveOpts} ${device}";
"-drive ${driveOpts} ${device}";
drivesCmdLine = drives: concatStringsSep " " (imap1 driveCmdline drives); drivesCmdLine = drives: concatStringsSep " " (imap1 driveCmdline drives);
# Creates a device name from a 1-based a numerical index, e.g. # Creates a device name from a 1-based a numerical index, e.g.
# * `driveDeviceName 1` -> `/dev/vda` # * `driveDeviceName 1` -> `/dev/vda`
# * `driveDeviceName 2` -> `/dev/vdb` # * `driveDeviceName 2` -> `/dev/vdb`
@ -95,16 +82,17 @@ let
addDeviceNames = addDeviceNames =
imap1 (idx: drive: drive // { device = driveDeviceName idx; }); imap1 (idx: drive: drive // { device = driveDeviceName idx; });
efiPrefix = efiPrefix = if (pkgs.stdenv.isi686 || pkgs.stdenv.isx86_64) then
if (pkgs.stdenv.isi686 || pkgs.stdenv.isx86_64) then "${pkgs.OVMF.fd}/FV/OVMF" "${pkgs.OVMF.fd}/FV/OVMF"
else if pkgs.stdenv.isAarch64 then "${pkgs.OVMF.fd}/FV/AAVMF" else if pkgs.stdenv.isAarch64 then
else throw "No EFI firmware available for platform"; "${pkgs.OVMF.fd}/FV/AAVMF"
else
throw "No EFI firmware available for platform";
efiFirmware = "${efiPrefix}_CODE.fd"; efiFirmware = "${efiPrefix}_CODE.fd";
efiVarsDefault = "${efiPrefix}_VARS.fd"; efiVarsDefault = "${efiPrefix}_VARS.fd";
# Shell script to start the VM. # Shell script to start the VM.
startVM = startVM = ''
''
#! ${pkgs.runtimeShell} #! ${pkgs.runtimeShell}
NIX_DISK_IMAGE=$(readlink -f ''${NIX_DISK_IMAGE:-${config.virtualisation.diskImage}}) NIX_DISK_IMAGE=$(readlink -f ''${NIX_DISK_IMAGE:-${config.virtualisation.diskImage}})
@ -135,16 +123,18 @@ let
cp ${bootDisk}/efi-vars.fd "$NIX_EFI_VARS" || exit 1 cp ${bootDisk}/efi-vars.fd "$NIX_EFI_VARS" || exit 1
chmod 0644 "$NIX_EFI_VARS" || exit 1 chmod 0644 "$NIX_EFI_VARS" || exit 1
fi fi
'' else '' '' else
''} ""}
'' else '' '' else
''} ""}
cd $TMPDIR cd $TMPDIR
idx=0 idx=0
${flip concatMapStrings cfg.emptyDiskImages (size: '' ${flip concatMapStrings cfg.emptyDiskImages (size: ''
if ! test -e "empty$idx.qcow2"; then if ! test -e "empty$idx.qcow2"; then
${qemu}/bin/qemu-img create -f qcow2 "empty$idx.qcow2" "${toString size}M" ${qemu}/bin/qemu-img create -f qcow2 "empty$idx.qcow2" "${
toString size
}M"
fi fi
idx=$((idx + 1)) idx=$((idx + 1))
'')} '')}
@ -165,20 +155,16 @@ let
"$@" "$@"
''; '';
regInfo =
regInfo = pkgs.closureInfo { rootPaths = config.virtualisation.pathsInNixDB; }; pkgs.closureInfo { rootPaths = config.virtualisation.pathsInNixDB; };
# Generate a hard disk image containing a /boot partition and GRUB # Generate a hard disk image containing a /boot partition and GRUB
# in the MBR. Used when the `useBootLoader' option is set. # in the MBR. Used when the `useBootLoader' option is set.
# Uses `runInLinuxVM` to create the image in a throwaway VM. # Uses `runInLinuxVM` to create the image in a throwaway VM.
# See note [Disk layout with `useBootLoader`]. # See note [Disk layout with `useBootLoader`].
# FIXME: use nixos/lib/make-disk-image.nix. # FIXME: use nixos/lib/make-disk-image.nix.
bootDisk = bootDisk = pkgs.vmTools.runInLinuxVM (pkgs.runCommand "nixos-boot-disk" {
pkgs.vmTools.runInLinuxVM ( preVM = ''
pkgs.runCommand "nixos-boot-disk"
{ preVM =
''
mkdir $out mkdir $out
diskImage=$out/disk.img diskImage=$out/disk.img
${qemu}/bin/qemu-img create -f qcow2 $diskImage "60M" ${qemu}/bin/qemu-img create -f qcow2 $diskImage "60M"
@ -186,16 +172,15 @@ let
efiVars=$out/efi-vars.fd efiVars=$out/efi-vars.fd
cp ${efiVarsDefault} $efiVars cp ${efiVarsDefault} $efiVars
chmod 0644 $efiVars chmod 0644 $efiVars
'' else '' '' else
''} ""}
''; '';
buildInputs = [ pkgs.utillinux ]; buildInputs = [ pkgs.utillinux ];
QEMU_OPTS = "-nographic -serial stdio -monitor none" QEMU_OPTS = "-nographic -serial stdio -monitor none"
+ lib.optionalString cfg.useEFIBoot ( + lib.optionalString cfg.useEFIBoot
" -drive if=pflash,format=raw,unit=0,readonly=on,file=${efiFirmware}" (" -drive if=pflash,format=raw,unit=0,readonly=on,file=${efiFirmware}"
+ " -drive if=pflash,format=raw,unit=1,file=$efiVars"); + " -drive if=pflash,format=raw,unit=1,file=$efiVars");
} } ''
''
# Create a /boot EFI partition with 60M and arbitrary but fixed GUIDs for reproducibility # Create a /boot EFI partition with 60M and arbitrary but fixed GUIDs for reproducibility
${pkgs.gptfdisk}/bin/sgdisk \ ${pkgs.gptfdisk}/bin/sgdisk \
--set-alignment=1 --new=1:34:2047 --change-name=1:BIOSBootPartition --typecode=1:ef02 \ --set-alignment=1 --new=1:34:2047 --change-name=1:BIOSBootPartition --typecode=1:ef02 \
@ -217,19 +202,12 @@ let
# by `switch-to-configuration` will hit /dev/vda anyway. # by `switch-to-configuration` will hit /dev/vda anyway.
'' ''
ln -s /dev/vda ${config.boot.loader.grub.device} ln -s /dev/vda ${config.boot.loader.grub.device}
'' ''}
}
${pkgs.dosfstools}/bin/mkfs.fat -F16 /dev/vda2 ${pkgs.dosfstools}/bin/mkfs.fat -F16 /dev/vda2
export MTOOLS_SKIP_CHECK=1 export MTOOLS_SKIP_CHECK=1
${pkgs.mtools}/bin/mlabel -i /dev/vda2 ::boot ${pkgs.mtools}/bin/mlabel -i /dev/vda2 ::boot
# Mount /boot; load necessary modules first.
${pkgs.kmod}/bin/insmod ${pkgs.linux}/lib/modules/*/kernel/fs/nls/nls_cp437.ko.xz || true
${pkgs.kmod}/bin/insmod ${pkgs.linux}/lib/modules/*/kernel/fs/nls/nls_iso8859-1.ko.xz || true
${pkgs.kmod}/bin/insmod ${pkgs.linux}/lib/modules/*/kernel/fs/fat/fat.ko.xz || true
${pkgs.kmod}/bin/insmod ${pkgs.linux}/lib/modules/*/kernel/fs/fat/vfat.ko.xz || true
${pkgs.kmod}/bin/insmod ${pkgs.linux}/lib/modules/*/kernel/fs/efivarfs/efivarfs.ko.xz || true
mkdir /boot mkdir /boot
mount /dev/vda2 /boot mount /dev/vda2 /boot
@ -245,11 +223,6 @@ let
mkdir -p /dev/block mkdir -p /dev/block
ln -s /dev/vda2 /dev/block/254:2 ln -s /dev/vda2 /dev/block/254:2
# Set up system profile (normally done by nixos-rebuild / nix-env --set)
mkdir -p /nix/var/nix/profiles
ln -s ${config.system.build.toplevel} /nix/var/nix/profiles/system-1-link
ln -s /nix/var/nix/profiles/system-1-link /nix/var/nix/profiles/system
# Install bootloader # Install bootloader
touch /etc/NIXOS touch /etc/NIXOS
export NIXOS_INSTALL_BOOTLOADER=1 export NIXOS_INSTALL_BOOTLOADER=1
@ -259,94 +232,72 @@ let
'' # */ '' # */
); );
in in {
{
imports = [
../profiles/qemu-guest.nix
];
options = { options = {
virtualisation.memorySize = virtualisation.memorySize = mkOption {
mkOption {
default = 384; default = 384;
description = description = ''
''
Memory size (M) of virtual machine. Memory size (M) of virtual machine.
''; '';
}; };
virtualisation.diskSize = virtualisation.diskSize = mkOption {
mkOption {
default = 512; default = 512;
description = description = ''
''
Disk size (M) of virtual machine. Disk size (M) of virtual machine.
''; '';
}; };
virtualisation.diskImage = virtualisation.diskImage = mkOption {
mkOption {
default = "./${config.system.name}.qcow2"; default = "./${config.system.name}.qcow2";
description = description = ''
''
Path to the disk image containing the root filesystem. Path to the disk image containing the root filesystem.
The image will be created on startup if it does not The image will be created on startup if it does not
exist. exist.
''; '';
}; };
virtualisation.bootDevice = virtualisation.bootDevice = mkOption {
mkOption {
type = types.str; type = types.str;
example = "/dev/vda"; example = "/dev/vda";
description = description = ''
''
The disk to be used for the root filesystem. The disk to be used for the root filesystem.
''; '';
}; };
virtualisation.emptyDiskImages = virtualisation.emptyDiskImages = mkOption {
mkOption {
default = [ ]; default = [ ];
type = types.listOf types.int; type = types.listOf types.int;
description = description = ''
''
Additional disk images to provide to the VM. The value is Additional disk images to provide to the VM. The value is
a list of size in megabytes of each disk. These disks are a list of size in megabytes of each disk. These disks are
writeable by the VM. writeable by the VM.
''; '';
}; };
virtualisation.graphics = virtualisation.graphics = mkOption {
mkOption {
default = true; default = true;
description = description = ''
''
Whether to run QEMU with a graphics window, or in nographic mode. Whether to run QEMU with a graphics window, or in nographic mode.
Serial console will be enabled on both settings, but this will Serial console will be enabled on both settings, but this will
change the preferred console. change the preferred console.
''; '';
}; };
virtualisation.cores = virtualisation.cores = mkOption {
mkOption {
default = 1; default = 1;
type = types.int; type = types.int;
description = description = ''
''
Specify the number of cores the guest is permitted to use. Specify the number of cores the guest is permitted to use.
The number can be higher than the available cores on the The number can be higher than the available cores on the
host system. host system.
''; '';
}; };
virtualisation.pathsInNixDB = virtualisation.pathsInNixDB = mkOption {
mkOption {
default = [ ]; default = [ ];
description = description = ''
''
The list of paths whose closure is registered in the Nix The list of paths whose closure is registered in the Nix
database in the VM. All other paths in the host Nix store database in the VM. All other paths in the host Nix store
appear in the guest Nix store as well, but are considered appear in the guest Nix store as well, but are considered
@ -355,12 +306,10 @@ in
''; '';
}; };
virtualisation.vlans = virtualisation.vlans = mkOption {
mkOption {
default = [ 1 ]; default = [ 1 ];
example = [ 1 2 ]; example = [ 1 2 ];
description = description = ''
''
Virtual networks to which the VM is connected. Each Virtual networks to which the VM is connected. Each
number <replaceable>N</replaceable> in this list causes number <replaceable>N</replaceable> in this list causes
the VM to have a virtual Ethernet interface attached to a the VM to have a virtual Ethernet interface attached to a
@ -372,37 +321,31 @@ in
''; '';
}; };
virtualisation.writableStore = virtualisation.writableStore = mkOption {
mkOption {
default = true; # FIXME default = true; # FIXME
description = description = ''
''
If enabled, the Nix store in the VM is made writable by If enabled, the Nix store in the VM is made writable by
layering an overlay filesystem on top of the host's Nix layering an overlay filesystem on top of the host's Nix
store. store.
''; '';
}; };
virtualisation.writableStoreUseTmpfs = virtualisation.writableStoreUseTmpfs = mkOption {
mkOption {
default = true; default = true;
description = description = ''
''
Use a tmpfs for the writable store instead of writing to the VM's Use a tmpfs for the writable store instead of writing to the VM's
own filesystem. own filesystem.
''; '';
}; };
networking.primaryIPAddress = networking.primaryIPAddress = mkOption {
mkOption {
default = ""; default = "";
internal = true; internal = true;
description = "Primary IP address used in /etc/hosts."; description = "Primary IP address used in /etc/hosts.";
}; };
virtualisation.qemu = { virtualisation.qemu = {
options = options = mkOption {
mkOption {
type = types.listOf types.unspecified; type = types.listOf types.unspecified;
default = [ ]; default = [ ];
example = [ "-vga std" ]; example = [ "-vga std" ];
@ -411,8 +354,7 @@ in
consoles = mkOption { consoles = mkOption {
type = types.listOf types.str; type = types.listOf types.str;
default = let default = let consoles = [ "${qemuSerialDevice},115200n8" "tty0" ];
consoles = [ "${qemuSerialDevice},115200n8" "tty0" ];
in if cfg.graphics then consoles else reverseList consoles; in if cfg.graphics then consoles else reverseList consoles;
example = [ "console=tty1" ]; example = [ "console=tty1" ];
description = '' description = ''
@ -426,8 +368,7 @@ in
''; '';
}; };
networkingOptions = networkingOptions = mkOption {
mkOption {
default = [ default = [
"-net nic,netdev=user.0,model=virtio" "-net nic,netdev=user.0,model=virtio"
"-netdev user,id=user.0\${QEMU_NET_OPTS:+,$QEMU_NET_OPTS}" "-netdev user,id=user.0\${QEMU_NET_OPTS:+,$QEMU_NET_OPTS}"
@ -443,23 +384,20 @@ in
''; '';
}; };
drives = drives = mkOption {
mkOption {
type = types.listOf (types.submodule driveOpts); type = types.listOf (types.submodule driveOpts);
description = "Drives passed to qemu."; description = "Drives passed to qemu.";
apply = addDeviceNames; apply = addDeviceNames;
}; };
diskInterface = diskInterface = mkOption {
mkOption {
default = "virtio"; default = "virtio";
example = "scsi"; example = "scsi";
type = types.enum [ "virtio" "scsi" "ide" ]; type = types.enum [ "virtio" "scsi" "ide" ];
description = "The interface used for the virtual hard disks."; description = "The interface used for the virtual hard disks.";
}; };
guestAgent.enable = guestAgent.enable = mkOption {
mkOption {
default = true; default = true;
type = types.bool; type = types.bool;
description = '' description = ''
@ -468,11 +406,9 @@ in
}; };
}; };
virtualisation.useBootLoader = virtualisation.useBootLoader = mkOption {
mkOption {
default = false; default = false;
description = description = ''
''
If enabled, the virtual machine will be booted using the If enabled, the virtual machine will be booted using the
regular boot loader (i.e., GRUB 1 or 2). This allows regular boot loader (i.e., GRUB 1 or 2). This allows
testing of the boot loader. If testing of the boot loader. If
@ -482,33 +418,27 @@ in
''; '';
}; };
virtualisation.useEFIBoot = virtualisation.useEFIBoot = mkOption {
mkOption {
default = false; default = false;
description = description = ''
''
If enabled, the virtual machine will provide a EFI boot If enabled, the virtual machine will provide a EFI boot
manager. manager.
useEFIBoot is ignored if useBootLoader == false. useEFIBoot is ignored if useBootLoader == false.
''; '';
}; };
virtualisation.efiVars = virtualisation.efiVars = mkOption {
mkOption {
default = "./${config.system.name}-efi-vars.fd"; default = "./${config.system.name}-efi-vars.fd";
description = description = ''
''
Path to nvram image containing UEFI variables. The will be created Path to nvram image containing UEFI variables. The will be created
on startup if it does not exist. on startup if it does not exist.
''; '';
}; };
virtualisation.bios = virtualisation.bios = mkOption {
mkOption {
default = null; default = null;
type = types.nullOr types.package; type = types.nullOr types.package;
description = description = ''
''
An alternate BIOS (such as <package>qboot</package>) with which to start the VM. An alternate BIOS (such as <package>qboot</package>) with which to start the VM.
Should contain a file named <literal>bios.bin</literal>. Should contain a file named <literal>bios.bin</literal>.
If <literal>null</literal>, QEMU's builtin SeaBIOS will be used. If <literal>null</literal>, QEMU's builtin SeaBIOS will be used.
@ -535,20 +465,17 @@ in
# If `useBootLoader`, GRUB goes to the second disk, see # If `useBootLoader`, GRUB goes to the second disk, see
# note [Disk layout with `useBootLoader`]. # note [Disk layout with `useBootLoader`].
boot.loader.grub.device = mkVMOverride ( boot.loader.grub.device = mkVMOverride (if cfg.useBootLoader then
if cfg.useBootLoader driveDeviceName 2 # second disk
then driveDeviceName 2 # second disk else
else cfg.bootDevice cfg.bootDevice);
);
boot.initrd.extraUtilsCommands = boot.initrd.extraUtilsCommands = ''
''
# We need mke2fs in the initrd. # We need mke2fs in the initrd.
copy_bin_and_libs ${pkgs.e2fsprogs}/bin/mke2fs copy_bin_and_libs ${pkgs.e2fsprogs}/bin/mke2fs
''; '';
boot.initrd.postDeviceCommands = boot.initrd.postDeviceCommands = ''
''
# If the disk image appears to be empty, run mke2fs to # If the disk image appears to be empty, run mke2fs to
# initialise. # initialise.
FSTYPE=$(blkid -o value -s TYPE ${cfg.bootDevice} || true) FSTYPE=$(blkid -o value -s TYPE ${cfg.bootDevice} || true)
@ -557,8 +484,7 @@ in
fi fi
''; '';
boot.initrd.postMountCommands = boot.initrd.postMountCommands = ''
''
# Mark this as a NixOS machine. # Mark this as a NixOS machine.
mkdir -p $targetRoot/etc mkdir -p $targetRoot/etc
echo -n > $targetRoot/etc/NIXOS echo -n > $targetRoot/etc/NIXOS
@ -576,51 +502,30 @@ in
''} ''}
''; '';
# After booting, register the closure of the paths in
# `virtualisation.pathsInNixDB' in the Nix database in the VM. This
# allows Nix operations to work in the VM. The path to the
# registration file is passed through the kernel command line to
# allow `system.build.toplevel' to be included. (If we had a direct
# reference to ${regInfo} here, then we would get a cyclic
# dependency.)
boot.postBootCommands =
''
if [[ "$(cat /proc/cmdline)" =~ regInfo=([^ ]*) ]]; then
${config.nix.package.out}/bin/nix-store --load-db < ''${BASH_REMATCH[1]}
fi
'';
boot.initrd.availableKernelModules =
optional cfg.writableStore "overlay"
++ optional (cfg.qemu.diskInterface == "scsi") "sym53c8xx";
virtualisation.bootDevice = mkDefault (driveDeviceName 1); virtualisation.bootDevice = mkDefault (driveDeviceName 1);
virtualisation.pathsInNixDB = [ config.system.build.toplevel ];
# FIXME: Consolidate this one day. # FIXME: Consolidate this one day.
virtualisation.qemu.options = mkMerge [ virtualisation.qemu.options = mkMerge [
(mkIf (pkgs.stdenv.isi686 || pkgs.stdenv.isx86_64) [ (mkIf (pkgs.stdenv.isi686 || pkgs.stdenv.isx86_64) [
"-usb" "-device usb-tablet,bus=usb-bus.0" "-usb"
"-device usb-tablet,bus=usb-bus.0"
]) ])
(mkIf (pkgs.stdenv.isAarch32 || pkgs.stdenv.isAarch64) [ (mkIf (pkgs.stdenv.isAarch32 || pkgs.stdenv.isAarch64) [
"-device virtio-gpu-pci" "-device usb-ehci,id=usb0" "-device usb-kbd" "-device usb-tablet" "-device virtio-gpu-pci"
"-device usb-ehci,id=usb0"
"-device usb-kbd"
"-device usb-tablet"
]) ])
(mkIf (!cfg.useBootLoader) [ (mkIf (!cfg.useBootLoader) [
"-kernel ${config.system.build.toplevel}/kernel" "-kernel ${config.genode.boot.kernel}"
"-initrd ${config.system.build.toplevel}/initrd" "-initrd ${config.genode.boot.initrd}"
''-append "$(cat ${config.system.build.toplevel}/kernel-params) init=${config.system.build.toplevel}/init regInfo=${regInfo}/registration ${consoles} $QEMU_KERNEL_PARAMS"''
]) ])
(mkIf cfg.useEFIBoot [ (mkIf cfg.useEFIBoot [
"-drive if=pflash,format=raw,unit=0,readonly,file=${efiFirmware}" "-drive if=pflash,format=raw,unit=0,readonly,file=${efiFirmware}"
"-drive if=pflash,format=raw,unit=1,file=$NIX_EFI_VARS" "-drive if=pflash,format=raw,unit=1,file=$NIX_EFI_VARS"
]) ])
(mkIf (cfg.bios != null) [ (mkIf (cfg.bios != null) [ "-bios ${cfg.bios}/bios.bin" ])
"-bios ${cfg.bios}/bios.bin" (mkIf (!cfg.graphics) [ "-nographic" ])
])
(mkIf (!cfg.graphics) [
"-nographic"
])
]; ];
virtualisation.qemu.drives = mkMerge [ virtualisation.qemu.drives = mkMerge [
@ -646,106 +551,12 @@ in
}) cfg.emptyDiskImages) }) cfg.emptyDiskImages)
]; ];
# Mount the host filesystem via 9P, and bind-mount the Nix store system.build.vm = pkgs.runCommand "nixos-vm" { preferLocalBuild = true; } ''
# of the host into our own filesystem. We use mkVMOverride to
# allow this module to be applied to "normal" NixOS system
# configuration, where the regular value for the `fileSystems'
# attribute should be disregarded for the purpose of building a VM
# test image (since those filesystems don't exist in the VM).
fileSystems = mkVMOverride (
{ "/".device = cfg.bootDevice;
${if cfg.writableStore then "/nix/.ro-store" else "/nix/store"} =
{ device = "store";
fsType = "9p";
options = [ "trans=virtio" "version=9p2000.L" "cache=loose" ];
neededForBoot = true;
};
"/tmp" = mkIf config.boot.tmpOnTmpfs
{ device = "tmpfs";
fsType = "tmpfs";
neededForBoot = true;
# Sync with systemd's tmp.mount;
options = [ "mode=1777" "strictatime" "nosuid" "nodev" ];
};
"/tmp/xchg" =
{ device = "xchg";
fsType = "9p";
options = [ "trans=virtio" "version=9p2000.L" ];
neededForBoot = true;
};
"/tmp/shared" =
{ device = "shared";
fsType = "9p";
options = [ "trans=virtio" "version=9p2000.L" ];
neededForBoot = true;
};
} // optionalAttrs (cfg.writableStore && cfg.writableStoreUseTmpfs)
{ "/nix/.rw-store" =
{ fsType = "tmpfs";
options = [ "mode=0755" ];
neededForBoot = true;
};
} // optionalAttrs cfg.useBootLoader
{ "/boot" =
# see note [Disk layout with `useBootLoader`]
{ device = "${lookupDriveDeviceName "boot" cfg.qemu.drives}2"; # 2 for e.g. `vdb2`, as created in `bootDisk`
fsType = "vfat";
noCheck = true; # fsck fails on a r/o filesystem
};
});
swapDevices = mkVMOverride [ ];
boot.initrd.luks.devices = mkVMOverride {};
# Don't run ntpd in the guest. It should get the correct time from KVM.
services.timesyncd.enable = false;
services.qemuGuest.enable = cfg.qemu.guestAgent.enable;
system.build.vm = pkgs.runCommand "nixos-vm" { preferLocalBuild = true; }
''
mkdir -p $out/bin mkdir -p $out/bin
ln -s ${config.system.build.toplevel} $out/system ln -s ${
ln -s ${pkgs.writeScript "run-nixos-vm" startVM} $out/bin/run-${config.system.name}-vm pkgs.writeScript "run-nixos-vm" startVM
} $out/bin/run-${config.system.name}-vm
''; '';
# When building a regular system configuration, override whatever
# video driver the host uses.
services.xserver.videoDrivers = mkVMOverride [ "modesetting" ];
services.xserver.defaultDepth = mkVMOverride 0;
services.xserver.resolutions = mkVMOverride [ { x = 1024; y = 768; } ];
services.xserver.monitorSection =
''
# Set a higher refresh rate so that resolutions > 800x600 work.
HorizSync 30-140
VertRefresh 50-160
'';
# Wireless won't work in the VM.
networking.wireless.enable = mkVMOverride false;
services.connman.enable = mkVMOverride false;
# Speed up booting by not waiting for ARP.
networking.dhcpcd.extraConfig = "noarp";
networking.usePredictableInterfaceNames = false;
system.requiredKernelConfig = with config.lib.kernelConfig;
[ (isEnabled "VIRTIO_BLK")
(isEnabled "VIRTIO_PCI")
(isEnabled "VIRTIO_NET")
(isEnabled "EXT4_FS")
(isYes "BLK_DEV")
(isYes "PCI")
(isYes "EXPERIMENTAL")
(isYes "NETDEVICES")
(isYes "NET_CORE")
(isYes "INET")
(isYes "NETWORK_FILESYSTEMS")
] ++ optional (!cfg.graphics) [
(isYes "SERIAL_8250_CONSOLE")
(isYes "SERIAL_8250")
];
}; };
} }

View File

@ -0,0 +1,159 @@
let Genode =
env:DHALL_GENODE sha256:e90438be23b5100003cf018b783986df67bc6d0e3d35e800677d0d9109ff6aa9
let Prelude = Genode.Prelude
let XML = Prelude.XML
let Init = Genode.Init
let Child = Init.Child
let TextMapType = Prelude.Map.Type Text
let Children = TextMapType Child.Type
let Manifest/Type = TextMapType (TextMapType Text)
let Manifest/toRoutes =
λ(manifest : Manifest/Type) →
Prelude.List.map
(Prelude.Map.Entry Text Text)
Init.ServiceRoute.Type
( λ(entry : Prelude.Map.Entry Text Text) →
{ service =
{ name = "ROM"
, label = Init.LabelSelector.Type.Last entry.mapKey
}
, route =
Init.Route.Type.Child
{ name = "store_rom"
, label = Some entry.mapValue
, diag = None Bool
}
}
)
( Prelude.List.concat
(Prelude.Map.Entry Text Text)
(Prelude.Map.values Text (Prelude.Map.Type Text Text) manifest)
)
let parentROMs =
Prelude.List.map
Text
Init.ServiceRoute.Type
( λ(label : Text) →
{ service =
{ name = "ROM", label = Init.LabelSelector.Type.Last label }
, route =
Init.Route.Type.Parent { label = Some label, diag = None Bool }
}
)
let wrapStore
: Init.Type → Manifest/Type → Child.Type
= λ(init : Init.Type) →
λ(manifest : Manifest/Type) →
Init.toChild
init
Init.Attributes::{
, exitPropagate = True
, resources = Init.Resources::{ ram = Genode.units.MiB 4 }
, routes =
[ Init.ServiceRoute.parent "IO_MEM"
, Init.ServiceRoute.parent "IO_PORT"
, Init.ServiceRoute.parent "IRQ"
, Init.ServiceRoute.parent "VM"
, Init.ServiceRoute.child "Timer" "timer"
, Init.ServiceRoute.child "Rtc" "rtc"
, Init.ServiceRoute.child "File_system" "store_fs"
]
# parentROMs
[ "ld.lib.so"
, "init"
, "platform_info"
, "core_log"
, "kernel_log"
, "vfs"
, "libvfs.so"
, "cached_fs_rom"
]
# Manifest/toRoutes manifest
}
in λ(subinit : Init.Type) →
λ(storeSize : Natural) →
λ(storeManifest : Manifest/Type) →
λ(bootManifest : Manifest/Type) →
Genode.Boot::{
, config = Init::{
, children =
[ { mapKey = "timer"
, mapValue =
Child.flat
Child.Attributes::{
, binary = "timer_drv"
, provides = [ "Timer" ]
}
}
, { mapKey = "rtc"
, mapValue =
Child.flat
Child.Attributes::{
, binary = "rtc_drv"
, provides = [ "Rtc" ]
, routes = [ Init.ServiceRoute.parent "IO_PORT" ]
}
}
, { mapKey = "store_fs"
, mapValue =
Child.flat
Child.Attributes::{
, binary = "vfs"
, config = Init.Config::{
, content =
[ XML.element
{ name = "vfs"
, attributes = XML.emptyAttributes
, content =
[ XML.leaf
{ name = "tar"
, attributes = toMap { name = "store.tar" }
}
]
}
]
, defaultPolicy = Some Init.Config.DefaultPolicy::{
, attributes = toMap { root = "/", writeable = "no" }
}
}
, provides = [ "File_system" ]
}
}
, { mapKey = "store_rom"
, mapValue =
Child.flat
Child.Attributes::{
, binary = "cached_fs_rom"
, provides = [ "ROM" ]
, resources = Init.Resources::{
, ram = storeSize + Genode.units.MiB 1
}
, routes =
[ Init.ServiceRoute.child "File_system" "store_fs" ]
}
}
, { mapKey = "init", mapValue = wrapStore subinit storeManifest }
]
}
, rom =
Genode.BootModules.toRomPaths
( Prelude.List.concat
(Prelude.Map.Entry Text Text)
( Prelude.Map.values
Text
(Prelude.Map.Type Text Text)
bootManifest
)
)
}

View File

@ -1,73 +1,51 @@
{ flake, system, localSystem, crossSystem }: { flake, system, localSystem, crossSystem, pkgs }:
let let
apps = flake.apps.${system}; apps = flake.apps.${system};
localPackages = flake.legacyPackages.${localSystem}; localPackages = flake.legacyPackages.${localSystem};
genodepkgs = flake.packages.${system};
lib = flake.lib.${system}; lib = flake.lib.${system};
nixpkgs = flake.legacyPackages.${system}; nixpkgs = flake.legacyPackages.${system};
legacyPackages = flake.legacyPackages.${system}; legacyPackages = flake.legacyPackages.${system};
in with import ./lib/build-vms.nix { testingPython = import ./lib/testing-python.nix;
inherit flake system localSystem crossSystem;
pkgs = flake.legacyPackages.${system};
localPackages = flake.inputs.nixpkgs.legacyPackages.${localSystem};
modulesPath = "${flake.inputs.nixpkgs}/nixos/modules";
};
with flake.legacyPackages.${system};
let
testSpecs = map (p: import p) [ testSpecs = map (p: import p) [
./log.nix ./log.nix
./lighttpd.nix
# ./posix.nix # ./posix.nix
# ./tox-bootstrapd.nix # ./tox-bootstrapd.nix
# ./vmm_arm.nix # ./vmm_arm.nix
# ./vmm_x86.nix # ./vmm_x86.nix
# ./x86.nix ./x86.nix
]; # TODO ++ (callTest ./solo5); ]; # TODO ++ (callTest ./solo5);
testPkgs = genodepkgs;
qemu' = localPackages.qemu; qemu' = localPackages.qemu;
qemuBinary = qemuPkg: qemuBinary = qemuPkg:
{ {
aarch64-genode = "${qemuPkg}/bin/qemu-system-aarch64"; aarch64-genode = "${qemuPkg}/bin/qemu-system-aarch64";
x86_64-genode = "${qemuPkg}/bin/qemu-system-x86_64"; x86_64-genode = "${qemuPkg}/bin/qemu-system-x86_64";
}.${genodepkgs.stdenv.hostPlatform.system}; }.${pkgs.stdenv.hostPlatform.system};
# TODO: move the cores into nixos modules # TODO: move the cores into nixos modules
cores = [ cores = [
{ {
prefix = "hw-pc-"; prefix = "hw-pc-";
testingPython = testingPython {
inherit flake system localSystem crossSystem pkgs;
extraConfigurations = [ ../nixos-modules/base-hw-pc.nix ];
};
specs = [ "x86" "hw" ]; specs = [ "x86" "hw" ];
platforms = [ "x86_64-genode" ]; platforms = [ "x86_64-genode" ];
basePackages = [ testPkgs.base-hw-pc ]
++ map testPkgs.genodeSources.depot [ "rtc_drv" ];
makeImage =
lib.hwImage "0xffffffc000000000" "0x00200000" testPkgs.base-hw-pc;
startVM = vmName: image: ''
#! ${localPackages.runtimeShell}
exec ${qemuBinary qemu'} \
-name ${vmName} \
-machine q35 \
-m 384 \
-netdev user,id=net0 \
-device virtio-net-pci,netdev=net0 \
-kernel "${testPkgs.bender}/bender" \
-initrd "${image}/image.elf" \
$QEMU_OPTS \
"$@"
'';
} }
{ /* {
prefix = "hw-virt_qemu-"; prefix = "hw-virt_qemu-";
testingPython = testingPython {
inherit flake system localSystem crossSystem pkgs;
extraConfigurations = [ ../nixos-modules/base-hw-virt_qemu.nix ];
};
specs = [ "aarch64" "hw" ]; specs = [ "aarch64" "hw" ];
platforms = [ "aarch64-genode" ]; platforms = [ "aarch64-genode" ];
basePackages = with testPkgs; [ base-hw-virt_qemu rtc-dummy ];
makeImage = lib.hwImage "0xffffffc000000000" "0x40000000"
testPkgs.base-hw-virt_qemu;
startVM = vmName: image: '' startVM = vmName: image: ''
#! ${localPackages.runtimeShell} #! ${localPackages.runtimeShell}
exec ${qemuBinary qemu'} \ exec ${qemuBinary qemu'} \
@ -81,265 +59,30 @@ let
"$@" "$@"
''; '';
} }
*/
{ {
prefix = "nova-"; prefix = "nova-";
testingPython = testingPython {
inherit flake system localSystem crossSystem pkgs;
extraConfigurations = [ ../nixos-modules/nova.nix ];
};
specs = [ "x86" "nova" ]; specs = [ "x86" "nova" ];
platforms = [ "x86_64-genode" ]; platforms = [ "x86_64-genode" ];
basePackages = [ testPkgs.base-nova ]
++ map testPkgs.genodeSources.depot [ "rtc_drv" ];
makeImage = lib.novaImage;
startVM = vmName: image: ''
#! ${localPackages.runtimeShell}
exec ${qemuBinary qemu'} \
-name ${vmName} \
-machine q35 \
-m 384 \
-kernel "${testPkgs.bender}/bender" \
-initrd "${testPkgs.NOVA}/hypervisor-x86_64 arg=iommu novpid serial,${image}/image.elf" \
$QEMU_OPTS \
"$@"
'';
} }
]; ];
cores' = builtins.filter (core: cores' = builtins.filter (core:
builtins.any (x: x == genodepkgs.stdenv.hostPlatform.system) core.platforms) builtins.any (x: x == pkgs.stdenv.hostPlatform.system) core.platforms)
cores; cores;
testDriver = with localPackages;
let testDriverScript = ./test-driver/test-driver.py;
in stdenv.mkDerivation {
name = "nixos-test-driver";
nativeBuildInputs = [ makeWrapper ];
buildInputs = [ (python3.withPackages (p: [ p.ptpython ])) ];
checkInputs = with python3Packages; [ pylint mypy ];
dontUnpack = true;
preferLocalBuild = true;
doCheck = true;
checkPhase = ''
mypy --disallow-untyped-defs \
--no-implicit-optional \
--ignore-missing-imports ${testDriverScript}
pylint --errors-only ${testDriverScript}
'';
installPhase = ''
mkdir -p $out/bin
cp ${testDriverScript} $out/bin/nixos-test-driver
chmod u+x $out/bin/nixos-test-driver
# TODO: copy user script part into this file (append)
wrapProgram $out/bin/nixos-test-driver \
--prefix PATH : "${lib.makeBinPath [ qemu' coreutils ]}" \
'';
};
defaultTestScript = ''
start_all()
machine.wait_until_serial_output('child "init" exited with exit value 0')
'';
makeTest = with localPackages;
{ prefix, specs, platforms, basePackages, makeImage, startVM }:
{ name ? "unnamed", testScript ? defaultTestScript,
# Skip linting (mainly intended for faster dev cycles)
skipLint ? false, ... }@t:
let
testDriverName = "genode-test-driver-${name}";
# TODO: move buildVM into a nixos module
buildVM = vmName:
{ config, inputs, env ? { }, extraPaths ? [ ] }:
let
storeTarball = localPackages.runCommand "store" { } ''
mkdir -p $out
tar cf "$out/store.tar" --absolute-names ${toString inputs} ${
toString extraPaths
}
'';
addManifest = drv:
drv // {
manifest =
nixpkgs.runCommand "${drv.name}.dhall" { inherit drv; } ''
set -eu
echo -n '[' >> $out
find $drv/ -type f -printf ',{mapKey= "%f",mapValue="%p"}' >> $out
${if builtins.elem "lib" drv.outputs then
''
find ${drv.lib}/ -type f -printf ',{mapKey= "%f",mapValue="%p"}' >> $out''
else
""}
echo -n ']' >> $out
'';
};
storeManifest = lib.mergeManifests (map addManifest inputs);
manifest = lib.mergeManifests (map addManifest (basePackages
++ [ testPkgs.sotest-producer storeTarball ]
++ map testPkgs.genodeSources.depot [
"init"
"vfs"
"cached_fs_rom"
]));
config' = "${
./test-wrapper.dhall
} (${config}) $(stat --format '%s' ${storeTarball}/store.tar) ${storeManifest} ${manifest}";
env' = {
DHALL_GENODE_TEST = "${./test.dhall}";
} // env;
image = makeImage vmName env' config';
startVM' = startVM vmName image;
in {
script = localPackages.writeScriptBin "run-${vmName}-vm" startVM';
config = lib.runDhallCommand (name + ".dhall") env' ''
${apps.dhall.program} <<< "${config'}" > $out
'';
store = storeTarball;
xml = lib.runDhallCommand (name + ".config") env'
''${apps.render-init.program} <<< "(${config'}).config" > $out'';
};
# nodes = lib.mapAttrs buildVM
# (t.nodes or (if t ? machine then { machine = t.machine; } else { }));
nodes = buildVirtualNetwork
(t.nodes or (if t ? machine then { machine = t.machine; } else { }));
testScript' =
# Call the test script with the computed nodes.
if lib.isFunction testScript then
testScript { inherit nodes; }
else
testScript;
vlans = map (m: m.config.virtualisation.vlans) (lib.attrValues nodes);
vms = map (m: m.config.system.build.vm) (lib.attrValues nodes);
# Generate onvenience wrappers for running the test driver
# interactively with the specified network, and for starting the
# VMs from the command line.
driver =
let warn = if skipLint then lib.warn "Linting is disabled!" else lib.id;
in warn (runCommand testDriverName {
buildInputs = [ makeWrapper ];
testScript = testScript';
preferLocalBuild = true;
testName = name;
} ''
mkdir -p $out/bin
echo -n "$testScript" > $out/test-script
${lib.optionalString (!skipLint) ''
${python3Packages.black}/bin/black --check --quiet --diff $out/test-script
''}
ln -s ${testDriver}/bin/nixos-test-driver $out/bin/
vms=($(for i in ${toString vms}; do echo $i/bin/run-*-vm; done))
wrapProgram $out/bin/nixos-test-driver \
--add-flags "''${vms[*]}" \
--run "export testScript=\"\$(${coreutils}/bin/cat $out/test-script)\""
ln -s ${testDriver}/bin/nixos-test-driver $out/bin/nixos-run-vms
wrapProgram $out/bin/nixos-run-vms \
--add-flags "''${vms[*]}" \
--set tests 'start_all(); join_all();'
''); # "
passMeta = drv:
drv
// lib.optionalAttrs (t ? meta) { meta = (drv.meta or { }) // t.meta; };
# Run an automated test suite in the given virtual network.
# `driver' is the script that runs the network.
runTests = driver:
stdenv.mkDerivation {
name = "test-run-${driver.testName}";
buildCommand = ''
mkdir -p $out
LOGFILE=/dev/null tests='exec(os.environ["testScript"])' ${driver}/bin/nixos-test-driver
'';
};
test = passMeta (runTests driver);
nodeNames = builtins.attrNames nodes;
invalidNodeNames =
lib.filter (node: builtins.match "^[A-z_]([A-z0-9_]+)?$" node == null)
nodeNames;
in if lib.length invalidNodeNames > 0 then
throw ''
Cannot create machines out of (${
lib.concatStringsSep ", " invalidNodeNames
})!
All machines are referenced as python variables in the testing framework which will break the
script when special characters are used.
Please stick to alphanumeric chars and underscores as separation.
''
else
test // { inherit nodes driver test; };
testList = let testList = let
f = core: f = core: test:
let makeTest' = makeTest core;
in test:
if (test.constraints or (_: true)) core.specs then { if (test.constraints or (_: true)) core.specs then {
name = core.prefix + test.name; name = core.prefix + test.name;
value = makeTest' test; value = core.testingPython.makeTest test;
} else } else
null; null;
in lib.lists.crossLists f [ cores' testSpecs ]; in lib.lists.crossLists f [ cores' testSpecs ];
in builtins.listToAttrs (builtins.filter (_: _ != null) testList) in builtins.listToAttrs (builtins.filter (_: _ != null) testList)
/* sotest = let
hwTests = with hw; [ multi posix x86 ];
novaTests = with nova; [ multi posix x86 vmm ];
allTests = hwTests ++ novaTests;
projectCfg.boot_items =
(map (test: {
inherit (test) name;
exec = "bender";
load = [ "${test.name}.image.elf" ];
}) hwTests)
++ (map (test: {
inherit (test) name;
exec = "bender";
load = [ "hypervisor serial novga iommu" test.image.name ];
}) novaTests);
in localPackages.stdenv.mkDerivation {
name = "sotest";
buildCommand = ''
mkdir zip; cd zip
cp "${testPkgs.bender}/bender" bender
cp "${testPkgs.NOVA}/hypervisor-x86_64" hypervisor
${concatStringsSep "\n"
(map (test: "cp ${test.image}/image.elf ${test.name}.image.elf")
allTests)}
mkdir -p $out/nix-support
${localPackages.zip}/bin/zip "$out/binaries.zip" *
cat << EOF > "$out/project.json"
${builtins.toJSON projectCfg}
EOF
echo file sotest-binaries $out/binaries.zip >> "$out/nix-support/hydra-build-products"
echo file sotest-config $out/project.json >> "$out/nix-support/hydra-build-products"
'';
};
*/

View File

@ -1,6 +1,8 @@
{ flake, system, localSystem, crossSystem { system, localSystem, crossSystem
# Nixpkgs, for qemu, lib and more # Nixpkgs, for qemu, lib and more
, localPackages, pkgs, modulesPath }: , pkgs, modulesPath
# NixOS configuration to add to the VMs
, extraConfigurations ? [ ] }:
with pkgs.lib; with pkgs.lib;
with import ./qemu-flags.nix { inherit pkgs; }; with import ./qemu-flags.nix { inherit pkgs; };
@ -9,7 +11,7 @@ rec {
inherit pkgs; inherit pkgs;
qemu = pkgs.qemu_test; qemu = pkgs.buildPackages.buildPackages.qemu_test;
# Build a virtual network from an attribute set `{ machine1 = # Build a virtual network from an attribute set `{ machine1 =
# config1; ... machineN = configN; }', where `machineX' is the # config1; ... machineN = configN; }', where `machineX' is the
@ -23,10 +25,11 @@ rec {
import "${modulesPath}/../lib/eval-config.nix" { import "${modulesPath}/../lib/eval-config.nix" {
inherit system; inherit system;
modules = configurations; modules = configurations ++ extraConfigurations;
baseModules = (import "${modulesPath}/module-list.nix") ++ [ baseModules = (import "${modulesPath}/module-list.nix") ++ [
"${modulesPath}/virtualisation/qemu-vm.nix" ../../nixos-modules/genode-core.nix
"${modulesPath}/testing/test-instrumentation.nix" # !!! should only get added for automated test runs ../../nixos-modules/genode-init.nix
../../nixos-modules/qemu-vm.nix
{ {
key = "no-manual"; key = "no-manual";
documentation.nixos.enable = false; documentation.nixos.enable = false;
@ -40,10 +43,8 @@ rec {
_module.args.nodes = nodes; _module.args.nodes = nodes;
} }
{ {
nixpkgs = { system.build.qemu = qemu;
inherit system crossSystem localSystem; nixpkgs = { inherit system crossSystem localSystem pkgs; };
pkgs = flake.legacyPackages.${system};
};
} }
]; ];
}; };

View File

@ -0,0 +1,235 @@
{ flake, system, localSystem, crossSystem, pkgs
# Modules to add to each VM
, extraConfigurations ? [ ] }:
with import ./build-vms.nix {
inherit system localSystem crossSystem pkgs extraConfigurations;
modulesPath = "${flake.inputs.nixpkgs}/nixos/modules";
};
with pkgs.buildPackages.buildPackages;
rec {
inherit pkgs;
testDriver = let testDriverScript = ./test-driver.py;
in stdenv.mkDerivation {
name = "nixos-test-driver";
nativeBuildInputs = [ makeWrapper ];
buildInputs = [ (python3.withPackages (p: [ p.ptpython ])) ];
checkInputs = with python3Packages; [ pylint mypy ];
dontUnpack = true;
preferLocalBuild = true;
doCheck = true;
checkPhase = ''
mypy --disallow-untyped-defs \
--no-implicit-optional \
--ignore-missing-imports ${testDriverScript}
pylint --errors-only ${testDriverScript}
'';
installPhase = ''
mkdir -p $out/bin
cp ${testDriverScript} $out/bin/nixos-test-driver
chmod u+x $out/bin/nixos-test-driver
# TODO: copy user script part into this file (append)
wrapProgram $out/bin/nixos-test-driver \
--prefix PATH : "${
lib.makeBinPath [ qemu_test vde2 netpbm coreutils ]
}" \
'';
};
# Run an automated test suite in the given virtual network.
# `driver' is the script that runs the network.
runTests = driver:
stdenv.mkDerivation {
name = "vm-test-run-${driver.testName}";
requiredSystemFeatures = [ "nixos-test" ];
buildCommand = ''
mkdir -p $out
LOGFILE=/dev/null tests='exec(os.environ["testScript"])' ${driver}/bin/nixos-test-driver
'';
};
defaultTestScript = ''
start_all()
machine.wait_until_serial_output('child "init" exited with exit value 0')
'';
makeTest = { testScript ? defaultTestScript, enableOCR ? false, name ?
"unnamed"
# Skip linting (mainly intended for faster dev cycles)
, skipLint ? false, ... }@t:
let
testDriverName = "genode-test-driver-${name}";
nodes = buildVirtualNetwork
(t.nodes or (if t ? machine then { machine = t.machine; } else { }));
testScript' =
# Call the test script with the computed nodes.
if lib.isFunction testScript then
testScript { inherit nodes; }
else
testScript;
vlans = map (m: m.config.virtualisation.vlans) (lib.attrValues nodes);
vms = map (m: m.config.system.build.vm) (lib.attrValues nodes);
ocrProg = tesseract4.override { enableLanguages = [ "eng" ]; };
imagemagick_tiff = imagemagick_light.override { inherit libtiff; };
# Generate convenience wrappers for running the test driver
# interactively with the specified network, and for starting the
# VMs from the command line.
driver =
let warn = if skipLint then lib.warn "Linting is disabled!" else lib.id;
in warn (runCommand testDriverName {
buildInputs = [ makeWrapper ];
testScript = testScript';
preferLocalBuild = true;
testName = name;
} ''
mkdir -p $out/bin
echo -n "$testScript" > $out/test-script
ln -s ${testDriver}/bin/nixos-test-driver $out/bin/
vms=($(for i in ${toString vms}; do echo $i/bin/run-*-vm; done))
wrapProgram $out/bin/nixos-test-driver \
--add-flags "''${vms[*]}" \
${
lib.optionalString enableOCR
"--prefix PATH : '${ocrProg}/bin:${imagemagick_tiff}/bin'"
} \
--run "export testScript=\"\$(${coreutils}/bin/cat $out/test-script)\"" \
--set VLANS '${toString vlans}'
ln -s ${testDriver}/bin/nixos-test-driver $out/bin/nixos-run-vms
wrapProgram $out/bin/nixos-run-vms \
--add-flags "''${vms[*]}" \
${lib.optionalString enableOCR "--prefix PATH : '${ocrProg}/bin'"} \
--set tests 'start_all(); join_all();' \
--set VLANS '${toString vlans}' \
${
lib.optionalString (builtins.length vms == 1) "--set USE_SERIAL 1"
}
''); # "
passMeta = drv:
drv
// lib.optionalAttrs (t ? meta) { meta = (drv.meta or { }) // t.meta; };
test = passMeta (runTests driver);
nodeNames = builtins.attrNames nodes;
invalidNodeNames =
lib.filter (node: builtins.match "^[A-z_]([A-z0-9_]+)?$" node == null)
nodeNames;
in if lib.length invalidNodeNames > 0 then
throw ''
Cannot create machines out of (${
lib.concatStringsSep ", " invalidNodeNames
})!
All machines are referenced as python variables in the testing framework which will break the
script when special characters are used.
Please stick to alphanumeric chars and underscores as separation.
''
else
test // { inherit nodes driver test; };
runInMachine = { drv, machine, preBuild ? "", postBuild ? "", ... # ???
}:
let
vm = buildVM { } [
machine
{
key = "run-in-machine";
networking.hostName = "client";
nix.readOnlyStore = false;
virtualisation.writableStore = false;
}
];
buildrunner = writeText "vm-build" ''
source $1
${coreutils}/bin/mkdir -p $TMPDIR
cd $TMPDIR
exec $origBuilder $origArgs
'';
testScript = ''
start_all()
client.wait_for_unit("multi-user.target")
${preBuild}
client.succeed("env -i ${bash}/bin/bash ${buildrunner} /tmp/xchg/saved-env >&2")
${postBuild}
client.succeed("sync") # flush all data before pulling the plug
'';
vmRunCommand = writeText "vm-run" ''
xchg=vm-state-client/xchg
${coreutils}/bin/mkdir $out
${coreutils}/bin/mkdir -p $xchg
for i in $passAsFile; do
i2=''${i}Path
_basename=$(${coreutils}/bin/basename ''${!i2})
${coreutils}/bin/cp ''${!i2} $xchg/$_basename
eval $i2=/tmp/xchg/$_basename
${coreutils}/bin/ls -la $xchg
done
unset i i2 _basename
export | ${gnugrep}/bin/grep -v '^xchg=' > $xchg/saved-env
unset xchg
export tests='${testScript}'
${testDriver}/bin/nixos-test-driver ${vm.config.system.build.vm}/bin/run-*-vm
''; # */
in lib.overrideDerivation drv (attrs: {
requiredSystemFeatures = [ "kvm" ];
builder = "${bash}/bin/sh";
args = [ "-e" vmRunCommand ];
origArgs = attrs.args;
origBuilder = attrs.builder;
});
runInMachineWithX = { require ? [ ], ... }@args:
let
client = { ... }: {
inherit require;
imports = [ ../tests/common/auto.nix ];
virtualisation.memorySize = 1024;
services.xserver.enable = true;
test-support.displayManager.auto.enable = true;
services.xserver.displayManager.defaultSession = "none+icewm";
services.xserver.windowManager.icewm.enable = true;
};
in runInMachine ({
machine = client;
preBuild = ''
client.wait_for_x()
'';
} // args);
simpleTest = as: (makeTest as).test;
}

13
tests/lighttpd.nix Normal file
View File

@ -0,0 +1,13 @@
{
name = "lighttpd";
nodes = {
webserver = {
imports = [ ../nixos-modules/hardware.nix ];
services.lighttpd.enable = true;
};
client = {
imports = [ ../nixos-modules/hardware.nix ];
genode.hardware.nic.eth0.driver = "virtio";
};
};
}

View File

@ -1,9 +1,6 @@
{ {
name = "log"; name = "log";
machine = { config, pkgs, ... }: { machine = { config, pkgs, ... }: {
imports = [
../nixos-modules/genode-init.nix
];
genode.init = { genode.init = {
config = ./log.dhall; config = ./log.dhall;
inputs = [ (pkgs.genodeSources.depot "test-log") ]; inputs = [ (pkgs.genodeSources.depot "test-log") ];

View File

@ -1,4 +1,6 @@
let Test = ./test.dhall ? env:DHALL_GENODE_TEST let Test =
./test.dhall sha256:00e0b73a23e1f131a2e5af36a34bc85b31b4fb6597ea3772dee9c536929ea166
? env:DHALL_GENODE_TEST sha256:00e0b73a23e1f131a2e5af36a34bc85b31b4fb6597ea3772dee9c536929ea166
let Genode = Test.Genode let Genode = Test.Genode

View File

@ -1,4 +1,5 @@
let Genode = env:DHALL_GENODE let Genode =
env:DHALL_GENODE sha256:e90438be23b5100003cf018b783986df67bc6d0e3d35e800677d0d9109ff6aa9
let Prelude = Genode.Prelude let Prelude = Genode.Prelude

View File

@ -1,13 +1,12 @@
{ pkgs, ... }: { {
name = "x86"; name = "x86";
constraints = builtins.any (spec: spec == "x86"); constraints = builtins.any (spec: spec == "x86");
machine = { machine = { config, pkgs, ... }: {
genode.init = {
config = ./x86.dhall; config = ./x86.dhall;
inputs = (map pkgs.genodeSources.depot [ inputs = with pkgs.genodePackages;
"acpi_drv" [ acpi_drv platform_drv report_rom test-signal ]
"platform_drv" ++ (map genodeSources.make [ "test/pci" "test/rtc" ]);
"report_rom" };
"test-signal"
]) ++ (map pkgs.genodeSources.make [ "test/pci" "test/rtc" ]);
}; };
} }