diff --git a/flake.nix b/flake.nix
index 3edf95e..33924c6 100644
--- a/flake.nix
+++ b/flake.nix
@@ -65,9 +65,7 @@
forAllCrossSystems ({ system, localSystem, crossSystem }:
nixpkgs.lib // (import ./lib {
inherit system localSystem crossSystem;
- localPackages = nixpkgs.legacyPackages.${localSystem};
- genodepkgs = self;
- nixpkgs = nixpkgsFor.${system};
+ pkgs = self.legacyPackages.${system};
}));
legacyPackages =
@@ -139,15 +137,12 @@
checks =
# Checks for continous testing
- with (forAllCrossSystems ({ system, localSystem, crossSystem }:
- import ./tests {
- inherit self;
- apps = self.apps.${system};
- localPackages = nixpkgsFor.${localSystem};
- genodepkgs = self.packages.${system};
- lib = self.lib.${system};
- nixpkgs = nixpkgsFor.${system};
- legacyPackages = self.legacyPackages.${system};
+ let tests = import ./tests;
+ in with (forAllCrossSystems ({ system, localSystem, crossSystem }:
+ tests {
+ flake = self;
+ inherit system localSystem crossSystem;
+ pkgs = self.legacyPackages.${system};
} // {
ports = nixpkgsFor.${localSystem}.symlinkJoin {
name = "ports";
diff --git a/lib/default.nix b/lib/default.nix
index cc57fc5..a489f01 100644
--- a/lib/default.nix
+++ b/lib/default.nix
@@ -1,22 +1,22 @@
-{ system, localSystem, crossSystem, genodepkgs, nixpkgs, localPackages }:
+{ system, localSystem, crossSystem, pkgs }:
let
- thisSystem = builtins.getAttr system;
- inherit (nixpkgs) buildPackages;
- testPkgs = thisSystem genodepkgs.packages;
+ inherit (pkgs) buildPackages;
+ localPackages = pkgs.buildPackages.buildPackages;
+ inherit (pkgs.genodePackages) dhallGenode genodeSources;
dhallCachePrelude = ''
export XDG_CACHE_HOME=$NIX_BUILD_TOP
- export DHALL_GENODE="${testPkgs.dhallGenode}/binary.dhall";
+ export DHALL_GENODE="${dhallGenode}/binary.dhall";
${buildPackages.xorg.lndir}/bin/lndir -silent \
- ${testPkgs.dhallGenode}/.cache \
+ ${dhallGenode}/.cache \
$XDG_CACHE_HOME
'';
in rec {
runDhallCommand = name: env: script:
- nixpkgs.runCommand name (env // {
+ pkgs.runCommand name (env // {
nativeBuildInputs = [ localPackages.dhall ]
++ env.nativeBuildInputs or [ ];
}) ''
@@ -42,7 +42,7 @@ in rec {
hwImage = coreLinkAddr: bootstrapLinkAddr: basePkg: name:
{ gzip ? false, ... }@env:
boot:
- nixpkgs.stdenv.mkDerivation {
+ pkgs.stdenv.mkDerivation {
name = name + "-hw-image";
build = compileBoot name env boot;
nativeBuildInputs = [ localPackages.dhall ];
@@ -74,7 +74,7 @@ in rec {
LD="${buildPackages.binutils}/bin/${buildPackages.binutils.targetPrefix}ld"
$LD \
--strip-all \
- -T${testPkgs.genodeSources}/repos/base/src/ld/genode.ld \
+ -T${genodeSources}/repos/base/src/ld/genode.ld \
-z max-page-size=0x1000 \
-Ttext=$link_address -gc-sections \
"$lib" "boot_modules.o" \
@@ -96,13 +96,13 @@ in rec {
bootstrap/modules_asm \
${bootstrapLinkAddr} \
$out/image.elf
- '' + nixpkgs.lib.optionalString gzip "gzip $out/image.elf";
+ '' + pkgs.lib.optionalString gzip "gzip $out/image.elf";
};
novaImage = name:
{ gzip ? false, ... }@env:
boot:
- nixpkgs.stdenv.mkDerivation {
+ pkgs.stdenv.mkDerivation {
name = name + "-nova-image";
build = compileBoot name env boot;
@@ -115,17 +115,17 @@ in rec {
# link final image
LD="${buildPackages.binutils}/bin/${buildPackages.binutils.targetPrefix}ld"
$LD --strip-all -nostdlib \
- -T${testPkgs.genodeSources}/repos/base/src/ld/genode.ld \
- -T${testPkgs.genodeSources}/repos/base-nova/src/core/core-bss.ld \
+ -T${genodeSources}/repos/base/src/ld/genode.ld \
+ -T${genodeSources}/repos/base-nova/src/core/core-bss.ld \
-z max-page-size=0x1000 \
-Ttext=0x100000 -gc-sections \
- "${testPkgs.base-nova.coreObj}" boot_modules.o \
+ "${pkgs.genodePackages.base-nova.coreObj}" boot_modules.o \
-o $out/image.elf
- '' + nixpkgs.lib.optionalString gzip "gzip $out/image.elf";
+ '' + pkgs.lib.optionalString gzip "gzip $out/image.elf";
};
mergeManifests = inputs:
- nixpkgs.writeTextFile {
+ pkgs.writeTextFile {
name = "manifest.dhall";
text = with builtins;
let
@@ -133,7 +133,7 @@ in rec {
if hasAttr "manifest" input then
''
${head}, { mapKey = "${
- nixpkgs.lib.getName input
+ pkgs.lib.getName input
}", mapValue = ${input.manifest} }''
else
abort "${input.pname} does not have a manifest";
diff --git a/nixos-modules/base-hw-pc.nix b/nixos-modules/base-hw-pc.nix
new file mode 100644
index 0000000..c66589c
--- /dev/null
+++ b/nixos-modules/base-hw-pc.nix
@@ -0,0 +1,27 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+let
+ localPackages = pkgs.buildPackages;
+ utils = import ../lib {
+ inherit (config.nixpkgs) system localSystem crossSystem;
+ inherit pkgs;
+ };
+in {
+ genode.core = {
+ prefix = "hw-pc-";
+ supportedSystems = [ "x86_64-genode" ];
+ basePackages = with pkgs.genodePackages; [ base-hw-pc rtc_drv ];
+ };
+
+ genode.boot = {
+
+ initrd = "${config.genode.boot.image}/image.elf";
+
+ image = utils.hwImage "0xffffffc000000000" "0x00200000"
+ pkgs.genodePackages.base-hw-pc config.system.name { }
+ config.genode.boot.configFile;
+
+ };
+
+}
diff --git a/nixos-modules/base-hw-virt_qemu.nix b/nixos-modules/base-hw-virt_qemu.nix
new file mode 100644
index 0000000..c11b859
--- /dev/null
+++ b/nixos-modules/base-hw-virt_qemu.nix
@@ -0,0 +1,27 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+let
+ localPackages = pkgs.buildPackages;
+ utils = import ../lib {
+ inherit (config.nixpkgs) system localSystem crossSystem;
+ inherit pkgs;
+ };
+in {
+ genode.core = {
+    prefix = "hw-virt_qemu-";
+ supportedSystems = [ "aarch64-genode" ];
+ basePackages = with pkgs.genodePackages; [ base-hw-virt_qemu rtc-dummy ];
+ };
+
+ genode.boot = {
+
+ initrd = "${config.genode.boot.image}/image.elf";
+
+ image = utils.hwImage "0xffffffc000000000" "0x40000000"
+ pkgs.genodePackages.base-hw-virt_qemu config.system.name { }
+ config.genode.boot.configFile;
+
+ };
+
+}
diff --git a/nixos-modules/genode-core.nix b/nixos-modules/genode-core.nix
new file mode 100644
index 0000000..3cf5fc3
--- /dev/null
+++ b/nixos-modules/genode-core.nix
@@ -0,0 +1,157 @@
+{ config, pkgs, lib, modulesPath, ... }:
+
+with lib;
+let localPackages = pkgs.buildPackages;
+in {
+ options.genode = {
+ core = {
+
+ prefix = mkOption {
+ type = types.str;
+ example = "hw-pc-";
+ };
+
+ supportedSystems = mkOption {
+ type = types.listOf types.str;
+ example = [ "i686-genode" "x86_64-genode" ];
+ };
+
+ basePackages = mkOption { type = types.listOf types.package; };
+
+ };
+
+ boot = {
+
+ kernel = mkOption {
+ type = types.path;
+ default = "${pkgs.genodePackages.bender}/bender";
+ };
+
+ initrd = mkOption {
+ type = types.str;
+ default = "${pkgs.genodePackages.bender}/bender";
+        description = "Path to an image or command-line arguments";
+ };
+
+ configFile = mkOption {
+ type = types.path;
+ description = ''
+ Dhall boot configuration. See
+ https://git.sr.ht/~ehmry/dhall-genode/tree/master/Boot/package.dhall
+ '';
+ };
+
+ image = mkOption {
+ type = types.path;
+ description =
+ "Boot image containing the base component binaries and configuration.";
+ };
+
+ romModules = mkOption {
+ type = types.attrsOf types.path;
+ description = "Attr set of initial ROM modules";
+ };
+
+ };
+
+ };
+
+ config = let
+ initInputs = unique config.genode.init.inputs;
+
+ addManifest = drv:
+ drv // {
+ manifest =
+ localPackages.runCommand "${drv.name}.dhall" { inherit drv; } ''
+ set -eu
+ echo -n '[' >> $out
+ find $drv/ -type f -printf ',{mapKey= "%f",mapValue="%p"}' >> $out
+ ${if builtins.elem "lib" drv.outputs then
+ ''
+ find ${drv.lib}/ -type f -printf ',{mapKey= "%f",mapValue="%p"}' >> $out''
+ else
+ ""}
+ echo -n ']' >> $out
+ '';
+ };
+
+ mergeManifests = inputs:
+ localPackages.writeTextFile {
+ name = "manifest.dhall";
+ text = with builtins;
+ let
+ f = head: input:
+ if hasAttr "manifest" input then
+ ''
+ ${head}, { mapKey = "${
+ lib.getName input
+ }", mapValue = ${input.manifest} }''
+ else
+ abort "${input.pname} does not have a manifest";
+ in (foldl' f "[" inputs) + "]";
+ };
+
+ in {
+
+ assertions = [{
+ assertion = builtins.any (s: s == config.nixpkgs.system)
+ config.genode.core.supportedSystems;
+ message = "invalid Genode core for this system";
+ }];
+
+ genode.boot.configFile = let
+ tarball =
+ "${config.system.build.tarball}/tarball/${config.system.build.tarball.fileName}.tar";
+ manifest = mergeManifests (map addManifest
+ (config.genode.core.basePackages ++ [ config.system.build.tarball ]
+ ++ (with pkgs.genodePackages; [ init cached_fs_rom vfs ])));
+ in localPackages.runCommand "boot.dhall" { } ''
+ cat > $out << EOF
+ ${./store-wrapper.dhall}
+ (${config.genode.init.configFile})
+ "${config.system.build.tarball.fileName}.tar"
+ $(stat --format '%s' ${tarball})
+ ${config.system.build.storeManifest} ${manifest}
+ EOF
+ '';
+
+ system.build.storeManifest = mergeManifests (map addManifest initInputs);
+
+ # Create the tarball of the store to live in core ROM
+ system.build.tarball =
+ pkgs.callPackage "${modulesPath}/../lib/make-system-tarball.nix" {
+ contents = [ ];
+ storeContents = [
+ {
+ # assume that the init config will depend
+ # on every store path needed to boot
+ object = config.genode.init.configFile;
+ symlink = "/config.dhall";
+ }
+ {
+ object = pkgs.buildPackages.symlinkJoin {
+ name = config.system.name + ".rom";
+ paths = config.genode.init.inputs;
+ };
+ symlink = "/rom";
+ }
+ ];
+ compressCommand = "cat";
+ compressionExtension = "";
+ };
+
+ system.build.initXml = pkgs.buildPackages.runCommand "init.xml" {
+ nativeBuildInputs = with pkgs.buildPackages; [ dhall xorg.lndir ];
+ DHALL_GENODE = "${pkgs.genodePackages.dhallGenode}/binary.dhall";
+ BOOT_CONFIG = config.genode.boot.configFile;
+ } ''
+ export XDG_CACHE_HOME=$NIX_BUILD_TOP
+ lndir -silent \
+ ${pkgs.genodePackages.dhallGenode}/.cache \
+ $XDG_CACHE_HOME
+ dhall text <<< "(env:DHALL_GENODE).Init.render (env:BOOT_CONFIG).config" > $out
+ '';
+
+ };
+
+}
diff --git a/nixos-modules/genode-init.nix b/nixos-modules/genode-init.nix
new file mode 100644
index 0000000..c6c97a0
--- /dev/null
+++ b/nixos-modules/genode-init.nix
@@ -0,0 +1,111 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+ inputs = mkOption {
+ description = "List of packages to build a ROM store with.";
+ type = types.listOf types.package;
+ };
+in {
+
+ options.genode.init = {
+ inherit inputs;
+
+ configFile = mkOption {
+ description = ''
+ Dhall configuration of this init instance after children have been merged.
+ '';
+ type = types.path;
+ };
+
+ baseConfig = mkOption {
+ description =
+ "Dhall configuration of this init instance before merging children.";
+ type = types.str;
+ default = ''
+ let Genode = env:DHALL_GENODE
+
+ in Genode.Init::{
+ , routes =
+ [ Genode.Init.ServiceRoute.parent "File_system"
+ , Genode.Init.ServiceRoute.parent "Rtc"
+ , Genode.Init.ServiceRoute.parent "Timer"
+ , Genode.Init.ServiceRoute.parent "IRQ"
+ , Genode.Init.ServiceRoute.parent "IO_MEM"
+ , Genode.Init.ServiceRoute.parent "IO_PORT"
+ ]
+ }
+ '';
+ };
+
+ children = mkOption {
+ default = { };
+ type = with types;
+ attrsOf (submodule {
+ options = {
+ inherit inputs;
+ configFile = mkOption {
+ type = types.path;
+ description = ''
+ Dhall configuration of child.
+ See https://git.sr.ht/~ehmry/dhall-genode/tree/master/Init/Child/Type
+ '';
+ };
+ };
+ });
+ };
+
+ subinits = mkOption {
+ default = { };
+ type = with types;
+ attrsOf (submodule {
+ options = {
+ inherit inputs;
+ configFile = mkOption {
+ type = types.path;
+ description = ''
+ Dhall configuration of child init.
+ See https://git.sr.ht/~ehmry/dhall-genode/tree/master/Init/Type
+ '';
+ };
+ };
+ });
+ };
+
+ };
+
+ config = {
+
+ genode.init.inputs = with builtins;
+ [ pkgs.genodePackages.report_rom ] ++ concatLists (catAttrs "inputs"
+ ((attrValues config.genode.init.children)
+ ++ (attrValues config.genode.init.subinits)));
+
+ # TODO: convert the subinits to children
+
+ genode.init.configFile = pkgs.writeText "init.dhall" ''
+ let Genode = env:DHALL_GENODE
+ let baseConfig = ${config.genode.init.baseConfig}
+
+ in baseConfig with children = baseConfig.children # toMap {${
+ concatMapStrings (name:
+ ", `${name}` = (${
+ config.genode.init.children.${name}.configFile
+ } : Genode.Init.Child.Type)")
+ (builtins.attrNames config.genode.init.children)
+ } ${
+ concatMapStrings (name: ''
+ , `${name}` =
+ Genode.Init.toChild
+ (${
+ config.genode.init.subinits.${name}.configFile
+ } : Genode.Init.Type)
+ Genode.Init.Attributes.default
+ '') (builtins.attrNames config.genode.init.subinits)
+ } }
+ '';
+
+ };
+
+}
diff --git a/nixos-modules/hardware.nix b/nixos-modules/hardware.nix
new file mode 100644
index 0000000..e9bff06
--- /dev/null
+++ b/nixos-modules/hardware.nix
@@ -0,0 +1,195 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+{
+ options.networking.interfaces = lib.mkOption {
+ type = with types;
+ attrsOf (submodule ({ ... }: {
+ options.genode = {
+
+ driver = mkOption {
+ type = types.enum [ "ipxe" "virtio" ];
+ default = "ipxe";
+ };
+
+ stack = mkOption {
+ type = types.enum [ "lwip" "lxip" ];
+ default = "lwip";
+ };
+
+ };
+ }));
+ };
+
+ config.genode.init.children = let
+ inherit (builtins) toFile;
+
+ nics = mapAttrs' (name: interface:
+ let name' = "nic." + name;
+ in {
+ name = name';
+ value = {
+ inputs = with pkgs.genodePackages;
+ {
+ ipxe = [ ipxe_nic_drv ];
+ virtio = [ virtio_nic_drv ];
+ }.${interface.genode.driver};
+ configFile = toFile "${name'}.dhall" ''
+ let Genode = env:DHALL_GENODE
+
+ let Init = Genode.Init
+
+ in Init.Child.flat
+ Init.Child.Attributes::{
+ , binary = "virtio_pci_nic"
+ , provides = [ "Nic" ]
+ , resources = Init.Resources::{
+ , caps = 128
+ , ram = Genode.units.MiB 4
+ }
+ , routes = [ Init.ServiceRoute.parent "IO_MEM" ]
+ , config = Init.Config::{
+ , policies =
+ [ Init.Config.Policy::{
+ , service = "Nic"
+ , label =
+ Init.LabelSelector.prefix "sockets.${name}"
+ }
+ ]
+ }
+ }
+ '';
+ };
+ }) config.networking.interfaces;
+
+ sockets = mapAttrs' (name: interface:
+ let name' = "sockets." + name;
+ in {
+ name = name';
+ value = {
+ inputs = with pkgs.genodePackages;
+ {
+ lwip = [ vfs_lwip ];
+            lxip = [ vfs_lxip ];
+ }.${interface.genode.stack};
+ configFile = let ipv4 = builtins.head interface.ipv4.addresses;
+ in toFile "${name'}.dhall" ''
+ let Genode = env:DHALL_GENODE
+
+ let Init = Genode.Init
+
+ in Init.Child.flat
+ Init.Child.Attributes::{
+ , binary = "vfs"
+ , provides = [ "File_system" ]
+ , resources = Init.Resources::{ caps = 128, ram = Genode.units.MiB 16 }
+ , config = Init.Config::{
+ , policies =
+ [ Init.Config.Policy::{
+ , service = "File_system"
+ , label = Init.LabelSelector.suffix "sockets"
+ , attributes = toMap { root = "/" }
+ }
+ ]
+ , content =
+ let XML = Genode.Prelude.XML
+
+ in [ XML.element
+ { name = "vfs"
+ , attributes = XML.emptyAttributes
+ , content =
+ [ XML.leaf
+ { name = "lwip"
+ , attributes = toMap
+ { ip_addr = "${ipv4.address}", netmask = "${
+ if ipv4.prefixLength == 24 then
+ "255.255.255.0"
+ else
+ throw
+ "missing prefix to netmask conversion"
+ }" }
+ }
+ ]
+ }
+ ]
+ }
+ }
+ '';
+ };
+ }) config.networking.interfaces;
+
+ in nics // sockets // {
+
+ platform_drv = {
+ inputs = [ pkgs.genodePackages.platform_drv ];
+ configFile = let
+ policies = concatMapStrings (name: ''
+ Init.Config.Policy::{
+ , service = "Platform"
+ , label = Init.LabelSelector.prefix "nic.${name}"
+ , content =
+ [ Genode.Prelude.XML.leaf
+ { name = "pci", attributes = toMap { class = "ETHERNET" } }
+ ]
+ }
+ '') (builtins.attrNames config.networking.interfaces);
+ in toFile "platform_drv.dhall" ''
+ let Genode = env:DHALL_GENODE
+
+ let Init = Genode.Init
+
+ let label = \(_ : Text) -> { local = _, route = _ }
+
+ in Init.Child.flat
+ Init.Child.Attributes::{
+ , binary = "platform_drv"
+ , resources = Init.Resources::{
+ , caps = 800
+ , ram = Genode.units.MiB 4
+ , constrainPhys = True
+ }
+ , reportRoms = [ label "acpi" ]
+ , provides = [ "Platform" ]
+ , routes =
+ [ Init.ServiceRoute.parent "IRQ"
+ , Init.ServiceRoute.parent "IO_MEM"
+ , Init.ServiceRoute.parent "IO_PORT"
+ ]
+ , config = Init.Config::{
+ , policies = [ ${policies} ]
+ }
+ }
+ '';
+ };
+
+ acpi_drv = {
+ inputs = [ pkgs.genodePackages.acpi_drv ];
+ configFile = toFile "acpi_drv.dhall" ''
+ let Genode = env:DHALL_GENODE
+
+ let Init = Genode.Init
+
+ let label = \(_ : Text) -> { local = _, route = _ }
+
+ in Init.Child.flat
+ Init.Child.Attributes::{
+ , binary = "acpi_drv"
+ , resources = Init.Resources::{
+ , caps = 400
+ , ram = Genode.units.MiB 4
+ , constrainPhys = True
+ }
+ , romReports = [ label "acpi" ]
+ , routes =
+ [ Init.ServiceRoute.parent "IRQ"
+ , Init.ServiceRoute.parent "IO_MEM"
+ , Init.ServiceRoute.parent "IO_PORT"
+ ]
+ }
+ '';
+ };
+
+ };
+
+}
diff --git a/nixos-modules/nova.nix b/nixos-modules/nova.nix
new file mode 100644
index 0000000..da52d0e
--- /dev/null
+++ b/nixos-modules/nova.nix
@@ -0,0 +1,27 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+let
+ localPackages = pkgs.buildPackages;
+ utils = import ../lib {
+ inherit (config.nixpkgs) system localSystem crossSystem;
+ inherit pkgs;
+ };
+in {
+ genode.core = {
+ prefix = "nova-";
+ supportedSystems = [ "x86_64-genode" ];
+ basePackages = with pkgs.genodePackages; [ base-nova rtc_drv ];
+ };
+
+ genode.boot = {
+
+ initrd =
+ "'${pkgs.genodePackages.NOVA}/hypervisor-x86_64 arg=iommu novpid serial,${config.genode.boot.image}/image.elf'";
+
+ image =
+ utils.novaImage config.system.name { } config.genode.boot.configFile;
+
+ };
+
+}
diff --git a/nixos-modules/qemu-vm.nix b/nixos-modules/qemu-vm.nix
index 42e43f5..9f91cb0 100644
--- a/nixos-modules/qemu-vm.nix
+++ b/nixos-modules/qemu-vm.nix
@@ -1,20 +1,11 @@
-# This module creates a virtual machine from the NixOS configuration.
-# Building the `config.system.build.vm' attribute gives you a command
-# that starts a KVM/QEMU VM running the NixOS configuration defined in
-# `config'. The Nix store is shared read-only with the host, which
-# makes (re)building VMs very efficient. However, it also means you
-# can't reconfigure the guest inside the guest - you need to rebuild
-# the VM in the host. On the other hand, the root filesystem is a
-# read/writable disk image persistent across VM reboots.
-
{ config, lib, pkgs, ... }:
with lib;
-with import ../../lib/qemu-flags.nix { inherit pkgs; };
+with import ../tests/lib/qemu-flags.nix { inherit pkgs; };
let
- qemu = config.system.build.qemu or pkgs.qemu_test;
+ qemu = config.system.build.qemu;
cfg = config.virtualisation;
@@ -31,13 +22,13 @@ let
driveExtraOpts = mkOption {
type = types.attrsOf types.str;
- default = {};
+ default = { };
description = "Extra options passed to drive flag.";
};
deviceExtraOpts = mkOption {
type = types.attrsOf types.str;
- default = {};
+ default = { };
description = "Extra options passed to device flag.";
};
@@ -52,10 +43,11 @@ let
};
- driveCmdline = idx: { file, driveExtraOpts, deviceExtraOpts, ... }:
+ driveCmdline = idx:
+ { file, driveExtraOpts, deviceExtraOpts, ... }:
let
drvId = "drive${toString idx}";
- mkKeyValue = generators.mkKeyValueDefault {} "=";
+ mkKeyValue = generators.mkKeyValueDefault { } "=";
mkOpts = opts: concatStringsSep "," (mapAttrsToList mkKeyValue opts);
driveOpts = mkOpts (driveExtraOpts // {
index = idx;
@@ -63,20 +55,15 @@ let
"if" = "none";
inherit file;
});
- deviceOpts = mkOpts (deviceExtraOpts // {
- drive = drvId;
- });
- device =
- if cfg.qemu.diskInterface == "scsi" then
- "-device lsi53c895a -device scsi-hd,${deviceOpts}"
- else
- "-device virtio-blk-pci,${deviceOpts}";
- in
- "-drive ${driveOpts} ${device}";
+ deviceOpts = mkOpts (deviceExtraOpts // { drive = drvId; });
+ device = if cfg.qemu.diskInterface == "scsi" then
+ "-device lsi53c895a -device scsi-hd,${deviceOpts}"
+ else
+ "-device virtio-blk-pci,${deviceOpts}";
+ in "-drive ${driveOpts} ${device}";
drivesCmdLine = drives: concatStringsSep " " (imap1 driveCmdline drives);
-
# Creates a device name from a 1-based a numerical index, e.g.
# * `driveDeviceName 1` -> `/dev/vda`
# * `driveDeviceName 2` -> `/dev/vdb`
@@ -95,324 +82,279 @@ let
addDeviceNames =
imap1 (idx: drive: drive // { device = driveDeviceName idx; });
- efiPrefix =
- if (pkgs.stdenv.isi686 || pkgs.stdenv.isx86_64) then "${pkgs.OVMF.fd}/FV/OVMF"
- else if pkgs.stdenv.isAarch64 then "${pkgs.OVMF.fd}/FV/AAVMF"
- else throw "No EFI firmware available for platform";
+ efiPrefix = if (pkgs.stdenv.isi686 || pkgs.stdenv.isx86_64) then
+ "${pkgs.OVMF.fd}/FV/OVMF"
+ else if pkgs.stdenv.isAarch64 then
+ "${pkgs.OVMF.fd}/FV/AAVMF"
+ else
+ throw "No EFI firmware available for platform";
efiFirmware = "${efiPrefix}_CODE.fd";
efiVarsDefault = "${efiPrefix}_VARS.fd";
# Shell script to start the VM.
- startVM =
- ''
- #! ${pkgs.runtimeShell}
+ startVM = ''
+ #! ${pkgs.buildPackages.runtimeShell}
- NIX_DISK_IMAGE=$(readlink -f ''${NIX_DISK_IMAGE:-${config.virtualisation.diskImage}})
+ NIX_DISK_IMAGE=$(readlink -f ''${NIX_DISK_IMAGE:-${config.virtualisation.diskImage}})
- if ! test -e "$NIX_DISK_IMAGE"; then
- ${qemu}/bin/qemu-img create -f qcow2 "$NIX_DISK_IMAGE" \
- ${toString config.virtualisation.diskSize}M || exit 1
- fi
+ if ! test -e "$NIX_DISK_IMAGE"; then
+ ${qemu}/bin/qemu-img create -f qcow2 "$NIX_DISK_IMAGE" \
+ ${toString config.virtualisation.diskSize}M || exit 1
+ fi
- # Create a directory for storing temporary data of the running VM.
- if [ -z "$TMPDIR" -o -z "$USE_TMPDIR" ]; then
- TMPDIR=$(mktemp -d nix-vm.XXXXXXXXXX --tmpdir)
- fi
+ # Create a directory for storing temporary data of the running VM.
+ if [ -z "$TMPDIR" -o -z "$USE_TMPDIR" ]; then
+ TMPDIR=$(mktemp -d nix-vm.XXXXXXXXXX --tmpdir)
+ fi
- # Create a directory for exchanging data with the VM.
- mkdir -p $TMPDIR/xchg
+ # Create a directory for exchanging data with the VM.
+ mkdir -p $TMPDIR/xchg
- ${if cfg.useBootLoader then ''
- # Create a writable copy/snapshot of the boot disk.
- # A writable boot disk can be booted from automatically.
- ${qemu}/bin/qemu-img create -f qcow2 -b ${bootDisk}/disk.img $TMPDIR/disk.img || exit 1
+ ${if cfg.useBootLoader then ''
+ # Create a writable copy/snapshot of the boot disk.
+ # A writable boot disk can be booted from automatically.
+ ${qemu}/bin/qemu-img create -f qcow2 -b ${bootDisk}/disk.img $TMPDIR/disk.img || exit 1
- NIX_EFI_VARS=$(readlink -f ''${NIX_EFI_VARS:-${cfg.efiVars}})
+ NIX_EFI_VARS=$(readlink -f ''${NIX_EFI_VARS:-${cfg.efiVars}})
- ${if cfg.useEFIBoot then ''
- # VM needs writable EFI vars
- if ! test -e "$NIX_EFI_VARS"; then
- cp ${bootDisk}/efi-vars.fd "$NIX_EFI_VARS" || exit 1
- chmod 0644 "$NIX_EFI_VARS" || exit 1
- fi
- '' else ''
- ''}
- '' else ''
- ''}
-
- cd $TMPDIR
- idx=0
- ${flip concatMapStrings cfg.emptyDiskImages (size: ''
- if ! test -e "empty$idx.qcow2"; then
- ${qemu}/bin/qemu-img create -f qcow2 "empty$idx.qcow2" "${toString size}M"
+ ${if cfg.useEFIBoot then ''
+ # VM needs writable EFI vars
+ if ! test -e "$NIX_EFI_VARS"; then
+ cp ${bootDisk}/efi-vars.fd "$NIX_EFI_VARS" || exit 1
+ chmod 0644 "$NIX_EFI_VARS" || exit 1
fi
- idx=$((idx + 1))
- '')}
+ '' else
+ ""}
+ '' else
+ ""}
- # Start QEMU.
- exec ${qemuBinary qemu} \
- -name ${config.system.name} \
- -m ${toString config.virtualisation.memorySize} \
- -smp ${toString config.virtualisation.cores} \
- -device virtio-rng-pci \
- ${concatStringsSep " " config.virtualisation.qemu.networkingOptions} \
- -virtfs local,path=/nix/store,security_model=none,mount_tag=store \
- -virtfs local,path=$TMPDIR/xchg,security_model=none,mount_tag=xchg \
- -virtfs local,path=''${SHARED_DIR:-$TMPDIR/xchg},security_model=none,mount_tag=shared \
- ${drivesCmdLine config.virtualisation.qemu.drives} \
- ${toString config.virtualisation.qemu.options} \
- $QEMU_OPTS \
- "$@"
- '';
+ cd $TMPDIR
+ idx=0
+ ${flip concatMapStrings cfg.emptyDiskImages (size: ''
+ if ! test -e "empty$idx.qcow2"; then
+ ${qemu}/bin/qemu-img create -f qcow2 "empty$idx.qcow2" "${
+ toString size
+ }M"
+ fi
+ idx=$((idx + 1))
+ '')}
+ # Start QEMU.
+ exec ${qemuBinary qemu} \
+ -name ${config.system.name} \
+ -m ${toString config.virtualisation.memorySize} \
+ -smp ${toString config.virtualisation.cores} \
+ -device virtio-rng-pci \
+ ${concatStringsSep " " config.virtualisation.qemu.networkingOptions} \
+ -virtfs local,path=/nix/store,security_model=none,mount_tag=store \
+ -virtfs local,path=$TMPDIR/xchg,security_model=none,mount_tag=xchg \
+ -virtfs local,path=''${SHARED_DIR:-$TMPDIR/xchg},security_model=none,mount_tag=shared \
+ ${drivesCmdLine config.virtualisation.qemu.drives} \
+ ${toString config.virtualisation.qemu.options} \
+ $QEMU_OPTS \
+ "$@"
+ '';
- regInfo = pkgs.closureInfo { rootPaths = config.virtualisation.pathsInNixDB; };
-
+ regInfo =
+ pkgs.closureInfo { rootPaths = config.virtualisation.pathsInNixDB; };
# Generate a hard disk image containing a /boot partition and GRUB
# in the MBR. Used when the `useBootLoader' option is set.
# Uses `runInLinuxVM` to create the image in a throwaway VM.
# See note [Disk layout with `useBootLoader`].
# FIXME: use nixos/lib/make-disk-image.nix.
- bootDisk =
- pkgs.vmTools.runInLinuxVM (
- pkgs.runCommand "nixos-boot-disk"
- { preVM =
- ''
- mkdir $out
- diskImage=$out/disk.img
- ${qemu}/bin/qemu-img create -f qcow2 $diskImage "60M"
- ${if cfg.useEFIBoot then ''
- efiVars=$out/efi-vars.fd
- cp ${efiVarsDefault} $efiVars
- chmod 0644 $efiVars
- '' else ''
- ''}
- '';
- buildInputs = [ pkgs.utillinux ];
- QEMU_OPTS = "-nographic -serial stdio -monitor none"
- + lib.optionalString cfg.useEFIBoot (
- " -drive if=pflash,format=raw,unit=0,readonly=on,file=${efiFirmware}"
- + " -drive if=pflash,format=raw,unit=1,file=$efiVars");
- }
- ''
- # Create a /boot EFI partition with 60M and arbitrary but fixed GUIDs for reproducibility
- ${pkgs.gptfdisk}/bin/sgdisk \
- --set-alignment=1 --new=1:34:2047 --change-name=1:BIOSBootPartition --typecode=1:ef02 \
- --set-alignment=512 --largest-new=2 --change-name=2:EFISystem --typecode=2:ef00 \
- --attributes=1:set:1 \
- --attributes=2:set:2 \
- --disk-guid=97FD5997-D90B-4AA3-8D16-C1723AEA73C1 \
- --partition-guid=1:1C06F03B-704E-4657-B9CD-681A087A2FDC \
- --partition-guid=2:970C694F-AFD0-4B99-B750-CDB7A329AB6F \
- --hybrid 2 \
- --recompute-chs /dev/vda
+ bootDisk = pkgs.vmTools.runInLinuxVM (pkgs.runCommand "nixos-boot-disk" {
+ preVM = ''
+ mkdir $out
+ diskImage=$out/disk.img
+ ${qemu}/bin/qemu-img create -f qcow2 $diskImage "60M"
+ ${if cfg.useEFIBoot then ''
+ efiVars=$out/efi-vars.fd
+ cp ${efiVarsDefault} $efiVars
+ chmod 0644 $efiVars
+ '' else
+ ""}
+ '';
+ buildInputs = [ pkgs.utillinux ];
+ QEMU_OPTS = "-nographic -serial stdio -monitor none"
+ + lib.optionalString cfg.useEFIBoot
+ (" -drive if=pflash,format=raw,unit=0,readonly=on,file=${efiFirmware}"
+ + " -drive if=pflash,format=raw,unit=1,file=$efiVars");
+ } ''
+ # Create a /boot EFI partition with 60M and arbitrary but fixed GUIDs for reproducibility
+ ${pkgs.gptfdisk}/bin/sgdisk \
+ --set-alignment=1 --new=1:34:2047 --change-name=1:BIOSBootPartition --typecode=1:ef02 \
+ --set-alignment=512 --largest-new=2 --change-name=2:EFISystem --typecode=2:ef00 \
+ --attributes=1:set:1 \
+ --attributes=2:set:2 \
+ --disk-guid=97FD5997-D90B-4AA3-8D16-C1723AEA73C1 \
+ --partition-guid=1:1C06F03B-704E-4657-B9CD-681A087A2FDC \
+ --partition-guid=2:970C694F-AFD0-4B99-B750-CDB7A329AB6F \
+ --hybrid 2 \
+ --recompute-chs /dev/vda
- ${optionalString (config.boot.loader.grub.device != "/dev/vda")
- # In this throwaway VM, we only have the /dev/vda disk, but the
- # actual VM described by `config` (used by `switch-to-configuration`
- # below) may set `boot.loader.grub.device` to a different device
- # that's nonexistent in the throwaway VM.
- # Create a symlink for that device, so that the `grub-install`
- # by `switch-to-configuration` will hit /dev/vda anyway.
- ''
- ln -s /dev/vda ${config.boot.loader.grub.device}
- ''
- }
+ ${optionalString (config.boot.loader.grub.device != "/dev/vda")
+ # In this throwaway VM, we only have the /dev/vda disk, but the
+ # actual VM described by `config` (used by `switch-to-configuration`
+ # below) may set `boot.loader.grub.device` to a different device
+ # that's nonexistent in the throwaway VM.
+ # Create a symlink for that device, so that the `grub-install`
+ # by `switch-to-configuration` will hit /dev/vda anyway.
+ ''
+ ln -s /dev/vda ${config.boot.loader.grub.device}
+ ''}
- ${pkgs.dosfstools}/bin/mkfs.fat -F16 /dev/vda2
- export MTOOLS_SKIP_CHECK=1
- ${pkgs.mtools}/bin/mlabel -i /dev/vda2 ::boot
+ ${pkgs.dosfstools}/bin/mkfs.fat -F16 /dev/vda2
+ export MTOOLS_SKIP_CHECK=1
+ ${pkgs.mtools}/bin/mlabel -i /dev/vda2 ::boot
- # Mount /boot; load necessary modules first.
- ${pkgs.kmod}/bin/insmod ${pkgs.linux}/lib/modules/*/kernel/fs/nls/nls_cp437.ko.xz || true
- ${pkgs.kmod}/bin/insmod ${pkgs.linux}/lib/modules/*/kernel/fs/nls/nls_iso8859-1.ko.xz || true
- ${pkgs.kmod}/bin/insmod ${pkgs.linux}/lib/modules/*/kernel/fs/fat/fat.ko.xz || true
- ${pkgs.kmod}/bin/insmod ${pkgs.linux}/lib/modules/*/kernel/fs/fat/vfat.ko.xz || true
- ${pkgs.kmod}/bin/insmod ${pkgs.linux}/lib/modules/*/kernel/fs/efivarfs/efivarfs.ko.xz || true
- mkdir /boot
- mount /dev/vda2 /boot
+ mkdir /boot
+ mount /dev/vda2 /boot
- ${optionalString config.boot.loader.efi.canTouchEfiVariables ''
- mount -t efivarfs efivarfs /sys/firmware/efi/efivars
- ''}
+ ${optionalString config.boot.loader.efi.canTouchEfiVariables ''
+ mount -t efivarfs efivarfs /sys/firmware/efi/efivars
+ ''}
- # This is needed for GRUB 0.97, which doesn't know about virtio devices.
- mkdir /boot/grub
- echo '(hd0) /dev/vda' > /boot/grub/device.map
+ # This is needed for GRUB 0.97, which doesn't know about virtio devices.
+ mkdir /boot/grub
+ echo '(hd0) /dev/vda' > /boot/grub/device.map
- # This is needed for systemd-boot to find ESP, and udev is not available here to create this
- mkdir -p /dev/block
- ln -s /dev/vda2 /dev/block/254:2
+ # This is needed for systemd-boot to find ESP, and udev is not available here to create this
+ mkdir -p /dev/block
+ ln -s /dev/vda2 /dev/block/254:2
- # Set up system profile (normally done by nixos-rebuild / nix-env --set)
- mkdir -p /nix/var/nix/profiles
- ln -s ${config.system.build.toplevel} /nix/var/nix/profiles/system-1-link
- ln -s /nix/var/nix/profiles/system-1-link /nix/var/nix/profiles/system
+ # Install bootloader
+ touch /etc/NIXOS
+ export NIXOS_INSTALL_BOOTLOADER=1
+ ${config.system.build.toplevel}/bin/switch-to-configuration boot
- # Install bootloader
- touch /etc/NIXOS
- export NIXOS_INSTALL_BOOTLOADER=1
- ${config.system.build.toplevel}/bin/switch-to-configuration boot
-
- umount /boot
- '' # */
- );
-
-in
-
-{
- imports = [
- ../profiles/qemu-guest.nix
- ];
+ umount /boot
+ '' # */
+ );
+in {
options = {
- virtualisation.memorySize =
- mkOption {
- default = 384;
- description =
- ''
- Memory size (M) of virtual machine.
- '';
- };
+ virtualisation.memorySize = mkOption {
+ default = 384;
+ description = ''
+ Memory size (M) of virtual machine.
+ '';
+ };
- virtualisation.diskSize =
- mkOption {
- default = 512;
- description =
- ''
- Disk size (M) of virtual machine.
- '';
- };
+ virtualisation.diskSize = mkOption {
+ default = 512;
+ description = ''
+ Disk size (M) of virtual machine.
+ '';
+ };
- virtualisation.diskImage =
- mkOption {
- default = "./${config.system.name}.qcow2";
- description =
- ''
- Path to the disk image containing the root filesystem.
- The image will be created on startup if it does not
- exist.
- '';
- };
+ virtualisation.diskImage = mkOption {
+ default = "./${config.system.name}.qcow2";
+ description = ''
+ Path to the disk image containing the root filesystem.
+ The image will be created on startup if it does not
+ exist.
+ '';
+ };
- virtualisation.bootDevice =
- mkOption {
- type = types.str;
- example = "/dev/vda";
- description =
- ''
- The disk to be used for the root filesystem.
- '';
- };
+ virtualisation.bootDevice = mkOption {
+ type = types.str;
+ example = "/dev/vda";
+ description = ''
+ The disk to be used for the root filesystem.
+ '';
+ };
- virtualisation.emptyDiskImages =
- mkOption {
- default = [];
- type = types.listOf types.int;
- description =
- ''
- Additional disk images to provide to the VM. The value is
- a list of size in megabytes of each disk. These disks are
- writeable by the VM.
- '';
- };
+ virtualisation.emptyDiskImages = mkOption {
+ default = [ ];
+ type = types.listOf types.int;
+ description = ''
+ Additional disk images to provide to the VM. The value is
+ a list of size in megabytes of each disk. These disks are
+ writeable by the VM.
+ '';
+ };
- virtualisation.graphics =
- mkOption {
- default = true;
- description =
- ''
- Whether to run QEMU with a graphics window, or in nographic mode.
- Serial console will be enabled on both settings, but this will
- change the preferred console.
- '';
- };
+ virtualisation.graphics = mkOption {
+ default = true;
+ description = ''
+ Whether to run QEMU with a graphics window, or in nographic mode.
+ Serial console will be enabled on both settings, but this will
+ change the preferred console.
+ '';
+ };
- virtualisation.cores =
- mkOption {
- default = 1;
- type = types.int;
- description =
- ''
- Specify the number of cores the guest is permitted to use.
- The number can be higher than the available cores on the
- host system.
- '';
- };
+ virtualisation.cores = mkOption {
+ default = 1;
+ type = types.int;
+ description = ''
+ Specify the number of cores the guest is permitted to use.
+ The number can be higher than the available cores on the
+ host system.
+ '';
+ };
- virtualisation.pathsInNixDB =
- mkOption {
- default = [];
- description =
- ''
- The list of paths whose closure is registered in the Nix
- database in the VM. All other paths in the host Nix store
- appear in the guest Nix store as well, but are considered
- garbage (because they are not registered in the Nix
- database in the guest).
- '';
- };
+ virtualisation.pathsInNixDB = mkOption {
+ default = [ ];
+ description = ''
+ The list of paths whose closure is registered in the Nix
+ database in the VM. All other paths in the host Nix store
+ appear in the guest Nix store as well, but are considered
+ garbage (because they are not registered in the Nix
+ database in the guest).
+ '';
+ };
- virtualisation.vlans =
- mkOption {
- default = [ 1 ];
- example = [ 1 2 ];
- description =
- ''
- Virtual networks to which the VM is connected. Each
- number N in this list causes
- the VM to have a virtual Ethernet interface attached to a
- separate virtual network on which it will be assigned IP
- address
- 192.168.N.M,
- where M is the index of this VM
- in the list of VMs.
- '';
- };
+ virtualisation.vlans = mkOption {
+ default = [ 1 ];
+ example = [ 1 2 ];
+ description = ''
+ Virtual networks to which the VM is connected. Each
+ number N in this list causes
+ the VM to have a virtual Ethernet interface attached to a
+ separate virtual network on which it will be assigned IP
+ address
+ 192.168.N.M,
+ where M is the index of this VM
+ in the list of VMs.
+ '';
+ };
- virtualisation.writableStore =
- mkOption {
- default = true; # FIXME
- description =
- ''
- If enabled, the Nix store in the VM is made writable by
- layering an overlay filesystem on top of the host's Nix
- store.
- '';
- };
+ virtualisation.writableStore = mkOption {
+ default = true; # FIXME
+ description = ''
+ If enabled, the Nix store in the VM is made writable by
+ layering an overlay filesystem on top of the host's Nix
+ store.
+ '';
+ };
- virtualisation.writableStoreUseTmpfs =
- mkOption {
- default = true;
- description =
- ''
- Use a tmpfs for the writable store instead of writing to the VM's
- own filesystem.
- '';
- };
+ virtualisation.writableStoreUseTmpfs = mkOption {
+ default = true;
+ description = ''
+ Use a tmpfs for the writable store instead of writing to the VM's
+ own filesystem.
+ '';
+ };
- networking.primaryIPAddress =
- mkOption {
- default = "";
- internal = true;
- description = "Primary IP address used in /etc/hosts.";
- };
+ networking.primaryIPAddress = mkOption {
+ default = "";
+ internal = true;
+ description = "Primary IP address used in /etc/hosts.";
+ };
virtualisation.qemu = {
- options =
- mkOption {
- type = types.listOf types.unspecified;
- default = [];
- example = [ "-vga std" ];
- description = "Options passed to QEMU.";
- };
+ options = mkOption {
+ type = types.listOf types.unspecified;
+ default = [ ];
+ example = [ "-vga std" ];
+ description = "Options passed to QEMU.";
+ };
consoles = mkOption {
type = types.listOf types.str;
- default = let
- consoles = [ "${qemuSerialDevice},115200n8" "tty0" ];
+ default = let consoles = [ "${qemuSerialDevice},115200n8" "tty0" ];
in if cfg.graphics then consoles else reverseList consoles;
example = [ "console=tty1" ];
description = ''
@@ -426,94 +368,82 @@ in
'';
};
- networkingOptions =
- mkOption {
- default = [
- "-net nic,netdev=user.0,model=virtio"
- "-netdev user,id=user.0\${QEMU_NET_OPTS:+,$QEMU_NET_OPTS}"
- ];
- type = types.listOf types.str;
- description = ''
- Networking-related command-line options that should be passed to qemu.
- The default is to use userspace networking (slirp).
+ networkingOptions = mkOption {
+ default = [
+ "-net nic,netdev=user.0,model=virtio"
+ "-netdev user,id=user.0\${QEMU_NET_OPTS:+,$QEMU_NET_OPTS}"
+ ];
+ type = types.listOf types.str;
+ description = ''
+ Networking-related command-line options that should be passed to qemu.
+ The default is to use userspace networking (slirp).
- If you override this option, be advised to keep
- ''${QEMU_NET_OPTS:+,$QEMU_NET_OPTS} (as seen in the default)
- to keep the default runtime behaviour.
- '';
- };
+ If you override this option, be advised to keep
+ ''${QEMU_NET_OPTS:+,$QEMU_NET_OPTS} (as seen in the default)
+ to keep the default runtime behaviour.
+ '';
+ };
- drives =
- mkOption {
- type = types.listOf (types.submodule driveOpts);
- description = "Drives passed to qemu.";
- apply = addDeviceNames;
- };
+ drives = mkOption {
+ type = types.listOf (types.submodule driveOpts);
+ description = "Drives passed to qemu.";
+ apply = addDeviceNames;
+ };
- diskInterface =
- mkOption {
- default = "virtio";
- example = "scsi";
- type = types.enum [ "virtio" "scsi" "ide" ];
- description = "The interface used for the virtual hard disks.";
- };
+ diskInterface = mkOption {
+ default = "virtio";
+ example = "scsi";
+ type = types.enum [ "virtio" "scsi" "ide" ];
+ description = "The interface used for the virtual hard disks.";
+ };
- guestAgent.enable =
- mkOption {
- default = true;
- type = types.bool;
- description = ''
- Enable the Qemu guest agent.
- '';
- };
+ guestAgent.enable = mkOption {
+ default = true;
+ type = types.bool;
+ description = ''
+ Enable the Qemu guest agent.
+ '';
+ };
};
- virtualisation.useBootLoader =
- mkOption {
- default = false;
- description =
- ''
- If enabled, the virtual machine will be booted using the
- regular boot loader (i.e., GRUB 1 or 2). This allows
- testing of the boot loader. If
- disabled (the default), the VM directly boots the NixOS
- kernel and initial ramdisk, bypassing the boot loader
- altogether.
- '';
- };
+ virtualisation.useBootLoader = mkOption {
+ default = false;
+ description = ''
+ If enabled, the virtual machine will be booted using the
+ regular boot loader (i.e., GRUB 1 or 2). This allows
+ testing of the boot loader. If
+ disabled (the default), the VM directly boots the NixOS
+ kernel and initial ramdisk, bypassing the boot loader
+ altogether.
+ '';
+ };
- virtualisation.useEFIBoot =
- mkOption {
- default = false;
- description =
- ''
- If enabled, the virtual machine will provide a EFI boot
- manager.
- useEFIBoot is ignored if useBootLoader == false.
- '';
- };
+ virtualisation.useEFIBoot = mkOption {
+ default = false;
+ description = ''
+        If enabled, the virtual machine will provide an EFI boot
+ manager.
+ useEFIBoot is ignored if useBootLoader == false.
+ '';
+ };
- virtualisation.efiVars =
- mkOption {
- default = "./${config.system.name}-efi-vars.fd";
- description =
- ''
- Path to nvram image containing UEFI variables. The will be created
- on startup if it does not exist.
- '';
- };
+ virtualisation.efiVars = mkOption {
+ default = "./${config.system.name}-efi-vars.fd";
+ description = ''
+        Path to nvram image containing UEFI variables. It will be created
+ on startup if it does not exist.
+ '';
+ };
- virtualisation.bios =
- mkOption {
- default = null;
- type = types.nullOr types.package;
- description =
- ''
- An alternate BIOS (such as qboot) with which to start the VM.
- Should contain a file named bios.bin.
- If null, QEMU's builtin SeaBIOS will be used.
- '';
- };
+ virtualisation.bios = mkOption {
+ default = null;
+ type = types.nullOr types.package;
+ description = ''
+ An alternate BIOS (such as qboot) with which to start the VM.
+ Should contain a file named bios.bin.
+ If null, QEMU's builtin SeaBIOS will be used.
+ '';
+ };
};
@@ -535,92 +465,67 @@ in
# If `useBootLoader`, GRUB goes to the second disk, see
# note [Disk layout with `useBootLoader`].
- boot.loader.grub.device = mkVMOverride (
- if cfg.useBootLoader
- then driveDeviceName 2 # second disk
- else cfg.bootDevice
- );
+ boot.loader.grub.device = mkVMOverride (if cfg.useBootLoader then
+ driveDeviceName 2 # second disk
+ else
+ cfg.bootDevice);
- boot.initrd.extraUtilsCommands =
- ''
- # We need mke2fs in the initrd.
- copy_bin_and_libs ${pkgs.e2fsprogs}/bin/mke2fs
- '';
+ boot.initrd.extraUtilsCommands = ''
+ # We need mke2fs in the initrd.
+ copy_bin_and_libs ${pkgs.e2fsprogs}/bin/mke2fs
+ '';
- boot.initrd.postDeviceCommands =
- ''
- # If the disk image appears to be empty, run mke2fs to
- # initialise.
- FSTYPE=$(blkid -o value -s TYPE ${cfg.bootDevice} || true)
- if test -z "$FSTYPE"; then
- mke2fs -t ext4 ${cfg.bootDevice}
- fi
- '';
+ boot.initrd.postDeviceCommands = ''
+ # If the disk image appears to be empty, run mke2fs to
+ # initialise.
+ FSTYPE=$(blkid -o value -s TYPE ${cfg.bootDevice} || true)
+ if test -z "$FSTYPE"; then
+ mke2fs -t ext4 ${cfg.bootDevice}
+ fi
+ '';
- boot.initrd.postMountCommands =
- ''
- # Mark this as a NixOS machine.
- mkdir -p $targetRoot/etc
- echo -n > $targetRoot/etc/NIXOS
+ boot.initrd.postMountCommands = ''
+ # Mark this as a NixOS machine.
+ mkdir -p $targetRoot/etc
+ echo -n > $targetRoot/etc/NIXOS
- # Fix the permissions on /tmp.
- chmod 1777 $targetRoot/tmp
+ # Fix the permissions on /tmp.
+ chmod 1777 $targetRoot/tmp
- mkdir -p $targetRoot/boot
+ mkdir -p $targetRoot/boot
- ${optionalString cfg.writableStore ''
- echo "mounting overlay filesystem on /nix/store..."
- mkdir -p 0755 $targetRoot/nix/.rw-store/store $targetRoot/nix/.rw-store/work $targetRoot/nix/store
- mount -t overlay overlay $targetRoot/nix/store \
- -o lowerdir=$targetRoot/nix/.ro-store,upperdir=$targetRoot/nix/.rw-store/store,workdir=$targetRoot/nix/.rw-store/work || fail
- ''}
- '';
-
- # After booting, register the closure of the paths in
- # `virtualisation.pathsInNixDB' in the Nix database in the VM. This
- # allows Nix operations to work in the VM. The path to the
- # registration file is passed through the kernel command line to
- # allow `system.build.toplevel' to be included. (If we had a direct
- # reference to ${regInfo} here, then we would get a cyclic
- # dependency.)
- boot.postBootCommands =
- ''
- if [[ "$(cat /proc/cmdline)" =~ regInfo=([^ ]*) ]]; then
- ${config.nix.package.out}/bin/nix-store --load-db < ''${BASH_REMATCH[1]}
- fi
- '';
-
- boot.initrd.availableKernelModules =
- optional cfg.writableStore "overlay"
- ++ optional (cfg.qemu.diskInterface == "scsi") "sym53c8xx";
+ ${optionalString cfg.writableStore ''
+ echo "mounting overlay filesystem on /nix/store..."
+ mkdir -p 0755 $targetRoot/nix/.rw-store/store $targetRoot/nix/.rw-store/work $targetRoot/nix/store
+ mount -t overlay overlay $targetRoot/nix/store \
+ -o lowerdir=$targetRoot/nix/.ro-store,upperdir=$targetRoot/nix/.rw-store/store,workdir=$targetRoot/nix/.rw-store/work || fail
+ ''}
+ '';
virtualisation.bootDevice = mkDefault (driveDeviceName 1);
- virtualisation.pathsInNixDB = [ config.system.build.toplevel ];
-
# FIXME: Consolidate this one day.
virtualisation.qemu.options = mkMerge [
(mkIf (pkgs.stdenv.isi686 || pkgs.stdenv.isx86_64) [
- "-usb" "-device usb-tablet,bus=usb-bus.0"
+ "-usb"
+ "-device usb-tablet,bus=usb-bus.0"
])
(mkIf (pkgs.stdenv.isAarch32 || pkgs.stdenv.isAarch64) [
- "-device virtio-gpu-pci" "-device usb-ehci,id=usb0" "-device usb-kbd" "-device usb-tablet"
+ "-device virtio-gpu-pci"
+ "-device usb-ehci,id=usb0"
+ "-device usb-kbd"
+ "-device usb-tablet"
])
(mkIf (!cfg.useBootLoader) [
- "-kernel ${config.system.build.toplevel}/kernel"
- "-initrd ${config.system.build.toplevel}/initrd"
- ''-append "$(cat ${config.system.build.toplevel}/kernel-params) init=${config.system.build.toplevel}/init regInfo=${regInfo}/registration ${consoles} $QEMU_KERNEL_PARAMS"''
+ "-kernel ${config.genode.boot.kernel}"
+ "-initrd ${config.genode.boot.initrd}"
])
(mkIf cfg.useEFIBoot [
"-drive if=pflash,format=raw,unit=0,readonly,file=${efiFirmware}"
"-drive if=pflash,format=raw,unit=1,file=$NIX_EFI_VARS"
])
- (mkIf (cfg.bios != null) [
- "-bios ${cfg.bios}/bios.bin"
- ])
- (mkIf (!cfg.graphics) [
- "-nographic"
- ])
+ (mkIf (cfg.bios != null) [ "-bios ${cfg.bios}/bios.bin" ])
+ (mkIf (!cfg.graphics) [ "-nographic" ])
];
virtualisation.qemu.drives = mkMerge [
@@ -646,106 +551,12 @@ in
}) cfg.emptyDiskImages)
];
- # Mount the host filesystem via 9P, and bind-mount the Nix store
- # of the host into our own filesystem. We use mkVMOverride to
- # allow this module to be applied to "normal" NixOS system
- # configuration, where the regular value for the `fileSystems'
- # attribute should be disregarded for the purpose of building a VM
- # test image (since those filesystems don't exist in the VM).
- fileSystems = mkVMOverride (
- { "/".device = cfg.bootDevice;
- ${if cfg.writableStore then "/nix/.ro-store" else "/nix/store"} =
- { device = "store";
- fsType = "9p";
- options = [ "trans=virtio" "version=9p2000.L" "cache=loose" ];
- neededForBoot = true;
- };
- "/tmp" = mkIf config.boot.tmpOnTmpfs
- { device = "tmpfs";
- fsType = "tmpfs";
- neededForBoot = true;
- # Sync with systemd's tmp.mount;
- options = [ "mode=1777" "strictatime" "nosuid" "nodev" ];
- };
- "/tmp/xchg" =
- { device = "xchg";
- fsType = "9p";
- options = [ "trans=virtio" "version=9p2000.L" ];
- neededForBoot = true;
- };
- "/tmp/shared" =
- { device = "shared";
- fsType = "9p";
- options = [ "trans=virtio" "version=9p2000.L" ];
- neededForBoot = true;
- };
- } // optionalAttrs (cfg.writableStore && cfg.writableStoreUseTmpfs)
- { "/nix/.rw-store" =
- { fsType = "tmpfs";
- options = [ "mode=0755" ];
- neededForBoot = true;
- };
- } // optionalAttrs cfg.useBootLoader
- { "/boot" =
- # see note [Disk layout with `useBootLoader`]
- { device = "${lookupDriveDeviceName "boot" cfg.qemu.drives}2"; # 2 for e.g. `vdb2`, as created in `bootDisk`
- fsType = "vfat";
- noCheck = true; # fsck fails on a r/o filesystem
- };
- });
-
- swapDevices = mkVMOverride [ ];
- boot.initrd.luks.devices = mkVMOverride {};
-
- # Don't run ntpd in the guest. It should get the correct time from KVM.
- services.timesyncd.enable = false;
-
- services.qemuGuest.enable = cfg.qemu.guestAgent.enable;
-
- system.build.vm = pkgs.runCommand "nixos-vm" { preferLocalBuild = true; }
- ''
- mkdir -p $out/bin
- ln -s ${config.system.build.toplevel} $out/system
- ln -s ${pkgs.writeScript "run-nixos-vm" startVM} $out/bin/run-${config.system.name}-vm
- '';
-
- # When building a regular system configuration, override whatever
- # video driver the host uses.
- services.xserver.videoDrivers = mkVMOverride [ "modesetting" ];
- services.xserver.defaultDepth = mkVMOverride 0;
- services.xserver.resolutions = mkVMOverride [ { x = 1024; y = 768; } ];
- services.xserver.monitorSection =
- ''
- # Set a higher refresh rate so that resolutions > 800x600 work.
- HorizSync 30-140
- VertRefresh 50-160
- '';
-
- # Wireless won't work in the VM.
- networking.wireless.enable = mkVMOverride false;
- services.connman.enable = mkVMOverride false;
-
- # Speed up booting by not waiting for ARP.
- networking.dhcpcd.extraConfig = "noarp";
-
- networking.usePredictableInterfaceNames = false;
-
- system.requiredKernelConfig = with config.lib.kernelConfig;
- [ (isEnabled "VIRTIO_BLK")
- (isEnabled "VIRTIO_PCI")
- (isEnabled "VIRTIO_NET")
- (isEnabled "EXT4_FS")
- (isYes "BLK_DEV")
- (isYes "PCI")
- (isYes "EXPERIMENTAL")
- (isYes "NETDEVICES")
- (isYes "NET_CORE")
- (isYes "INET")
- (isYes "NETWORK_FILESYSTEMS")
- ] ++ optional (!cfg.graphics) [
- (isYes "SERIAL_8250_CONSOLE")
- (isYes "SERIAL_8250")
- ];
+ system.build.vm = pkgs.runCommand "nixos-vm" { preferLocalBuild = true; } ''
+ mkdir -p $out/bin
+ ln -s ${
+ pkgs.writeScript "run-nixos-vm" startVM
+ } $out/bin/run-${config.system.name}-vm
+ '';
};
}
diff --git a/nixos-modules/store-wrapper.dhall b/nixos-modules/store-wrapper.dhall
new file mode 100644
index 0000000..1922540
--- /dev/null
+++ b/nixos-modules/store-wrapper.dhall
@@ -0,0 +1,166 @@
+let Genode =
+ env:DHALL_GENODE sha256:e90438be23b5100003cf018b783986df67bc6d0e3d35e800677d0d9109ff6aa9
+
+let Prelude = Genode.Prelude
+
+let XML = Prelude.XML
+
+let Init = Genode.Init
+
+let Child = Init.Child
+
+let TextMapType = Prelude.Map.Type Text
+
+let Manifest/Type = TextMapType (TextMapType Text)
+
+let Manifest/toRoutes =
+ λ(manifest : Manifest/Type) →
+ Prelude.List.map
+ (Prelude.Map.Entry Text Text)
+ Init.ServiceRoute.Type
+ ( λ(entry : Prelude.Map.Entry Text Text) →
+ { service =
+ { name = "ROM"
+ , label = Init.LabelSelector.Type.Last entry.mapKey
+ }
+ , route =
+ Init.Route.Type.Child
+ { name = "store_rom"
+ , label = Some entry.mapValue
+ , diag = Some True
+ }
+ }
+ )
+ ( Prelude.List.concat
+ (Prelude.Map.Entry Text Text)
+ (Prelude.Map.values Text (Prelude.Map.Type Text Text) manifest)
+ )
+
+let parentROMs =
+ Prelude.List.map
+ Text
+ Init.ServiceRoute.Type
+ ( λ(label : Text) →
+ { service = { name = "ROM", label = Init.LabelSelector.last label }
+ , route =
+ Init.Route.Type.Parent { label = Some label, diag = None Bool }
+ }
+ )
+
+let wrapStore
+ : Init.Type → Manifest/Type → Child.Type
+ = λ(init : Init.Type) →
+ λ(manifest : Manifest/Type) →
+ Init.toChild
+ init
+ Init.Attributes::{
+ , exitPropagate = True
+ , resources = Init.Resources::{ ram = Genode.units.MiB 4 }
+ , routes =
+ [ Init.ServiceRoute.parent "IO_MEM"
+ , Init.ServiceRoute.parent "IO_PORT"
+ , Init.ServiceRoute.parent "IRQ"
+ , Init.ServiceRoute.parent "VM"
+ , Init.ServiceRoute.child "Timer" "timer"
+ , Init.ServiceRoute.child "Rtc" "rtc"
+ ]
+ # parentROMs
+ [ "ld.lib.so"
+ , "init"
+ , "platform_info"
+ , "core_log"
+ , "kernel_log"
+ , "vfs"
+ , "vfs.lib.so"
+ , "cached_fs_rom"
+ ]
+ # Manifest/toRoutes manifest
+ # [ Init.ServiceRoute.child "ROM" "store_rom" ]
+ }
+
+in λ(subinit : Init.Type) →
+ λ(storeName : Text) →
+ λ(storeSize : Natural) →
+ λ(storeManifest : Manifest/Type) →
+ λ(bootManifest : Manifest/Type) →
+ Genode.Boot::{
+ , config = Init::{
+ , children =
+ let child = Prelude.Map.keyValue Child.Type
+
+ in [ child
+ "timer"
+ ( Child.flat
+ Child.Attributes::{
+ , binary = "timer_drv"
+ , provides = [ "Timer" ]
+ }
+ )
+ , child
+ "rtc"
+ ( Child.flat
+ Child.Attributes::{
+ , binary = "rtc_drv"
+ , provides = [ "Rtc" ]
+ , routes = [ Init.ServiceRoute.parent "IO_PORT" ]
+ }
+ )
+ , child
+ "store_fs"
+ ( Child.flat
+ Child.Attributes::{
+ , binary = "vfs"
+ , config = Init.Config::{
+ , content =
+ [ XML.element
+ { name = "vfs"
+ , attributes = XML.emptyAttributes
+ , content =
+ [ XML.leaf
+ { name = "tar"
+ , attributes = toMap { name = storeName }
+ }
+ ]
+ }
+ ]
+ , policies =
+ [ Init.Config.Policy::{
+ , service = "File_system"
+ , label = Init.LabelSelector.suffix "nix-store"
+ , attributes = toMap { root = "/nix/store" }
+ }
+ , Init.Config.Policy::{
+ , service = "File_system"
+ , label = Init.LabelSelector.prefix "store_rom"
+ , attributes = toMap { root = "/" }
+ }
+ ]
+ }
+ , provides = [ "File_system" ]
+ }
+ )
+ , child
+ "store_rom"
+ ( Child.flat
+ Child.Attributes::{
+ , binary = "cached_fs_rom"
+ , provides = [ "ROM" ]
+ , resources = Init.Resources::{
+ , ram = storeSize + Genode.units.MiB 1
+ }
+ }
+ )
+ , child "init" (wrapStore subinit storeManifest)
+ ]
+ }
+ , rom =
+ Genode.BootModules.toRomPaths
+ ( Prelude.List.concat
+ (Prelude.Map.Entry Text Text)
+ ( Prelude.Map.values
+ Text
+ (Prelude.Map.Type Text Text)
+ bootManifest
+ )
+ )
+ }
diff --git a/nixos-modules/systemd-runner.dhall b/nixos-modules/systemd-runner.dhall
new file mode 100644
index 0000000..42f6576
--- /dev/null
+++ b/nixos-modules/systemd-runner.dhall
@@ -0,0 +1,159 @@
+let Genode = env:DHALL_GENODE
+
+let Prelude = Genode.Prelude
+
+let XML = Prelude.XML
+
+let Init = Genode.Init
+
+let Child = Init.Child
+
+let parentRoutes =
+ Prelude.List.map Text Init.ServiceRoute.Type Init.ServiceRoute.parent
+
+in λ(params : { coreutils : Text, execStart : Text }) →
+ Init::{
+ , verbose = True
+ , routes = parentRoutes [ "Timer", "Rtc", "File_system" ]
+ , children = toMap
+ { vfs =
+ Child.flat
+ Child.Attributes::{
+ , binary = "vfs"
+ , exitPropagate = True
+ , provides = [ "File_system" ]
+ , resources = Genode.Init.Resources::{
+ , caps = 256
+ , ram = Genode.units.MiB 8
+ }
+ , config = Init.Config::{
+ , content =
+ [ XML.element
+ { name = "vfs"
+ , attributes = XML.emptyAttributes
+ , content =
+ let dir =
+ λ(name : Text) →
+ λ(content : List XML.Type) →
+ XML.element
+ { name = "dir"
+ , content
+ , attributes = toMap { name }
+ }
+
+ let leaf =
+ λ(name : Text) →
+ XML.leaf
+ { name, attributes = XML.emptyAttributes }
+
+ in [ dir
+ "dev"
+ [ dir "pipes" [ leaf "pipe" ]
+ , dir
+ "sockets"
+ [ XML.leaf
+ { name = "fs"
+ , attributes = toMap
+ { label = "sockets" }
+ }
+ ]
+ , leaf "log"
+ , leaf "null"
+ , leaf "rtc"
+ , leaf "zero"
+ ]
+ , dir
+ "etc"
+ [ XML.element
+ { name = "inline"
+ , attributes = toMap
+ { name = "ExecStart" }
+ , content =
+ [ XML.text params.execStart ]
+ }
+ ]
+ , dir
+ "usr"
+ [ dir
+ "bin"
+ [ XML.leaf
+ { name = "symlink"
+ , attributes = toMap
+ { name = "env"
+ , target =
+ "${params.coreutils}/bin/env"
+ }
+ }
+ ]
+ ]
+ , dir "tmp" [ leaf "ram" ]
+ , dir
+ "nix"
+ [ dir
+ "store"
+ [ XML.leaf
+ { name = "fs"
+ , attributes = toMap
+ { label = "nix-store" }
+ }
+ ]
+ ]
+ ]
+ }
+ ]
+ , policies =
+ [ Init.Config.Policy::{
+ , service = "File_system"
+ , label = Init.LabelSelector.prefix "shell"
+ , attributes = toMap { root = "/", writeable = "yes" }
+ }
+ ]
+ }
+ }
+ , shell =
+ Child.flat
+ Child.Attributes::{
+ , binary = "bash"
+ , exitPropagate = True
+ , resources = Genode.Init.Resources::{
+ , caps = 256
+ , ram = Genode.units.MiB 8
+ }
+ , config = Genode.Init.Config::{
+ , content =
+ [ XML.leaf
+ { name = "libc"
+ , attributes = toMap
+ { stdin = "/dev/null"
+ , stdout = "/dev/log"
+ , stderr = "/dev/log"
+ , pipe = "/dev/pipes"
+ , rtc = "/dev/rtc"
+ , socket = "/dev/sockets"
+ }
+ }
+ , XML.element
+ { name = "vfs"
+ , attributes = XML.emptyAttributes
+ , content =
+ [ XML.leaf
+ { name = "fs"
+ , attributes = XML.emptyAttributes
+ }
+ ]
+ }
+ ]
+ # Prelude.List.map
+ Text
+ XML.Type
+ ( λ(x : Text) →
+ XML.leaf
+ { name = "arg"
+ , attributes = toMap { value = x }
+ }
+ )
+ [ "bash", "/etc/ExecStart" ]
+ }
+ }
+ }
+ }
diff --git a/nixos-modules/systemd.nix b/nixos-modules/systemd.nix
new file mode 100644
index 0000000..c823a4e
--- /dev/null
+++ b/nixos-modules/systemd.nix
@@ -0,0 +1,33 @@
+{ config, pkgs, lib, ... }:
+with lib; {
+
+ options.systemd.services = lib.mkOption {
+ type = types.attrsOf (types.submodule ({ name, config, ... }: {
+ options.genode.enable = lib.mkOption {
+ type = types.bool;
+ default = false;
+ description = "Translate this systemd unit to a Genode subsystem.";
+ };
+ }));
+ };
+
+ config.services.klogd.enable = false;
+ # The default is determined by checking the Linux version
+ # which cannot be evaluated here.
+
+ config.genode.init.subinits = mapAttrs' (name: service:
+ let name' = "services." + name;
+ in {
+ name = name';
+ value = {
+ inputs = with pkgs; with genodePackages; [ bash libc posix vfs_pipe ];
+ configFile = pkgs.writeText "${name'}.dhall" ''
+ ${./systemd-runner.dhall} {
+ , coreutils = "${pkgs.coreutils}"
+ , execStart = "${toString service.serviceConfig.ExecStart}"
+ }
+ '';
+ };
+ }) (filterAttrs (name: service: service.genode.enable)
+ config.systemd.services);
+}
diff --git a/packages/genodelabs/targets.nix b/packages/genodelabs/targets.nix
index 9d62b5a..10bb479 100644
--- a/packages/genodelabs/targets.nix
+++ b/packages/genodelabs/targets.nix
@@ -1,5 +1,7 @@
-# This file contains overrides necesarry to build some Make and Depot targets.
+# This file contains overrides necessary to build some Make and Depot targets.
# Many targets can be built with the default attributes, and are not listed here.
+# However, any package listed here with empty overrides ({ }) will be added to
+# the package attributes of this flake.
{ buildPackages, ports }:
with ports;
@@ -15,6 +17,8 @@ let
};
in {
+ acpi_drv = { };
+
cached_fs_rom = { };
fb_sdl = with buildPackages; {
@@ -47,10 +51,20 @@ in {
lx_block.HOST_INC_DIR = [ hostLibcInc ];
+ nic_bridge = { };
+
+ nic_loopback = { };
+
noux.portInputs = [ libc ];
+ platform_drv = { };
+
posix.portInputs = [ libc ];
+ report_rom = { };
+
+ rom_logger = { };
+
rtc_drv.meta.platforms = [ "x86_64-genode" ];
rump = {
@@ -58,6 +72,8 @@ in {
buildInputs = with buildPackages; [ zlib ];
};
+ sequence = { };
+
stdcxx.portInputs = [ libc stdcxx ];
# The following are tests are patched to exit at completion
@@ -79,12 +95,17 @@ in {
vesa_drv.portInputs = [ libc x86emu ];
vfs.outputs = [ "out" "lib" ];
+ vfs_audit = {};
vfs_block = { };
vfs_import.patches = [ ./vfs_import.patch ];
vfs_jitterentropy.portInputs = [ jitterentropy libc ];
vfs_lwip.portInputs = [ lwip ];
+ vfs_pipe = { };
vfs_ttf.portInputs = [ libc stb ];
+ virtdev_rom = { };
+ virtio_nic_drv = { };
+
wifi_drv.portInputs = [ dde_linux libc openssl ];
}
diff --git a/packages/genodelabs/test-pci.patch b/packages/genodelabs/test-pci.patch
index 51c9d06..47fcc8d 100644
--- a/packages/genodelabs/test-pci.patch
+++ b/packages/genodelabs/test-pci.patch
@@ -8,3 +8,36 @@ index c6d9e2012b..050de6136c 100644
log("--- Platform test finished ---");
+ env.parent().exit(0);
}
+commit 03a5f469313e9fdc9ee1135ebf0b167e4d3d3266
+Author: Emery Hemingway
+Date: Wed Oct 21 15:16:34 2020 +0200
+
+ test-pci: recognize VirtIO vendor IDs
+
+diff --git a/repos/os/src/test/pci/test.cc b/repos/os/src/test/pci/test.cc
+index c6d9e2012b..9cc2a2ac4b 100644
+--- a/repos/os/src/test/pci/test.cc
++++ b/repos/os/src/test/pci/test.cc
+@@ -19,7 +19,10 @@
+
+ using namespace Genode;
+
+-enum { INTEL_VENDOR_ID = 0x8086 };
++enum {
++ INTEL_VENDOR_ID = 0x8086,
++ VIRTIO_VENDOR_ID = 0x1af4,
++};
+
+
+ /**
+@@ -45,7 +48,9 @@ static void print_device_info(Platform::Device_capability device_cap)
+ Hex(fun, Hex::OMIT_PREFIX), " "
+ "class=", Hex(class_code), " "
+ "vendor=", Hex(vendor_id), " ",
+- (vendor_id == INTEL_VENDOR_ID ? "(Intel)" : "(unknown)"),
++ (vendor_id == INTEL_VENDOR_ID ? "(Intel)" :
++ vendor_id == VIRTIO_VENDOR_ID ? "(VirtIO)" :
++ "(unknown)"),
+ " device=", Hex(device_id));
+
+ for (int resource_id = 0; resource_id < 6; resource_id++) {
diff --git a/tests/default.nix b/tests/default.nix
index 2b0f417..f002555 100644
--- a/tests/default.nix
+++ b/tests/default.nix
@@ -1,324 +1,63 @@
-{ self, apps, localPackages, genodepkgs, lib, nixpkgs, legacyPackages }:
+{ flake, system, localSystem, crossSystem, pkgs }:
let
+ lib = flake.lib.${system};
+ nixpkgs = flake.legacyPackages.${system};
+ legacyPackages = flake.legacyPackages.${system};
- callTest = path:
- import path {
- pkgs = testPkgs;
- inherit nixpkgs localPackages legacyPackages;
- };
+ testingPython = import ./lib/testing-python.nix;
- testFiles =
- map callTest [ ./log.nix ./posix.nix ./vmm_arm.nix ./vmm_x86.nix ./x86.nix ]
- ++ (callTest ./solo5);
-
- testPkgs = genodepkgs;
-
- qemu' = localPackages.qemu;
-
- qemuBinary = qemuPkg:
- {
- aarch64-genode = "${qemuPkg}/bin/qemu-system-aarch64";
- x86_64-genode = "${qemuPkg}/bin/qemu-system-x86_64";
- }.${genodepkgs.stdenv.hostPlatform.system};
+ testSpecs = map (p: import p) [
+ ./hello.nix
+ ./log.nix
+ ./solo5/multi.nix
+ ./vmm_x86.nix
+ ./x86.nix
+ ];
cores = [
{
prefix = "hw-pc-";
+ testingPython = testingPython {
+ inherit flake system localSystem crossSystem pkgs;
+ extraConfigurations = [ ../nixos-modules/base-hw-pc.nix ];
+ };
specs = [ "x86" "hw" ];
platforms = [ "x86_64-genode" ];
- basePackages = [ testPkgs.base-hw-pc ]
- ++ map testPkgs.genodeSources.depot [ "rtc_drv" ];
- makeImage =
- lib.hwImage "0xffffffc000000000" "0x00200000" testPkgs.base-hw-pc;
- startVM = vmName: image: ''
- #! ${localPackages.runtimeShell}
- exec ${qemuBinary qemu'} \
- -name ${vmName} \
- -machine q35 \
- -m 384 \
- -netdev user,id=net0 \
- -device virtio-net-pci,netdev=net0 \
- -kernel "${testPkgs.bender}/bender" \
- -initrd "${image}/image.elf" \
- $QEMU_OPTS \
- "$@"
- '';
- }
- {
- prefix = "hw-virt_qemu-";
- specs = [ "aarch64" "hw" ];
- platforms = [ "aarch64-genode" ];
- basePackages = with testPkgs; [ base-hw-virt_qemu rtc-dummy ];
- makeImage = lib.hwImage "0xffffffc000000000" "0x40000000"
- testPkgs.base-hw-virt_qemu;
- startVM = vmName: image: ''
- #! ${localPackages.runtimeShell}
- exec ${qemuBinary qemu'} \
- -name ${vmName} \
- -M virt,virtualization=true,gic_version=3 \
- -cpu cortex-a53 \
- -smp 4 \
- -m 384 \
- -kernel "${image}/image.elf" \
- $QEMU_OPTS \
- "$@"
- '';
}
+ /* {
+ prefix = "hw-virt_qemu-";
+ testingPython = testingPython {
+ inherit flake system localSystem crossSystem pkgs;
+ extraConfigurations = [ ../nixos-modules/base-hw-virt_qemu.nix ];
+ };
+ specs = [ "aarch64" "hw" ];
+ platforms = [ "aarch64-genode" ];
+ }
+ */
{
prefix = "nova-";
+ testingPython = testingPython {
+ inherit flake system localSystem crossSystem pkgs;
+ extraConfigurations = [ ../nixos-modules/nova.nix ];
+ };
specs = [ "x86" "nova" ];
platforms = [ "x86_64-genode" ];
- basePackages = [ testPkgs.base-nova ]
- ++ map testPkgs.genodeSources.depot [ "rtc_drv" ];
- makeImage = lib.novaImage;
- startVM = vmName: image: ''
- #! ${localPackages.runtimeShell}
- exec ${qemuBinary qemu'} \
- -name ${vmName} \
- -machine q35 \
- -m 384 \
- -kernel "${testPkgs.bender}/bender" \
- -initrd "${testPkgs.NOVA}/hypervisor-x86_64 arg=iommu novpid serial,${image}/image.elf" \
- $QEMU_OPTS \
- "$@"
- '';
}
];
cores' = builtins.filter (core:
- builtins.any (x: x == genodepkgs.stdenv.hostPlatform.system) core.platforms)
+ builtins.any (x: x == pkgs.stdenv.hostPlatform.system) core.platforms)
cores;
- testDriver = with localPackages;
- let testDriverScript = ./test-driver/test-driver.py;
- in stdenv.mkDerivation {
- name = "nixos-test-driver";
-
- nativeBuildInputs = [ makeWrapper ];
- buildInputs = [ (python3.withPackages (p: [ p.ptpython ])) ];
- checkInputs = with python3Packages; [ pylint mypy ];
-
- dontUnpack = true;
-
- preferLocalBuild = true;
-
- doCheck = true;
- checkPhase = ''
- mypy --disallow-untyped-defs \
- --no-implicit-optional \
- --ignore-missing-imports ${testDriverScript}
- pylint --errors-only ${testDriverScript}
- '';
-
- installPhase = ''
- mkdir -p $out/bin
- cp ${testDriverScript} $out/bin/nixos-test-driver
- chmod u+x $out/bin/nixos-test-driver
- # TODO: copy user script part into this file (append)
-
- wrapProgram $out/bin/nixos-test-driver \
- --prefix PATH : "${lib.makeBinPath [ qemu' coreutils ]}" \
- '';
- };
-
- defaultTestScript = ''
- start_all()
- machine.wait_until_serial_output('child "init" exited with exit value 0')
- '';
-
- makeTest = with localPackages;
- { prefix, specs, platforms, basePackages, makeImage, startVM }:
- { name ? "unnamed", testScript ? defaultTestScript,
- # Skip linting (mainly intended for faster dev cycles)
- skipLint ? false, ... }@t:
-
- let
- testDriverName = "genode-test-driver-${name}";
-
- buildVM = vmName:
- { config, inputs, env ? { }, extraPaths ? [ ] }:
- let
- storeTarball = localPackages.runCommand "store" { } ''
- mkdir -p $out
- tar cf "$out/store.tar" --absolute-names ${toString inputs} ${
- toString extraPaths
- }
- '';
- addManifest = drv:
- drv // {
- manifest =
- nixpkgs.runCommand "${drv.name}.dhall" { inherit drv; } ''
- set -eu
- echo -n '[' >> $out
- find $drv/ -type f -printf ',{mapKey= "%f",mapValue="%p"}' >> $out
- ${if builtins.elem "lib" drv.outputs then
- ''
- find ${drv.lib}/ -type f -printf ',{mapKey= "%f",mapValue="%p"}' >> $out''
- else
- ""}
- echo -n ']' >> $out
- '';
- };
-
- storeManifest = lib.mergeManifests (map addManifest inputs);
- manifest = lib.mergeManifests (map addManifest (basePackages
- ++ [ testPkgs.sotest-producer storeTarball ]
- ++ map testPkgs.genodeSources.depot [
- "init"
- "vfs"
- "cached_fs_rom"
- ]));
- config' = "${
- ./test-wrapper.dhall
- } (${config}) $(stat --format '%s' ${storeTarball}/store.tar) ${storeManifest} ${manifest}";
- env' = {
- DHALL_GENODE = "${testPkgs.dhallGenode}/source.dhall";
- DHALL_GENODE_TEST = "${./test.dhall}";
- } // env;
-
- image = makeImage vmName env' config';
- startVM' = startVM vmName image;
- in {
- script = localPackages.writeScriptBin "run-${vmName}-vm" startVM';
-
- config = lib.runDhallCommand (name + ".dhall") env' ''
- ${apps.dhall.program} <<< "${config'}" > $out
- '';
-
- store = storeTarball;
-
- xml = lib.runDhallCommand (name + ".config") env'
- ''${apps.render-init.program} <<< "(${config'}).config" > $out'';
- };
-
- nodes = lib.mapAttrs buildVM
- (t.nodes or (if t ? machine then { machine = t.machine; } else { }));
-
- testScript' =
- # Call the test script with the computed nodes.
- if lib.isFunction testScript then
- testScript { inherit nodes; }
- else
- testScript;
-
- vms = map (node: node.script) (lib.attrValues nodes);
-
- # Generate onvenience wrappers for running the test driver
- # interactively with the specified network, and for starting the
- # VMs from the command line.
- driver =
- let warn = if skipLint then lib.warn "Linting is disabled!" else lib.id;
- in warn (runCommand testDriverName {
- buildInputs = [ makeWrapper ];
- testScript = testScript';
- preferLocalBuild = true;
- testName = name;
- } ''
- mkdir -p $out/bin
-
- echo -n "$testScript" > $out/test-script
- ${lib.optionalString (!skipLint) ''
- ${python3Packages.black}/bin/black --check --quiet --diff $out/test-script
- ''}
-
- ln -s ${testDriver}/bin/nixos-test-driver $out/bin/
- vms=($(for i in ${toString vms}; do echo $i/bin/run-*-vm; done))
- wrapProgram $out/bin/nixos-test-driver \
- --add-flags "''${vms[*]}" \
- --run "export testScript=\"\$(${coreutils}/bin/cat $out/test-script)\""
- ln -s ${testDriver}/bin/nixos-test-driver $out/bin/nixos-run-vms
- wrapProgram $out/bin/nixos-run-vms \
- --add-flags "''${vms[*]}" \
- --set tests 'start_all(); join_all();'
- ''); # "
-
- passMeta = drv:
- drv
- // lib.optionalAttrs (t ? meta) { meta = (drv.meta or { }) // t.meta; };
-
- # Run an automated test suite in the given virtual network.
- # `driver' is the script that runs the network.
- runTests = driver:
- stdenv.mkDerivation {
- name = "test-run-${driver.testName}";
-
- buildCommand = ''
- mkdir -p $out
-
- LOGFILE=/dev/null tests='exec(os.environ["testScript"])' ${driver}/bin/nixos-test-driver
- '';
- };
-
- test = passMeta (runTests driver);
-
- nodeNames = builtins.attrNames nodes;
- invalidNodeNames =
- lib.filter (node: builtins.match "^[A-z_]([A-z0-9_]+)?$" node == null)
- nodeNames;
-
- in if lib.length invalidNodeNames > 0 then
- throw ''
- Cannot create machines out of (${
- lib.concatStringsSep ", " invalidNodeNames
- })!
- All machines are referenced as python variables in the testing framework which will break the
- script when special characters are used.
-
- Please stick to alphanumeric chars and underscores as separation.
- ''
- else
- test // { inherit nodes driver test; };
-
testList = let
- f = core:
- let makeTest' = makeTest core;
- in test:
+ f = core: test:
if (test.constraints or (_: true)) core.specs then {
name = core.prefix + test.name;
- value = makeTest' test;
+ value = core.testingPython.makeTest test;
} else
null;
- in lib.lists.crossLists f [ cores' testFiles ];
+ in lib.lists.crossLists f [ cores' testSpecs ];
in builtins.listToAttrs (builtins.filter (_: _ != null) testList)
-
-/* sotest = let
- hwTests = with hw; [ multi posix x86 ];
- novaTests = with nova; [ multi posix x86 vmm ];
- allTests = hwTests ++ novaTests;
-
- projectCfg.boot_items =
-
- (map (test: {
- inherit (test) name;
- exec = "bender";
- load = [ "${test.name}.image.elf" ];
- }) hwTests)
-
- ++ (map (test: {
- inherit (test) name;
- exec = "bender";
- load = [ "hypervisor serial novga iommu" test.image.name ];
- }) novaTests);
-
- in localPackages.stdenv.mkDerivation {
- name = "sotest";
- buildCommand = ''
- mkdir zip; cd zip
- cp "${testPkgs.bender}/bender" bender
- cp "${testPkgs.NOVA}/hypervisor-x86_64" hypervisor
- ${concatStringsSep "\n"
- (map (test: "cp ${test.image}/image.elf ${test.name}.image.elf")
- allTests)}
- mkdir -p $out/nix-support
- ${localPackages.zip}/bin/zip "$out/binaries.zip" *
- cat << EOF > "$out/project.json"
- ${builtins.toJSON projectCfg}
- EOF
- echo file sotest-binaries $out/binaries.zip >> "$out/nix-support/hydra-build-products"
- echo file sotest-config $out/project.json >> "$out/nix-support/hydra-build-products"
- '';
- };
-*/
diff --git a/tests/hello.dhall b/tests/hello.dhall
new file mode 100644
index 0000000..34a6ad6
--- /dev/null
+++ b/tests/hello.dhall
@@ -0,0 +1,51 @@
+let Genode =
+ env:DHALL_GENODE
+ ? https://git.sr.ht/~ehmry/dhall-genode/blob/master/package.dhall
+
+let Init = Genode.Init
+
+let Child = Init.Child
+
+in Child.flat
+ Child.Attributes::{
+ , binary = "hello"
+ , exitPropagate = True
+ , resources = Genode.Init.Resources::{
+ , caps = 500
+ , ram = Genode.units.MiB 10
+ }
+ , config = Init.Config::{
+ , content =
+ let XML = Genode.Prelude.XML
+
+ in [ XML.leaf
+ { name = "libc"
+ , attributes = toMap
+ { stdin = "/dev/null"
+ , stdout = "/dev/log"
+ , stderr = "/dev/log"
+ }
+ }
+ , XML.element
+ { name = "vfs"
+ , attributes = XML.emptyAttributes
+ , content =
+ let dir =
+ λ(name : Text) →
+ λ(content : List XML.Type) →
+ XML.element
+ { name = "dir"
+ , content
+ , attributes = toMap { name }
+ }
+
+ let leaf =
+ λ(name : Text) →
+ XML.leaf
+ { name, attributes = XML.emptyAttributes }
+
+ in [ dir "dev" [ leaf "log", leaf "null" ] ]
+ }
+ ]
+ }
+ }
diff --git a/tests/hello.nix b/tests/hello.nix
new file mode 100644
index 0000000..5293880
--- /dev/null
+++ b/tests/hello.nix
@@ -0,0 +1,28 @@
+{
+ name = "hello";
+ machine = { pkgs, ... }:
+ let
+ hello = pkgs.stdenv.mkDerivation {
+ name = "hello";
+ dontUnpack = true;
+ buildPhase = ''
+ cat > hello.c << EOF
+ #include <stdio.h>
+ int main(int argc, char **argv) { printf("hello world!\n"); return 0; }
+ EOF
+
+ $CC hello.c -o hello
+ '';
+ installPhase = "install -Dt $out/bin hello";
+ };
+ in {
+ genode.init.children.hello = {
+ configFile = ./hello.dhall;
+ inputs = [ hello ];
+ };
+ };
+ testScript = ''
+ start_all()
+ machine.wait_until_serial_output("child \"init\" exited with exit value 0")
+ '';
+}
diff --git a/tests/lib/build-vms.nix b/tests/lib/build-vms.nix
new file mode 100644
index 0000000..32b6948
--- /dev/null
+++ b/tests/lib/build-vms.nix
@@ -0,0 +1,110 @@
+{ system, localSystem, crossSystem
+# Nixpkgs, for qemu, lib and more
+, pkgs, modulesPath
+# NixOS configuration to add to the VMs
+, extraConfigurations ? [ ] }:
+
+with pkgs.lib;
+with import ./qemu-flags.nix { inherit pkgs; };
+
+rec {
+
+ inherit pkgs;
+
+ qemu = pkgs.buildPackages.buildPackages.qemu_test;
+
+ # Build a virtual network from an attribute set `{ machine1 =
+ # config1; ... machineN = configN; }', where `machineX' is the
+ # hostname and `configX' is a NixOS system configuration. Each
+ # machine is given an arbitrary IP address in the virtual network.
+ buildVirtualNetwork = nodes:
+ let nodesOut = mapAttrs (n: buildVM nodesOut) (assignIPAddresses nodes);
+ in nodesOut;
+
+ buildVM = nodes: configurations:
+
+ import "${modulesPath}/../lib/eval-config.nix" {
+ inherit system;
+ modules = configurations ++ extraConfigurations;
+ baseModules = (import "${modulesPath}/module-list.nix") ++ [
+ ../../nixos-modules/genode-core.nix
+ ../../nixos-modules/genode-init.nix
+ ../../nixos-modules/qemu-vm.nix
+ {
+ key = "no-manual";
+ documentation.nixos.enable = false;
+ }
+ {
+ key = "qemu";
+ system.build.qemu = qemu;
+ }
+ {
+ key = "nodes";
+ _module.args.nodes = nodes;
+ }
+ {
+ system.build.qemu = qemu;
+ nixpkgs = { inherit system crossSystem localSystem pkgs; };
+ }
+ ];
+ };
+
+ # Given an attribute set { machine1 = config1; ... machineN =
+ # configN; }, sequentially assign IP addresses in the 192.168.1.0/24
+ # range to each machine, and set the hostname to the attribute name.
+ assignIPAddresses = nodes:
+
+ let
+
+ machines = attrNames nodes;
+
+ machinesNumbered = zipLists machines (range 1 254);
+
+ nodes_ = forEach machinesNumbered (m:
+ nameValuePair m.fst [
+ ({ config, nodes, ... }:
+ let
+ interfacesNumbered =
+ zipLists config.virtualisation.vlans (range 1 255);
+ interfaces = forEach interfacesNumbered ({ fst, snd }:
+ nameValuePair "eth${toString snd}" {
+ ipv4.addresses = [{
+ address = "192.168.${toString fst}.${toString m.snd}";
+ prefixLength = 24;
+ }];
+ });
+ in {
+ key = "ip-address";
+ config = {
+ networking.hostName = mkDefault m.fst;
+
+ networking.interfaces = listToAttrs interfaces;
+
+ networking.primaryIPAddress = optionalString (interfaces != [ ])
+ (head (head interfaces).value.ipv4.addresses).address;
+
+ # Put the IP addresses of all VMs in this machine's
+ # /etc/hosts file. If a machine has multiple
+ # interfaces, use the IP address corresponding to
+ # the first interface (i.e. the first network in its
+ # virtualisation.vlans option).
+ networking.extraHosts = flip concatMapStrings machines (m':
+ let config = (getAttr m' nodes).config;
+ in optionalString (config.networking.primaryIPAddress != "")
+ ("${config.networking.primaryIPAddress} "
+ + optionalString (config.networking.domain != null)
+ "${config.networking.hostName}.${config.networking.domain} "
+ + ''
+ ${config.networking.hostName}
+ ''));
+
+ virtualisation.qemu.options = forEach interfacesNumbered
+ ({ fst, snd }: qemuNICFlags snd fst m.snd);
+ };
+ })
+ (getAttr m.fst nodes)
+ ]);
+
+ in listToAttrs nodes_;
+
+}
diff --git a/tests/lib/qemu-flags.nix b/tests/lib/qemu-flags.nix
new file mode 100644
index 0000000..a576b1e
--- /dev/null
+++ b/tests/lib/qemu-flags.nix
@@ -0,0 +1,41 @@
+# QEMU flags shared between various Nix expressions.
+{ pkgs }:
+
+let
+ zeroPad = n:
+ pkgs.lib.optionalString (n < 16) "0" + (if n > 255 then
+ throw "Can't have more than 255 nets or nodes!"
+ else
+ pkgs.lib.toHexString n);
+
+in rec {
+ qemuNicMac = net: machine: "52:54:00:12:${zeroPad net}:${zeroPad machine}";
+
+ qemuNICFlags = nic: net: machine: [
+ "-device virtio-net-pci,netdev=vlan${toString nic},mac=${
+ qemuNicMac net machine
+ }"
+ "-netdev vde,id=vlan${toString nic},sock=$QEMU_VDE_SOCKET_${toString net}"
+ ];
+
+ qemuSerialDevice = if pkgs.stdenv.isi686 || pkgs.stdenv.isx86_64 then
+ "ttyS0"
+ else if pkgs.stdenv.isAarch32 || pkgs.stdenv.isAarch64 then
+ "ttyAMA0"
+ else
+ throw
+ "Unknown QEMU serial device for system '${pkgs.stdenv.hostPlatform.system}'";
+
+ qemuBinary = qemuPkg:
+ {
+ x86_64-linux = "${qemuPkg}/bin/qemu-kvm -cpu max";
+ armv7l-linux =
+ "${qemuPkg}/bin/qemu-system-arm -enable-kvm -machine virt -cpu host";
+ aarch64-linux =
+ "${qemuPkg}/bin/qemu-system-aarch64 -enable-kvm -machine virt,gic-version=host -cpu host";
+ x86_64-darwin = "${qemuPkg}/bin/qemu-kvm -cpu max";
+ aarch64-genode =
+ "${qemuPkg}/bin/qemu-system-aarch64 -M virt,virtualization=true,gic_version=3 -cpu cortex-a53";
+ x86_64-genode = "${qemuPkg}/bin/qemu-system-x86_64 -machine q35";
+ }.${pkgs.stdenv.hostPlatform.system} or "${qemuPkg}/bin/qemu-kvm";
+}
diff --git a/tests/lib/test-driver.py b/tests/lib/test-driver.py
new file mode 100644
index 0000000..bcb9564
--- /dev/null
+++ b/tests/lib/test-driver.py
@@ -0,0 +1,967 @@
+#! /somewhere/python3
+# Copyright (c) 2003-2020 Nixpkgs/NixOS contributors
+
+from contextlib import contextmanager, _GeneratorContextManager
+from queue import Queue, Empty
+from typing import Tuple, Any, Callable, Dict, Iterator, Optional, List
+from xml.sax.saxutils import XMLGenerator
+import queue
+import io
+import _thread
+import argparse
+import atexit
+import base64
+import codecs
+import os
+import pathlib
+import ptpython.repl
+import pty
+import re
+import shlex
+import shutil
+import socket
+import subprocess
+import sys
+import tempfile
+import time
+import traceback
+import unicodedata
+
+CHAR_TO_KEY = {
+ "A": "shift-a",
+ "N": "shift-n",
+ "-": "0x0C",
+ "_": "shift-0x0C",
+ "B": "shift-b",
+ "O": "shift-o",
+ "=": "0x0D",
+ "+": "shift-0x0D",
+ "C": "shift-c",
+ "P": "shift-p",
+ "[": "0x1A",
+ "{": "shift-0x1A",
+ "D": "shift-d",
+ "Q": "shift-q",
+ "]": "0x1B",
+ "}": "shift-0x1B",
+ "E": "shift-e",
+ "R": "shift-r",
+ ";": "0x27",
+ ":": "shift-0x27",
+ "F": "shift-f",
+ "S": "shift-s",
+ "'": "0x28",
+ '"': "shift-0x28",
+ "G": "shift-g",
+ "T": "shift-t",
+ "`": "0x29",
+ "~": "shift-0x29",
+ "H": "shift-h",
+ "U": "shift-u",
+ "\\": "0x2B",
+ "|": "shift-0x2B",
+ "I": "shift-i",
+ "V": "shift-v",
+ ",": "0x33",
+ "<": "shift-0x33",
+ "J": "shift-j",
+ "W": "shift-w",
+ ".": "0x34",
+ ">": "shift-0x34",
+ "K": "shift-k",
+ "X": "shift-x",
+ "/": "0x35",
+ "?": "shift-0x35",
+ "L": "shift-l",
+ "Y": "shift-y",
+ " ": "spc",
+ "M": "shift-m",
+ "Z": "shift-z",
+ "\n": "ret",
+ "!": "shift-0x02",
+ "@": "shift-0x03",
+ "#": "shift-0x04",
+ "$": "shift-0x05",
+ "%": "shift-0x06",
+ "^": "shift-0x07",
+ "&": "shift-0x08",
+ "*": "shift-0x09",
+ "(": "shift-0x0A",
+ ")": "shift-0x0B",
+}
+
+# Forward references
+log: "Logger"
+machines: "List[Machine]"
+
+
+def eprint(*args: object, **kwargs: Any) -> None:
+ print(*args, file=sys.stderr, **kwargs)
+
+
+def make_command(args: list) -> str:
+ return " ".join(map(shlex.quote, (map(str, args))))
+
+
+def create_vlan(vlan_nr: str) -> Tuple[str, str, "subprocess.Popen[bytes]", Any]:
+ global log
+ log.log("starting VDE switch for network {}".format(vlan_nr))
+ vde_socket = tempfile.mkdtemp(
+ prefix="nixos-test-vde-", suffix="-vde{}.ctl".format(vlan_nr)
+ )
+ pty_master, pty_slave = pty.openpty()
+ vde_process = subprocess.Popen(
+ ["vde_switch", "-s", vde_socket, "--dirmode", "0700"],
+ bufsize=1,
+ stdin=pty_slave,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ shell=False,
+ )
+ fd = os.fdopen(pty_master, "w")
+ fd.write("version\n")
+ # TODO: perl version checks if this can be read from
+ # and if not, dies. we could hang here forever. Fix it.
+ assert vde_process.stdout is not None
+ vde_process.stdout.readline()
+ if not os.path.exists(os.path.join(vde_socket, "ctl")):
+ raise Exception("cannot start vde_switch")
+
+ return (vlan_nr, vde_socket, vde_process, fd)
+
+
+def retry(fn: Callable) -> None:
+ """Call the given function repeatedly, with 1 second intervals,
+ until it returns True or a timeout is reached.
+ """
+
+ for _ in range(900):
+ if fn(False):
+ return
+ time.sleep(1)
+
+ if not fn(True):
+ raise Exception("action timed out")
+
+
+class Logger:
+ def __init__(self) -> None:
+ self.logfile = os.environ.get("LOGFILE", "/dev/null")
+ self.logfile_handle = codecs.open(self.logfile, "wb")
+ self.xml = XMLGenerator(self.logfile_handle, encoding="utf-8")
+ self.queue: "Queue[Dict[str, str]]" = Queue()
+
+ self.xml.startDocument()
+ self.xml.startElement("logfile", attrs={})
+
+ def close(self) -> None:
+ self.xml.endElement("logfile")
+ self.xml.endDocument()
+ self.logfile_handle.close()
+
+ def sanitise(self, message: str) -> str:
+ return "".join(ch for ch in message if unicodedata.category(ch)[0] != "C")
+
+ def maybe_prefix(self, message: str, attributes: Dict[str, str]) -> str:
+ if "machine" in attributes:
+ return "{}: {}".format(attributes["machine"], message)
+ return message
+
+ def log_line(self, message: str, attributes: Dict[str, str]) -> None:
+ self.xml.startElement("line", attributes)
+ self.xml.characters(message)
+ self.xml.endElement("line")
+
+ def log(self, message: str, attributes: Dict[str, str] = {}) -> None:
+ eprint(self.maybe_prefix(message, attributes))
+ self.drain_log_queue()
+ self.log_line(message, attributes)
+
+ def enqueue(self, message: Dict[str, str]) -> None:
+ self.queue.put(message)
+
+ def drain_log_queue(self) -> None:
+ try:
+ while True:
+ item = self.queue.get_nowait()
+ attributes = {"machine": item["machine"], "type": "serial"}
+ self.log_line(self.sanitise(item["msg"]), attributes)
+ except Empty:
+ pass
+
+ @contextmanager
+ def nested(self, message: str, attributes: Dict[str, str] = {}) -> Iterator[None]:
+ eprint(self.maybe_prefix(message, attributes))
+
+ self.xml.startElement("nest", attrs={})
+ self.xml.startElement("head", attributes)
+ self.xml.characters(message)
+ self.xml.endElement("head")
+
+ tic = time.time()
+ self.drain_log_queue()
+ yield
+ self.drain_log_queue()
+ toc = time.time()
+ self.log("({:.2f} seconds)".format(toc - tic))
+
+ self.xml.endElement("nest")
+
+
+class Machine:
+ def __init__(self, args: Dict[str, Any]) -> None:
+ if "name" in args:
+ self.name = args["name"]
+ else:
+ self.name = "machine"
+ cmd = args.get("startCommand", None)
+ if cmd:
+ match = re.search("bin/run-(.+)-vm$", cmd)
+ if match:
+ self.name = match.group(1)
+
+ self.script = args.get("startCommand", self.create_startcommand(args))
+
+ tmp_dir = os.environ.get("TMPDIR", tempfile.gettempdir())
+
+ def create_dir(name: str) -> str:
+ path = os.path.join(tmp_dir, name)
+ os.makedirs(path, mode=0o700, exist_ok=True)
+ return path
+
+ self.state_dir = create_dir("vm-state-{}".format(self.name))
+ self.shared_dir = create_dir("shared-xchg")
+
+ self.booted = False
+ self.connected = False
+ self.pid: Optional[int] = None
+ self.socket = None
+ self.monitor: Optional[socket.socket] = None
+ self.logger: Logger = args["log"]
+ self.serialQueue: "Queue[str]" = Queue()
+
+ self.allow_reboot = args.get("allowReboot", False)
+
+ @staticmethod
+ def create_startcommand(args: Dict[str, str]) -> str:
+ net_backend = "-netdev user,id=net0"
+ net_frontend = "-device virtio-net-pci,netdev=net0"
+
+ if "netBackendArgs" in args:
+ net_backend += "," + args["netBackendArgs"]
+
+ if "netFrontendArgs" in args:
+ net_frontend += "," + args["netFrontendArgs"]
+
+ start_command = (
+ "qemu-kvm -m 384 " + net_backend + " " + net_frontend + " $QEMU_OPTS "
+ )
+
+ if "hda" in args:
+ hda_path = os.path.abspath(args["hda"])
+ if args.get("hdaInterface", "") == "scsi":
+ start_command += (
+ "-drive id=hda,file="
+ + hda_path
+ + ",werror=report,if=none "
+ + "-device scsi-hd,drive=hda "
+ )
+ else:
+ start_command += (
+ "-drive file="
+ + hda_path
+ + ",if="
+ + args["hdaInterface"]
+ + ",werror=report "
+ )
+
+ if "cdrom" in args:
+ start_command += "-cdrom " + args["cdrom"] + " "
+
+ if "usb" in args:
+ start_command += (
+ "-device piix3-usb-uhci -drive "
+ + "id=usbdisk,file="
+ + args["usb"]
+ + ",if=none,readonly "
+ + "-device usb-storage,drive=usbdisk "
+ )
+ if "bios" in args:
+ start_command += "-bios " + args["bios"] + " "
+
+ start_command += args.get("qemuFlags", "")
+
+ return start_command
+
+ def is_up(self) -> bool:
+ return self.booted and self.connected
+
+ def log(self, msg: str) -> None:
+ self.logger.log(msg, {"machine": self.name})
+
+ def nested(self, msg: str, attrs: Dict[str, str] = {}) -> _GeneratorContextManager:
+ my_attrs = {"machine": self.name}
+ my_attrs.update(attrs)
+ return self.logger.nested(msg, my_attrs)
+
+ def wait_for_monitor_prompt(self) -> str:
+ assert self.monitor is not None
+ answer = ""
+ while True:
+ undecoded_answer = self.monitor.recv(1024)
+ if not undecoded_answer:
+ break
+ answer += undecoded_answer.decode()
+ if answer.endswith("(qemu) "):
+ break
+ return answer
+
+ def send_monitor_command(self, command: str) -> str:
+ message = ("{}\n".format(command)).encode()
+ self.log("sending monitor command: {}".format(command))
+ assert self.monitor is not None
+ self.monitor.send(message)
+ return self.wait_for_monitor_prompt()
+
+ def wait_for_unit(self, unit: str, user: Optional[str] = None) -> None:
+ """Wait for a systemd unit to get into "active" state.
+ Throws exceptions on "failed" and "inactive" states as well as
+ after timing out.
+ """
+
+ def check_active(_: Any) -> bool:
+ info = self.get_unit_info(unit, user)
+ state = info["ActiveState"]
+ if state == "failed":
+ raise Exception('unit "{}" reached state "{}"'.format(unit, state))
+
+ if state == "inactive":
+ status, jobs = self.systemctl("list-jobs --full 2>&1", user)
+ if "No jobs" in jobs:
+ info = self.get_unit_info(unit, user)
+ if info["ActiveState"] == state:
+ raise Exception(
+ (
+ 'unit "{}" is inactive and there ' "are no pending jobs"
+ ).format(unit)
+ )
+
+ return state == "active"
+
+ retry(check_active)
+
+ def get_unit_info(self, unit: str, user: Optional[str] = None) -> Dict[str, str]:
+ status, lines = self.systemctl('--no-pager show "{}"'.format(unit), user)
+ if status != 0:
+ raise Exception(
+ 'retrieving systemctl info for unit "{}" {} failed with exit code {}'.format(
+ unit, "" if user is None else 'under user "{}"'.format(user), status
+ )
+ )
+
+ line_pattern = re.compile(r"^([^=]+)=(.*)$")
+
+ def tuple_from_line(line: str) -> Tuple[str, str]:
+ match = line_pattern.match(line)
+ assert match is not None
+ return match[1], match[2]
+
+ return dict(
+ tuple_from_line(line)
+ for line in lines.split("\n")
+ if line_pattern.match(line)
+ )
+
+ def systemctl(self, q: str, user: Optional[str] = None) -> Tuple[int, str]:
+ if user is not None:
+ q = q.replace("'", "\\'")
+ return self.execute(
+ (
+ "su -l {} --shell /bin/sh -c "
+ "$'XDG_RUNTIME_DIR=/run/user/`id -u` "
+ "systemctl --user {}'"
+ ).format(user, q)
+ )
+ return self.execute("systemctl {}".format(q))
+
+ def require_unit_state(self, unit: str, require_state: str = "active") -> None:
+ with self.nested(
+ "checking if unit ‘{}’ has reached state '{}'".format(unit, require_state)
+ ):
+ info = self.get_unit_info(unit)
+ state = info["ActiveState"]
+ if state != require_state:
+ raise Exception(
+ "Expected unit ‘{}’ to be in state ".format(unit)
+ + "'{}' but it is in state ‘{}’".format(require_state, state)
+ )
+
+ def execute(self, command: str) -> Tuple[int, str]:
+ self.connect()
+
+ out_command = "( {} ); echo '|!=EOF' $?\n".format(command)
+ self.shell.send(out_command.encode())
+
+ output = ""
+ status_code_pattern = re.compile(r"(.*)\|\!=EOF\s+(\d+)")
+
+ while True:
+ chunk = self.shell.recv(4096).decode(errors="ignore")
+ match = status_code_pattern.match(chunk)
+ if match:
+ output += match[1]
+ status_code = int(match[2])
+ return (status_code, output)
+ output += chunk
+
+ def succeed(self, *commands: str) -> str:
+ """Execute each command and check that it succeeds."""
+ output = ""
+ for command in commands:
+ with self.nested("must succeed: {}".format(command)):
+ (status, out) = self.execute(command)
+ if status != 0:
+ self.log("output: {}".format(out))
+ raise Exception(
+ "command `{}` failed (exit code {})".format(command, status)
+ )
+ output += out
+ return output
+
+ def fail(self, *commands: str) -> None:
+ """Execute each command and check that it fails."""
+ for command in commands:
+ with self.nested("must fail: {}".format(command)):
+ status, output = self.execute(command)
+ if status == 0:
+ raise Exception(
+ "command `{}` unexpectedly succeeded".format(command)
+ )
+
+ def wait_until_succeeds(self, command: str) -> str:
+ """Wait until a command returns success and return its output.
+ Throws an exception on timeout.
+ """
+ output = ""
+
+ def check_success(_: Any) -> bool:
+ nonlocal output
+ status, output = self.execute(command)
+ return status == 0
+
+ with self.nested("waiting for success: {}".format(command)):
+ retry(check_success)
+ return output
+
+ def wait_until_fails(self, command: str) -> str:
+ """Wait until a command returns failure.
+ Throws an exception on timeout.
+ """
+ output = ""
+
+ def check_failure(_: Any) -> bool:
+ nonlocal output
+ status, output = self.execute(command)
+ return status != 0
+
+ with self.nested("waiting for failure: {}".format(command)):
+ retry(check_failure)
+ return output
+
+ def wait_for_shutdown(self) -> None:
+ if not self.booted:
+ return
+
+ with self.nested("waiting for the VM to power off"):
+ sys.stdout.flush()
+ self.process.wait()
+
+ self.pid = None
+ self.booted = False
+ self.connected = False
+
+ def get_tty_text(self, tty: str) -> str:
+ status, output = self.execute(
+ "fold -w$(stty -F /dev/tty{0} size | "
+ "awk '{{print $2}}') /dev/vcs{0}".format(tty)
+ )
+ return output
+
+ def wait_until_tty_matches(self, tty: str, regexp: str) -> None:
+ """Wait until the visible output on the chosen TTY matches regular
+ expression. Throws an exception on timeout.
+ """
+ matcher = re.compile(regexp)
+
+ def tty_matches(last: bool) -> bool:
+ text = self.get_tty_text(tty)
+ if last:
+ self.log(
+ f"Last chance to match /{regexp}/ on TTY{tty}, "
+ f"which currently contains: {text}"
+ )
+ return len(matcher.findall(text)) > 0
+
+ with self.nested("waiting for {} to appear on tty {}".format(regexp, tty)):
+ retry(tty_matches)
+
+ def wait_until_serial_output(self, regexp: str) -> None:
+ """Wait until the serial output matches regular expression.
+ Throws an exception on timeout.
+ """
+ matcher = re.compile(regexp)
+
+ def serial_matches(last: bool) -> bool:
+ while not self.serialQueue.empty():
+ text = self.serialQueue.get()
+ if last:
+ self.log(
+ f"Last chance to match /{regexp}/ on serial, "
+ f"which currently contains: {text}"
+ )
+ if len(matcher.findall(text)) > 0:
+ return True
+ return False
+
+ with self.nested("waiting for {} to appear on serial output".format(regexp)):
+ retry(serial_matches)
+
+ def send_chars(self, chars: List[str]) -> None:
+ with self.nested("sending keys ‘{}‘".format(chars)):
+ for char in chars:
+ self.send_key(char)
+
+ def wait_for_file(self, filename: str) -> None:
+ """Waits until the file exists in machine's file system."""
+
+ def check_file(_: Any) -> bool:
+ status, _ = self.execute("test -e {}".format(filename))
+ return status == 0
+
+ with self.nested("waiting for file ‘{}‘".format(filename)):
+ retry(check_file)
+
+ def wait_for_open_port(self, port: int) -> None:
+ def port_is_open(_: Any) -> bool:
+ status, _ = self.execute("nc -z localhost {}".format(port))
+ return status == 0
+
+ with self.nested("waiting for TCP port {}".format(port)):
+ retry(port_is_open)
+
+ def wait_for_closed_port(self, port: int) -> None:
+ def port_is_closed(_: Any) -> bool:
+ status, _ = self.execute("nc -z localhost {}".format(port))
+ return status != 0
+
+ retry(port_is_closed)
+
+ def start_job(self, jobname: str, user: Optional[str] = None) -> Tuple[int, str]:
+ return self.systemctl("start {}".format(jobname), user)
+
+ def stop_job(self, jobname: str, user: Optional[str] = None) -> Tuple[int, str]:
+ return self.systemctl("stop {}".format(jobname), user)
+
+ def wait_for_job(self, jobname: str) -> None:
+ self.wait_for_unit(jobname)
+
+ def connect(self) -> None:
+ if self.connected:
+ return
+
+ with self.nested("waiting for the VM to finish booting"):
+ self.start()
+
+ tic = time.time()
+ self.shell.recv(1024)
+ # TODO: Timeout
+ toc = time.time()
+
+ self.log("connected to guest root shell")
+ self.log("(connecting took {:.2f} seconds)".format(toc - tic))
+ self.connected = True
+
+ def screenshot(self, filename: str) -> None:
+ out_dir = os.environ.get("out", os.getcwd())
+ word_pattern = re.compile(r"^\w+$")
+ if word_pattern.match(filename):
+ filename = os.path.join(out_dir, "{}.png".format(filename))
+ tmp = "{}.ppm".format(filename)
+
+ with self.nested(
+ "making screenshot {}".format(filename),
+ {"image": os.path.basename(filename)},
+ ):
+ self.send_monitor_command("screendump {}".format(tmp))
+ ret = subprocess.run("pnmtopng {} > {}".format(tmp, filename), shell=True)
+ os.unlink(tmp)
+ if ret.returncode != 0:
+ raise Exception("Cannot convert screenshot")
+
+ def copy_from_host_via_shell(self, source: str, target: str) -> None:
+ """Copy a file from the host into the guest by piping it over the
+ shell into the destination file. Works without host-guest shared folder.
+ Prefer copy_from_host whenever possible.
+ """
+ with open(source, "rb") as fh:
+ content_b64 = base64.b64encode(fh.read()).decode()
+ self.succeed(
+ f"mkdir -p $(dirname {target})",
+ f"echo -n {content_b64} | base64 -d > {target}",
+ )
+
+ def copy_from_host(self, source: str, target: str) -> None:
+ """Copy a file from the host into the guest via the `shared_dir` shared
+ among all the VMs (using a temporary directory).
+ """
+ host_src = pathlib.Path(source)
+ vm_target = pathlib.Path(target)
+ with tempfile.TemporaryDirectory(dir=self.shared_dir) as shared_td:
+ shared_temp = pathlib.Path(shared_td)
+ host_intermediate = shared_temp / host_src.name
+ vm_shared_temp = pathlib.Path("/tmp/shared") / shared_temp.name
+ vm_intermediate = vm_shared_temp / host_src.name
+
+ self.succeed(make_command(["mkdir", "-p", vm_shared_temp]))
+ if host_src.is_dir():
+ shutil.copytree(host_src, host_intermediate)
+ else:
+ shutil.copy(host_src, host_intermediate)
+ self.succeed("sync")
+ self.succeed(make_command(["mkdir", "-p", vm_target.parent]))
+ self.succeed(make_command(["cp", "-r", vm_intermediate, vm_target]))
+ # Make sure the cleanup is synced into VM
+ self.succeed("sync")
+
+ def copy_from_vm(self, source: str, target_dir: str = "") -> None:
+ """Copy a file from the VM (specified by an in-VM source path) to a path
+ relative to `$out`. The file is copied via the `shared_dir` shared among
+ all the VMs (using a temporary directory).
+ """
+ # Compute the source, target, and intermediate shared file names
+ out_dir = pathlib.Path(os.environ.get("out", os.getcwd()))
+ vm_src = pathlib.Path(source)
+ with tempfile.TemporaryDirectory(dir=self.shared_dir) as shared_td:
+ shared_temp = pathlib.Path(shared_td)
+ vm_shared_temp = pathlib.Path("/tmp/shared") / shared_temp.name
+ vm_intermediate = vm_shared_temp / vm_src.name
+ intermediate = shared_temp / vm_src.name
+ # Copy the file to the shared directory inside VM
+ self.succeed(make_command(["mkdir", "-p", vm_shared_temp]))
+ self.succeed(make_command(["cp", "-r", vm_src, vm_intermediate]))
+ self.succeed("sync")
+ abs_target = out_dir / target_dir / vm_src.name
+ abs_target.parent.mkdir(exist_ok=True, parents=True)
+ # Copy the file from the shared directory outside VM
+ if intermediate.is_dir():
+ shutil.copytree(intermediate, abs_target)
+ else:
+ shutil.copy(intermediate, abs_target)
+ # Make sure the cleanup is synced into VM
+ self.succeed("sync")
+
+ def dump_tty_contents(self, tty: str) -> None:
+ """Debugging: Dump the contents of the TTY
+ """
+ self.execute("fold -w 80 /dev/vcs{} | systemd-cat".format(tty))
+
+ def get_screen_text(self) -> str:
+ if shutil.which("tesseract") is None:
+ raise Exception("get_screen_text used but enableOCR is false")
+
+ magick_args = (
+ "-filter Catrom -density 72 -resample 300 "
+ + "-contrast -normalize -despeckle -type grayscale "
+ + "-sharpen 1 -posterize 3 -negate -gamma 100 "
+ + "-blur 1x65535"
+ )
+
+ tess_args = "-c debug_file=/dev/null --psm 11 --oem 2"
+
+ with self.nested("performing optical character recognition"):
+ with tempfile.NamedTemporaryFile() as tmpin:
+ self.send_monitor_command("screendump {}".format(tmpin.name))
+
+ cmd = "convert {} {} tiff:- | tesseract - - {}".format(
+ magick_args, tmpin.name, tess_args
+ )
+ ret = subprocess.run(cmd, shell=True, capture_output=True)
+ if ret.returncode != 0:
+ raise Exception(
+ "OCR failed with exit code {}".format(ret.returncode)
+ )
+
+ return ret.stdout.decode("utf-8")
+
+ def wait_for_text(self, regex: str) -> None:
+ def screen_matches(last: bool) -> bool:
+ text = self.get_screen_text()
+ matches = re.search(regex, text) is not None
+
+ if last and not matches:
+ self.log("Last OCR attempt failed. Text was: {}".format(text))
+
+ return matches
+
+ with self.nested("waiting for {} to appear on screen".format(regex)):
+ retry(screen_matches)
+
+ def send_key(self, key: str) -> None:
+ key = CHAR_TO_KEY.get(key, key)
+ self.send_monitor_command("sendkey {}".format(key))
+
+ def start(self) -> None:
+ if self.booted:
+ return
+
+ self.log("starting vm")
+
+ def create_socket(path: str) -> socket.socket:
+ if os.path.exists(path):
+ os.unlink(path)
+ s = socket.socket(family=socket.AF_UNIX, type=socket.SOCK_STREAM)
+ s.bind(path)
+ s.listen(1)
+ return s
+
+ monitor_path = os.path.join(self.state_dir, "monitor")
+ self.monitor_socket = create_socket(monitor_path)
+
+ shell_path = os.path.join(self.state_dir, "shell")
+ self.shell_socket = create_socket(shell_path)
+
+ qemu_options = (
+ " ".join(
+ [
+ "" if self.allow_reboot else "-no-reboot",
+ "-monitor unix:{}".format(monitor_path),
+ "-chardev socket,id=shell,path={}".format(shell_path),
+ "-device virtio-serial",
+ "-device virtconsole,chardev=shell",
+ "-device virtio-rng-pci",
+ "-serial stdio" if "DISPLAY" in os.environ else "-nographic",
+ ]
+ )
+ + " "
+ + os.environ.get("QEMU_OPTS", "")
+ )
+
+ environment = dict(os.environ)
+ environment.update(
+ {
+ "TMPDIR": self.state_dir,
+ "SHARED_DIR": self.shared_dir,
+ "USE_TMPDIR": "1",
+ "QEMU_OPTS": qemu_options,
+ }
+ )
+
+ self.process = subprocess.Popen(
+ self.script,
+ bufsize=1,
+ stdin=subprocess.DEVNULL,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ shell=True,
+ cwd=self.state_dir,
+ env=environment,
+ )
+ self.monitor, _ = self.monitor_socket.accept()
+ self.shell, _ = self.shell_socket.accept()
+
+ def process_serial_output() -> None:
+ assert self.process.stdout is not None
+ for _line in self.process.stdout:
+ # Ignore undecodable bytes that may occur in boot menus
+ line = _line.decode(errors="ignore").replace("\r", "").rstrip()
+ eprint("{} # {}".format(self.name, line))
+ self.logger.enqueue({"msg": line, "machine": self.name})
+ self.serialQueue.put(line)
+
+ _thread.start_new_thread(process_serial_output, ())
+
+ self.wait_for_monitor_prompt()
+
+ self.pid = self.process.pid
+ self.booted = True
+
+ self.log("QEMU running (pid {})".format(self.pid))
+
+ def shutdown(self) -> None:
+ if not self.booted:
+ return
+
+ self.shell.send("poweroff\n".encode())
+ self.wait_for_shutdown()
+
+ def crash(self) -> None:
+ if not self.booted:
+ return
+
+ self.log("forced crash")
+ self.send_monitor_command("quit")
+ self.wait_for_shutdown()
+
+ def wait_for_x(self) -> None:
+ """Wait until it is possible to connect to the X server. Note that
+ testing the existence of /tmp/.X11-unix/X0 is insufficient.
+ """
+
+ def check_x(_: Any) -> bool:
+ cmd = (
+ "journalctl -b SYSLOG_IDENTIFIER=systemd | "
+ + 'grep "Reached target Current graphical"'
+ )
+ status, _ = self.execute(cmd)
+ if status != 0:
+ return False
+ status, _ = self.execute("[ -e /tmp/.X11-unix/X0 ]")
+ return status == 0
+
+ with self.nested("waiting for the X11 server"):
+ retry(check_x)
+
+ def get_window_names(self) -> List[str]:
+ return self.succeed(
+ r"xwininfo -root -tree | sed 's/.*0x[0-9a-f]* \"\([^\"]*\)\".*/\1/; t; d'"
+ ).splitlines()
+
+ def wait_for_window(self, regexp: str) -> None:
+ pattern = re.compile(regexp)
+
+ def window_is_visible(last_try: bool) -> bool:
+ names = self.get_window_names()
+ if last_try:
+ self.log(
+ "Last chance to match {} on the window list,".format(regexp)
+ + " which currently contains: "
+ + ", ".join(names)
+ )
+ return any(pattern.search(name) for name in names)
+
+ with self.nested("Waiting for a window to appear"):
+ retry(window_is_visible)
+
+ def sleep(self, secs: int) -> None:
+ time.sleep(secs)
+
+ def forward_port(self, host_port: int = 8080, guest_port: int = 80) -> None:
+ """Forward a TCP port on the host to a TCP port on the guest.
+ Useful during interactive testing.
+ """
+ self.send_monitor_command(
+ "hostfwd_add tcp::{}-:{}".format(host_port, guest_port)
+ )
+
+ def block(self) -> None:
+ """Make the machine unreachable by shutting down eth1 (the multicast
+ interface used to talk to the other VMs). We keep eth0 up so that
+ the test driver can continue to talk to the machine.
+ """
+ self.send_monitor_command("set_link virtio-net-pci.1 off")
+
+ def unblock(self) -> None:
+ """Make the machine reachable.
+ """
+ self.send_monitor_command("set_link virtio-net-pci.1 on")
+
+
+def create_machine(args: Dict[str, Any]) -> Machine:
+ global log
+ args["log"] = log
+ args["redirectSerial"] = os.environ.get("USE_SERIAL", "0") == "1"
+ return Machine(args)
+
+
+def start_all() -> None:
+ global machines
+ with log.nested("starting all VMs"):
+ for machine in machines:
+ machine.start()
+
+
+def join_all() -> None:
+ global machines
+ with log.nested("waiting for all VMs to finish"):
+ for machine in machines:
+ machine.wait_for_shutdown()
+
+
+def test_script() -> None:
+ exec(os.environ["testScript"])
+
+
+def run_tests() -> None:
+ global machines
+ tests = os.environ.get("tests", None)
+ if tests is not None:
+ with log.nested("running the VM test script"):
+ try:
+ exec(tests, globals())
+ except Exception as e:
+ eprint("error: {}".format(str(e)))
+ sys.exit(1)
+ else:
+ ptpython.repl.embed(locals(), globals())
+
+ # TODO: Collect coverage data
+
+ for machine in machines:
+ if machine.is_up():
+ machine.execute("sync")
+
+
+@contextmanager
+def subtest(name: str) -> Iterator[None]:
+ with log.nested(name):
+ try:
+ yield
+ return True
+ except Exception as e:
+ log.log(f'Test "{name}" failed with error: "{e}"')
+ raise e
+
+ return False
+
+
+if __name__ == "__main__":
+ arg_parser = argparse.ArgumentParser()
+ arg_parser.add_argument(
+ "-K",
+ "--keep-vm-state",
+ help="re-use a VM state coming from a previous run",
+ action="store_true",
+ )
+ (cli_args, vm_scripts) = arg_parser.parse_known_args()
+
+ log = Logger()
+
+ vlan_nrs = list(dict.fromkeys(os.environ.get("VLANS", "").split()))
+ vde_sockets = [create_vlan(v) for v in vlan_nrs]
+ for nr, vde_socket, _, _ in vde_sockets:
+ os.environ["QEMU_VDE_SOCKET_{}".format(nr)] = vde_socket
+
+ machines = [
+ create_machine({"startCommand": s, "keepVmState": cli_args.keep_vm_state})
+ for s in vm_scripts
+ ]
+ machine_eval = [
+ "{0} = machines[{1}]".format(m.name, idx) for idx, m in enumerate(machines)
+ ]
+ exec("\n".join(machine_eval))
+
+ @atexit.register
+ def clean_up() -> None:
+ with log.nested("cleaning up"):
+ for machine in machines:
+ if machine.pid is None:
+ continue
+ log.log("killing {} (pid {})".format(machine.name, machine.pid))
+ machine.process.kill()
+
+ log.close()
+
+ tic = time.time()
+ run_tests()
+ toc = time.time()
+ print("test script finished in {:.2f}s".format(toc - tic))
diff --git a/tests/lib/testing-python.nix b/tests/lib/testing-python.nix
new file mode 100644
index 0000000..45426ea
--- /dev/null
+++ b/tests/lib/testing-python.nix
@@ -0,0 +1,235 @@
+{ flake, system, localSystem, crossSystem, pkgs
+# Modules to add to each VM
+, extraConfigurations ? [ ] }:
+
+with import ./build-vms.nix {
+ inherit system localSystem crossSystem pkgs extraConfigurations;
+ modulesPath = "${flake.inputs.nixpkgs}/nixos/modules";
+};
+with pkgs.buildPackages.buildPackages;
+
+rec {
+
+ inherit pkgs;
+
+ testDriver = let testDriverScript = ./test-driver.py;
+ in stdenv.mkDerivation {
+ name = "nixos-test-driver";
+
+ nativeBuildInputs = [ makeWrapper ];
+ buildInputs = [ (python3.withPackages (p: [ p.ptpython ])) ];
+ checkInputs = with python3Packages; [ pylint mypy ];
+
+ dontUnpack = true;
+
+ preferLocalBuild = true;
+
+ doCheck = true;
+ checkPhase = ''
+ mypy --disallow-untyped-defs \
+ --no-implicit-optional \
+ --ignore-missing-imports ${testDriverScript}
+ pylint --errors-only ${testDriverScript}
+ '';
+
+ installPhase = ''
+ mkdir -p $out/bin
+ cp ${testDriverScript} $out/bin/nixos-test-driver
+ chmod u+x $out/bin/nixos-test-driver
+ # TODO: copy user script part into this file (append)
+
+ wrapProgram $out/bin/nixos-test-driver \
+ --prefix PATH : "${
+ lib.makeBinPath [ qemu_test vde2 netpbm coreutils ]
+ }" \
+ '';
+ };
+
+ # Run an automated test suite in the given virtual network.
+ # `driver' is the script that runs the network.
+ runTests = driver:
+ stdenv.mkDerivation {
+ name = "vm-test-run-${driver.testName}";
+
+ requiredSystemFeatures = [ "nixos-test" ];
+
+ buildCommand = ''
+ mkdir -p $out
+
+ LOGFILE=/dev/null tests='exec(os.environ["testScript"])' ${driver}/bin/nixos-test-driver
+ '';
+ };
+
+ defaultTestScript = ''
+ start_all()
+ machine.wait_until_serial_output('child "init" exited with exit value 0')
+ '';
+
+ makeTest = { testScript ? defaultTestScript, enableOCR ? false, name ?
+ "unnamed"
+ # Skip linting (mainly intended for faster dev cycles)
+ , skipLint ? false, ... }@t:
+
+ let
+ testDriverName = "genode-test-driver-${name}";
+
+ nodes = buildVirtualNetwork
+ (t.nodes or (if t ? machine then { machine = t.machine; } else { }));
+
+ testScript' =
+ # Call the test script with the computed nodes.
+ if lib.isFunction testScript then
+ testScript { inherit nodes; }
+ else
+ testScript;
+
+ vlans = map (m: m.config.virtualisation.vlans) (lib.attrValues nodes);
+
+ vms = map (m: m.config.system.build.vm) (lib.attrValues nodes);
+
+ ocrProg = tesseract4.override { enableLanguages = [ "eng" ]; };
+
+ imagemagick_tiff = imagemagick_light.override { inherit libtiff; };
+
+ # Generate convenience wrappers for running the test driver
+ # interactively with the specified network, and for starting the
+ # VMs from the command line.
+ driver =
+ let warn = if skipLint then lib.warn "Linting is disabled!" else lib.id;
+ in warn (runCommand testDriverName {
+ buildInputs = [ makeWrapper ];
+ testScript = testScript';
+ preferLocalBuild = true;
+ testName = name;
+ } ''
+ mkdir -p $out/bin
+
+ echo -n "$testScript" > $out/test-script
+
+ ln -s ${testDriver}/bin/nixos-test-driver $out/bin/
+ vms=($(for i in ${toString vms}; do echo $i/bin/run-*-vm; done))
+ wrapProgram $out/bin/nixos-test-driver \
+ --add-flags "''${vms[*]}" \
+ ${
+ lib.optionalString enableOCR
+ "--prefix PATH : '${ocrProg}/bin:${imagemagick_tiff}/bin'"
+ } \
+ --run "export testScript=\"\$(${coreutils}/bin/cat $out/test-script)\"" \
+ --set VLANS '${toString vlans}'
+ ln -s ${testDriver}/bin/nixos-test-driver $out/bin/nixos-run-vms
+ wrapProgram $out/bin/nixos-run-vms \
+ --add-flags "''${vms[*]}" \
+ ${lib.optionalString enableOCR "--prefix PATH : '${ocrProg}/bin'"} \
+ --set tests 'start_all(); join_all();' \
+ --set VLANS '${toString vlans}' \
+ ${
+ lib.optionalString (builtins.length vms == 1) "--set USE_SERIAL 1"
+ }
+ ''); # "
+
+ passMeta = drv:
+ drv
+ // lib.optionalAttrs (t ? meta) { meta = (drv.meta or { }) // t.meta; };
+
+ test = passMeta (runTests driver);
+
+ nodeNames = builtins.attrNames nodes;
+ invalidNodeNames =
+ lib.filter (node: builtins.match "^[A-z_]([A-z0-9_]+)?$" node == null)
+ nodeNames;
+
+ in if lib.length invalidNodeNames > 0 then
+ throw ''
+ Cannot create machines out of (${
+ lib.concatStringsSep ", " invalidNodeNames
+ })!
+ All machines are referenced as python variables in the testing framework which will break the
+ script when special characters are used.
+
+ Please stick to alphanumeric chars and underscores as separation.
+ ''
+ else
+ test // { inherit nodes driver test; };
+
+ runInMachine = { drv, machine, preBuild ? "", postBuild ? "", ... # ???
+ }:
+ let
+ vm = buildVM { } [
+ machine
+ {
+ key = "run-in-machine";
+ networking.hostName = "client";
+ nix.readOnlyStore = false;
+ virtualisation.writableStore = false;
+ }
+ ];
+
+ buildrunner = writeText "vm-build" ''
+ source $1
+
+ ${coreutils}/bin/mkdir -p $TMPDIR
+ cd $TMPDIR
+
+ exec $origBuilder $origArgs
+ '';
+
+ testScript = ''
+ start_all()
+ client.wait_for_unit("multi-user.target")
+ ${preBuild}
+ client.succeed("env -i ${bash}/bin/bash ${buildrunner} /tmp/xchg/saved-env >&2")
+ ${postBuild}
+ client.succeed("sync") # flush all data before pulling the plug
+ '';
+
+ vmRunCommand = writeText "vm-run" ''
+ xchg=vm-state-client/xchg
+ ${coreutils}/bin/mkdir $out
+ ${coreutils}/bin/mkdir -p $xchg
+
+ for i in $passAsFile; do
+ i2=''${i}Path
+ _basename=$(${coreutils}/bin/basename ''${!i2})
+ ${coreutils}/bin/cp ''${!i2} $xchg/$_basename
+ eval $i2=/tmp/xchg/$_basename
+ ${coreutils}/bin/ls -la $xchg
+ done
+
+ unset i i2 _basename
+ export | ${gnugrep}/bin/grep -v '^xchg=' > $xchg/saved-env
+ unset xchg
+
+ export tests='${testScript}'
+ ${testDriver}/bin/nixos-test-driver ${vm.config.system.build.vm}/bin/run-*-vm
+ ''; # */
+
+ in lib.overrideDerivation drv (attrs: {
+ requiredSystemFeatures = [ "kvm" ];
+ builder = "${bash}/bin/sh";
+ args = [ "-e" vmRunCommand ];
+ origArgs = attrs.args;
+ origBuilder = attrs.builder;
+ });
+
+ runInMachineWithX = { require ? [ ], ... }@args:
+ let
+ client = { ... }: {
+ inherit require;
+ imports = [ ../tests/common/auto.nix ];
+ virtualisation.memorySize = 1024;
+ services.xserver.enable = true;
+ test-support.displayManager.auto.enable = true;
+ services.xserver.displayManager.defaultSession = "none+icewm";
+ services.xserver.windowManager.icewm.enable = true;
+ };
+ in runInMachine ({
+ machine = client;
+ preBuild = ''
+ client.wait_for_x()
+ '';
+ } // args);
+
+ simpleTest = as: (makeTest as).test;
+
+}
+
diff --git a/tests/log.dhall b/tests/log.dhall
index f5bc194..26c2989 100644
--- a/tests/log.dhall
+++ b/tests/log.dhall
@@ -1,20 +1,15 @@
-let Test = ./test.dhall ? env:DHALL_GENODE_TEST
-
-let Genode = Test.Genode
+let Genode =
+ env:DHALL_GENODE
+ ? https://git.sr.ht/~ehmry/dhall-genode/blob/master/package.dhall
let Child = Genode.Init.Child
-in Test::{
- , children = toMap
- { test-log =
- Child.flat
- Child.Attributes::{
- , binary = "test-log"
- , exitPropagate = True
- , resources = Genode.Init.Resources::{
- , caps = 500
- , ram = Genode.units.MiB 10
- }
- }
+in Child.flat
+ Child.Attributes::{
+ , binary = "test-log"
+ , exitPropagate = True
+ , resources = Genode.Init.Resources::{
+ , caps = 500
+ , ram = Genode.units.MiB 10
}
- }
+ }
diff --git a/tests/log.nix b/tests/log.nix
index bd6f935..932a344 100644
--- a/tests/log.nix
+++ b/tests/log.nix
@@ -1,11 +1,10 @@
-{ pkgs, ... }:
-with pkgs;
-
{
name = "log";
- machine = {
- config = ./log.dhall;
- inputs = [ (pkgs.genodeSources.depot "test-log") ];
+ machine = { pkgs, ... }: {
+ genode.init.children.log = {
+ configFile = ./log.dhall;
+ inputs = [ pkgs.genodePackages.test-log ];
+ };
};
testScript = ''
start_all()
diff --git a/tests/solo5/multi.nix b/tests/solo5/multi.nix
new file mode 100644
index 0000000..f4c4220
--- /dev/null
+++ b/tests/solo5/multi.nix
@@ -0,0 +1,24 @@
+{
+ name = "solo5-multi";
+ machine = { pkgs, ... }: {
+ genode.init.children.tests = {
+ configFile = "${./.}/solo5.dhall { isAarch64 = ${
+ if pkgs.stdenv.hostPlatform.isAarch64 then "True" else "False"
+ } }";
+ inputs = with pkgs.genodePackages; [
+ solo5
+ solo5.tests
+ nic_bridge
+ nic_loopback
+ sequence
+ vfs_block
+ vfs_import
+ (genodeSources.make "app/ping")
+ ];
+ };
+ };
+ testScript = ''
+ start_all()
+ machine.wait_until_serial_output("child \"tests\" exited with exit value 0")
+ '';
+}
diff --git a/tests/solo5/solo5.dhall b/tests/solo5/solo5.dhall
index c206df2..a90ca21 100644
--- a/tests/solo5/solo5.dhall
+++ b/tests/solo5/solo5.dhall
@@ -1,6 +1,4 @@
-let Test = ../test.dhall ? env:DHALL_GENODE_TEST
-
-let Genode = Test.Genode
+let Genode = env:DHALL_GENODE
let Prelude = Genode.Prelude
@@ -28,9 +26,8 @@ let toSimple =
}
in λ(params : { isAarch64 : Bool }) →
- let tests
- : Prelude.Map.Type Text Child.Type
- = toMap
+ let children =
+ toMap
{ quiet = toSimple "quiet"
, hello = toSimple "hello"
, globals = toSimple "globals"
@@ -44,4 +41,12 @@ in λ(params : { isAarch64 : Bool }) →
else toMap { fpu = toSimple "fpu" }
)
- in Test::{ children = tests }
+ in Init.Child.nested
+ children
+ Init.Child.Attributes::{
+ , binary = "sequence"
+ , routes =
+ [ Init.ServiceRoute.parent "Timer"
+ , Init.ServiceRoute.parent "Rtc"
+ ]
+ }
diff --git a/tests/vmm_x86.dhall b/tests/vmm_x86.dhall
index e9cb586..b5938f0 100644
--- a/tests/vmm_x86.dhall
+++ b/tests/vmm_x86.dhall
@@ -1,20 +1,13 @@
-let Test = ./test.dhall ? env:DHALL_GENODE_TEST
-
-let Genode = Test.Genode
+let Genode = env:DHALL_GENODE
let Init = Genode.Init
let Child = Init.Child
-let vmm =
- Child.flat
- Child.Attributes::{
- , binary = "test-vmm_x86"
- , resources = Init.Resources::{
- , caps = 2048
- , ram = Genode.units.MiB 256
- }
- , routes = [ Genode.Init.ServiceRoute.parent "VM" ]
- }
-
-in Test::{ children = toMap { vmm } }
+in Child.flat
+ Child.Attributes::{
+ , binary = "test-vmm_x86"
+ , exitPropagate = True
+ , resources = Init.Resources::{ caps = 2048, ram = Genode.units.MiB 256 }
+ , routes = [ Genode.Init.ServiceRoute.parent "VM" ]
+ }
diff --git a/tests/vmm_x86.nix b/tests/vmm_x86.nix
index 405e7ee..6706cf7 100644
--- a/tests/vmm_x86.nix
+++ b/tests/vmm_x86.nix
@@ -1,12 +1,12 @@
-{ pkgs, ... }:
-
{
name = "vmm_x86";
constraints = specs:
with builtins;
all (f: any f specs) [ (spec: spec == "nova") (spec: spec == "x86") ];
- machine = {
- config = ./vmm_x86.dhall;
- inputs = map pkgs.genodeSources.make [ "test/vmm_x86" ];
+ machine = { pkgs, ... }: {
+ genode.init.children.vmm = {
+ configFile = ./vmm_x86.dhall;
+ inputs = map pkgs.genodeSources.make [ "test/vmm_x86" ];
+ };
};
}
diff --git a/tests/x86.dhall b/tests/x86.dhall
index 63cf1d6..868bee4 100644
--- a/tests/x86.dhall
+++ b/tests/x86.dhall
@@ -1,6 +1,4 @@
-let Test = ./test.dhall ? env:DHALL_GENODE_TEST
-
-let Genode = Test.Genode
+let Genode = env:DHALL_GENODE
let XML = Genode.Prelude.XML
@@ -14,27 +12,27 @@ let ServiceRoute = Init.ServiceRoute
let label = λ(_ : Text) → { local = _, route = _ } : Child.Attributes.Label
-let signal =
- Child.flat
- Child.Attributes::{
- , binary = "test-signal"
- , exitPropagate = True
- , priority = 5
- , resources = Init.Resources::{ caps = 500, ram = Genode.units.MiB 10 }
- }
-
-let rtc = Child.flat Child.Attributes::{ binary = "test-rtc" }
-
let pciInit =
Init::{
, verbose = True
- , routes = [ ServiceRoute.parent "Timer" ]
+ , routes = [ ServiceRoute.parent "Timer", ServiceRoute.parent "Rtc" ]
, children = toMap
- { test-pci =
+ { signal =
+ Child.flat
+ Child.Attributes::{
+ , binary = "test-signal"
+ , exitPropagate = True
+ , priority = 5
+ , resources = Init.Resources::{
+ , caps = 500
+ , ram = Genode.units.MiB 10
+ }
+ }
+ , rtc = Child.flat Child.Attributes::{ binary = "test-rtc" }
+ , test-pci =
Child.flat
Child.Attributes::{
, binary = "test-pci"
- , exitPropagate = True
, resources = Resources::{ ram = Genode.units.MiB 3 }
}
, acpi_drv =
@@ -87,10 +85,4 @@ let pciInit =
}
}
-in Test::{
- , children =
- [ { mapKey = "signal", mapValue = signal }
- , { mapKey = "rtc", mapValue = rtc }
- ]
- # Test.initToChildren pciInit
- }
+in pciInit
diff --git a/tests/x86.nix b/tests/x86.nix
index c4f4051..b51a431 100644
--- a/tests/x86.nix
+++ b/tests/x86.nix
@@ -1,13 +1,16 @@
-{ pkgs, ... }: {
+{
name = "x86";
constraints = builtins.any (spec: spec == "x86");
- machine = {
- config = ./x86.dhall;
- inputs = (map pkgs.genodeSources.depot [
- "acpi_drv"
- "platform_drv"
- "report_rom"
- "test-signal"
- ]) ++ (map pkgs.genodeSources.make [ "test/pci" "test/rtc" ]);
+ machine = { pkgs, ... }: {
+ genode.init.subinits.test = {
+ configFile = ./x86.dhall;
+ inputs = with pkgs.genodePackages;
+ [ acpi_drv platform_drv report_rom test-signal ]
+ ++ (map genodeSources.make [ "test/pci" "test/rtc" ]);
+ };
};
+ testScript = ''
+ start_all()
+ machine.wait_until_serial_output("child \"test\" exited with exit value 0")
+ '';
}