
Port NixOS module system

Convert the tests to use the module system from NixOS.
Emery Hemingway 2020-11-01 20:21:27 +01:00
parent 6889fb09c2
commit ebf3606705
29 changed files with 2891 additions and 947 deletions
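
For orientation, this is the shape a test machine takes under the module system; a minimal sketch, assuming the genode.init options introduced in nixos-modules/genode-init.nix below (the "hello" child and pkgs.hello input are illustrative, see tests/hello.nix in this commit for the real test):

{ pkgs, ... }: {
  # Each child of the init component is declared as a module option.
  genode.init.children.hello = {
    # Dhall description of the child (a Genode.Init.Child.Type value).
    configFile = ./hello.dhall;
    # Packages whose files are packed into the ROM store for this child.
    inputs = [ pkgs.hello ];
  };
}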


@ -65,9 +65,7 @@
    forAllCrossSystems ({ system, localSystem, crossSystem }:
      nixpkgs.lib // (import ./lib {
        inherit system localSystem crossSystem;
-       localPackages = nixpkgs.legacyPackages.${localSystem};
-       genodepkgs = self;
-       nixpkgs = nixpkgsFor.${system};
+       pkgs = self.legacyPackages.${system};
      }));

    legacyPackages =
@ -139,15 +137,12 @@
    checks =
      # Checks for continous testing
-     with (forAllCrossSystems ({ system, localSystem, crossSystem }:
-       import ./tests {
-         inherit self;
-         apps = self.apps.${system};
-         localPackages = nixpkgsFor.${localSystem};
-         genodepkgs = self.packages.${system};
-         lib = self.lib.${system};
-         nixpkgs = nixpkgsFor.${system};
-         legacyPackages = self.legacyPackages.${system};
+     let tests = import ./tests;
+     in with (forAllCrossSystems ({ system, localSystem, crossSystem }:
+       tests {
+         flake = self;
+         inherit system localSystem crossSystem;
+         pkgs = self.legacyPackages.${system};
        } // {
          ports = nixpkgsFor.${localSystem}.symlinkJoin {
            name = "ports";


@ -1,22 +1,22 @@
- { system, localSystem, crossSystem, genodepkgs, nixpkgs, localPackages }:
+ { system, localSystem, crossSystem, pkgs }:
  let
-   thisSystem = builtins.getAttr system;
-   inherit (nixpkgs) buildPackages;
-   testPkgs = thisSystem genodepkgs.packages;
+   inherit (pkgs) buildPackages;
+   localPackages = pkgs.buildPackages.buildPackages;
+   inherit (pkgs.genodePackages) dhallGenode genodeSources;
    dhallCachePrelude = ''
      export XDG_CACHE_HOME=$NIX_BUILD_TOP
-     export DHALL_GENODE="${testPkgs.dhallGenode}/binary.dhall";
+     export DHALL_GENODE="${dhallGenode}/binary.dhall";
      ${buildPackages.xorg.lndir}/bin/lndir -silent \
-       ${testPkgs.dhallGenode}/.cache \
+       ${dhallGenode}/.cache \
        $XDG_CACHE_HOME
    '';
  in rec {
    runDhallCommand = name: env: script:
-     nixpkgs.runCommand name (env // {
+     pkgs.runCommand name (env // {
        nativeBuildInputs = [ localPackages.dhall ]
          ++ env.nativeBuildInputs or [ ];
      }) ''
@ -42,7 +42,7 @@ in rec {
    hwImage = coreLinkAddr: bootstrapLinkAddr: basePkg: name:
      { gzip ? false, ... }@env:
      boot:
-     nixpkgs.stdenv.mkDerivation {
+     pkgs.stdenv.mkDerivation {
        name = name + "-hw-image";
        build = compileBoot name env boot;
        nativeBuildInputs = [ localPackages.dhall ];
@ -74,7 +74,7 @@ in rec {
        LD="${buildPackages.binutils}/bin/${buildPackages.binutils.targetPrefix}ld"
        $LD \
          --strip-all \
-         -T${testPkgs.genodeSources}/repos/base/src/ld/genode.ld \
+         -T${genodeSources}/repos/base/src/ld/genode.ld \
          -z max-page-size=0x1000 \
          -Ttext=$link_address -gc-sections \
          "$lib" "boot_modules.o" \
@ -96,13 +96,13 @@ in rec {
          bootstrap/modules_asm \
          ${bootstrapLinkAddr} \
          $out/image.elf
-     '' + nixpkgs.lib.optionalString gzip "gzip $out/image.elf";
+     '' + pkgs.lib.optionalString gzip "gzip $out/image.elf";
    };
    novaImage = name:
      { gzip ? false, ... }@env:
      boot:
-     nixpkgs.stdenv.mkDerivation {
+     pkgs.stdenv.mkDerivation {
        name = name + "-nova-image";
        build = compileBoot name env boot;
@ -115,17 +115,17 @@ in rec {
        # link final image
        LD="${buildPackages.binutils}/bin/${buildPackages.binutils.targetPrefix}ld"
        $LD --strip-all -nostdlib \
-         -T${testPkgs.genodeSources}/repos/base/src/ld/genode.ld \
-         -T${testPkgs.genodeSources}/repos/base-nova/src/core/core-bss.ld \
+         -T${genodeSources}/repos/base/src/ld/genode.ld \
+         -T${genodeSources}/repos/base-nova/src/core/core-bss.ld \
          -z max-page-size=0x1000 \
          -Ttext=0x100000 -gc-sections \
-         "${testPkgs.base-nova.coreObj}" boot_modules.o \
+         "${pkgs.genodePackages.base-nova.coreObj}" boot_modules.o \
          -o $out/image.elf
-     '' + nixpkgs.lib.optionalString gzip "gzip $out/image.elf";
+     '' + pkgs.lib.optionalString gzip "gzip $out/image.elf";
    };
    mergeManifests = inputs:
-     nixpkgs.writeTextFile {
+     pkgs.writeTextFile {
        name = "manifest.dhall";
        text = with builtins;
          let
@ -133,7 +133,7 @@ in rec {
            if hasAttr "manifest" input then
              ''
                ${head}, { mapKey = "${
-                 nixpkgs.lib.getName input
+                 pkgs.lib.getName input
                }", mapValue = ${input.manifest} }''
            else
              abort "${input.pname} does not have a manifest";


@ -0,0 +1,27 @@
{ config, pkgs, lib, ... }:
with lib;
let
localPackages = pkgs.buildPackages;
utils = import ../lib {
inherit (config.nixpkgs) system localSystem crossSystem;
inherit pkgs;
};
in {
genode.core = {
prefix = "hw-pc-";
supportedSystems = [ "x86_64-genode" ];
basePackages = with pkgs.genodePackages; [ base-hw-pc rtc_drv ];
};
genode.boot = {
initrd = "${config.genode.boot.image}/image.elf";
image = utils.hwImage "0xffffffc000000000" "0x00200000"
pkgs.genodePackages.base-hw-pc config.system.name { }
config.genode.boot.configFile;
};
}


@ -0,0 +1,27 @@
{ config, pkgs, lib, ... }:
with lib;
let
localPackages = pkgs.buildPackages;
utils = import ../lib {
inherit (config.nixpkgs) system localSystem crossSystem;
inherit pkgs;
};
in {
genode.core = {
prefix = "hw-virt_qemu";
supportedSystems = [ "aarch64-genode" ];
basePackages = with pkgs.genodePackages; [ base-hw-virt_qemu rtc-dummy ];
};
genode.boot = {
initrd = "${config.genode.boot.image}/image.elf";
image = utils.hwImage "0xffffffc000000000" "0x40000000"
pkgs.genodePackages.base-hw-virt_qemu config.system.name { }
config.genode.boot.configFile;
};
}


@ -0,0 +1,157 @@
{ config, pkgs, lib, modulesPath, ... }:
with lib;
let localPackages = pkgs.buildPackages;
in {
options.genode = {
core = {
prefix = mkOption {
type = types.str;
example = "hw-pc-";
};
supportedSystems = mkOption {
type = types.listOf types.str;
example = [ "i686-genode" "x86_64-genode" ];
};
basePackages = mkOption { type = types.listOf types.package; };
};
boot = {
kernel = mkOption {
type = types.path;
default = "${pkgs.genodePackages.bender}/bender";
};
initrd = mkOption {
type = types.str;
default = "${pkgs.genodePackages.bender}/bender";
description = "Path to an image or a command-line argument string";
};
configFile = mkOption {
type = types.path;
description = ''
Dhall boot configuration. See
https://git.sr.ht/~ehmry/dhall-genode/tree/master/Boot/package.dhall
'';
};
image = mkOption {
type = types.path;
description =
"Boot image containing the base component binaries and configuration.";
};
romModules = mkOption {
type = types.attrsOf types.path;
description = "Attr set of initial ROM modules";
};
};
};
config = let
initInputs = unique config.genode.init.inputs;
addManifest = drv:
drv // {
manifest =
localPackages.runCommand "${drv.name}.dhall" { inherit drv; } ''
set -eu
echo -n '[' >> $out
find $drv/ -type f -printf ',{mapKey= "%f",mapValue="%p"}' >> $out
${if builtins.elem "lib" drv.outputs then
''
find ${drv.lib}/ -type f -printf ',{mapKey= "%f",mapValue="%p"}' >> $out''
else
""}
echo -n ']' >> $out
'';
};
mergeManifests = inputs:
localPackages.writeTextFile {
name = "manifest.dhall";
text = with builtins;
let
f = head: input:
if hasAttr "manifest" input then
''
${head}, { mapKey = "${
lib.getName input
}", mapValue = ${input.manifest} }''
else
abort "${input.pname} does not have a manifest";
in (foldl' f "[" inputs) + "]";
};
in {
assertions = [{
assertion = builtins.any (s: s == config.nixpkgs.system)
config.genode.core.supportedSystems;
message = "invalid Genode core for this system";
}];
genode.boot.configFile = let
tarball =
"${config.system.build.tarball}/tarball/${config.system.build.tarball.fileName}.tar";
manifest = mergeManifests (map addManifest
(config.genode.core.basePackages ++ [ config.system.build.tarball ]
++ (with pkgs.genodePackages; [ init cached_fs_rom vfs ])));
in localPackages.runCommand "boot.dhall" { } ''
cat > $out << EOF
${./store-wrapper.dhall}
(${config.genode.init.configFile})
"${config.system.build.tarball.fileName}.tar"
$(stat --format '%s' ${tarball})
${config.system.build.storeManifest} ${manifest}
EOF
'';
system.build.storeManifest = mergeManifests (map addManifest initInputs);
# Create the tarball of the store to live in core ROM
system.build.tarball =
pkgs.callPackage "${modulesPath}/../lib/make-system-tarball.nix" {
contents = [ ];
storeContents = [
{
# assume that the init config will depend
# on every store path needed to boot
object = config.genode.init.configFile;
symlink = "/config.dhall";
}
{
object = pkgs.buildPackages.symlinkJoin {
name = config.system.name + ".rom";
paths = config.genode.init.inputs;
};
symlink = "/rom";
}
];
compressCommand = "cat";
compressionExtension = "";
};
system.build.initXml = pkgs.buildPackages.runCommand "init.xml" {
nativeBuildInputs = with pkgs.buildPackages; [ dhall xorg.lndir ];
DHALL_GENODE = "${pkgs.genodePackages.dhallGenode}/binary.dhall";
BOOT_CONFIG = config.genode.boot.configFile;
} ''
export XDG_CACHE_HOME=$NIX_BUILD_TOP
lndir -silent \
${pkgs.genodePackages.dhallGenode}/.cache \
$XDG_CACHE_HOME
dhall text <<< "(env:DHALL_GENODE).Init.render (env:BOOT_CONFIG).config" > $out
'';
};
}
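
A rough sketch of how a configuration built from these modules is evaluated and its boot image retrieved; this mirrors the eval-config wiring in tests/lib/build-vms.nix later in this commit, and the module list, system values, and names here are illustrative assumptions rather than the project's actual entry point:

let
  # eval-config.nix comes from the nixpkgs NixOS tree (modulesPath is assumed
  # to point there, as in tests/lib/build-vms.nix).
  eval = import "${modulesPath}/../lib/eval-config.nix" {
    system = "x86_64-genode";
    modules = [
      ./genode-core.nix
      ./genode-init.nix
      ./nova.nix            # or base-hw-pc.nix for the hw kernel
      myTestMachine         # a machine module such as the sketch above
    ];
  };
in eval.config.genode.boot.image   # derivation producing image.elf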


@ -0,0 +1,111 @@
{ config, pkgs, lib, ... }:
with lib;
let
inputs = mkOption {
description = "List of packages to build a ROM store with.";
type = types.listOf types.package;
};
in {
options.genode.init = {
inherit inputs;
configFile = mkOption {
description = ''
Dhall configuration of this init instance after children have been merged.
'';
type = types.path;
};
baseConfig = mkOption {
description =
"Dhall configuration of this init instance before merging children.";
type = types.str;
default = ''
let Genode = env:DHALL_GENODE
in Genode.Init::{
, routes =
[ Genode.Init.ServiceRoute.parent "File_system"
, Genode.Init.ServiceRoute.parent "Rtc"
, Genode.Init.ServiceRoute.parent "Timer"
, Genode.Init.ServiceRoute.parent "IRQ"
, Genode.Init.ServiceRoute.parent "IO_MEM"
, Genode.Init.ServiceRoute.parent "IO_PORT"
]
}
'';
};
children = mkOption {
default = { };
type = with types;
attrsOf (submodule {
options = {
inherit inputs;
configFile = mkOption {
type = types.path;
description = ''
Dhall configuration of child.
See https://git.sr.ht/~ehmry/dhall-genode/tree/master/Init/Child/Type
'';
};
};
});
};
subinits = mkOption {
default = { };
type = with types;
attrsOf (submodule {
options = {
inherit inputs;
configFile = mkOption {
type = types.path;
description = ''
Dhall configuration of child init.
See https://git.sr.ht/~ehmry/dhall-genode/tree/master/Init/Type
'';
};
};
});
};
};
config = {
genode.init.inputs = with builtins;
[ pkgs.genodePackages.report_rom ] ++ concatLists (catAttrs "inputs"
((attrValues config.genode.init.children)
++ (attrValues config.genode.init.subinits)));
# TODO: convert the subinits to children
genode.init.configFile = pkgs.writeText "init.dhall" ''
let Genode = env:DHALL_GENODE
let baseConfig = ${config.genode.init.baseConfig}
in baseConfig with children = baseConfig.children # toMap {${
concatMapStrings (name:
", `${name}` = (${
config.genode.init.children.${name}.configFile
} : Genode.Init.Child.Type)")
(builtins.attrNames config.genode.init.children)
} ${
concatMapStrings (name: ''
, `${name}` =
Genode.Init.toChild
(${
config.genode.init.subinits.${name}.configFile
} : Genode.Init.Type)
Genode.Init.Attributes.default
'') (builtins.attrNames config.genode.init.subinits)
} }
'';
};
}
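
As a usage sketch (names illustrative): a child supplies a Dhall value of type Init.Child.Type, while a subinit supplies a whole Init.Type that the module wraps with Genode.Init.toChild in the generated init.dhall above:

{ pkgs, ... }: {
  # Ordinary child: configFile evaluates to a Genode.Init.Child.Type.
  genode.init.children.hello = {
    inputs = [ /* packages providing the child's ROM modules */ ];
    configFile = ./hello.dhall;
  };
  # Nested init: configFile evaluates to a Genode.Init.Type.
  genode.init.subinits."services.example" = {
    inputs = [ pkgs.genodePackages.vfs ];
    configFile = ./example-init.dhall;
  };
}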

nixos-modules/hardware.nix (new file, 195 lines)

@ -0,0 +1,195 @@
{ config, pkgs, lib, ... }:
with lib;
{
options.networking.interfaces = lib.mkOption {
type = with types;
attrsOf (submodule ({ ... }: {
options.genode = {
driver = mkOption {
type = types.enum [ "ipxe" "virtio" ];
default = "ipxe";
};
stack = mkOption {
type = types.enum [ "lwip" "lxip" ];
default = "lwip";
};
};
}));
};
config.genode.init.children = let
inherit (builtins) toFile;
nics = mapAttrs' (name: interface:
let name' = "nic." + name;
in {
name = name';
value = {
inputs = with pkgs.genodePackages;
{
ipxe = [ ipxe_nic_drv ];
virtio = [ virtio_nic_drv ];
}.${interface.genode.driver};
configFile = toFile "${name'}.dhall" ''
let Genode = env:DHALL_GENODE
let Init = Genode.Init
in Init.Child.flat
Init.Child.Attributes::{
, binary = "virtio_pci_nic"
, provides = [ "Nic" ]
, resources = Init.Resources::{
, caps = 128
, ram = Genode.units.MiB 4
}
, routes = [ Init.ServiceRoute.parent "IO_MEM" ]
, config = Init.Config::{
, policies =
[ Init.Config.Policy::{
, service = "Nic"
, label =
Init.LabelSelector.prefix "sockets.${name}"
}
]
}
}
'';
};
}) config.networking.interfaces;
sockets = mapAttrs' (name: interface:
let name' = "sockets." + name;
in {
name = name';
value = {
inputs = with pkgs.genodePackages;
{
lwip = [ vfs_lwip ];
lxip = [ vfs_lxip ];
}.${interface.genode.stack};
configFile = let ipv4 = builtins.head interface.ipv4.addresses;
in toFile "${name'}.dhall" ''
let Genode = env:DHALL_GENODE
let Init = Genode.Init
in Init.Child.flat
Init.Child.Attributes::{
, binary = "vfs"
, provides = [ "File_system" ]
, resources = Init.Resources::{ caps = 128, ram = Genode.units.MiB 16 }
, config = Init.Config::{
, policies =
[ Init.Config.Policy::{
, service = "File_system"
, label = Init.LabelSelector.suffix "sockets"
, attributes = toMap { root = "/" }
}
]
, content =
let XML = Genode.Prelude.XML
in [ XML.element
{ name = "vfs"
, attributes = XML.emptyAttributes
, content =
[ XML.leaf
{ name = "lwip"
, attributes = toMap
{ ip_addr = "${ipv4.address}", netmask = "${
if ipv4.prefixLength == 24 then
"255.255.255.0"
else
throw
"missing prefix to netmask conversion"
}" }
}
]
}
]
}
}
'';
};
}) config.networking.interfaces;
in nics // sockets // {
platform_drv = {
inputs = [ pkgs.genodePackages.platform_drv ];
configFile = let
policies = concatMapStrings (name: ''
Init.Config.Policy::{
, service = "Platform"
, label = Init.LabelSelector.prefix "nic.${name}"
, content =
[ Genode.Prelude.XML.leaf
{ name = "pci", attributes = toMap { class = "ETHERNET" } }
]
}
'') (builtins.attrNames config.networking.interfaces);
in toFile "platform_drv.dhall" ''
let Genode = env:DHALL_GENODE
let Init = Genode.Init
let label = \(_ : Text) -> { local = _, route = _ }
in Init.Child.flat
Init.Child.Attributes::{
, binary = "platform_drv"
, resources = Init.Resources::{
, caps = 800
, ram = Genode.units.MiB 4
, constrainPhys = True
}
, reportRoms = [ label "acpi" ]
, provides = [ "Platform" ]
, routes =
[ Init.ServiceRoute.parent "IRQ"
, Init.ServiceRoute.parent "IO_MEM"
, Init.ServiceRoute.parent "IO_PORT"
]
, config = Init.Config::{
, policies = [ ${policies} ]
}
}
'';
};
acpi_drv = {
inputs = [ pkgs.genodePackages.acpi_drv ];
configFile = toFile "acpi_drv.dhall" ''
let Genode = env:DHALL_GENODE
let Init = Genode.Init
let label = \(_ : Text) -> { local = _, route = _ }
in Init.Child.flat
Init.Child.Attributes::{
, binary = "acpi_drv"
, resources = Init.Resources::{
, caps = 400
, ram = Genode.units.MiB 4
, constrainPhys = True
}
, romReports = [ label "acpi" ]
, routes =
[ Init.ServiceRoute.parent "IRQ"
, Init.ServiceRoute.parent "IO_MEM"
, Init.ServiceRoute.parent "IO_PORT"
]
}
'';
};
};
}
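
Putting the options above together, a hedged example of a single interface using the VirtIO driver and the lwip stack; the interface name and address follow the 192.168.*.* scheme used by tests/lib/build-vms.nix later in this commit:

{ ... }: {
  networking.interfaces.eth1 = {
    genode.driver = "virtio";   # spawns the "nic.eth1" child
    genode.stack = "lwip";      # spawns the "sockets.eth1" vfs child
    ipv4.addresses = [{
      address = "192.168.1.2";
      prefixLength = 24;        # only /24 is translated to a netmask above
    }];
  };
}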

nixos-modules/nova.nix (new file, 27 lines)

@ -0,0 +1,27 @@
{ config, pkgs, lib, ... }:
with lib;
let
localPackages = pkgs.buildPackages;
utils = import ../lib {
inherit (config.nixpkgs) system localSystem crossSystem;
inherit pkgs;
};
in {
genode.core = {
prefix = "nova-";
supportedSystems = [ "x86_64-genode" ];
basePackages = with pkgs.genodePackages; [ base-nova rtc_drv ];
};
genode.boot = {
initrd =
"'${pkgs.genodePackages.NOVA}/hypervisor-x86_64 arg=iommu novpid serial,${config.genode.boot.image}/image.elf'";
image =
utils.novaImage config.system.name { } config.genode.boot.configFile;
};
}

File diff suppressed because it is too large.


@ -0,0 +1,166 @@
let Genode =
env:DHALL_GENODE sha256:e90438be23b5100003cf018b783986df67bc6d0e3d35e800677d0d9109ff6aa9
let Prelude = Genode.Prelude
let XML = Prelude.XML
let Init = Genode.Init
let Child = Init.Child
let TextMapType = Prelude.Map.Type Text
let Manifest/Type = TextMapType (TextMapType Text)
let Manifest/toRoutes =
λ(manifest : Manifest/Type) →
Prelude.List.map
(Prelude.Map.Entry Text Text)
Init.ServiceRoute.Type
( λ(entry : Prelude.Map.Entry Text Text) →
{ service =
{ name = "ROM"
, label = Init.LabelSelector.Type.Last entry.mapKey
}
, route =
Init.Route.Type.Child
{ name = "store_rom"
, label = Some entry.mapValue
, diag = Some True
}
}
)
( Prelude.List.concat
(Prelude.Map.Entry Text Text)
(Prelude.Map.values Text (Prelude.Map.Type Text Text) manifest)
)
let parentROMs =
Prelude.List.map
Text
Init.ServiceRoute.Type
( λ(label : Text) →
{ service = { name = "ROM", label = Init.LabelSelector.last label }
, route =
Init.Route.Type.Parent { label = Some label, diag = None Bool }
}
)
let wrapStore
: Init.Type → Manifest/Type → Child.Type
= λ(init : Init.Type) →
λ(manifest : Manifest/Type) →
Init.toChild
init
Init.Attributes::{
, exitPropagate = True
, resources = Init.Resources::{ ram = Genode.units.MiB 4 }
, routes =
[ Init.ServiceRoute.parent "IO_MEM"
, Init.ServiceRoute.parent "IO_PORT"
, Init.ServiceRoute.parent "IRQ"
, Init.ServiceRoute.parent "VM"
, Init.ServiceRoute.child "Timer" "timer"
, Init.ServiceRoute.child "Rtc" "rtc"
]
# parentROMs
[ "ld.lib.so"
, "init"
, "platform_info"
, "core_log"
, "kernel_log"
, "vfs"
, "vfs.lib.so"
, "cached_fs_rom"
]
# Manifest/toRoutes manifest
# [ Init.ServiceRoute.child "ROM" "store_rom" ]
}
in λ(subinit : Init.Type) →
λ(storeName : Text) →
λ(storeSize : Natural) →
λ(storeManifest : Manifest/Type) →
λ(bootManifest : Manifest/Type) →
Genode.Boot::{
, config = Init::{
, children =
let child = Prelude.Map.keyValue Child.Type
in [ child
"timer"
( Child.flat
Child.Attributes::{
, binary = "timer_drv"
, provides = [ "Timer" ]
}
)
, child
"rtc"
( Child.flat
Child.Attributes::{
, binary = "rtc_drv"
, provides = [ "Rtc" ]
, routes = [ Init.ServiceRoute.parent "IO_PORT" ]
}
)
, child
"store_fs"
( Child.flat
Child.Attributes::{
, binary = "vfs"
, config = Init.Config::{
, content =
[ XML.element
{ name = "vfs"
, attributes = XML.emptyAttributes
, content =
[ XML.leaf
{ name = "tar"
, attributes = toMap { name = storeName }
}
]
}
]
, policies =
[ Init.Config.Policy::{
, service = "File_system"
, label = Init.LabelSelector.suffix "nix-store"
, attributes = toMap { root = "/nix/store" }
}
, Init.Config.Policy::{
, service = "File_system"
, label = Init.LabelSelector.prefix "store_rom"
, attributes = toMap { root = "/" }
}
]
}
, provides = [ "File_system" ]
}
)
, child
"store_rom"
( Child.flat
Child.Attributes::{
, binary = "cached_fs_rom"
, provides = [ "ROM" ]
, resources = Init.Resources::{
, ram = storeSize + Genode.units.MiB 1
}
}
)
, child "init" (wrapStore subinit storeManifest)
]
}
, rom =
Genode.BootModules.toRomPaths
( Prelude.List.concat
(Prelude.Map.Entry Text Text)
( Prelude.Map.values
Text
(Prelude.Map.Type Text Text)
bootManifest
)
)
}


@ -0,0 +1,159 @@
let Genode = env:DHALL_GENODE
let Prelude = Genode.Prelude
let XML = Prelude.XML
let Init = Genode.Init
let Child = Init.Child
let parentRoutes =
Prelude.List.map Text Init.ServiceRoute.Type Init.ServiceRoute.parent
in λ(params : { coreutils : Text, execStart : Text }) →
Init::{
, verbose = True
, routes = parentRoutes [ "Timer", "Rtc", "File_system" ]
, children = toMap
{ vfs =
Child.flat
Child.Attributes::{
, binary = "vfs"
, exitPropagate = True
, provides = [ "File_system" ]
, resources = Genode.Init.Resources::{
, caps = 256
, ram = Genode.units.MiB 8
}
, config = Init.Config::{
, content =
[ XML.element
{ name = "vfs"
, attributes = XML.emptyAttributes
, content =
let dir =
λ(name : Text) →
λ(content : List XML.Type) →
XML.element
{ name = "dir"
, content
, attributes = toMap { name }
}
let leaf =
λ(name : Text) →
XML.leaf
{ name, attributes = XML.emptyAttributes }
in [ dir
"dev"
[ dir "pipes" [ leaf "pipe" ]
, dir
"sockets"
[ XML.leaf
{ name = "fs"
, attributes = toMap
{ label = "sockets" }
}
]
, leaf "log"
, leaf "null"
, leaf "rtc"
, leaf "zero"
]
, dir
"etc"
[ XML.element
{ name = "inline"
, attributes = toMap
{ name = "ExecStart" }
, content =
[ XML.text params.execStart ]
}
]
, dir
"usr"
[ dir
"bin"
[ XML.leaf
{ name = "symlink"
, attributes = toMap
{ name = "env"
, target =
"${params.coreutils}/bin/env"
}
}
]
]
, dir "tmp" [ leaf "ram" ]
, dir
"nix"
[ dir
"store"
[ XML.leaf
{ name = "fs"
, attributes = toMap
{ label = "nix-store" }
}
]
]
]
}
]
, policies =
[ Init.Config.Policy::{
, service = "File_system"
, label = Init.LabelSelector.prefix "shell"
, attributes = toMap { root = "/", writeable = "yes" }
}
]
}
}
, shell =
Child.flat
Child.Attributes::{
, binary = "bash"
, exitPropagate = True
, resources = Genode.Init.Resources::{
, caps = 256
, ram = Genode.units.MiB 8
}
, config = Genode.Init.Config::{
, content =
[ XML.leaf
{ name = "libc"
, attributes = toMap
{ stdin = "/dev/null"
, stdout = "/dev/log"
, stderr = "/dev/log"
, pipe = "/dev/pipes"
, rtc = "/dev/rtc"
, socket = "/dev/sockets"
}
}
, XML.element
{ name = "vfs"
, attributes = XML.emptyAttributes
, content =
[ XML.leaf
{ name = "fs"
, attributes = XML.emptyAttributes
}
]
}
]
# Prelude.List.map
Text
XML.Type
( λ(x : Text) →
XML.leaf
{ name = "arg"
, attributes = toMap { value = x }
}
)
[ "bash", "/etc/ExecStart" ]
}
}
}
}

nixos-modules/systemd.nix (new file, 33 lines)

@ -0,0 +1,33 @@
{ config, pkgs, lib, ... }:
with lib; {
options.systemd.services = lib.mkOption {
type = types.attrsOf (types.submodule ({ name, config, ... }: {
options.genode.enable = lib.mkOption {
type = types.bool;
default = false;
description = "Translate this systemd unit to a Genode subsystem.";
};
}));
};
config.services.klogd.enable = false;
# The default is determined by checking the Linux version
# which cannot be evaluated here.
config.genode.init.subinits = mapAttrs' (name: service:
let name' = "services." + name;
in {
name = name';
value = {
inputs = with pkgs; with genodePackages; [ bash libc posix vfs_pipe ];
configFile = pkgs.writeText "${name'}.dhall" ''
${./systemd-runner.dhall} {
, coreutils = "${pkgs.coreutils}"
, execStart = "${toString service.serviceConfig.ExecStart}"
}
'';
};
}) (filterAttrs (name: service: service.genode.enable)
config.systemd.services);
}
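
For example (a sketch, the service name is illustrative), a unit opts into translation like this; only serviceConfig.ExecStart is consulted by the module above:

{ pkgs, ... }: {
  systemd.services.hello = {
    genode.enable = true;       # becomes the "services.hello" subinit
    serviceConfig.ExecStart = "${pkgs.hello}/bin/hello";
  };
}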


@ -1,5 +1,7 @@
- # This file contains overrides necesarry to build some Make and Depot targets.
+ # This file contains overrides necessary to build some Make and Depot targets.
  # Many targets can be built with the default attributes, and are not listed here.
+ # However, any package listed here with empty overrides ({ }) will be added to
+ # the package attributes of this flake.
  { buildPackages, ports }:
  with ports;
@ -15,6 +17,8 @@ let
  };
  in {
+ acpi_drv = { };
  cached_fs_rom = { };
  fb_sdl = with buildPackages; {
@ -47,10 +51,20 @@ in {
  lx_block.HOST_INC_DIR = [ hostLibcInc ];
+ nic_bridge = { };
+ nic_loopback = { };
  noux.portInputs = [ libc ];
+ platform_drv = { };
  posix.portInputs = [ libc ];
+ report_rom = { };
+ rom_logger = { };
  rtc_drv.meta.platforms = [ "x86_64-genode" ];
  rump = {
@ -58,6 +72,8 @@ in {
    buildInputs = with buildPackages; [ zlib ];
  };
+ sequence = { };
  stdcxx.portInputs = [ libc stdcxx ];
  # The following are tests are patched to exit at completion
@ -79,12 +95,17 @@ in {
  vesa_drv.portInputs = [ libc x86emu ];
  vfs.outputs = [ "out" "lib" ];
+ vfs_audit = {};
  vfs_block = { };
  vfs_import.patches = [ ./vfs_import.patch ];
  vfs_jitterentropy.portInputs = [ jitterentropy libc ];
  vfs_lwip.portInputs = [ lwip ];
+ vfs_pipe = { };
  vfs_ttf.portInputs = [ libc stb ];
+ virtdev_rom = { };
+ virtio_nic_drv = { };
  wifi_drv.portInputs = [ dde_linux libc openssl ];
  }


@ -8,3 +8,36 @@ index c6d9e2012b..050de6136c 100644
  log("--- Platform test finished ---");
  +    env.parent().exit(0);
  }
commit 03a5f469313e9fdc9ee1135ebf0b167e4d3d3266
Author: Emery Hemingway <ehmry@posteo.net>
Date: Wed Oct 21 15:16:34 2020 +0200
test-pci: recognize VirtIO vendor IDs
diff --git a/repos/os/src/test/pci/test.cc b/repos/os/src/test/pci/test.cc
index c6d9e2012b..9cc2a2ac4b 100644
--- a/repos/os/src/test/pci/test.cc
+++ b/repos/os/src/test/pci/test.cc
@@ -19,7 +19,10 @@
using namespace Genode;
-enum { INTEL_VENDOR_ID = 0x8086 };
+enum {
+ INTEL_VENDOR_ID = 0x8086,
+ VIRTIO_VENDOR_ID = 0x1af4,
+};
/**
@@ -45,7 +48,9 @@ static void print_device_info(Platform::Device_capability device_cap)
Hex(fun, Hex::OMIT_PREFIX), " "
"class=", Hex(class_code), " "
"vendor=", Hex(vendor_id), " ",
- (vendor_id == INTEL_VENDOR_ID ? "(Intel)" : "(unknown)"),
+ (vendor_id == INTEL_VENDOR_ID ? "(Intel)" :
+ vendor_id == VIRTIO_VENDOR_ID ? "(VirtIO)" :
+ "(unknown)"),
" device=", Hex(device_id));
for (int resource_id = 0; resource_id < 6; resource_id++) {


@ -1,324 +1,63 @@
- { self, apps, localPackages, genodepkgs, lib, nixpkgs, legacyPackages }:
+ { flake, system, localSystem, crossSystem, pkgs }:
  let
-   callTest = path:
-     import path {
-       pkgs = testPkgs;
-       inherit nixpkgs localPackages legacyPackages;
-     };
-   testFiles =
-     map callTest [ ./log.nix ./posix.nix ./vmm_arm.nix ./vmm_x86.nix ./x86.nix ]
-     ++ (callTest ./solo5);
-   testPkgs = genodepkgs;
-   qemu' = localPackages.qemu;
-   qemuBinary = qemuPkg:
-     {
-       aarch64-genode = "${qemuPkg}/bin/qemu-system-aarch64";
-       x86_64-genode = "${qemuPkg}/bin/qemu-system-x86_64";
-     }.${genodepkgs.stdenv.hostPlatform.system};
+   lib = flake.lib.${system};
+   nixpkgs = flake.legacyPackages.${system};
+   legacyPackages = flake.legacyPackages.${system};
+   testingPython = import ./lib/testing-python.nix;
+   testSpecs = map (p: import p) [
+     ./hello.nix
+     ./log.nix
+     ./solo5/multi.nix
+     ./vmm_x86.nix
+     ./x86.nix
+   ];
    cores = [
      {
        prefix = "hw-pc-";
+       testingPython = testingPython {
+         inherit flake system localSystem crossSystem pkgs;
+         extraConfigurations = [ ../nixos-modules/base-hw-pc.nix ];
+       };
        specs = [ "x86" "hw" ];
        platforms = [ "x86_64-genode" ];
-       basePackages = [ testPkgs.base-hw-pc ]
-         ++ map testPkgs.genodeSources.depot [ "rtc_drv" ];
-       makeImage =
-         lib.hwImage "0xffffffc000000000" "0x00200000" testPkgs.base-hw-pc;
-       startVM = vmName: image: ''
-         #! ${localPackages.runtimeShell}
-         exec ${qemuBinary qemu'} \
-           -name ${vmName} \
-           -machine q35 \
-           -m 384 \
-           -netdev user,id=net0 \
-           -device virtio-net-pci,netdev=net0 \
-           -kernel "${testPkgs.bender}/bender" \
-           -initrd "${image}/image.elf" \
-           $QEMU_OPTS \
-           "$@"
-       '';
-     }
-     {
-       prefix = "hw-virt_qemu-";
-       specs = [ "aarch64" "hw" ];
-       platforms = [ "aarch64-genode" ];
-       basePackages = with testPkgs; [ base-hw-virt_qemu rtc-dummy ];
-       makeImage = lib.hwImage "0xffffffc000000000" "0x40000000"
-         testPkgs.base-hw-virt_qemu;
-       startVM = vmName: image: ''
-         #! ${localPackages.runtimeShell}
-         exec ${qemuBinary qemu'} \
-           -name ${vmName} \
-           -M virt,virtualization=true,gic_version=3 \
-           -cpu cortex-a53 \
-           -smp 4 \
-           -m 384 \
-           -kernel "${image}/image.elf" \
-           $QEMU_OPTS \
-           "$@"
-       '';
      }
+     /* {
+       prefix = "hw-virt_qemu-";
+       testingPython = testingPython {
+         inherit flake system localSystem crossSystem pkgs;
+         extraConfigurations = [ ../nixos-modules/base-hw-virt_qemu.nix ];
+       };
+       specs = [ "aarch64" "hw" ];
+       platforms = [ "aarch64-genode" ];
+     }
+     */
      {
        prefix = "nova-";
+       testingPython = testingPython {
+         inherit flake system localSystem crossSystem pkgs;
+         extraConfigurations = [ ../nixos-modules/nova.nix ];
+       };
        specs = [ "x86" "nova" ];
        platforms = [ "x86_64-genode" ];
-       basePackages = [ testPkgs.base-nova ]
-         ++ map testPkgs.genodeSources.depot [ "rtc_drv" ];
-       makeImage = lib.novaImage;
-       startVM = vmName: image: ''
-         #! ${localPackages.runtimeShell}
-         exec ${qemuBinary qemu'} \
-           -name ${vmName} \
-           -machine q35 \
-           -m 384 \
-           -kernel "${testPkgs.bender}/bender" \
-           -initrd "${testPkgs.NOVA}/hypervisor-x86_64 arg=iommu novpid serial,${image}/image.elf" \
-           $QEMU_OPTS \
-           "$@"
-       '';
      }
    ];
    cores' = builtins.filter (core:
-     builtins.any (x: x == genodepkgs.stdenv.hostPlatform.system) core.platforms)
+     builtins.any (x: x == pkgs.stdenv.hostPlatform.system) core.platforms)
      cores;
-   testDriver = with localPackages;
-     let testDriverScript = ./test-driver/test-driver.py;
-     in stdenv.mkDerivation {
-       name = "nixos-test-driver";
-       nativeBuildInputs = [ makeWrapper ];
-       buildInputs = [ (python3.withPackages (p: [ p.ptpython ])) ];
-       checkInputs = with python3Packages; [ pylint mypy ];
-       dontUnpack = true;
-       preferLocalBuild = true;
-       doCheck = true;
-       checkPhase = ''
-         mypy --disallow-untyped-defs \
-           --no-implicit-optional \
-           --ignore-missing-imports ${testDriverScript}
-         pylint --errors-only ${testDriverScript}
-       '';
-       installPhase = ''
-         mkdir -p $out/bin
-         cp ${testDriverScript} $out/bin/nixos-test-driver
-         chmod u+x $out/bin/nixos-test-driver
-         # TODO: copy user script part into this file (append)
-         wrapProgram $out/bin/nixos-test-driver \
-           --prefix PATH : "${lib.makeBinPath [ qemu' coreutils ]}" \
-       '';
-     };
-   defaultTestScript = ''
-     start_all()
-     machine.wait_until_serial_output('child "init" exited with exit value 0')
-   '';
-   makeTest = with localPackages;
-     { prefix, specs, platforms, basePackages, makeImage, startVM }:
-     { name ? "unnamed", testScript ? defaultTestScript,
-     # Skip linting (mainly intended for faster dev cycles)
-     skipLint ? false, ... }@t:
-     let
-       testDriverName = "genode-test-driver-${name}";
-       buildVM = vmName:
-         { config, inputs, env ? { }, extraPaths ? [ ] }:
-         let
-           storeTarball = localPackages.runCommand "store" { } ''
-             mkdir -p $out
-             tar cf "$out/store.tar" --absolute-names ${toString inputs} ${
-               toString extraPaths
-             }
-           '';
-           addManifest = drv:
-             drv // {
-               manifest =
-                 nixpkgs.runCommand "${drv.name}.dhall" { inherit drv; } ''
-                   set -eu
-                   echo -n '[' >> $out
-                   find $drv/ -type f -printf ',{mapKey= "%f",mapValue="%p"}' >> $out
-                   ${if builtins.elem "lib" drv.outputs then
-                     ''
-                       find ${drv.lib}/ -type f -printf ',{mapKey= "%f",mapValue="%p"}' >> $out''
-                   else
-                     ""}
-                   echo -n ']' >> $out
-                 '';
-             };
-           storeManifest = lib.mergeManifests (map addManifest inputs);
-           manifest = lib.mergeManifests (map addManifest (basePackages
-             ++ [ testPkgs.sotest-producer storeTarball ]
-             ++ map testPkgs.genodeSources.depot [
-               "init"
-               "vfs"
-               "cached_fs_rom"
-             ]));
-           config' = "${
-             ./test-wrapper.dhall
-           } (${config}) $(stat --format '%s' ${storeTarball}/store.tar) ${storeManifest} ${manifest}";
-           env' = {
-             DHALL_GENODE = "${testPkgs.dhallGenode}/source.dhall";
-             DHALL_GENODE_TEST = "${./test.dhall}";
-           } // env;
-           image = makeImage vmName env' config';
-           startVM' = startVM vmName image;
-         in {
-           script = localPackages.writeScriptBin "run-${vmName}-vm" startVM';
-           config = lib.runDhallCommand (name + ".dhall") env' ''
-             ${apps.dhall.program} <<< "${config'}" > $out
-           '';
-           store = storeTarball;
-           xml = lib.runDhallCommand (name + ".config") env'
-             ''${apps.render-init.program} <<< "(${config'}).config" > $out'';
-         };
-       nodes = lib.mapAttrs buildVM
-         (t.nodes or (if t ? machine then { machine = t.machine; } else { }));
-       testScript' =
-         # Call the test script with the computed nodes.
-         if lib.isFunction testScript then
-           testScript { inherit nodes; }
-         else
-           testScript;
-       vms = map (node: node.script) (lib.attrValues nodes);
-       # Generate onvenience wrappers for running the test driver
-       # interactively with the specified network, and for starting the
-       # VMs from the command line.
-       driver =
-         let warn = if skipLint then lib.warn "Linting is disabled!" else lib.id;
-         in warn (runCommand testDriverName {
-           buildInputs = [ makeWrapper ];
-           testScript = testScript';
-           preferLocalBuild = true;
-           testName = name;
-         } ''
-           mkdir -p $out/bin
-           echo -n "$testScript" > $out/test-script
-           ${lib.optionalString (!skipLint) ''
-             ${python3Packages.black}/bin/black --check --quiet --diff $out/test-script
-           ''}
-           ln -s ${testDriver}/bin/nixos-test-driver $out/bin/
-           vms=($(for i in ${toString vms}; do echo $i/bin/run-*-vm; done))
-           wrapProgram $out/bin/nixos-test-driver \
-             --add-flags "''${vms[*]}" \
-             --run "export testScript=\"\$(${coreutils}/bin/cat $out/test-script)\""
-           ln -s ${testDriver}/bin/nixos-test-driver $out/bin/nixos-run-vms
-           wrapProgram $out/bin/nixos-run-vms \
-             --add-flags "''${vms[*]}" \
-             --set tests 'start_all(); join_all();'
-         ''); # "
-       passMeta = drv:
-         drv
-         // lib.optionalAttrs (t ? meta) { meta = (drv.meta or { }) // t.meta; };
-       # Run an automated test suite in the given virtual network.
-       # `driver' is the script that runs the network.
-       runTests = driver:
-         stdenv.mkDerivation {
-           name = "test-run-${driver.testName}";
-           buildCommand = ''
-             mkdir -p $out
-             LOGFILE=/dev/null tests='exec(os.environ["testScript"])' ${driver}/bin/nixos-test-driver
-           '';
-         };
-       test = passMeta (runTests driver);
-       nodeNames = builtins.attrNames nodes;
-       invalidNodeNames =
-         lib.filter (node: builtins.match "^[A-z_]([A-z0-9_]+)?$" node == null)
-         nodeNames;
-     in if lib.length invalidNodeNames > 0 then
-       throw ''
-         Cannot create machines out of (${
-           lib.concatStringsSep ", " invalidNodeNames
-         })!
-         All machines are referenced as python variables in the testing framework which will break the
-         script when special characters are used.
-         Please stick to alphanumeric chars and underscores as separation.
-       ''
-     else
-       test // { inherit nodes driver test; };
    testList = let
-     f = core:
-       let makeTest' = makeTest core;
-       in test:
+     f = core: test:
        if (test.constraints or (_: true)) core.specs then {
          name = core.prefix + test.name;
-         value = makeTest' test;
+         value = core.testingPython.makeTest test;
        } else
          null;
-   in lib.lists.crossLists f [ cores' testFiles ];
+   in lib.lists.crossLists f [ cores' testSpecs ];
  in builtins.listToAttrs (builtins.filter (_: _ != null) testList)
- /* sotest = let
-   hwTests = with hw; [ multi posix x86 ];
-   novaTests = with nova; [ multi posix x86 vmm ];
-   allTests = hwTests ++ novaTests;
-   projectCfg.boot_items =
-     (map (test: {
-       inherit (test) name;
-       exec = "bender";
-       load = [ "${test.name}.image.elf" ];
-     }) hwTests)
-     ++ (map (test: {
-       inherit (test) name;
-       exec = "bender";
-       load = [ "hypervisor serial novga iommu" test.image.name ];
-     }) novaTests);
- in localPackages.stdenv.mkDerivation {
-   name = "sotest";
-   buildCommand = ''
-     mkdir zip; cd zip
-     cp "${testPkgs.bender}/bender" bender
-     cp "${testPkgs.NOVA}/hypervisor-x86_64" hypervisor
-     ${concatStringsSep "\n"
-       (map (test: "cp ${test.image}/image.elf ${test.name}.image.elf")
-         allTests)}
-     mkdir -p $out/nix-support
-     ${localPackages.zip}/bin/zip "$out/binaries.zip" *
-     cat << EOF > "$out/project.json"
-     ${builtins.toJSON projectCfg}
-     EOF
-     echo file sotest-binaries $out/binaries.zip >> "$out/nix-support/hydra-build-products"
-     echo file sotest-config $out/project.json >> "$out/nix-support/hydra-build-products"
-   '';
- };
- */

tests/hello.dhall (new file, 51 lines)

@ -0,0 +1,51 @@
let Genode =
env:DHALL_GENODE
? https://git.sr.ht/~ehmry/dhall-genode/blob/master/package.dhall
let Init = Genode.Init
let Child = Init.Child
in Child.flat
Child.Attributes::{
, binary = "hello"
, exitPropagate = True
, resources = Genode.Init.Resources::{
, caps = 500
, ram = Genode.units.MiB 10
}
, config = Init.Config::{
, content =
let XML = Genode.Prelude.XML
in [ XML.leaf
{ name = "libc"
, attributes = toMap
{ stdin = "/dev/null"
, stdout = "/dev/log"
, stderr = "/dev/log"
}
}
, XML.element
{ name = "vfs"
, attributes = XML.emptyAttributes
, content =
let dir =
λ(name : Text) →
λ(content : List XML.Type) →
XML.element
{ name = "dir"
, content
, attributes = toMap { name }
}
let leaf =
λ(name : Text) →
XML.leaf
{ name, attributes = XML.emptyAttributes }
in [ dir "dev" [ leaf "log", leaf "null" ] ]
}
]
}
}

tests/hello.nix (new file, 28 lines)

@ -0,0 +1,28 @@
{
name = "hello";
machine = { pkgs, ... }:
let
hello = pkgs.stdenv.mkDerivation {
name = "hello";
dontUnpack = true;
buildPhase = ''
cat > hello.c << EOF
#include <stdio.h>
int main(int argc, char **argv) { printf("hello world!\n"); return 0; }
EOF
$CC hello.c -o hello
'';
installPhase = "install -Dt $out/bin hello";
};
in {
genode.init.children.hello = {
configFile = ./hello.dhall;
inputs = [ hello ];
};
};
testScript = ''
start_all()
machine.wait_until_serial_output("child \"init\" exited with exit value 0")
'';
}

tests/lib/build-vms.nix (new file, 110 lines)

@ -0,0 +1,110 @@
{ system, localSystem, crossSystem
# Nixpkgs, for qemu, lib and more
, pkgs, modulesPath
# NixOS configuration to add to the VMs
, extraConfigurations ? [ ] }:
with pkgs.lib;
with import ./qemu-flags.nix { inherit pkgs; };
rec {
inherit pkgs;
qemu = pkgs.buildPackages.buildPackages.qemu_test;
# Build a virtual network from an attribute set `{ machine1 =
# config1; ... machineN = configN; }', where `machineX' is the
# hostname and `configX' is a NixOS system configuration. Each
# machine is given an arbitrary IP address in the virtual network.
buildVirtualNetwork = nodes:
let nodesOut = mapAttrs (n: buildVM nodesOut) (assignIPAddresses nodes);
in nodesOut;
buildVM = nodes: configurations:
import "${modulesPath}/../lib/eval-config.nix" {
inherit system;
modules = configurations ++ extraConfigurations;
baseModules = (import "${modulesPath}/module-list.nix") ++ [
../../nixos-modules/genode-core.nix
../../nixos-modules/genode-init.nix
../../nixos-modules/qemu-vm.nix
{
key = "no-manual";
documentation.nixos.enable = false;
}
{
key = "qemu";
system.build.qemu = qemu;
}
{
key = "nodes";
_module.args.nodes = nodes;
}
{
system.build.qemu = qemu;
nixpkgs = { inherit system crossSystem localSystem pkgs; };
}
];
};
# Given an attribute set { machine1 = config1; ... machineN =
# configN; }, sequentially assign IP addresses in the 192.168.1.0/24
# range to each machine, and set the hostname to the attribute name.
assignIPAddresses = nodes:
let
machines = attrNames nodes;
machinesNumbered = zipLists machines (range 1 254);
nodes_ = forEach machinesNumbered (m:
nameValuePair m.fst [
({ config, nodes, ... }:
let
interfacesNumbered =
zipLists config.virtualisation.vlans (range 1 255);
interfaces = forEach interfacesNumbered ({ fst, snd }:
nameValuePair "eth${toString snd}" {
ipv4.addresses = [{
address = "192.168.${toString fst}.${toString m.snd}";
prefixLength = 24;
}];
});
in {
key = "ip-address";
config = {
networking.hostName = mkDefault m.fst;
networking.interfaces = listToAttrs interfaces;
networking.primaryIPAddress = optionalString (interfaces != [ ])
(head (head interfaces).value.ipv4.addresses).address;
# Put the IP addresses of all VMs in this machine's
# /etc/hosts file. If a machine has multiple
# interfaces, use the IP address corresponding to
# the first interface (i.e. the first network in its
# virtualisation.vlans option).
networking.extraHosts = flip concatMapStrings machines (m':
let config = (getAttr m' nodes).config;
in optionalString (config.networking.primaryIPAddress != "")
("${config.networking.primaryIPAddress} "
+ optionalString (config.networking.domain != null)
"${config.networking.hostName}.${config.networking.domain} "
+ ''
${config.networking.hostName}
''));
virtualisation.qemu.options = forEach interfacesNumbered
({ fst, snd }: qemuNICFlags snd fst m.snd);
};
})
(getAttr m.fst nodes)
]);
in listToAttrs nodes_;
}
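
A usage sketch, assuming the arguments this file expects; buildVirtualNetwork evaluates each node with the Genode modules and assigns it an address on vlan 1 (node names, pkgs, and modulesPath are illustrative placeholders supplied by the test framework):

let
  vms = import ./build-vms.nix {
    system = "x86_64-genode";
    localSystem = "x86_64-linux";   # illustrative build host
    crossSystem = "x86_64-genode";
    inherit pkgs modulesPath;       # as passed in by the caller
  };
in vms.buildVirtualNetwork {
  server = { ... }: { virtualisation.vlans = [ 1 ]; };
  client = { ... }: { virtualisation.vlans = [ 1 ]; };
}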

tests/lib/qemu-flags.nix (new file, 41 lines)

@ -0,0 +1,41 @@
# QEMU flags shared between various Nix expressions.
{ pkgs }:
let
zeroPad = n:
pkgs.lib.optionalString (n < 16) "0" + (if n > 255 then
throw "Can't have more than 255 nets or nodes!"
else
pkgs.lib.toHexString n);
in rec {
qemuNicMac = net: machine: "52:54:00:12:${zeroPad net}:${zeroPad machine}";
qemuNICFlags = nic: net: machine: [
"-device virtio-net-pci,netdev=vlan${toString nic},mac=${
qemuNicMac net machine
}"
"-netdev vde,id=vlan${toString nic},sock=$QEMU_VDE_SOCKET_${toString net}"
];
qemuSerialDevice = if pkgs.stdenv.isi686 || pkgs.stdenv.isx86_64 then
"ttyS0"
else if pkgs.stdenv.isAarch32 || pkgs.stdenv.isAarch64 then
"ttyAMA0"
else
throw
"Unknown QEMU serial device for system '${pkgs.stdenv.hostPlatform.system}'";
qemuBinary = qemuPkg:
{
x86_64-linux = "${qemuPkg}/bin/qemu-kvm -cpu max";
armv7l-linux =
"${qemuPkg}/bin/qemu-system-arm -enable-kvm -machine virt -cpu host";
aarch64-linux =
"${qemuPkg}/bin/qemu-system-aarch64 -enable-kvm -machine virt,gic-version=host -cpu host";
x86_64-darwin = "${qemuPkg}/bin/qemu-kvm -cpu max";
aarch64-genode =
"${qemuPkg}/bin/qemu-system-aarch64 -M virt,virtualization=true,gic_version=3 -cpu cortex-a53";
x86_64-genode = "${qemuPkg}/bin/qemu-system-x86_64 -machine q35";
}.${pkgs.stdenv.hostPlatform.system} or "${qemuPkg}/bin/qemu-kvm";
}
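
To make the zero padding concrete, values computed from the definitions above (net 1, machine 2, NIC index 1):

# qemuNicMac 1 2     => "52:54:00:12:01:02"
# qemuNICFlags 1 1 2 => [
#   "-device virtio-net-pci,netdev=vlan1,mac=52:54:00:12:01:02"
#   "-netdev vde,id=vlan1,sock=$QEMU_VDE_SOCKET_1"
# ]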

tests/lib/test-driver.py (new file, 967 lines)

@ -0,0 +1,967 @@
#! /somewhere/python3
# Copyright (c) 2003-2020 Nixpkgs/NixOS contributors
from contextlib import contextmanager, _GeneratorContextManager
from queue import Queue, Empty
from typing import Tuple, Any, Callable, Dict, Iterator, Optional, List
from xml.sax.saxutils import XMLGenerator
import queue
import io
import _thread
import argparse
import atexit
import base64
import codecs
import os
import pathlib
import ptpython.repl
import pty
import re
import shlex
import shutil
import socket
import subprocess
import sys
import tempfile
import time
import traceback
import unicodedata
CHAR_TO_KEY = {
"A": "shift-a",
"N": "shift-n",
"-": "0x0C",
"_": "shift-0x0C",
"B": "shift-b",
"O": "shift-o",
"=": "0x0D",
"+": "shift-0x0D",
"C": "shift-c",
"P": "shift-p",
"[": "0x1A",
"{": "shift-0x1A",
"D": "shift-d",
"Q": "shift-q",
"]": "0x1B",
"}": "shift-0x1B",
"E": "shift-e",
"R": "shift-r",
";": "0x27",
":": "shift-0x27",
"F": "shift-f",
"S": "shift-s",
"'": "0x28",
'"': "shift-0x28",
"G": "shift-g",
"T": "shift-t",
"`": "0x29",
"~": "shift-0x29",
"H": "shift-h",
"U": "shift-u",
"\\": "0x2B",
"|": "shift-0x2B",
"I": "shift-i",
"V": "shift-v",
",": "0x33",
"<": "shift-0x33",
"J": "shift-j",
"W": "shift-w",
".": "0x34",
">": "shift-0x34",
"K": "shift-k",
"X": "shift-x",
"/": "0x35",
"?": "shift-0x35",
"L": "shift-l",
"Y": "shift-y",
" ": "spc",
"M": "shift-m",
"Z": "shift-z",
"\n": "ret",
"!": "shift-0x02",
"@": "shift-0x03",
"#": "shift-0x04",
"$": "shift-0x05",
"%": "shift-0x06",
"^": "shift-0x07",
"&": "shift-0x08",
"*": "shift-0x09",
"(": "shift-0x0A",
")": "shift-0x0B",
}
# Forward references
log: "Logger"
machines: "List[Machine]"
def eprint(*args: object, **kwargs: Any) -> None:
print(*args, file=sys.stderr, **kwargs)
def make_command(args: list) -> str:
return " ".join(map(shlex.quote, (map(str, args))))
def create_vlan(vlan_nr: str) -> Tuple[str, str, "subprocess.Popen[bytes]", Any]:
global log
log.log("starting VDE switch for network {}".format(vlan_nr))
vde_socket = tempfile.mkdtemp(
prefix="nixos-test-vde-", suffix="-vde{}.ctl".format(vlan_nr)
)
pty_master, pty_slave = pty.openpty()
vde_process = subprocess.Popen(
["vde_switch", "-s", vde_socket, "--dirmode", "0700"],
bufsize=1,
stdin=pty_slave,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False,
)
fd = os.fdopen(pty_master, "w")
fd.write("version\n")
# TODO: perl version checks if this can be read from
# and if not, dies. we could hang here forever. Fix it.
assert vde_process.stdout is not None
vde_process.stdout.readline()
if not os.path.exists(os.path.join(vde_socket, "ctl")):
raise Exception("cannot start vde_switch")
return (vlan_nr, vde_socket, vde_process, fd)
def retry(fn: Callable) -> None:
"""Call the given function repeatedly, with 1 second intervals,
until it returns True or a timeout is reached.
"""
for _ in range(900):
if fn(False):
return
time.sleep(1)
if not fn(True):
raise Exception("action timed out")
class Logger:
def __init__(self) -> None:
self.logfile = os.environ.get("LOGFILE", "/dev/null")
self.logfile_handle = codecs.open(self.logfile, "wb")
self.xml = XMLGenerator(self.logfile_handle, encoding="utf-8")
self.queue: "Queue[Dict[str, str]]" = Queue()
self.xml.startDocument()
self.xml.startElement("logfile", attrs={})
def close(self) -> None:
self.xml.endElement("logfile")
self.xml.endDocument()
self.logfile_handle.close()
def sanitise(self, message: str) -> str:
return "".join(ch for ch in message if unicodedata.category(ch)[0] != "C")
def maybe_prefix(self, message: str, attributes: Dict[str, str]) -> str:
if "machine" in attributes:
return "{}: {}".format(attributes["machine"], message)
return message
def log_line(self, message: str, attributes: Dict[str, str]) -> None:
self.xml.startElement("line", attributes)
self.xml.characters(message)
self.xml.endElement("line")
def log(self, message: str, attributes: Dict[str, str] = {}) -> None:
eprint(self.maybe_prefix(message, attributes))
self.drain_log_queue()
self.log_line(message, attributes)
def enqueue(self, message: Dict[str, str]) -> None:
self.queue.put(message)
def drain_log_queue(self) -> None:
try:
while True:
item = self.queue.get_nowait()
attributes = {"machine": item["machine"], "type": "serial"}
self.log_line(self.sanitise(item["msg"]), attributes)
except Empty:
pass
@contextmanager
def nested(self, message: str, attributes: Dict[str, str] = {}) -> Iterator[None]:
eprint(self.maybe_prefix(message, attributes))
self.xml.startElement("nest", attrs={})
self.xml.startElement("head", attributes)
self.xml.characters(message)
self.xml.endElement("head")
tic = time.time()
self.drain_log_queue()
yield
self.drain_log_queue()
toc = time.time()
self.log("({:.2f} seconds)".format(toc - tic))
self.xml.endElement("nest")
class Machine:
def __init__(self, args: Dict[str, Any]) -> None:
if "name" in args:
self.name = args["name"]
else:
self.name = "machine"
cmd = args.get("startCommand", None)
if cmd:
match = re.search("bin/run-(.+)-vm$", cmd)
if match:
self.name = match.group(1)
self.script = args.get("startCommand", self.create_startcommand(args))
tmp_dir = os.environ.get("TMPDIR", tempfile.gettempdir())
def create_dir(name: str) -> str:
path = os.path.join(tmp_dir, name)
os.makedirs(path, mode=0o700, exist_ok=True)
return path
self.state_dir = create_dir("vm-state-{}".format(self.name))
self.shared_dir = create_dir("shared-xchg")
self.booted = False
self.connected = False
self.pid: Optional[int] = None
self.socket = None
self.monitor: Optional[socket.socket] = None
self.logger: Logger = args["log"]
self.serialQueue: "Queue[str]" = Queue()
self.allow_reboot = args.get("allowReboot", False)
@staticmethod
def create_startcommand(args: Dict[str, str]) -> str:
net_backend = "-netdev user,id=net0"
net_frontend = "-device virtio-net-pci,netdev=net0"
if "netBackendArgs" in args:
net_backend += "," + args["netBackendArgs"]
if "netFrontendArgs" in args:
net_frontend += "," + args["netFrontendArgs"]
start_command = (
"qemu-kvm -m 384 " + net_backend + " " + net_frontend + " $QEMU_OPTS "
)
if "hda" in args:
hda_path = os.path.abspath(args["hda"])
if args.get("hdaInterface", "") == "scsi":
start_command += (
"-drive id=hda,file="
+ hda_path
+ ",werror=report,if=none "
+ "-device scsi-hd,drive=hda "
)
else:
start_command += (
"-drive file="
+ hda_path
+ ",if="
+ args["hdaInterface"]
+ ",werror=report "
)
if "cdrom" in args:
start_command += "-cdrom " + args["cdrom"] + " "
if "usb" in args:
start_command += (
"-device piix3-usb-uhci -drive "
+ "id=usbdisk,file="
+ args["usb"]
+ ",if=none,readonly "
+ "-device usb-storage,drive=usbdisk "
)
if "bios" in args:
start_command += "-bios " + args["bios"] + " "
start_command += args.get("qemuFlags", "")
return start_command
def is_up(self) -> bool:
return self.booted and self.connected
def log(self, msg: str) -> None:
self.logger.log(msg, {"machine": self.name})
def nested(self, msg: str, attrs: Dict[str, str] = {}) -> _GeneratorContextManager:
my_attrs = {"machine": self.name}
my_attrs.update(attrs)
return self.logger.nested(msg, my_attrs)
def wait_for_monitor_prompt(self) -> str:
assert self.monitor is not None
answer = ""
while True:
undecoded_answer = self.monitor.recv(1024)
if not undecoded_answer:
break
answer += undecoded_answer.decode()
if answer.endswith("(qemu) "):
break
return answer
def send_monitor_command(self, command: str) -> str:
message = ("{}\n".format(command)).encode()
self.log("sending monitor command: {}".format(command))
assert self.monitor is not None
self.monitor.send(message)
return self.wait_for_monitor_prompt()
def wait_for_unit(self, unit: str, user: Optional[str] = None) -> None:
"""Wait for a systemd unit to get into "active" state.
Throws exceptions on "failed" and "inactive" states as well as
after timing out.
"""
def check_active(_: Any) -> bool:
info = self.get_unit_info(unit, user)
state = info["ActiveState"]
if state == "failed":
raise Exception('unit "{}" reached state "{}"'.format(unit, state))
if state == "inactive":
status, jobs = self.systemctl("list-jobs --full 2>&1", user)
if "No jobs" in jobs:
info = self.get_unit_info(unit, user)
if info["ActiveState"] == state:
raise Exception(
(
'unit "{}" is inactive and there ' "are no pending jobs"
).format(unit)
)
return state == "active"
retry(check_active)
def get_unit_info(self, unit: str, user: Optional[str] = None) -> Dict[str, str]:
status, lines = self.systemctl('--no-pager show "{}"'.format(unit), user)
if status != 0:
raise Exception(
'retrieving systemctl info for unit "{}" {} failed with exit code {}'.format(
unit, "" if user is None else 'under user "{}"'.format(user), status
)
)
line_pattern = re.compile(r"^([^=]+)=(.*)$")
def tuple_from_line(line: str) -> Tuple[str, str]:
match = line_pattern.match(line)
assert match is not None
return match[1], match[2]
return dict(
tuple_from_line(line)
for line in lines.split("\n")
if line_pattern.match(line)
)
def systemctl(self, q: str, user: Optional[str] = None) -> Tuple[int, str]:
if user is not None:
q = q.replace("'", "\\'")
return self.execute(
(
"su -l {} --shell /bin/sh -c "
"$'XDG_RUNTIME_DIR=/run/user/`id -u` "
"systemctl --user {}'"
).format(user, q)
)
return self.execute("systemctl {}".format(q))
def require_unit_state(self, unit: str, require_state: str = "active") -> None:
with self.nested(
"checking if unit {} has reached state '{}'".format(unit, require_state)
):
info = self.get_unit_info(unit)
state = info["ActiveState"]
if state != require_state:
raise Exception(
"Expected unit {} to be in state ".format(unit)
+ "'{}' but it is in state {}".format(require_state, state)
)
def execute(self, command: str) -> Tuple[int, str]:
self.connect()
out_command = "( {} ); echo '|!=EOF' $?\n".format(command)
self.shell.send(out_command.encode())
output = ""
status_code_pattern = re.compile(r"(.*)\|\!=EOF\s+(\d+)")
while True:
chunk = self.shell.recv(4096).decode(errors="ignore")
match = status_code_pattern.match(chunk)
if match:
output += match[1]
status_code = int(match[2])
return (status_code, output)
output += chunk
def succeed(self, *commands: str) -> str:
"""Execute each command and check that it succeeds."""
output = ""
for command in commands:
with self.nested("must succeed: {}".format(command)):
(status, out) = self.execute(command)
if status != 0:
self.log("output: {}".format(out))
raise Exception(
"command `{}` failed (exit code {})".format(command, status)
)
output += out
return output
def fail(self, *commands: str) -> None:
"""Execute each command and check that it fails."""
for command in commands:
with self.nested("must fail: {}".format(command)):
status, output = self.execute(command)
if status == 0:
raise Exception(
"command `{}` unexpectedly succeeded".format(command)
)
def wait_until_succeeds(self, command: str) -> str:
"""Wait until a command returns success and return its output.
Throws an exception on timeout.
"""
output = ""
def check_success(_: Any) -> bool:
nonlocal output
status, output = self.execute(command)
return status == 0
with self.nested("waiting for success: {}".format(command)):
retry(check_success)
return output
def wait_until_fails(self, command: str) -> str:
"""Wait until a command returns failure.
Throws an exception on timeout.
"""
output = ""
def check_failure(_: Any) -> bool:
nonlocal output
status, output = self.execute(command)
return status != 0
with self.nested("waiting for failure: {}".format(command)):
retry(check_failure)
return output
def wait_for_shutdown(self) -> None:
if not self.booted:
return
with self.nested("waiting for the VM to power off"):
sys.stdout.flush()
self.process.wait()
self.pid = None
self.booted = False
self.connected = False
def get_tty_text(self, tty: str) -> str:
status, output = self.execute(
"fold -w$(stty -F /dev/tty{0} size | "
"awk '{{print $2}}') /dev/vcs{0}".format(tty)
)
return output
def wait_until_tty_matches(self, tty: str, regexp: str) -> None:
"""Wait until the visible output on the chosen TTY matches regular
expression. Throws an exception on timeout.
"""
matcher = re.compile(regexp)
def tty_matches(last: bool) -> bool:
text = self.get_tty_text(tty)
if last:
self.log(
f"Last chance to match /{regexp}/ on TTY{tty}, "
f"which currently contains: {text}"
)
return len(matcher.findall(text)) > 0
with self.nested("waiting for {} to appear on tty {}".format(regexp, tty)):
retry(tty_matches)
def wait_until_serial_output(self, regexp: str) -> None:
"""Wait until the serial output matches regular expression.
Throws an exception on timeout.
"""
matcher = re.compile(regexp)
def serial_matches(last: bool) -> bool:
while not self.serialQueue.empty():
text = self.serialQueue.get()
if last:
self.log(
f"Last chance to match /{regexp}/ on serial, "
f"which currently contains: {text}"
)
if len(matcher.findall(text)) > 0:
return True
return False
with self.nested("waiting for {} to appear on serial output".format(regexp)):
retry(serial_matches)
def send_chars(self, chars: List[str]) -> None:
with self.nested("sending keys {}".format(chars)):
for char in chars:
self.send_key(char)
def wait_for_file(self, filename: str) -> None:
"""Waits until the file exists in machine's file system."""
def check_file(_: Any) -> bool:
status, _ = self.execute("test -e {}".format(filename))
return status == 0
with self.nested("waiting for file {}".format(filename)):
retry(check_file)
def wait_for_open_port(self, port: int) -> None:
def port_is_open(_: Any) -> bool:
status, _ = self.execute("nc -z localhost {}".format(port))
return status == 0
with self.nested("waiting for TCP port {}".format(port)):
retry(port_is_open)
def wait_for_closed_port(self, port: int) -> None:
def port_is_closed(_: Any) -> bool:
status, _ = self.execute("nc -z localhost {}".format(port))
return status != 0
retry(port_is_closed)
def start_job(self, jobname: str, user: Optional[str] = None) -> Tuple[int, str]:
return self.systemctl("start {}".format(jobname), user)
def stop_job(self, jobname: str, user: Optional[str] = None) -> Tuple[int, str]:
return self.systemctl("stop {}".format(jobname), user)
def wait_for_job(self, jobname: str) -> None:
self.wait_for_unit(jobname)
def connect(self) -> None:
if self.connected:
return
with self.nested("waiting for the VM to finish booting"):
self.start()
tic = time.time()
self.shell.recv(1024)
# TODO: Timeout
toc = time.time()
self.log("connected to guest root shell")
self.log("(connecting took {:.2f} seconds)".format(toc - tic))
self.connected = True
def screenshot(self, filename: str) -> None:
out_dir = os.environ.get("out", os.getcwd())
word_pattern = re.compile(r"^\w+$")
if word_pattern.match(filename):
filename = os.path.join(out_dir, "{}.png".format(filename))
tmp = "{}.ppm".format(filename)
with self.nested(
"making screenshot {}".format(filename),
{"image": os.path.basename(filename)},
):
self.send_monitor_command("screendump {}".format(tmp))
ret = subprocess.run("pnmtopng {} > {}".format(tmp, filename), shell=True)
os.unlink(tmp)
if ret.returncode != 0:
raise Exception("Cannot convert screenshot")
def copy_from_host_via_shell(self, source: str, target: str) -> None:
"""Copy a file from the host into the guest by piping it over the
shell into the destination file. Works without a host-guest shared folder.
Prefer copy_from_host whenever possible.
"""
with open(source, "rb") as fh:
content_b64 = base64.b64encode(fh.read()).decode()
self.succeed(
f"mkdir -p $(dirname {target})",
f"echo -n {content_b64} | base64 -d > {target}",
)
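    # Hypothetical usage sketch (paths are examples only): push a small file
    # into the guest without relying on any shared folder:
    #   machine.copy_from_host_via_shell("/tmp/host-note.txt", "/root/note.txt")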
def copy_from_host(self, source: str, target: str) -> None:
"""Copy a file from the host into the guest via the `shared_dir` shared
among all the VMs (using a temporary directory).
"""
host_src = pathlib.Path(source)
vm_target = pathlib.Path(target)
with tempfile.TemporaryDirectory(dir=self.shared_dir) as shared_td:
shared_temp = pathlib.Path(shared_td)
host_intermediate = shared_temp / host_src.name
vm_shared_temp = pathlib.Path("/tmp/shared") / shared_temp.name
vm_intermediate = vm_shared_temp / host_src.name
self.succeed(make_command(["mkdir", "-p", vm_shared_temp]))
if host_src.is_dir():
shutil.copytree(host_src, host_intermediate)
else:
shutil.copy(host_src, host_intermediate)
self.succeed("sync")
self.succeed(make_command(["mkdir", "-p", vm_target.parent]))
self.succeed(make_command(["cp", "-r", vm_intermediate, vm_target]))
# Make sure the cleanup is synced into VM
self.succeed("sync")
def copy_from_vm(self, source: str, target_dir: str = "") -> None:
"""Copy a file from the VM (specified by an in-VM source path) to a path
relative to `$out`. The file is copied via the `shared_dir` shared among
all the VMs (using a temporary directory).
"""
# Compute the source, target, and intermediate shared file names
out_dir = pathlib.Path(os.environ.get("out", os.getcwd()))
vm_src = pathlib.Path(source)
with tempfile.TemporaryDirectory(dir=self.shared_dir) as shared_td:
shared_temp = pathlib.Path(shared_td)
vm_shared_temp = pathlib.Path("/tmp/shared") / shared_temp.name
vm_intermediate = vm_shared_temp / vm_src.name
intermediate = shared_temp / vm_src.name
# Copy the file to the shared directory inside VM
self.succeed(make_command(["mkdir", "-p", vm_shared_temp]))
self.succeed(make_command(["cp", "-r", vm_src, vm_intermediate]))
self.succeed("sync")
abs_target = out_dir / target_dir / vm_src.name
abs_target.parent.mkdir(exist_ok=True, parents=True)
# Copy the file from the shared directory outside VM
if intermediate.is_dir():
shutil.copytree(intermediate, abs_target)
else:
shutil.copy(intermediate, abs_target)
# Make sure the cleanup is synced into VM
self.succeed("sync")
def dump_tty_contents(self, tty: str) -> None:
"""Debugging: Dump the contents of the TTY<n>
"""
self.execute("fold -w 80 /dev/vcs{} | systemd-cat".format(tty))
def get_screen_text(self) -> str:
if shutil.which("tesseract") is None:
raise Exception("get_screen_text used but enableOCR is false")
magick_args = (
"-filter Catrom -density 72 -resample 300 "
+ "-contrast -normalize -despeckle -type grayscale "
+ "-sharpen 1 -posterize 3 -negate -gamma 100 "
+ "-blur 1x65535"
)
tess_args = "-c debug_file=/dev/null --psm 11 --oem 2"
with self.nested("performing optical character recognition"):
with tempfile.NamedTemporaryFile() as tmpin:
self.send_monitor_command("screendump {}".format(tmpin.name))
cmd = "convert {} {} tiff:- | tesseract - - {}".format(
magick_args, tmpin.name, tess_args
)
ret = subprocess.run(cmd, shell=True, capture_output=True)
if ret.returncode != 0:
raise Exception(
"OCR failed with exit code {}".format(ret.returncode)
)
return ret.stdout.decode("utf-8")
def wait_for_text(self, regex: str) -> None:
def screen_matches(last: bool) -> bool:
text = self.get_screen_text()
matches = re.search(regex, text) is not None
if last and not matches:
self.log("Last OCR attempt failed. Text was: {}".format(text))
return matches
with self.nested("waiting for {} to appear on screen".format(regex)):
retry(screen_matches)
def send_key(self, key: str) -> None:
key = CHAR_TO_KEY.get(key, key)
self.send_monitor_command("sendkey {}".format(key))
def start(self) -> None:
if self.booted:
return
self.log("starting vm")
def create_socket(path: str) -> socket.socket:
if os.path.exists(path):
os.unlink(path)
s = socket.socket(family=socket.AF_UNIX, type=socket.SOCK_STREAM)
s.bind(path)
s.listen(1)
return s
monitor_path = os.path.join(self.state_dir, "monitor")
self.monitor_socket = create_socket(monitor_path)
shell_path = os.path.join(self.state_dir, "shell")
self.shell_socket = create_socket(shell_path)
qemu_options = (
" ".join(
[
"" if self.allow_reboot else "-no-reboot",
"-monitor unix:{}".format(monitor_path),
"-chardev socket,id=shell,path={}".format(shell_path),
"-device virtio-serial",
"-device virtconsole,chardev=shell",
"-device virtio-rng-pci",
"-serial stdio" if "DISPLAY" in os.environ else "-nographic",
]
)
+ " "
+ os.environ.get("QEMU_OPTS", "")
)
environment = dict(os.environ)
environment.update(
{
"TMPDIR": self.state_dir,
"SHARED_DIR": self.shared_dir,
"USE_TMPDIR": "1",
"QEMU_OPTS": qemu_options,
}
)
self.process = subprocess.Popen(
self.script,
bufsize=1,
stdin=subprocess.DEVNULL,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True,
cwd=self.state_dir,
env=environment,
)
self.monitor, _ = self.monitor_socket.accept()
self.shell, _ = self.shell_socket.accept()
def process_serial_output() -> None:
assert self.process.stdout is not None
for _line in self.process.stdout:
# Ignore undecodable bytes that may occur in boot menus
line = _line.decode(errors="ignore").replace("\r", "").rstrip()
eprint("{} # {}".format(self.name, line))
self.logger.enqueue({"msg": line, "machine": self.name})
self.serialQueue.put(line)
_thread.start_new_thread(process_serial_output, ())
self.wait_for_monitor_prompt()
self.pid = self.process.pid
self.booted = True
self.log("QEMU running (pid {})".format(self.pid))
def shutdown(self) -> None:
if not self.booted:
return
self.shell.send("poweroff\n".encode())
self.wait_for_shutdown()
def crash(self) -> None:
if not self.booted:
return
self.log("forced crash")
self.send_monitor_command("quit")
self.wait_for_shutdown()
def wait_for_x(self) -> None:
"""Wait until it is possible to connect to the X server. Note that
testing the existence of /tmp/.X11-unix/X0 is insufficient.
"""
def check_x(_: Any) -> bool:
cmd = (
"journalctl -b SYSLOG_IDENTIFIER=systemd | "
+ 'grep "Reached target Current graphical"'
)
status, _ = self.execute(cmd)
if status != 0:
return False
status, _ = self.execute("[ -e /tmp/.X11-unix/X0 ]")
return status == 0
with self.nested("waiting for the X11 server"):
retry(check_x)
def get_window_names(self) -> List[str]:
return self.succeed(
r"xwininfo -root -tree | sed 's/.*0x[0-9a-f]* \"\([^\"]*\)\".*/\1/; t; d'"
).splitlines()
def wait_for_window(self, regexp: str) -> None:
pattern = re.compile(regexp)
def window_is_visible(last_try: bool) -> bool:
names = self.get_window_names()
if last_try:
self.log(
"Last chance to match {} on the window list,".format(regexp)
+ " which currently contains: "
+ ", ".join(names)
)
return any(pattern.search(name) for name in names)
with self.nested("Waiting for a window to appear"):
retry(window_is_visible)
def sleep(self, secs: int) -> None:
time.sleep(secs)
def forward_port(self, host_port: int = 8080, guest_port: int = 80) -> None:
"""Forward a TCP port on the host to a TCP port on the guest.
Useful during interactive testing.
"""
self.send_monitor_command(
"hostfwd_add tcp::{}-:{}".format(host_port, guest_port)
)
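    # Hypothetical usage sketch: expose a guest service on the host during an
    # interactive session, assuming something listens on guest port 80:
    #   machine.forward_port(host_port=8080, guest_port=80)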
def block(self) -> None:
"""Make the machine unreachable by shutting down eth1 (the multicast
interface used to talk to the other VMs). We keep eth0 up so that
the test driver can continue to talk to the machine.
"""
self.send_monitor_command("set_link virtio-net-pci.1 off")
def unblock(self) -> None:
"""Make the machine reachable.
"""
self.send_monitor_command("set_link virtio-net-pci.1 on")
def create_machine(args: Dict[str, Any]) -> Machine:
global log
args["log"] = log
args["redirectSerial"] = os.environ.get("USE_SERIAL", "0") == "1"
return Machine(args)
def start_all() -> None:
global machines
with log.nested("starting all VMs"):
for machine in machines:
machine.start()
def join_all() -> None:
global machines
with log.nested("waiting for all VMs to finish"):
for machine in machines:
machine.wait_for_shutdown()
def test_script() -> None:
exec(os.environ["testScript"])
def run_tests() -> None:
global machines
tests = os.environ.get("tests", None)
if tests is not None:
with log.nested("running the VM test script"):
try:
exec(tests, globals())
except Exception as e:
eprint("error: {}".format(str(e)))
sys.exit(1)
else:
ptpython.repl.embed(locals(), globals())
# TODO: Collect coverage data
for machine in machines:
if machine.is_up():
machine.execute("sync")
@contextmanager
def subtest(name: str) -> Iterator[None]:
with log.nested(name):
try:
yield
return True
except Exception as e:
log.log(f'Test "{name}" failed with error: "{e}"')
raise e
return False
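# Hypothetical usage sketch inside a test script, grouping assertions under a
# named, logged section:
#   with subtest("log service reports success"):
#       machine.wait_until_serial_output('child "init" exited with exit value 0')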
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument(
"-K",
"--keep-vm-state",
help="re-use a VM state coming from a previous run",
action="store_true",
)
(cli_args, vm_scripts) = arg_parser.parse_known_args()
log = Logger()
vlan_nrs = list(dict.fromkeys(os.environ.get("VLANS", "").split()))
vde_sockets = [create_vlan(v) for v in vlan_nrs]
for nr, vde_socket, _, _ in vde_sockets:
os.environ["QEMU_VDE_SOCKET_{}".format(nr)] = vde_socket
machines = [
create_machine({"startCommand": s, "keepVmState": cli_args.keep_vm_state})
for s in vm_scripts
]
machine_eval = [
"{0} = machines[{1}]".format(m.name, idx) for idx, m in enumerate(machines)
]
exec("\n".join(machine_eval))
@atexit.register
def clean_up() -> None:
with log.nested("cleaning up"):
for machine in machines:
if machine.pid is None:
continue
log.log("killing {} (pid {})".format(machine.name, machine.pid))
machine.process.kill()
log.close()
tic = time.time()
run_tests()
toc = time.time()
print("test script finished in {:.2f}s".format(toc - tic))

View File

@ -0,0 +1,235 @@
{ flake, system, localSystem, crossSystem, pkgs
# Modules to add to each VM
, extraConfigurations ? [ ] }:
with import ./build-vms.nix {
inherit system localSystem crossSystem pkgs extraConfigurations;
modulesPath = "${flake.inputs.nixpkgs}/nixos/modules";
};
with pkgs.buildPackages.buildPackages;
rec {
inherit pkgs;
testDriver = let testDriverScript = ./test-driver.py;
in stdenv.mkDerivation {
name = "nixos-test-driver";
nativeBuildInputs = [ makeWrapper ];
buildInputs = [ (python3.withPackages (p: [ p.ptpython ])) ];
checkInputs = with python3Packages; [ pylint mypy ];
dontUnpack = true;
preferLocalBuild = true;
doCheck = true;
checkPhase = ''
mypy --disallow-untyped-defs \
--no-implicit-optional \
--ignore-missing-imports ${testDriverScript}
pylint --errors-only ${testDriverScript}
'';
installPhase = ''
mkdir -p $out/bin
cp ${testDriverScript} $out/bin/nixos-test-driver
chmod u+x $out/bin/nixos-test-driver
# TODO: copy user script part into this file (append)
wrapProgram $out/bin/nixos-test-driver \
--prefix PATH : "${
lib.makeBinPath [ qemu_test vde2 netpbm coreutils ]
}" \
'';
};
# Run an automated test suite in the given virtual network.
# `driver' is the script that runs the network.
runTests = driver:
stdenv.mkDerivation {
name = "vm-test-run-${driver.testName}";
requiredSystemFeatures = [ "nixos-test" ];
buildCommand = ''
mkdir -p $out
LOGFILE=/dev/null tests='exec(os.environ["testScript"])' ${driver}/bin/nixos-test-driver
'';
};
defaultTestScript = ''
start_all()
machine.wait_until_serial_output('child "init" exited with exit value 0')
'';
makeTest = { testScript ? defaultTestScript, enableOCR ? false, name ?
"unnamed"
# Skip linting (mainly intended for faster dev cycles)
, skipLint ? false, ... }@t:
let
testDriverName = "genode-test-driver-${name}";
nodes = buildVirtualNetwork
(t.nodes or (if t ? machine then { machine = t.machine; } else { }));
testScript' =
# Call the test script with the computed nodes.
if lib.isFunction testScript then
testScript { inherit nodes; }
else
testScript;
vlans = map (m: m.config.virtualisation.vlans) (lib.attrValues nodes);
vms = map (m: m.config.system.build.vm) (lib.attrValues nodes);
ocrProg = tesseract4.override { enableLanguages = [ "eng" ]; };
imagemagick_tiff = imagemagick_light.override { inherit libtiff; };
# Generate convenience wrappers for running the test driver
# interactively with the specified network, and for starting the
# VMs from the command line.
driver =
let warn = if skipLint then lib.warn "Linting is disabled!" else lib.id;
in warn (runCommand testDriverName {
buildInputs = [ makeWrapper ];
testScript = testScript';
preferLocalBuild = true;
testName = name;
} ''
mkdir -p $out/bin
echo -n "$testScript" > $out/test-script
ln -s ${testDriver}/bin/nixos-test-driver $out/bin/
vms=($(for i in ${toString vms}; do echo $i/bin/run-*-vm; done))
wrapProgram $out/bin/nixos-test-driver \
--add-flags "''${vms[*]}" \
${
lib.optionalString enableOCR
"--prefix PATH : '${ocrProg}/bin:${imagemagick_tiff}/bin'"
} \
--run "export testScript=\"\$(${coreutils}/bin/cat $out/test-script)\"" \
--set VLANS '${toString vlans}'
ln -s ${testDriver}/bin/nixos-test-driver $out/bin/nixos-run-vms
wrapProgram $out/bin/nixos-run-vms \
--add-flags "''${vms[*]}" \
${lib.optionalString enableOCR "--prefix PATH : '${ocrProg}/bin'"} \
--set tests 'start_all(); join_all();' \
--set VLANS '${toString vlans}' \
${
lib.optionalString (builtins.length vms == 1) "--set USE_SERIAL 1"
}
''); # "
passMeta = drv:
drv
// lib.optionalAttrs (t ? meta) { meta = (drv.meta or { }) // t.meta; };
test = passMeta (runTests driver);
nodeNames = builtins.attrNames nodes;
invalidNodeNames =
lib.filter (node: builtins.match "^[A-z_]([A-z0-9_]+)?$" node == null)
nodeNames;
in if lib.length invalidNodeNames > 0 then
throw ''
Cannot create machines out of (${
lib.concatStringsSep ", " invalidNodeNames
})!
All machines are referenced as python variables in the testing framework which will break the
script when special characters are used.
Please stick to alphanumeric chars and underscores as separation.
''
else
test // { inherit nodes driver test; };
runInMachine = { drv, machine, preBuild ? "", postBuild ? "", ... # ???
}:
let
vm = buildVM { } [
machine
{
key = "run-in-machine";
networking.hostName = "client";
nix.readOnlyStore = false;
virtualisation.writableStore = false;
}
];
buildrunner = writeText "vm-build" ''
source $1
${coreutils}/bin/mkdir -p $TMPDIR
cd $TMPDIR
exec $origBuilder $origArgs
'';
testScript = ''
start_all()
client.wait_for_unit("multi-user.target")
${preBuild}
client.succeed("env -i ${bash}/bin/bash ${buildrunner} /tmp/xchg/saved-env >&2")
${postBuild}
client.succeed("sync") # flush all data before pulling the plug
'';
vmRunCommand = writeText "vm-run" ''
xchg=vm-state-client/xchg
${coreutils}/bin/mkdir $out
${coreutils}/bin/mkdir -p $xchg
for i in $passAsFile; do
i2=''${i}Path
_basename=$(${coreutils}/bin/basename ''${!i2})
${coreutils}/bin/cp ''${!i2} $xchg/$_basename
eval $i2=/tmp/xchg/$_basename
${coreutils}/bin/ls -la $xchg
done
unset i i2 _basename
export | ${gnugrep}/bin/grep -v '^xchg=' > $xchg/saved-env
unset xchg
export tests='${testScript}'
${testDriver}/bin/nixos-test-driver ${vm.config.system.build.vm}/bin/run-*-vm
''; # */
in lib.overrideDerivation drv (attrs: {
requiredSystemFeatures = [ "kvm" ];
builder = "${bash}/bin/sh";
args = [ "-e" vmRunCommand ];
origArgs = attrs.args;
origBuilder = attrs.builder;
});
runInMachineWithX = { require ? [ ], ... }@args:
let
client = { ... }: {
inherit require;
imports = [ ../tests/common/auto.nix ];
virtualisation.memorySize = 1024;
services.xserver.enable = true;
test-support.displayManager.auto.enable = true;
services.xserver.displayManager.defaultSession = "none+icewm";
services.xserver.windowManager.icewm.enable = true;
};
in runInMachine ({
machine = client;
preBuild = ''
client.wait_for_x()
'';
} // args);
simpleTest = as: (makeTest as).test;
}

View File

@ -1,20 +1,15 @@
-let Test = ./test.dhall ? env:DHALL_GENODE_TEST
-
-let Genode = Test.Genode ? https://git.sr.ht/~ehmry/dhall-genode/blob/master/package.dhall
+let Genode = env:DHALL_GENODE

 let Child = Genode.Init.Child

-in  Test::{
-    , children = toMap
-        { test-log =
-            Child.flat
-              Child.Attributes::{
-              , binary = "test-log"
-              , exitPropagate = True
-              , resources = Genode.Init.Resources::{
-                , caps = 500
-                , ram = Genode.units.MiB 10
-                }
-              }
-        }
-    }
+in  Child.flat
+      Child.Attributes::{
+      , binary = "test-log"
+      , exitPropagate = True
+      , resources = Genode.Init.Resources::{
+        , caps = 500
+        , ram = Genode.units.MiB 10
+        }
+      }

View File

@ -1,11 +1,10 @@
-{ pkgs, ... }:
-
-with pkgs;
-
 {
   name = "log";
-  machine = {
-    config = ./log.dhall;
-    inputs = [ (pkgs.genodeSources.depot "test-log") ];
+  machine = { pkgs, ... }: {
+    genode.init.children.log = {
+      configFile = ./log.dhall;
+      inputs = [ pkgs.genodePackages.test-log ];
+    };
   };
   testScript = ''
     start_all()

tests/solo5/multi.nix Normal file
View File

@ -0,0 +1,24 @@
{
name = "solo5-multi";
machine = { pkgs, ... }: {
genode.init.children.tests = {
configFile = "${./.}/solo5.dhall { isAarch64 = ${
if pkgs.stdenv.hostPlatform.isAarch64 then "True" else "False"
} }";
inputs = with pkgs.genodePackages; [
solo5
solo5.tests
nic_bridge
nic_loopback
sequence
vfs_block
vfs_import
(genodeSources.make "app/ping")
];
};
};
testScript = ''
start_all()
machine.wait_until_serial_output("child \"tests\" exited with exit value 0")
'';
}

View File

@ -1,6 +1,4 @@
-let Test = ../test.dhall ? env:DHALL_GENODE_TEST
-
-let Genode = Test.Genode
+let Genode = env:DHALL_GENODE

 let Prelude = Genode.Prelude
@ -28,9 +26,8 @@ let toSimple =
       }

 in λ(params : { isAarch64 : Bool }) →
-      let tests
-          : Prelude.Map.Type Text Child.Type
-          = toMap
+      let children =
+            toMap
              { quiet = toSimple "quiet"
              , hello = toSimple "hello"
              , globals = toSimple "globals"
@ -44,4 +41,12 @@ in λ(params : { isAarch64 : Bool }) →
                else toMap { fpu = toSimple "fpu" }
              )

-      in  Test::{ children = tests }
+      in  Init.Child.nested
+            children
+            Init.Child.Attributes::{
+            , binary = "sequence"
+            , routes =
+              [ Init.ServiceRoute.parent "Timer"
+              , Init.ServiceRoute.parent "Rtc"
+              ]
+            }

View File

@ -1,20 +1,13 @@
-let Test = ./test.dhall ? env:DHALL_GENODE_TEST
-
-let Genode = Test.Genode
+let Genode = env:DHALL_GENODE

 let Init = Genode.Init

 let Child = Init.Child

-let vmm =
-      Child.flat
-        Child.Attributes::{
-        , binary = "test-vmm_x86"
-        , resources = Init.Resources::{
-          , caps = 2048
-          , ram = Genode.units.MiB 256
-          }
-        , routes = [ Genode.Init.ServiceRoute.parent "VM" ]
-        }
-
-in  Test::{ children = toMap { vmm } }
+in  Child.flat
+      Child.Attributes::{
+      , binary = "test-vmm_x86"
+      , exitPropagate = True
+      , resources = Init.Resources::{ caps = 2048, ram = Genode.units.MiB 256 }
+      , routes = [ Genode.Init.ServiceRoute.parent "VM" ]
+      }

View File

@ -1,12 +1,12 @@
-{ pkgs, ... }:
-
 {
   name = "vmm_x86";
   constraints = specs:
     with builtins;
     all (f: any f specs) [ (spec: spec == "nova") (spec: spec == "x86") ];
-  machine = {
-    config = ./vmm_x86.dhall;
-    inputs = map pkgs.genodeSources.make [ "test/vmm_x86" ];
+  machine = { pkgs, ... }: {
+    genode.init.children.vmm = {
+      configFile = ./vmm_x86.dhall;
+      inputs = map pkgs.genodeSources.make [ "test/vmm_x86" ];
+    };
   };
 }

View File

@ -1,6 +1,4 @@
-let Test = ./test.dhall ? env:DHALL_GENODE_TEST
-
-let Genode = Test.Genode
+let Genode = env:DHALL_GENODE

 let XML = Genode.Prelude.XML
@ -14,27 +12,27 @@
 let ServiceRoute = Init.ServiceRoute

 let label = λ(_ : Text) → { local = _, route = _ } : Child.Attributes.Label

-let signal =
-      Child.flat
-        Child.Attributes::{
-        , binary = "test-signal"
-        , exitPropagate = True
-        , priority = 5
-        , resources = Init.Resources::{ caps = 500, ram = Genode.units.MiB 10 }
-        }
-
-let rtc = Child.flat Child.Attributes::{ binary = "test-rtc" }
-
 let pciInit =
       Init::{
       , verbose = True
-      , routes = [ ServiceRoute.parent "Timer" ]
+      , routes = [ ServiceRoute.parent "Timer", ServiceRoute.parent "Rtc" ]
       , children = toMap
-          { test-pci =
+          { signal =
+              Child.flat
+                Child.Attributes::{
+                , binary = "test-signal"
+                , exitPropagate = True
+                , priority = 5
+                , resources = Init.Resources::{
+                  , caps = 500
+                  , ram = Genode.units.MiB 10
+                  }
+                }
+          , rtc = Child.flat Child.Attributes::{ binary = "test-rtc" }
+          , test-pci =
               Child.flat
                 Child.Attributes::{
                 , binary = "test-pci"
+                , exitPropagate = True
                 , resources = Resources::{ ram = Genode.units.MiB 3 }
                 }
           , acpi_drv =
@ -87,10 +85,4 @@ let pciInit =
           }
       }

-in  Test::{
-    , children =
-        [ { mapKey = "signal", mapValue = signal }
-        , { mapKey = "rtc", mapValue = rtc }
-        ]
-      # Test.initToChildren pciInit
-    }
+in  pciInit

View File

@ -1,13 +1,16 @@
-{ pkgs, ... }: {
+{
   name = "x86";
   constraints = builtins.any (spec: spec == "x86");
-  machine = {
-    config = ./x86.dhall;
-    inputs = (map pkgs.genodeSources.depot [
-      "acpi_drv"
-      "platform_drv"
-      "report_rom"
-      "test-signal"
-    ]) ++ (map pkgs.genodeSources.make [ "test/pci" "test/rtc" ]);
+  machine = { pkgs, ... }: {
+    genode.init.subinits.test = {
+      configFile = ./x86.dhall;
+      inputs = with pkgs.genodePackages;
+        [ acpi_drv platform_drv report_rom test-signal ]
+        ++ (map genodeSources.make [ "test/pci" "test/rtc" ]);
+    };
   };
+  testScript = ''
+    start_all()
+    machine.wait_until_serial_output("child \"test\" exited with exit value 0")
+  '';
 }