diff --git a/flake.nix b/flake.nix index 15b1a35..3e67111 100644 --- a/flake.nix +++ b/flake.nix @@ -65,9 +65,7 @@ forAllCrossSystems ({ system, localSystem, crossSystem }: nixpkgs.lib // (import ./lib { inherit system localSystem crossSystem; - localPackages = nixpkgs.legacyPackages.${localSystem}; - genodepkgs = self; - nixpkgs = nixpkgsFor.${system}; + pkgs = self.legacyPackages.${system}; })); legacyPackages = @@ -140,11 +138,11 @@ checks = # Checks for continous testing let tests = import ./tests; - in - with (forAllCrossSystems ({ system, localSystem, crossSystem }: + in with (forAllCrossSystems ({ system, localSystem, crossSystem }: tests { flake = self; inherit system localSystem crossSystem; + pkgs = self.legacyPackages.${system}; } // { ports = nixpkgsFor.${localSystem}.symlinkJoin { name = "ports"; diff --git a/lib/default.nix b/lib/default.nix index cc57fc5..02a02cf 100644 --- a/lib/default.nix +++ b/lib/default.nix @@ -1,9 +1,11 @@ -{ system, localSystem, crossSystem, genodepkgs, nixpkgs, localPackages }: +{ system, localSystem, crossSystem, pkgs }: let + nixpkgs = pkgs; + localPackages = pkgs.buildPackages.buildPackages; thisSystem = builtins.getAttr system; inherit (nixpkgs) buildPackages; - testPkgs = thisSystem genodepkgs.packages; + testPkgs = pkgs.genodePackages; dhallCachePrelude = '' export XDG_CACHE_HOME=$NIX_BUILD_TOP diff --git a/nixos-modules/base-hw-pc.nix b/nixos-modules/base-hw-pc.nix new file mode 100644 index 0000000..5b50939 --- /dev/null +++ b/nixos-modules/base-hw-pc.nix @@ -0,0 +1,27 @@ +{ config, pkgs, lib, ... }: + +with lib; +let + localPackages = pkgs.buildPackages; + utils = import ../lib { + inherit (config.nixpkgs) system localSystem crossSystem; + inherit pkgs; + }; +in { + genode.core = { + prefix = "hw-pc-"; + supportedSystems = [ "x86_64-genode" ]; + basePackages = with pkgs.genodePackages; [ base-hw-pc rtc_drv ]; + }; + + genode.boot = { + + initrd = "${config.genode.boot.image}/image.elf"; + + image = utils.hwImage "0xffffffc000000000" "0x00200000" + pkgs.genodePackages.base-hw-pc config.system.name { } + config.genode.boot.config; + + }; + +} diff --git a/nixos-modules/base-hw-virt_qemu.nix b/nixos-modules/base-hw-virt_qemu.nix new file mode 100644 index 0000000..8b2b878 --- /dev/null +++ b/nixos-modules/base-hw-virt_qemu.nix @@ -0,0 +1,27 @@ +{ config, pkgs, lib, ... }: + +with lib; +let + localPackages = pkgs.buildPackages; + utils = import ../lib { + inherit (config.nixpkgs) system localSystem crossSystem; + inherit pkgs; + }; +in { + genode.core = { + prefix = "hw-virt_qemu"; + supportedSystems = [ "aarch64-genode" ]; + basePackages = with pkgs.genodePackages; [ base-hw-virt_qemu rtc-dummy ]; + }; + + genode.boot = { + + initrd = "${config.genode.boot.image}/image.elf"; + + image = utils.hwImage "0xffffffc000000000" "0x40000000" + pkgs.genodePackages.base-hw-virt_qemu config.system.name { } + config.genode.boot.config; + + }; + +} diff --git a/nixos-modules/genode-core.nix b/nixos-modules/genode-core.nix new file mode 100644 index 0000000..736c466 --- /dev/null +++ b/nixos-modules/genode-core.nix @@ -0,0 +1,123 @@ +{ config, pkgs, lib, ... 
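+# Shared option scaffolding for the kernel-specific core modules
+# (base-hw-pc.nix, base-hw-virt_qemu.nix, nova.nix): `genode.core` selects the
+# base component packages and a name prefix per kernel, while `genode.boot`
+# carries the Dhall boot configuration, the generated boot image, and the
+# kernel/initrd command lines that qemu-vm.nix passes on to QEMU.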
}: + +with lib; +let localPackages = pkgs.buildPackages; +in { + options.genode = { + core = { + + prefix = mkOption { + type = types.str; + example = "hw-pc-"; + }; + + supportedSystems = mkOption { + type = types.listOf types.str; + example = [ "i686-genode" "x86_64-genode" ]; + }; + + basePackages = mkOption { type = types.listOf types.package; }; + + }; + + boot = { + + kernel = mkOption { + type = types.path; + default = "${pkgs.genodePackages.bender}/bender"; + }; + + initrd = mkOption { + type = types.str; + default = "${pkgs.genodePackages.bender}/bender"; + description = "Path to an image or a command-line arguments"; + }; + + config = mkOption { + type = types.str; + description = '' + Dhall boot configuration. See + https://git.sr.ht/~ehmry/dhall-genode/tree/master/Boot/package.dhall + ''; + }; + + image = mkOption { + type = types.path; + description = + "Boot image containing the base component binaries and configuration."; + }; + + romModules = mkOption { + type = types.attrsOf types.path; + description = "Attr set of initial ROM modules"; + }; + + }; + + }; + + config = { + + assertions = [{ + assertion = builtins.any (s: s == config.nixpkgs.system) + config.genode.core.supportedSystems; + message = "invalid Genode core for this system"; + }]; + + genode.boot.config = let + + addManifest = drv: + drv // { + manifest = + localPackages.runCommand "${drv.name}.dhall" { inherit drv; } '' + set -eu + echo -n '[' >> $out + find $drv/ -type f -printf ',{mapKey= "%f",mapValue="%p"}' >> $out + ${if builtins.elem "lib" drv.outputs then + '' + find ${drv.lib}/ -type f -printf ',{mapKey= "%f",mapValue="%p"}' >> $out'' + else + ""} + echo -n ']' >> $out + ''; + }; + + mergeManifests = inputs: + localPackages.writeTextFile { + name = "manifest.dhall"; + text = with builtins; + let + f = head: input: + if hasAttr "manifest" input then + '' + ${head}, { mapKey = "${ + lib.getName input + }", mapValue = ${input.manifest} }'' + else + abort "${input.pname} does not have a manifest"; + in (foldl' f "[" inputs) + "]"; + }; + + storeManifest = + mergeManifests (map addManifest config.genode.init.inputs); + + storeTarball = localPackages.runCommand "store" { } '' + mkdir -p $out + tar cf "$out/store.tar" --absolute-names ${ + toString config.genode.init.inputs + } + ''; + + manifest = mergeManifests (map addManifest + (config.genode.core.basePackages ++ [ storeTarball ] + ++ (with pkgs.genodePackages; [ init vfs cached_fs_rom ]))); + in '' + ${./store-wrapper.dhall} + (${config.genode.init.config}) + $(stat --format '%s' ${storeTarball}/store.tar) + ${storeManifest} ${manifest} + ''; + + }; + +} diff --git a/nixos-modules/genode-init.nix b/nixos-modules/genode-init.nix index 971c6c8..84730ef 100644 --- a/nixos-modules/genode-init.nix +++ b/nixos-modules/genode-init.nix @@ -11,7 +11,8 @@ with lib; config = mkOption { description = "Dhall configuration of this init instance"; - type = types.either types.str types.path; + type = types.nullOr types.str; + default = null; }; inputs = mkOption { @@ -19,6 +20,15 @@ with lib; type = types.listOf types.package; }; + children = mkOption { + type = let + childOptions = { name, ... }: { + name = mkOption { type = types.str; }; + dhallAttrs = mkOption { type = types.str; }; + }; + in types.attrsOf (types.submodule childOptions); + }; + subinits = mkOption { type = types.attrsOf (types.submodule ({ config, options, name, ... 
}: { options = { diff --git a/nixos-modules/hardware.nix b/nixos-modules/hardware.nix new file mode 100644 index 0000000..0c8dde0 --- /dev/null +++ b/nixos-modules/hardware.nix @@ -0,0 +1,51 @@ +{ config, pkgs, lib, ... }: + +with lib; { + options.genode = { + hardware = { + + nic = mkOption { + default = { }; + example = { eth0.driver = "virtio"; }; + description = "The configuration for each Nic service."; + type = let + + nicOptions = { name, ... }: { + name = mkOption { + + example = "eth0"; + type = types.str; + description = "Name of the Nic service."; + }; + + driver = mkOption { type = types.enum [ "ipxe" "virtio" ]; }; + + ipStack = mkOption { + type = types.enum [ "lwip" "lxip" ]; + default = "lwip"; + }; + + }; + + in types.attrsOf (types.submodule nicOptions); + }; + + }; + }; + + config = { + + genode.init.children = let + drivers = mapAttrsToList (name: interface: { + name = name + "-nic"; + value = { }; + }) config.networking.interfaces; + sockets = mapAttrsToList (name: interface: { + name = name + "-sockets"; + value = { }; + }) config.networking.interfaces; + in builtins.listToAttrs (drivers ++ sockets); + + }; + +} diff --git a/nixos-modules/nova.nix b/nixos-modules/nova.nix new file mode 100644 index 0000000..29115cc --- /dev/null +++ b/nixos-modules/nova.nix @@ -0,0 +1,26 @@ +{ config, pkgs, lib, ... }: + +with lib; +let + localPackages = pkgs.buildPackages; + utils = import ../lib { + inherit (config.nixpkgs) system localSystem crossSystem; + inherit pkgs; + }; +in { + genode.core = { + prefix = "nova-"; + supportedSystems = [ "x86_64-genode" ]; + basePackages = with pkgs.genodePackages; [ base-nova rtc_drv ]; + }; + + genode.boot = { + + initrd = + "'${pkgs.genodePackages.NOVA}/hypervisor-x86_64 arg=iommu novpid serial,${config.genode.boot.image}/image.elf'"; + + image = utils.novaImage config.system.name { } config.genode.boot.config; + + }; + +} diff --git a/nixos-modules/qemu-vm.nix b/nixos-modules/qemu-vm.nix index 42e43f5..339d43a 100644 --- a/nixos-modules/qemu-vm.nix +++ b/nixos-modules/qemu-vm.nix @@ -1,20 +1,11 @@ -# This module creates a virtual machine from the NixOS configuration. -# Building the `config.system.build.vm' attribute gives you a command -# that starts a KVM/QEMU VM running the NixOS configuration defined in -# `config'. The Nix store is shared read-only with the host, which -# makes (re)building VMs very efficient. However, it also means you -# can't reconfigure the guest inside the guest - you need to rebuild -# the VM in the host. On the other hand, the root filesystem is a -# read/writable disk image persistent across VM reboots. - { config, lib, pkgs, ... }: with lib; -with import ../../lib/qemu-flags.nix { inherit pkgs; }; +with import ../tests/lib/qemu-flags.nix { inherit pkgs; }; let - qemu = config.system.build.qemu or pkgs.qemu_test; + qemu = config.system.build.qemu; cfg = config.virtualisation; @@ -31,13 +22,13 @@ let driveExtraOpts = mkOption { type = types.attrsOf types.str; - default = {}; + default = { }; description = "Extra options passed to drive flag."; }; deviceExtraOpts = mkOption { type = types.attrsOf types.str; - default = {}; + default = { }; description = "Extra options passed to device flag."; }; @@ -52,10 +43,11 @@ let }; - driveCmdline = idx: { file, driveExtraOpts, deviceExtraOpts, ... }: + driveCmdline = idx: + { file, driveExtraOpts, deviceExtraOpts, ... 
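+  # Render the QEMU arguments for one drive: a `-drive` flag built from
+  # `file`, the 1-based index and any driveExtraOpts, plus the matching
+  # `-device` flag (a scsi-hd behind an lsi53c895a controller when
+  # cfg.qemu.diskInterface is "scsi", virtio-blk-pci otherwise).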
}: let drvId = "drive${toString idx}"; - mkKeyValue = generators.mkKeyValueDefault {} "="; + mkKeyValue = generators.mkKeyValueDefault { } "="; mkOpts = opts: concatStringsSep "," (mapAttrsToList mkKeyValue opts); driveOpts = mkOpts (driveExtraOpts // { index = idx; @@ -63,20 +55,15 @@ let "if" = "none"; inherit file; }); - deviceOpts = mkOpts (deviceExtraOpts // { - drive = drvId; - }); - device = - if cfg.qemu.diskInterface == "scsi" then - "-device lsi53c895a -device scsi-hd,${deviceOpts}" - else - "-device virtio-blk-pci,${deviceOpts}"; - in - "-drive ${driveOpts} ${device}"; + deviceOpts = mkOpts (deviceExtraOpts // { drive = drvId; }); + device = if cfg.qemu.diskInterface == "scsi" then + "-device lsi53c895a -device scsi-hd,${deviceOpts}" + else + "-device virtio-blk-pci,${deviceOpts}"; + in "-drive ${driveOpts} ${device}"; drivesCmdLine = drives: concatStringsSep " " (imap1 driveCmdline drives); - # Creates a device name from a 1-based a numerical index, e.g. # * `driveDeviceName 1` -> `/dev/vda` # * `driveDeviceName 2` -> `/dev/vdb` @@ -95,324 +82,279 @@ let addDeviceNames = imap1 (idx: drive: drive // { device = driveDeviceName idx; }); - efiPrefix = - if (pkgs.stdenv.isi686 || pkgs.stdenv.isx86_64) then "${pkgs.OVMF.fd}/FV/OVMF" - else if pkgs.stdenv.isAarch64 then "${pkgs.OVMF.fd}/FV/AAVMF" - else throw "No EFI firmware available for platform"; + efiPrefix = if (pkgs.stdenv.isi686 || pkgs.stdenv.isx86_64) then + "${pkgs.OVMF.fd}/FV/OVMF" + else if pkgs.stdenv.isAarch64 then + "${pkgs.OVMF.fd}/FV/AAVMF" + else + throw "No EFI firmware available for platform"; efiFirmware = "${efiPrefix}_CODE.fd"; efiVarsDefault = "${efiPrefix}_VARS.fd"; # Shell script to start the VM. - startVM = - '' - #! ${pkgs.runtimeShell} + startVM = '' + #! ${pkgs.runtimeShell} - NIX_DISK_IMAGE=$(readlink -f ''${NIX_DISK_IMAGE:-${config.virtualisation.diskImage}}) + NIX_DISK_IMAGE=$(readlink -f ''${NIX_DISK_IMAGE:-${config.virtualisation.diskImage}}) - if ! test -e "$NIX_DISK_IMAGE"; then - ${qemu}/bin/qemu-img create -f qcow2 "$NIX_DISK_IMAGE" \ - ${toString config.virtualisation.diskSize}M || exit 1 - fi + if ! test -e "$NIX_DISK_IMAGE"; then + ${qemu}/bin/qemu-img create -f qcow2 "$NIX_DISK_IMAGE" \ + ${toString config.virtualisation.diskSize}M || exit 1 + fi - # Create a directory for storing temporary data of the running VM. - if [ -z "$TMPDIR" -o -z "$USE_TMPDIR" ]; then - TMPDIR=$(mktemp -d nix-vm.XXXXXXXXXX --tmpdir) - fi + # Create a directory for storing temporary data of the running VM. + if [ -z "$TMPDIR" -o -z "$USE_TMPDIR" ]; then + TMPDIR=$(mktemp -d nix-vm.XXXXXXXXXX --tmpdir) + fi - # Create a directory for exchanging data with the VM. - mkdir -p $TMPDIR/xchg + # Create a directory for exchanging data with the VM. + mkdir -p $TMPDIR/xchg - ${if cfg.useBootLoader then '' - # Create a writable copy/snapshot of the boot disk. - # A writable boot disk can be booted from automatically. - ${qemu}/bin/qemu-img create -f qcow2 -b ${bootDisk}/disk.img $TMPDIR/disk.img || exit 1 + ${if cfg.useBootLoader then '' + # Create a writable copy/snapshot of the boot disk. + # A writable boot disk can be booted from automatically. + ${qemu}/bin/qemu-img create -f qcow2 -b ${bootDisk}/disk.img $TMPDIR/disk.img || exit 1 - NIX_EFI_VARS=$(readlink -f ''${NIX_EFI_VARS:-${cfg.efiVars}}) + NIX_EFI_VARS=$(readlink -f ''${NIX_EFI_VARS:-${cfg.efiVars}}) - ${if cfg.useEFIBoot then '' - # VM needs writable EFI vars - if ! 
test -e "$NIX_EFI_VARS"; then - cp ${bootDisk}/efi-vars.fd "$NIX_EFI_VARS" || exit 1 - chmod 0644 "$NIX_EFI_VARS" || exit 1 - fi - '' else '' - ''} - '' else '' - ''} - - cd $TMPDIR - idx=0 - ${flip concatMapStrings cfg.emptyDiskImages (size: '' - if ! test -e "empty$idx.qcow2"; then - ${qemu}/bin/qemu-img create -f qcow2 "empty$idx.qcow2" "${toString size}M" + ${if cfg.useEFIBoot then '' + # VM needs writable EFI vars + if ! test -e "$NIX_EFI_VARS"; then + cp ${bootDisk}/efi-vars.fd "$NIX_EFI_VARS" || exit 1 + chmod 0644 "$NIX_EFI_VARS" || exit 1 fi - idx=$((idx + 1)) - '')} + '' else + ""} + '' else + ""} - # Start QEMU. - exec ${qemuBinary qemu} \ - -name ${config.system.name} \ - -m ${toString config.virtualisation.memorySize} \ - -smp ${toString config.virtualisation.cores} \ - -device virtio-rng-pci \ - ${concatStringsSep " " config.virtualisation.qemu.networkingOptions} \ - -virtfs local,path=/nix/store,security_model=none,mount_tag=store \ - -virtfs local,path=$TMPDIR/xchg,security_model=none,mount_tag=xchg \ - -virtfs local,path=''${SHARED_DIR:-$TMPDIR/xchg},security_model=none,mount_tag=shared \ - ${drivesCmdLine config.virtualisation.qemu.drives} \ - ${toString config.virtualisation.qemu.options} \ - $QEMU_OPTS \ - "$@" - ''; + cd $TMPDIR + idx=0 + ${flip concatMapStrings cfg.emptyDiskImages (size: '' + if ! test -e "empty$idx.qcow2"; then + ${qemu}/bin/qemu-img create -f qcow2 "empty$idx.qcow2" "${ + toString size + }M" + fi + idx=$((idx + 1)) + '')} + # Start QEMU. + exec ${qemuBinary qemu} \ + -name ${config.system.name} \ + -m ${toString config.virtualisation.memorySize} \ + -smp ${toString config.virtualisation.cores} \ + -device virtio-rng-pci \ + ${concatStringsSep " " config.virtualisation.qemu.networkingOptions} \ + -virtfs local,path=/nix/store,security_model=none,mount_tag=store \ + -virtfs local,path=$TMPDIR/xchg,security_model=none,mount_tag=xchg \ + -virtfs local,path=''${SHARED_DIR:-$TMPDIR/xchg},security_model=none,mount_tag=shared \ + ${drivesCmdLine config.virtualisation.qemu.drives} \ + ${toString config.virtualisation.qemu.options} \ + $QEMU_OPTS \ + "$@" + ''; - regInfo = pkgs.closureInfo { rootPaths = config.virtualisation.pathsInNixDB; }; - + regInfo = + pkgs.closureInfo { rootPaths = config.virtualisation.pathsInNixDB; }; # Generate a hard disk image containing a /boot partition and GRUB # in the MBR. Used when the `useBootLoader' option is set. # Uses `runInLinuxVM` to create the image in a throwaway VM. # See note [Disk layout with `useBootLoader`]. # FIXME: use nixos/lib/make-disk-image.nix. 
- bootDisk = - pkgs.vmTools.runInLinuxVM ( - pkgs.runCommand "nixos-boot-disk" - { preVM = - '' - mkdir $out - diskImage=$out/disk.img - ${qemu}/bin/qemu-img create -f qcow2 $diskImage "60M" - ${if cfg.useEFIBoot then '' - efiVars=$out/efi-vars.fd - cp ${efiVarsDefault} $efiVars - chmod 0644 $efiVars - '' else '' - ''} - ''; - buildInputs = [ pkgs.utillinux ]; - QEMU_OPTS = "-nographic -serial stdio -monitor none" - + lib.optionalString cfg.useEFIBoot ( - " -drive if=pflash,format=raw,unit=0,readonly=on,file=${efiFirmware}" - + " -drive if=pflash,format=raw,unit=1,file=$efiVars"); - } - '' - # Create a /boot EFI partition with 60M and arbitrary but fixed GUIDs for reproducibility - ${pkgs.gptfdisk}/bin/sgdisk \ - --set-alignment=1 --new=1:34:2047 --change-name=1:BIOSBootPartition --typecode=1:ef02 \ - --set-alignment=512 --largest-new=2 --change-name=2:EFISystem --typecode=2:ef00 \ - --attributes=1:set:1 \ - --attributes=2:set:2 \ - --disk-guid=97FD5997-D90B-4AA3-8D16-C1723AEA73C1 \ - --partition-guid=1:1C06F03B-704E-4657-B9CD-681A087A2FDC \ - --partition-guid=2:970C694F-AFD0-4B99-B750-CDB7A329AB6F \ - --hybrid 2 \ - --recompute-chs /dev/vda + bootDisk = pkgs.vmTools.runInLinuxVM (pkgs.runCommand "nixos-boot-disk" { + preVM = '' + mkdir $out + diskImage=$out/disk.img + ${qemu}/bin/qemu-img create -f qcow2 $diskImage "60M" + ${if cfg.useEFIBoot then '' + efiVars=$out/efi-vars.fd + cp ${efiVarsDefault} $efiVars + chmod 0644 $efiVars + '' else + ""} + ''; + buildInputs = [ pkgs.utillinux ]; + QEMU_OPTS = "-nographic -serial stdio -monitor none" + + lib.optionalString cfg.useEFIBoot + (" -drive if=pflash,format=raw,unit=0,readonly=on,file=${efiFirmware}" + + " -drive if=pflash,format=raw,unit=1,file=$efiVars"); + } '' + # Create a /boot EFI partition with 60M and arbitrary but fixed GUIDs for reproducibility + ${pkgs.gptfdisk}/bin/sgdisk \ + --set-alignment=1 --new=1:34:2047 --change-name=1:BIOSBootPartition --typecode=1:ef02 \ + --set-alignment=512 --largest-new=2 --change-name=2:EFISystem --typecode=2:ef00 \ + --attributes=1:set:1 \ + --attributes=2:set:2 \ + --disk-guid=97FD5997-D90B-4AA3-8D16-C1723AEA73C1 \ + --partition-guid=1:1C06F03B-704E-4657-B9CD-681A087A2FDC \ + --partition-guid=2:970C694F-AFD0-4B99-B750-CDB7A329AB6F \ + --hybrid 2 \ + --recompute-chs /dev/vda - ${optionalString (config.boot.loader.grub.device != "/dev/vda") - # In this throwaway VM, we only have the /dev/vda disk, but the - # actual VM described by `config` (used by `switch-to-configuration` - # below) may set `boot.loader.grub.device` to a different device - # that's nonexistent in the throwaway VM. - # Create a symlink for that device, so that the `grub-install` - # by `switch-to-configuration` will hit /dev/vda anyway. - '' - ln -s /dev/vda ${config.boot.loader.grub.device} - '' - } + ${optionalString (config.boot.loader.grub.device != "/dev/vda") + # In this throwaway VM, we only have the /dev/vda disk, but the + # actual VM described by `config` (used by `switch-to-configuration` + # below) may set `boot.loader.grub.device` to a different device + # that's nonexistent in the throwaway VM. + # Create a symlink for that device, so that the `grub-install` + # by `switch-to-configuration` will hit /dev/vda anyway. 
+ '' + ln -s /dev/vda ${config.boot.loader.grub.device} + ''} - ${pkgs.dosfstools}/bin/mkfs.fat -F16 /dev/vda2 - export MTOOLS_SKIP_CHECK=1 - ${pkgs.mtools}/bin/mlabel -i /dev/vda2 ::boot + ${pkgs.dosfstools}/bin/mkfs.fat -F16 /dev/vda2 + export MTOOLS_SKIP_CHECK=1 + ${pkgs.mtools}/bin/mlabel -i /dev/vda2 ::boot - # Mount /boot; load necessary modules first. - ${pkgs.kmod}/bin/insmod ${pkgs.linux}/lib/modules/*/kernel/fs/nls/nls_cp437.ko.xz || true - ${pkgs.kmod}/bin/insmod ${pkgs.linux}/lib/modules/*/kernel/fs/nls/nls_iso8859-1.ko.xz || true - ${pkgs.kmod}/bin/insmod ${pkgs.linux}/lib/modules/*/kernel/fs/fat/fat.ko.xz || true - ${pkgs.kmod}/bin/insmod ${pkgs.linux}/lib/modules/*/kernel/fs/fat/vfat.ko.xz || true - ${pkgs.kmod}/bin/insmod ${pkgs.linux}/lib/modules/*/kernel/fs/efivarfs/efivarfs.ko.xz || true - mkdir /boot - mount /dev/vda2 /boot + mkdir /boot + mount /dev/vda2 /boot - ${optionalString config.boot.loader.efi.canTouchEfiVariables '' - mount -t efivarfs efivarfs /sys/firmware/efi/efivars - ''} + ${optionalString config.boot.loader.efi.canTouchEfiVariables '' + mount -t efivarfs efivarfs /sys/firmware/efi/efivars + ''} - # This is needed for GRUB 0.97, which doesn't know about virtio devices. - mkdir /boot/grub - echo '(hd0) /dev/vda' > /boot/grub/device.map + # This is needed for GRUB 0.97, which doesn't know about virtio devices. + mkdir /boot/grub + echo '(hd0) /dev/vda' > /boot/grub/device.map - # This is needed for systemd-boot to find ESP, and udev is not available here to create this - mkdir -p /dev/block - ln -s /dev/vda2 /dev/block/254:2 + # This is needed for systemd-boot to find ESP, and udev is not available here to create this + mkdir -p /dev/block + ln -s /dev/vda2 /dev/block/254:2 - # Set up system profile (normally done by nixos-rebuild / nix-env --set) - mkdir -p /nix/var/nix/profiles - ln -s ${config.system.build.toplevel} /nix/var/nix/profiles/system-1-link - ln -s /nix/var/nix/profiles/system-1-link /nix/var/nix/profiles/system + # Install bootloader + touch /etc/NIXOS + export NIXOS_INSTALL_BOOTLOADER=1 + ${config.system.build.toplevel}/bin/switch-to-configuration boot - # Install bootloader - touch /etc/NIXOS - export NIXOS_INSTALL_BOOTLOADER=1 - ${config.system.build.toplevel}/bin/switch-to-configuration boot - - umount /boot - '' # */ - ); - -in - -{ - imports = [ - ../profiles/qemu-guest.nix - ]; + umount /boot + '' # */ + ); +in { options = { - virtualisation.memorySize = - mkOption { - default = 384; - description = - '' - Memory size (M) of virtual machine. - ''; - }; + virtualisation.memorySize = mkOption { + default = 384; + description = '' + Memory size (M) of virtual machine. + ''; + }; - virtualisation.diskSize = - mkOption { - default = 512; - description = - '' - Disk size (M) of virtual machine. - ''; - }; + virtualisation.diskSize = mkOption { + default = 512; + description = '' + Disk size (M) of virtual machine. + ''; + }; - virtualisation.diskImage = - mkOption { - default = "./${config.system.name}.qcow2"; - description = - '' - Path to the disk image containing the root filesystem. - The image will be created on startup if it does not - exist. - ''; - }; + virtualisation.diskImage = mkOption { + default = "./${config.system.name}.qcow2"; + description = '' + Path to the disk image containing the root filesystem. + The image will be created on startup if it does not + exist. + ''; + }; - virtualisation.bootDevice = - mkOption { - type = types.str; - example = "/dev/vda"; - description = - '' - The disk to be used for the root filesystem. 
- ''; - }; + virtualisation.bootDevice = mkOption { + type = types.str; + example = "/dev/vda"; + description = '' + The disk to be used for the root filesystem. + ''; + }; - virtualisation.emptyDiskImages = - mkOption { - default = []; - type = types.listOf types.int; - description = - '' - Additional disk images to provide to the VM. The value is - a list of size in megabytes of each disk. These disks are - writeable by the VM. - ''; - }; + virtualisation.emptyDiskImages = mkOption { + default = [ ]; + type = types.listOf types.int; + description = '' + Additional disk images to provide to the VM. The value is + a list of size in megabytes of each disk. These disks are + writeable by the VM. + ''; + }; - virtualisation.graphics = - mkOption { - default = true; - description = - '' - Whether to run QEMU with a graphics window, or in nographic mode. - Serial console will be enabled on both settings, but this will - change the preferred console. - ''; - }; + virtualisation.graphics = mkOption { + default = true; + description = '' + Whether to run QEMU with a graphics window, or in nographic mode. + Serial console will be enabled on both settings, but this will + change the preferred console. + ''; + }; - virtualisation.cores = - mkOption { - default = 1; - type = types.int; - description = - '' - Specify the number of cores the guest is permitted to use. - The number can be higher than the available cores on the - host system. - ''; - }; + virtualisation.cores = mkOption { + default = 1; + type = types.int; + description = '' + Specify the number of cores the guest is permitted to use. + The number can be higher than the available cores on the + host system. + ''; + }; - virtualisation.pathsInNixDB = - mkOption { - default = []; - description = - '' - The list of paths whose closure is registered in the Nix - database in the VM. All other paths in the host Nix store - appear in the guest Nix store as well, but are considered - garbage (because they are not registered in the Nix - database in the guest). - ''; - }; + virtualisation.pathsInNixDB = mkOption { + default = [ ]; + description = '' + The list of paths whose closure is registered in the Nix + database in the VM. All other paths in the host Nix store + appear in the guest Nix store as well, but are considered + garbage (because they are not registered in the Nix + database in the guest). + ''; + }; - virtualisation.vlans = - mkOption { - default = [ 1 ]; - example = [ 1 2 ]; - description = - '' - Virtual networks to which the VM is connected. Each - number N in this list causes - the VM to have a virtual Ethernet interface attached to a - separate virtual network on which it will be assigned IP - address - 192.168.N.M, - where M is the index of this VM - in the list of VMs. - ''; - }; + virtualisation.vlans = mkOption { + default = [ 1 ]; + example = [ 1 2 ]; + description = '' + Virtual networks to which the VM is connected. Each + number N in this list causes + the VM to have a virtual Ethernet interface attached to a + separate virtual network on which it will be assigned IP + address + 192.168.N.M, + where M is the index of this VM + in the list of VMs. + ''; + }; - virtualisation.writableStore = - mkOption { - default = true; # FIXME - description = - '' - If enabled, the Nix store in the VM is made writable by - layering an overlay filesystem on top of the host's Nix - store. 
- ''; - }; + virtualisation.writableStore = mkOption { + default = true; # FIXME + description = '' + If enabled, the Nix store in the VM is made writable by + layering an overlay filesystem on top of the host's Nix + store. + ''; + }; - virtualisation.writableStoreUseTmpfs = - mkOption { - default = true; - description = - '' - Use a tmpfs for the writable store instead of writing to the VM's - own filesystem. - ''; - }; + virtualisation.writableStoreUseTmpfs = mkOption { + default = true; + description = '' + Use a tmpfs for the writable store instead of writing to the VM's + own filesystem. + ''; + }; - networking.primaryIPAddress = - mkOption { - default = ""; - internal = true; - description = "Primary IP address used in /etc/hosts."; - }; + networking.primaryIPAddress = mkOption { + default = ""; + internal = true; + description = "Primary IP address used in /etc/hosts."; + }; virtualisation.qemu = { - options = - mkOption { - type = types.listOf types.unspecified; - default = []; - example = [ "-vga std" ]; - description = "Options passed to QEMU."; - }; + options = mkOption { + type = types.listOf types.unspecified; + default = [ ]; + example = [ "-vga std" ]; + description = "Options passed to QEMU."; + }; consoles = mkOption { type = types.listOf types.str; - default = let - consoles = [ "${qemuSerialDevice},115200n8" "tty0" ]; + default = let consoles = [ "${qemuSerialDevice},115200n8" "tty0" ]; in if cfg.graphics then consoles else reverseList consoles; example = [ "console=tty1" ]; description = '' @@ -426,94 +368,82 @@ in ''; }; - networkingOptions = - mkOption { - default = [ - "-net nic,netdev=user.0,model=virtio" - "-netdev user,id=user.0\${QEMU_NET_OPTS:+,$QEMU_NET_OPTS}" - ]; - type = types.listOf types.str; - description = '' - Networking-related command-line options that should be passed to qemu. - The default is to use userspace networking (slirp). + networkingOptions = mkOption { + default = [ + "-net nic,netdev=user.0,model=virtio" + "-netdev user,id=user.0\${QEMU_NET_OPTS:+,$QEMU_NET_OPTS}" + ]; + type = types.listOf types.str; + description = '' + Networking-related command-line options that should be passed to qemu. + The default is to use userspace networking (slirp). - If you override this option, be advised to keep - ''${QEMU_NET_OPTS:+,$QEMU_NET_OPTS} (as seen in the default) - to keep the default runtime behaviour. - ''; - }; + If you override this option, be advised to keep + ''${QEMU_NET_OPTS:+,$QEMU_NET_OPTS} (as seen in the default) + to keep the default runtime behaviour. + ''; + }; - drives = - mkOption { - type = types.listOf (types.submodule driveOpts); - description = "Drives passed to qemu."; - apply = addDeviceNames; - }; + drives = mkOption { + type = types.listOf (types.submodule driveOpts); + description = "Drives passed to qemu."; + apply = addDeviceNames; + }; - diskInterface = - mkOption { - default = "virtio"; - example = "scsi"; - type = types.enum [ "virtio" "scsi" "ide" ]; - description = "The interface used for the virtual hard disks."; - }; + diskInterface = mkOption { + default = "virtio"; + example = "scsi"; + type = types.enum [ "virtio" "scsi" "ide" ]; + description = "The interface used for the virtual hard disks."; + }; - guestAgent.enable = - mkOption { - default = true; - type = types.bool; - description = '' - Enable the Qemu guest agent. - ''; - }; + guestAgent.enable = mkOption { + default = true; + type = types.bool; + description = '' + Enable the Qemu guest agent. 
+ ''; + }; }; - virtualisation.useBootLoader = - mkOption { - default = false; - description = - '' - If enabled, the virtual machine will be booted using the - regular boot loader (i.e., GRUB 1 or 2). This allows - testing of the boot loader. If - disabled (the default), the VM directly boots the NixOS - kernel and initial ramdisk, bypassing the boot loader - altogether. - ''; - }; + virtualisation.useBootLoader = mkOption { + default = false; + description = '' + If enabled, the virtual machine will be booted using the + regular boot loader (i.e., GRUB 1 or 2). This allows + testing of the boot loader. If + disabled (the default), the VM directly boots the NixOS + kernel and initial ramdisk, bypassing the boot loader + altogether. + ''; + }; - virtualisation.useEFIBoot = - mkOption { - default = false; - description = - '' - If enabled, the virtual machine will provide a EFI boot - manager. - useEFIBoot is ignored if useBootLoader == false. - ''; - }; + virtualisation.useEFIBoot = mkOption { + default = false; + description = '' + If enabled, the virtual machine will provide a EFI boot + manager. + useEFIBoot is ignored if useBootLoader == false. + ''; + }; - virtualisation.efiVars = - mkOption { - default = "./${config.system.name}-efi-vars.fd"; - description = - '' - Path to nvram image containing UEFI variables. The will be created - on startup if it does not exist. - ''; - }; + virtualisation.efiVars = mkOption { + default = "./${config.system.name}-efi-vars.fd"; + description = '' + Path to nvram image containing UEFI variables. The will be created + on startup if it does not exist. + ''; + }; - virtualisation.bios = - mkOption { - default = null; - type = types.nullOr types.package; - description = - '' - An alternate BIOS (such as qboot) with which to start the VM. - Should contain a file named bios.bin. - If null, QEMU's builtin SeaBIOS will be used. - ''; - }; + virtualisation.bios = mkOption { + default = null; + type = types.nullOr types.package; + description = '' + An alternate BIOS (such as qboot) with which to start the VM. + Should contain a file named bios.bin. + If null, QEMU's builtin SeaBIOS will be used. + ''; + }; }; @@ -535,92 +465,67 @@ in # If `useBootLoader`, GRUB goes to the second disk, see # note [Disk layout with `useBootLoader`]. - boot.loader.grub.device = mkVMOverride ( - if cfg.useBootLoader - then driveDeviceName 2 # second disk - else cfg.bootDevice - ); + boot.loader.grub.device = mkVMOverride (if cfg.useBootLoader then + driveDeviceName 2 # second disk + else + cfg.bootDevice); - boot.initrd.extraUtilsCommands = - '' - # We need mke2fs in the initrd. - copy_bin_and_libs ${pkgs.e2fsprogs}/bin/mke2fs - ''; + boot.initrd.extraUtilsCommands = '' + # We need mke2fs in the initrd. + copy_bin_and_libs ${pkgs.e2fsprogs}/bin/mke2fs + ''; - boot.initrd.postDeviceCommands = - '' - # If the disk image appears to be empty, run mke2fs to - # initialise. - FSTYPE=$(blkid -o value -s TYPE ${cfg.bootDevice} || true) - if test -z "$FSTYPE"; then - mke2fs -t ext4 ${cfg.bootDevice} - fi - ''; + boot.initrd.postDeviceCommands = '' + # If the disk image appears to be empty, run mke2fs to + # initialise. + FSTYPE=$(blkid -o value -s TYPE ${cfg.bootDevice} || true) + if test -z "$FSTYPE"; then + mke2fs -t ext4 ${cfg.bootDevice} + fi + ''; - boot.initrd.postMountCommands = - '' - # Mark this as a NixOS machine. - mkdir -p $targetRoot/etc - echo -n > $targetRoot/etc/NIXOS + boot.initrd.postMountCommands = '' + # Mark this as a NixOS machine. 
+ mkdir -p $targetRoot/etc + echo -n > $targetRoot/etc/NIXOS - # Fix the permissions on /tmp. - chmod 1777 $targetRoot/tmp + # Fix the permissions on /tmp. + chmod 1777 $targetRoot/tmp - mkdir -p $targetRoot/boot + mkdir -p $targetRoot/boot - ${optionalString cfg.writableStore '' - echo "mounting overlay filesystem on /nix/store..." - mkdir -p 0755 $targetRoot/nix/.rw-store/store $targetRoot/nix/.rw-store/work $targetRoot/nix/store - mount -t overlay overlay $targetRoot/nix/store \ - -o lowerdir=$targetRoot/nix/.ro-store,upperdir=$targetRoot/nix/.rw-store/store,workdir=$targetRoot/nix/.rw-store/work || fail - ''} - ''; - - # After booting, register the closure of the paths in - # `virtualisation.pathsInNixDB' in the Nix database in the VM. This - # allows Nix operations to work in the VM. The path to the - # registration file is passed through the kernel command line to - # allow `system.build.toplevel' to be included. (If we had a direct - # reference to ${regInfo} here, then we would get a cyclic - # dependency.) - boot.postBootCommands = - '' - if [[ "$(cat /proc/cmdline)" =~ regInfo=([^ ]*) ]]; then - ${config.nix.package.out}/bin/nix-store --load-db < ''${BASH_REMATCH[1]} - fi - ''; - - boot.initrd.availableKernelModules = - optional cfg.writableStore "overlay" - ++ optional (cfg.qemu.diskInterface == "scsi") "sym53c8xx"; + ${optionalString cfg.writableStore '' + echo "mounting overlay filesystem on /nix/store..." + mkdir -p 0755 $targetRoot/nix/.rw-store/store $targetRoot/nix/.rw-store/work $targetRoot/nix/store + mount -t overlay overlay $targetRoot/nix/store \ + -o lowerdir=$targetRoot/nix/.ro-store,upperdir=$targetRoot/nix/.rw-store/store,workdir=$targetRoot/nix/.rw-store/work || fail + ''} + ''; virtualisation.bootDevice = mkDefault (driveDeviceName 1); - virtualisation.pathsInNixDB = [ config.system.build.toplevel ]; - # FIXME: Consolidate this one day. virtualisation.qemu.options = mkMerge [ (mkIf (pkgs.stdenv.isi686 || pkgs.stdenv.isx86_64) [ - "-usb" "-device usb-tablet,bus=usb-bus.0" + "-usb" + "-device usb-tablet,bus=usb-bus.0" ]) (mkIf (pkgs.stdenv.isAarch32 || pkgs.stdenv.isAarch64) [ - "-device virtio-gpu-pci" "-device usb-ehci,id=usb0" "-device usb-kbd" "-device usb-tablet" + "-device virtio-gpu-pci" + "-device usb-ehci,id=usb0" + "-device usb-kbd" + "-device usb-tablet" ]) (mkIf (!cfg.useBootLoader) [ - "-kernel ${config.system.build.toplevel}/kernel" - "-initrd ${config.system.build.toplevel}/initrd" - ''-append "$(cat ${config.system.build.toplevel}/kernel-params) init=${config.system.build.toplevel}/init regInfo=${regInfo}/registration ${consoles} $QEMU_KERNEL_PARAMS"'' + "-kernel ${config.genode.boot.kernel}" + "-initrd ${config.genode.boot.initrd}" ]) (mkIf cfg.useEFIBoot [ "-drive if=pflash,format=raw,unit=0,readonly,file=${efiFirmware}" "-drive if=pflash,format=raw,unit=1,file=$NIX_EFI_VARS" ]) - (mkIf (cfg.bios != null) [ - "-bios ${cfg.bios}/bios.bin" - ]) - (mkIf (!cfg.graphics) [ - "-nographic" - ]) + (mkIf (cfg.bios != null) [ "-bios ${cfg.bios}/bios.bin" ]) + (mkIf (!cfg.graphics) [ "-nographic" ]) ]; virtualisation.qemu.drives = mkMerge [ @@ -646,106 +551,12 @@ in }) cfg.emptyDiskImages) ]; - # Mount the host filesystem via 9P, and bind-mount the Nix store - # of the host into our own filesystem. 
We use mkVMOverride to - # allow this module to be applied to "normal" NixOS system - # configuration, where the regular value for the `fileSystems' - # attribute should be disregarded for the purpose of building a VM - # test image (since those filesystems don't exist in the VM). - fileSystems = mkVMOverride ( - { "/".device = cfg.bootDevice; - ${if cfg.writableStore then "/nix/.ro-store" else "/nix/store"} = - { device = "store"; - fsType = "9p"; - options = [ "trans=virtio" "version=9p2000.L" "cache=loose" ]; - neededForBoot = true; - }; - "/tmp" = mkIf config.boot.tmpOnTmpfs - { device = "tmpfs"; - fsType = "tmpfs"; - neededForBoot = true; - # Sync with systemd's tmp.mount; - options = [ "mode=1777" "strictatime" "nosuid" "nodev" ]; - }; - "/tmp/xchg" = - { device = "xchg"; - fsType = "9p"; - options = [ "trans=virtio" "version=9p2000.L" ]; - neededForBoot = true; - }; - "/tmp/shared" = - { device = "shared"; - fsType = "9p"; - options = [ "trans=virtio" "version=9p2000.L" ]; - neededForBoot = true; - }; - } // optionalAttrs (cfg.writableStore && cfg.writableStoreUseTmpfs) - { "/nix/.rw-store" = - { fsType = "tmpfs"; - options = [ "mode=0755" ]; - neededForBoot = true; - }; - } // optionalAttrs cfg.useBootLoader - { "/boot" = - # see note [Disk layout with `useBootLoader`] - { device = "${lookupDriveDeviceName "boot" cfg.qemu.drives}2"; # 2 for e.g. `vdb2`, as created in `bootDisk` - fsType = "vfat"; - noCheck = true; # fsck fails on a r/o filesystem - }; - }); - - swapDevices = mkVMOverride [ ]; - boot.initrd.luks.devices = mkVMOverride {}; - - # Don't run ntpd in the guest. It should get the correct time from KVM. - services.timesyncd.enable = false; - - services.qemuGuest.enable = cfg.qemu.guestAgent.enable; - - system.build.vm = pkgs.runCommand "nixos-vm" { preferLocalBuild = true; } - '' - mkdir -p $out/bin - ln -s ${config.system.build.toplevel} $out/system - ln -s ${pkgs.writeScript "run-nixos-vm" startVM} $out/bin/run-${config.system.name}-vm - ''; - - # When building a regular system configuration, override whatever - # video driver the host uses. - services.xserver.videoDrivers = mkVMOverride [ "modesetting" ]; - services.xserver.defaultDepth = mkVMOverride 0; - services.xserver.resolutions = mkVMOverride [ { x = 1024; y = 768; } ]; - services.xserver.monitorSection = - '' - # Set a higher refresh rate so that resolutions > 800x600 work. - HorizSync 30-140 - VertRefresh 50-160 - ''; - - # Wireless won't work in the VM. - networking.wireless.enable = mkVMOverride false; - services.connman.enable = mkVMOverride false; - - # Speed up booting by not waiting for ARP. 
- networking.dhcpcd.extraConfig = "noarp"; - - networking.usePredictableInterfaceNames = false; - - system.requiredKernelConfig = with config.lib.kernelConfig; - [ (isEnabled "VIRTIO_BLK") - (isEnabled "VIRTIO_PCI") - (isEnabled "VIRTIO_NET") - (isEnabled "EXT4_FS") - (isYes "BLK_DEV") - (isYes "PCI") - (isYes "EXPERIMENTAL") - (isYes "NETDEVICES") - (isYes "NET_CORE") - (isYes "INET") - (isYes "NETWORK_FILESYSTEMS") - ] ++ optional (!cfg.graphics) [ - (isYes "SERIAL_8250_CONSOLE") - (isYes "SERIAL_8250") - ]; + system.build.vm = pkgs.runCommand "nixos-vm" { preferLocalBuild = true; } '' + mkdir -p $out/bin + ln -s ${ + pkgs.writeScript "run-nixos-vm" startVM + } $out/bin/run-${config.system.name}-vm + ''; }; } diff --git a/nixos-modules/store-wrapper.dhall b/nixos-modules/store-wrapper.dhall new file mode 100644 index 0000000..8ee4661 --- /dev/null +++ b/nixos-modules/store-wrapper.dhall @@ -0,0 +1,159 @@ +let Genode = + env:DHALL_GENODE sha256:e90438be23b5100003cf018b783986df67bc6d0e3d35e800677d0d9109ff6aa9 + +let Prelude = Genode.Prelude + +let XML = Prelude.XML + +let Init = Genode.Init + +let Child = Init.Child + +let TextMapType = Prelude.Map.Type Text + +let Children = TextMapType Child.Type + +let Manifest/Type = TextMapType (TextMapType Text) + +let Manifest/toRoutes = + λ(manifest : Manifest/Type) → + Prelude.List.map + (Prelude.Map.Entry Text Text) + Init.ServiceRoute.Type + ( λ(entry : Prelude.Map.Entry Text Text) → + { service = + { name = "ROM" + , label = Init.LabelSelector.Type.Last entry.mapKey + } + , route = + Init.Route.Type.Child + { name = "store_rom" + , label = Some entry.mapValue + , diag = None Bool + } + } + ) + ( Prelude.List.concat + (Prelude.Map.Entry Text Text) + (Prelude.Map.values Text (Prelude.Map.Type Text Text) manifest) + ) + +let parentROMs = + Prelude.List.map + Text + Init.ServiceRoute.Type + ( λ(label : Text) → + { service = + { name = "ROM", label = Init.LabelSelector.Type.Last label } + , route = + Init.Route.Type.Parent { label = Some label, diag = None Bool } + } + ) + +let wrapStore + : Init.Type → Manifest/Type → Child.Type + = λ(init : Init.Type) → + λ(manifest : Manifest/Type) → + Init.toChild + init + Init.Attributes::{ + , exitPropagate = True + , resources = Init.Resources::{ ram = Genode.units.MiB 4 } + , routes = + [ Init.ServiceRoute.parent "IO_MEM" + , Init.ServiceRoute.parent "IO_PORT" + , Init.ServiceRoute.parent "IRQ" + , Init.ServiceRoute.parent "VM" + , Init.ServiceRoute.child "Timer" "timer" + , Init.ServiceRoute.child "Rtc" "rtc" + , Init.ServiceRoute.child "File_system" "store_fs" + ] + # parentROMs + [ "ld.lib.so" + , "init" + , "platform_info" + , "core_log" + , "kernel_log" + , "vfs" + , "libvfs.so" + , "cached_fs_rom" + ] + # Manifest/toRoutes manifest + } + +in λ(subinit : Init.Type) → + λ(storeSize : Natural) → + λ(storeManifest : Manifest/Type) → + λ(bootManifest : Manifest/Type) → + Genode.Boot::{ + , config = Init::{ + , children = + [ { mapKey = "timer" + , mapValue = + Child.flat + Child.Attributes::{ + , binary = "timer_drv" + , provides = [ "Timer" ] + } + } + , { mapKey = "rtc" + , mapValue = + Child.flat + Child.Attributes::{ + , binary = "rtc_drv" + , provides = [ "Rtc" ] + , routes = [ Init.ServiceRoute.parent "IO_PORT" ] + } + } + , { mapKey = "store_fs" + , mapValue = + Child.flat + Child.Attributes::{ + , binary = "vfs" + , config = Init.Config::{ + , content = + [ XML.element + { name = "vfs" + , attributes = XML.emptyAttributes + , content = + [ XML.leaf + { name = "tar" + , attributes = toMap { name = 
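+                              -- "store.tar" holds the Nix store closure that
+                              -- genode-core.nix packs from genode.init.inputs;
+                              -- store_fs serves it read-only and store_rom
+                              -- (cached_fs_rom) resolves the wrapped init's
+                              -- ROM requests against its contents.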
"store.tar" } + } + ] + } + ] + , defaultPolicy = Some Init.Config.DefaultPolicy::{ + , attributes = toMap { root = "/", writeable = "no" } + } + } + , provides = [ "File_system" ] + } + } + , { mapKey = "store_rom" + , mapValue = + Child.flat + Child.Attributes::{ + , binary = "cached_fs_rom" + , provides = [ "ROM" ] + , resources = Init.Resources::{ + , ram = storeSize + Genode.units.MiB 1 + } + , routes = + [ Init.ServiceRoute.child "File_system" "store_fs" ] + } + } + , { mapKey = "init", mapValue = wrapStore subinit storeManifest } + ] + } + , rom = + Genode.BootModules.toRomPaths + ( Prelude.List.concat + (Prelude.Map.Entry Text Text) + ( Prelude.Map.values + Text + (Prelude.Map.Type Text Text) + bootManifest + ) + ) + } diff --git a/tests/default.nix b/tests/default.nix index 053a3fe..5334f66 100644 --- a/tests/default.nix +++ b/tests/default.nix @@ -1,345 +1,88 @@ -{ flake, system, localSystem, crossSystem }: +{ flake, system, localSystem, crossSystem, pkgs }: let apps = flake.apps.${system}; localPackages = flake.legacyPackages.${localSystem}; - genodepkgs = flake.packages.${system}; lib = flake.lib.${system}; nixpkgs = flake.legacyPackages.${system}; legacyPackages = flake.legacyPackages.${system}; -in with import ./lib/build-vms.nix { - inherit flake system localSystem crossSystem; - pkgs = flake.legacyPackages.${system}; - localPackages = flake.inputs.nixpkgs.legacyPackages.${localSystem}; - modulesPath = "${flake.inputs.nixpkgs}/nixos/modules"; -}; -with flake.legacyPackages.${system}; - -let + testingPython = import ./lib/testing-python.nix; testSpecs = map (p: import p) [ ./log.nix + ./lighttpd.nix # ./posix.nix # ./tox-bootstrapd.nix # ./vmm_arm.nix # ./vmm_x86.nix - # ./x86.nix + ./x86.nix ]; # TODO ++ (callTest ./solo5); - testPkgs = genodepkgs; - qemu' = localPackages.qemu; qemuBinary = qemuPkg: { aarch64-genode = "${qemuPkg}/bin/qemu-system-aarch64"; x86_64-genode = "${qemuPkg}/bin/qemu-system-x86_64"; - }.${genodepkgs.stdenv.hostPlatform.system}; + }.${pkgs.stdenv.hostPlatform.system}; # TODO: move the cores into nixos modules cores = [ { prefix = "hw-pc-"; + testingPython = testingPython { + inherit flake system localSystem crossSystem pkgs; + extraConfigurations = [ ../nixos-modules/base-hw-pc.nix ]; + }; specs = [ "x86" "hw" ]; platforms = [ "x86_64-genode" ]; - basePackages = [ testPkgs.base-hw-pc ] - ++ map testPkgs.genodeSources.depot [ "rtc_drv" ]; - makeImage = - lib.hwImage "0xffffffc000000000" "0x00200000" testPkgs.base-hw-pc; - startVM = vmName: image: '' - #! ${localPackages.runtimeShell} - exec ${qemuBinary qemu'} \ - -name ${vmName} \ - -machine q35 \ - -m 384 \ - -netdev user,id=net0 \ - -device virtio-net-pci,netdev=net0 \ - -kernel "${testPkgs.bender}/bender" \ - -initrd "${image}/image.elf" \ - $QEMU_OPTS \ - "$@" - ''; - } - { - prefix = "hw-virt_qemu-"; - specs = [ "aarch64" "hw" ]; - platforms = [ "aarch64-genode" ]; - basePackages = with testPkgs; [ base-hw-virt_qemu rtc-dummy ]; - makeImage = lib.hwImage "0xffffffc000000000" "0x40000000" - testPkgs.base-hw-virt_qemu; - startVM = vmName: image: '' - #! 
${localPackages.runtimeShell} - exec ${qemuBinary qemu'} \ - -name ${vmName} \ - -M virt,virtualization=true,gic_version=3 \ - -cpu cortex-a53 \ - -smp 4 \ - -m 384 \ - -kernel "${image}/image.elf" \ - $QEMU_OPTS \ - "$@" - ''; } + /* { + prefix = "hw-virt_qemu-"; + testingPython = testingPython { + inherit flake system localSystem crossSystem pkgs; + extraConfigurations = [ ../nixos-modules/base-hw-virt_qemu.nix ]; + }; + specs = [ "aarch64" "hw" ]; + platforms = [ "aarch64-genode" ]; + startVM = vmName: image: '' + #! ${localPackages.runtimeShell} + exec ${qemuBinary qemu'} \ + -name ${vmName} \ + -M virt,virtualization=true,gic_version=3 \ + -cpu cortex-a53 \ + -smp 4 \ + -m 384 \ + -kernel "${image}/image.elf" \ + $QEMU_OPTS \ + "$@" + ''; + } + */ { prefix = "nova-"; + testingPython = testingPython { + inherit flake system localSystem crossSystem pkgs; + extraConfigurations = [ ../nixos-modules/nova.nix ]; + }; specs = [ "x86" "nova" ]; platforms = [ "x86_64-genode" ]; - basePackages = [ testPkgs.base-nova ] - ++ map testPkgs.genodeSources.depot [ "rtc_drv" ]; - makeImage = lib.novaImage; - startVM = vmName: image: '' - #! ${localPackages.runtimeShell} - exec ${qemuBinary qemu'} \ - -name ${vmName} \ - -machine q35 \ - -m 384 \ - -kernel "${testPkgs.bender}/bender" \ - -initrd "${testPkgs.NOVA}/hypervisor-x86_64 arg=iommu novpid serial,${image}/image.elf" \ - $QEMU_OPTS \ - "$@" - ''; } ]; cores' = builtins.filter (core: - builtins.any (x: x == genodepkgs.stdenv.hostPlatform.system) core.platforms) + builtins.any (x: x == pkgs.stdenv.hostPlatform.system) core.platforms) cores; - testDriver = with localPackages; - let testDriverScript = ./test-driver/test-driver.py; - in stdenv.mkDerivation { - name = "nixos-test-driver"; - - nativeBuildInputs = [ makeWrapper ]; - buildInputs = [ (python3.withPackages (p: [ p.ptpython ])) ]; - checkInputs = with python3Packages; [ pylint mypy ]; - - dontUnpack = true; - - preferLocalBuild = true; - - doCheck = true; - checkPhase = '' - mypy --disallow-untyped-defs \ - --no-implicit-optional \ - --ignore-missing-imports ${testDriverScript} - pylint --errors-only ${testDriverScript} - ''; - - installPhase = '' - mkdir -p $out/bin - cp ${testDriverScript} $out/bin/nixos-test-driver - chmod u+x $out/bin/nixos-test-driver - # TODO: copy user script part into this file (append) - - wrapProgram $out/bin/nixos-test-driver \ - --prefix PATH : "${lib.makeBinPath [ qemu' coreutils ]}" \ - ''; - }; - - defaultTestScript = '' - start_all() - machine.wait_until_serial_output('child "init" exited with exit value 0') - ''; - - makeTest = with localPackages; - { prefix, specs, platforms, basePackages, makeImage, startVM }: - { name ? "unnamed", testScript ? defaultTestScript, - # Skip linting (mainly intended for faster dev cycles) - skipLint ? false, ... }@t: - - let - testDriverName = "genode-test-driver-${name}"; - - # TODO: move buildVM into a nixos module - buildVM = vmName: - { config, inputs, env ? { }, extraPaths ? 
[ ] }: - let - storeTarball = localPackages.runCommand "store" { } '' - mkdir -p $out - tar cf "$out/store.tar" --absolute-names ${toString inputs} ${ - toString extraPaths - } - ''; - addManifest = drv: - drv // { - manifest = - nixpkgs.runCommand "${drv.name}.dhall" { inherit drv; } '' - set -eu - echo -n '[' >> $out - find $drv/ -type f -printf ',{mapKey= "%f",mapValue="%p"}' >> $out - ${if builtins.elem "lib" drv.outputs then - '' - find ${drv.lib}/ -type f -printf ',{mapKey= "%f",mapValue="%p"}' >> $out'' - else - ""} - echo -n ']' >> $out - ''; - }; - - storeManifest = lib.mergeManifests (map addManifest inputs); - manifest = lib.mergeManifests (map addManifest (basePackages - ++ [ testPkgs.sotest-producer storeTarball ] - ++ map testPkgs.genodeSources.depot [ - "init" - "vfs" - "cached_fs_rom" - ])); - config' = "${ - ./test-wrapper.dhall - } (${config}) $(stat --format '%s' ${storeTarball}/store.tar) ${storeManifest} ${manifest}"; - env' = { - DHALL_GENODE_TEST = "${./test.dhall}"; - } // env; - - image = makeImage vmName env' config'; - startVM' = startVM vmName image; - in { - script = localPackages.writeScriptBin "run-${vmName}-vm" startVM'; - - config = lib.runDhallCommand (name + ".dhall") env' '' - ${apps.dhall.program} <<< "${config'}" > $out - ''; - - store = storeTarball; - - xml = lib.runDhallCommand (name + ".config") env' - ''${apps.render-init.program} <<< "(${config'}).config" > $out''; - }; - - # nodes = lib.mapAttrs buildVM - # (t.nodes or (if t ? machine then { machine = t.machine; } else { })); - - nodes = buildVirtualNetwork - (t.nodes or (if t ? machine then { machine = t.machine; } else { })); - - testScript' = - # Call the test script with the computed nodes. - if lib.isFunction testScript then - testScript { inherit nodes; } - else - testScript; - - vlans = map (m: m.config.virtualisation.vlans) (lib.attrValues nodes); - - vms = map (m: m.config.system.build.vm) (lib.attrValues nodes); - - # Generate onvenience wrappers for running the test driver - # interactively with the specified network, and for starting the - # VMs from the command line. - driver = - let warn = if skipLint then lib.warn "Linting is disabled!" else lib.id; - in warn (runCommand testDriverName { - buildInputs = [ makeWrapper ]; - testScript = testScript'; - preferLocalBuild = true; - testName = name; - } '' - mkdir -p $out/bin - - echo -n "$testScript" > $out/test-script - ${lib.optionalString (!skipLint) '' - ${python3Packages.black}/bin/black --check --quiet --diff $out/test-script - ''} - - ln -s ${testDriver}/bin/nixos-test-driver $out/bin/ - vms=($(for i in ${toString vms}; do echo $i/bin/run-*-vm; done)) - wrapProgram $out/bin/nixos-test-driver \ - --add-flags "''${vms[*]}" \ - --run "export testScript=\"\$(${coreutils}/bin/cat $out/test-script)\"" - ln -s ${testDriver}/bin/nixos-test-driver $out/bin/nixos-run-vms - wrapProgram $out/bin/nixos-run-vms \ - --add-flags "''${vms[*]}" \ - --set tests 'start_all(); join_all();' - ''); # " - - passMeta = drv: - drv - // lib.optionalAttrs (t ? meta) { meta = (drv.meta or { }) // t.meta; }; - - # Run an automated test suite in the given virtual network. - # `driver' is the script that runs the network. 
-      runTests = driver:
-        stdenv.mkDerivation {
-          name = "test-run-${driver.testName}";
-
-          buildCommand = ''
-            mkdir -p $out
-
-            LOGFILE=/dev/null tests='exec(os.environ["testScript"])' ${driver}/bin/nixos-test-driver
-          '';
-        };
-
-      test = passMeta (runTests driver);
-
-      nodeNames = builtins.attrNames nodes;
-      invalidNodeNames =
-        lib.filter (node: builtins.match "^[A-z_]([A-z0-9_]+)?$" node == null)
-        nodeNames;
-
-    in if lib.length invalidNodeNames > 0 then
-      throw ''
-        Cannot create machines out of (${
-          lib.concatStringsSep ", " invalidNodeNames
-        })!
-        All machines are referenced as python variables in the testing framework which will break the
-        script when special characters are used.
-
-        Please stick to alphanumeric chars and underscores as separation.
-      ''
-    else
-      test // { inherit nodes driver test; };
-
   testList = let
-    f = core:
-      let makeTest' = makeTest core;
-      in test:
+    f = core: test:
       if (test.constraints or (_: true)) core.specs then {
         name = core.prefix + test.name;
-        value = makeTest' test;
+        value = core.testingPython.makeTest test;
       } else
         null;
   in lib.lists.crossLists f [ cores' testSpecs ];
 in builtins.listToAttrs (builtins.filter (_: _ != null) testList)
-
-/* sotest = let
-  hwTests = with hw; [ multi posix x86 ];
-  novaTests = with nova; [ multi posix x86 vmm ];
-  allTests = hwTests ++ novaTests;
-
-  projectCfg.boot_items =
-
-    (map (test: {
-      inherit (test) name;
-      exec = "bender";
-      load = [ "${test.name}.image.elf" ];
-    }) hwTests)
-
-    ++ (map (test: {
-      inherit (test) name;
-      exec = "bender";
-      load = [ "hypervisor serial novga iommu" test.image.name ];
-    }) novaTests);
-
-  in localPackages.stdenv.mkDerivation {
-    name = "sotest";
-    buildCommand = ''
-      mkdir zip; cd zip
-      cp "${testPkgs.bender}/bender" bender
-      cp "${testPkgs.NOVA}/hypervisor-x86_64" hypervisor
-      ${concatStringsSep "\n"
-      (map (test: "cp ${test.image}/image.elf ${test.name}.image.elf")
-        allTests)}
-      mkdir -p $out/nix-support
-      ${localPackages.zip}/bin/zip "$out/binaries.zip" *
-      cat << EOF > "$out/project.json"
-      ${builtins.toJSON projectCfg}
-      EOF
-      echo file sotest-binaries $out/binaries.zip >> "$out/nix-support/hydra-build-products"
-      echo file sotest-config $out/project.json >> "$out/nix-support/hydra-build-products"
-    '';
-  };
-*/
diff --git a/tests/lib/build-vms.nix b/tests/lib/build-vms.nix
index ca6e4e3..32b6948 100644
--- a/tests/lib/build-vms.nix
+++ b/tests/lib/build-vms.nix
@@ -1,6 +1,8 @@
-{ flake, system, localSystem, crossSystem
+{ system, localSystem, crossSystem
 # Nixpkgs, for qemu, lib and more
-, localPackages, pkgs, modulesPath }:
+, pkgs, modulesPath
+# NixOS configuration to add to the VMs
+, extraConfigurations ? [ ] }:
 
 with pkgs.lib;
 with import ./qemu-flags.nix { inherit pkgs; };
@@ -9,7 +11,7 @@ rec {
 
   inherit pkgs;
 
-  qemu = pkgs.qemu_test;
+  qemu = pkgs.buildPackages.buildPackages.qemu_test;
 
   # Build a virtual network from an attribute set `{ machine1 =
   # config1; ... machineN = configN; }', where `machineX' is the
@@ -23,10 +25,11 @@ rec {
     import "${modulesPath}/../lib/eval-config.nix" {
       inherit system;
 
-      modules = configurations;
+      modules = configurations ++ extraConfigurations;
       baseModules = (import "${modulesPath}/module-list.nix") ++ [
-        "${modulesPath}/virtualisation/qemu-vm.nix"
-        "${modulesPath}/testing/test-instrumentation.nix" # !!! should only get added for automated test runs
+        ../../nixos-modules/genode-core.nix
+        ../../nixos-modules/genode-init.nix
+        ../../nixos-modules/qemu-vm.nix
         {
           key = "no-manual";
           documentation.nixos.enable = false;
@@ -40,10 +43,8 @@ rec {
           _module.args.nodes = nodes;
         }
         {
-          nixpkgs = {
-            inherit system crossSystem localSystem;
-            pkgs = flake.legacyPackages.${system};
-          };
+          system.build.qemu = qemu;
+          nixpkgs = { inherit system crossSystem localSystem pkgs; };
         }
       ];
     };
diff --git a/tests/test-driver/test-driver.py b/tests/lib/test-driver.py
similarity index 100%
rename from tests/test-driver/test-driver.py
rename to tests/lib/test-driver.py
diff --git a/tests/lib/testing-python.nix b/tests/lib/testing-python.nix
new file mode 100644
index 0000000..45426ea
--- /dev/null
+++ b/tests/lib/testing-python.nix
@@ -0,0 +1,235 @@
+{ flake, system, localSystem, crossSystem, pkgs
+# Modules to add to each VM
+, extraConfigurations ? [ ] }:
+
+with import ./build-vms.nix {
+  inherit system localSystem crossSystem pkgs extraConfigurations;
+  modulesPath = "${flake.inputs.nixpkgs}/nixos/modules";
+};
+with pkgs.buildPackages.buildPackages;
+
+rec {
+
+  inherit pkgs;
+
+  testDriver = let testDriverScript = ./test-driver.py;
+  in stdenv.mkDerivation {
+    name = "nixos-test-driver";
+
+    nativeBuildInputs = [ makeWrapper ];
+    buildInputs = [ (python3.withPackages (p: [ p.ptpython ])) ];
+    checkInputs = with python3Packages; [ pylint mypy ];
+
+    dontUnpack = true;
+
+    preferLocalBuild = true;
+
+    doCheck = true;
+    checkPhase = ''
+      mypy --disallow-untyped-defs \
+        --no-implicit-optional \
+        --ignore-missing-imports ${testDriverScript}
+      pylint --errors-only ${testDriverScript}
+    '';
+
+    installPhase = ''
+      mkdir -p $out/bin
+      cp ${testDriverScript} $out/bin/nixos-test-driver
+      chmod u+x $out/bin/nixos-test-driver
+      # TODO: copy user script part into this file (append)
+
+      wrapProgram $out/bin/nixos-test-driver \
+        --prefix PATH : "${
+          lib.makeBinPath [ qemu_test vde2 netpbm coreutils ]
+        }" \
+    '';
+  };
+
+  # Run an automated test suite in the given virtual network.
+  # `driver' is the script that runs the network.
+  runTests = driver:
+    stdenv.mkDerivation {
+      name = "vm-test-run-${driver.testName}";
+
+      requiredSystemFeatures = [ "nixos-test" ];
+
+      buildCommand = ''
+        mkdir -p $out
+
+        LOGFILE=/dev/null tests='exec(os.environ["testScript"])' ${driver}/bin/nixos-test-driver
+      '';
+    };
+
+  defaultTestScript = ''
+    start_all()
+    machine.wait_until_serial_output('child "init" exited with exit value 0')
+  '';
+
+  makeTest = { testScript ? defaultTestScript, enableOCR ? false, name ?
+    "unnamed"
+    # Skip linting (mainly intended for faster dev cycles)
+    , skipLint ? false, ... }@t:
+
+    let
+      testDriverName = "genode-test-driver-${name}";
+
+      nodes = buildVirtualNetwork
+        (t.nodes or (if t ? machine then { machine = t.machine; } else { }));
+
+      testScript' =
+        # Call the test script with the computed nodes.
+        if lib.isFunction testScript then
+          testScript { inherit nodes; }
+        else
+          testScript;
+
+      vlans = map (m: m.config.virtualisation.vlans) (lib.attrValues nodes);
+
+      vms = map (m: m.config.system.build.vm) (lib.attrValues nodes);
+
+      ocrProg = tesseract4.override { enableLanguages = [ "eng" ]; };
+
+      imagemagick_tiff = imagemagick_light.override { inherit libtiff; };
+
+      # Generate convenience wrappers for running the test driver
+      # interactively with the specified network, and for starting the
+      # VMs from the command line.
+      driver =
+        let warn = if skipLint then lib.warn "Linting is disabled!" else lib.id;
+        in warn (runCommand testDriverName {
+          buildInputs = [ makeWrapper ];
+          testScript = testScript';
+          preferLocalBuild = true;
+          testName = name;
+        } ''
+          mkdir -p $out/bin
+
+          echo -n "$testScript" > $out/test-script
+
+          ln -s ${testDriver}/bin/nixos-test-driver $out/bin/
+          vms=($(for i in ${toString vms}; do echo $i/bin/run-*-vm; done))
+          wrapProgram $out/bin/nixos-test-driver \
+            --add-flags "''${vms[*]}" \
+            ${
+              lib.optionalString enableOCR
+              "--prefix PATH : '${ocrProg}/bin:${imagemagick_tiff}/bin'"
+            } \
+            --run "export testScript=\"\$(${coreutils}/bin/cat $out/test-script)\"" \
+            --set VLANS '${toString vlans}'
+          ln -s ${testDriver}/bin/nixos-test-driver $out/bin/nixos-run-vms
+          wrapProgram $out/bin/nixos-run-vms \
+            --add-flags "''${vms[*]}" \
+            ${lib.optionalString enableOCR "--prefix PATH : '${ocrProg}/bin'"} \
+            --set tests 'start_all(); join_all();' \
+            --set VLANS '${toString vlans}' \
+            ${
+              lib.optionalString (builtins.length vms == 1) "--set USE_SERIAL 1"
+            }
+        ''); # "
+
+      passMeta = drv:
+        drv
+        // lib.optionalAttrs (t ? meta) { meta = (drv.meta or { }) // t.meta; };
+
+      test = passMeta (runTests driver);
+
+      nodeNames = builtins.attrNames nodes;
+      invalidNodeNames =
+        lib.filter (node: builtins.match "^[A-z_]([A-z0-9_]+)?$" node == null)
+        nodeNames;
+
+    in if lib.length invalidNodeNames > 0 then
+      throw ''
+        Cannot create machines out of (${
+          lib.concatStringsSep ", " invalidNodeNames
+        })!
+        All machines are referenced as python variables in the testing framework which will break the
+        script when special characters are used.
+
+        Please stick to alphanumeric chars and underscores as separation.
+      ''
+    else
+      test // { inherit nodes driver test; };
+
+  runInMachine = { drv, machine, preBuild ? "", postBuild ? "", ... # ???
+    }:
+    let
+      vm = buildVM { } [
+        machine
+        {
+          key = "run-in-machine";
+          networking.hostName = "client";
+          nix.readOnlyStore = false;
+          virtualisation.writableStore = false;
+        }
+      ];
+
+      buildrunner = writeText "vm-build" ''
+        source $1
+
+        ${coreutils}/bin/mkdir -p $TMPDIR
+        cd $TMPDIR
+
+        exec $origBuilder $origArgs
+      '';
+
+      testScript = ''
+        start_all()
+        client.wait_for_unit("multi-user.target")
+        ${preBuild}
+        client.succeed("env -i ${bash}/bin/bash ${buildrunner} /tmp/xchg/saved-env >&2")
+        ${postBuild}
+        client.succeed("sync") # flush all data before pulling the plug
+      '';
+
+      vmRunCommand = writeText "vm-run" ''
+        xchg=vm-state-client/xchg
+        ${coreutils}/bin/mkdir $out
+        ${coreutils}/bin/mkdir -p $xchg
+
+        for i in $passAsFile; do
+          i2=''${i}Path
+          _basename=$(${coreutils}/bin/basename ''${!i2})
+          ${coreutils}/bin/cp ''${!i2} $xchg/$_basename
+          eval $i2=/tmp/xchg/$_basename
+          ${coreutils}/bin/ls -la $xchg
+        done
+
+        unset i i2 _basename
+        export | ${gnugrep}/bin/grep -v '^xchg=' > $xchg/saved-env
+        unset xchg
+
+        export tests='${testScript}'
+        ${testDriver}/bin/nixos-test-driver ${vm.config.system.build.vm}/bin/run-*-vm
+      ''; # */
+
+    in lib.overrideDerivation drv (attrs: {
+      requiredSystemFeatures = [ "kvm" ];
+      builder = "${bash}/bin/sh";
+      args = [ "-e" vmRunCommand ];
+      origArgs = attrs.args;
+      origBuilder = attrs.builder;
+    });
+
+  runInMachineWithX = { require ? [ ], ... }@args:
+    let
+      client = { ... }: {
+        inherit require;
+        imports = [ ../tests/common/auto.nix ];
+        virtualisation.memorySize = 1024;
+        services.xserver.enable = true;
+        test-support.displayManager.auto.enable = true;
+        services.xserver.displayManager.defaultSession = "none+icewm";
+        services.xserver.windowManager.icewm.enable = true;
+      };
+    in runInMachine ({
+      machine = client;
+      preBuild = ''
+        client.wait_for_x()
+      '';
+    } // args);
+
+  simpleTest = as: (makeTest as).test;
+
+}
+
diff --git a/tests/lighttpd.nix b/tests/lighttpd.nix
new file mode 100644
index 0000000..40c5396
--- /dev/null
+++ b/tests/lighttpd.nix
@@ -0,0 +1,13 @@
+{
+  name = "lighttpd";
+  nodes = {
+    webserver = {
+      imports = [ ../nixos-modules/hardware.nix ];
+      services.lighttpd.enable = true;
+    };
+    client = {
+      imports = [ ../nixos-modules/hardware.nix ];
+      genode.hardware.nic.eth0.driver = "virtio";
+    };
+  };
+}
diff --git a/tests/log.nix b/tests/log.nix
index 05a7395..278226e 100644
--- a/tests/log.nix
+++ b/tests/log.nix
@@ -1,9 +1,6 @@
 {
   name = "log";
   machine = { config, pkgs, ... }: {
-    imports = [
-      ../nixos-modules/genode-init.nix
-    ];
     genode.init = {
       config = ./log.dhall;
       inputs = [ (pkgs.genodeSources.depot "test-log") ];
diff --git a/tests/test-wrapper.dhall b/tests/test-wrapper.dhall
index f464f8c..6b5c545 100644
--- a/tests/test-wrapper.dhall
+++ b/tests/test-wrapper.dhall
@@ -1,4 +1,6 @@
-let Test = ./test.dhall ? env:DHALL_GENODE_TEST
+let Test =
+      ./test.dhall sha256:00e0b73a23e1f131a2e5af36a34bc85b31b4fb6597ea3772dee9c536929ea166
+    ? env:DHALL_GENODE_TEST sha256:00e0b73a23e1f131a2e5af36a34bc85b31b4fb6597ea3772dee9c536929ea166
 
 let Genode = Test.Genode
 
diff --git a/tests/test.dhall b/tests/test.dhall
index 6c7187b..17f3205 100644
--- a/tests/test.dhall
+++ b/tests/test.dhall
@@ -1,4 +1,5 @@
-let Genode = env:DHALL_GENODE
+let Genode =
+      env:DHALL_GENODE sha256:e90438be23b5100003cf018b783986df67bc6d0e3d35e800677d0d9109ff6aa9
 
 let Prelude = Genode.Prelude
 
diff --git a/tests/x86.nix b/tests/x86.nix
index c4f4051..fa4bc63 100644
--- a/tests/x86.nix
+++ b/tests/x86.nix
@@ -1,13 +1,12 @@
-{ pkgs, ... }: {
+{
   name = "x86";
   constraints = builtins.any (spec: spec == "x86");
-  machine = {
-    config = ./x86.dhall;
-    inputs = (map pkgs.genodeSources.depot [
-      "acpi_drv"
-      "platform_drv"
-      "report_rom"
-      "test-signal"
-    ]) ++ (map pkgs.genodeSources.make [ "test/pci" "test/rtc" ]);
+  machine = { config, pkgs, ... }: {
+    genode.init = {
+      config = ./x86.dhall;
+      inputs = with pkgs.genodePackages;
+        [ acpi_drv platform_drv report_rom test-signal ]
+        ++ (map genodeSources.make [ "test/pci" "test/rtc" ]);
+    };
   };
 }
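
Illustrative sketch (not taken from the patch): tests/lib/testing-python.nix is a function of { flake, system, localSystem, crossSystem, pkgs, extraConfigurations ? [ ] } whose makeTest and simpleTest attributes build and run a QEMU network, and the testList hunk above expects each core to expose it as core.testingPython. A hypothetical call site for the lighttpd test could look like the following; the placement in tests/default.nix and the relative paths are assumptions, only the attribute and argument names come from the hunks above.

  # sketch of a caller, e.g. somewhere in tests/default.nix (hypothetical)
  { flake, system, localSystem, crossSystem, pkgs }:
  let
    testingPython = import ./lib/testing-python.nix {
      inherit flake system localSystem crossSystem pkgs;
      extraConfigurations = [ ]; # extra NixOS modules merged into every VM
    };
  in {
    # simpleTest = as: (makeTest as).test, so this evaluates to the
    # derivation that runs test-driver.py over the two-node network
    # declared in tests/lighttpd.nix
    lighttpd = testingPython.simpleTest (import ./lighttpd.nix);
  }

Because tests/lighttpd.nix sets no testScript, makeTest falls back to defaultTestScript, which starts all machines and waits on the serial log for 'child "init" exited with exit value 0'. Tests that declare a constraints predicate, such as tests/x86.nix, are only kept by the testList filter above when the predicate holds for the core's specs list.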