# Test harness for genodepkgs: builds a bootable image for every test on
# every supported kernel ("core"), and wraps each image in a NixOS-style
# Python test driver that drives QEMU and asserts on serial output.
#
# Returns an attrset of tests named "<core.prefix><test.name>".
{ self, apps, localPackages, genodepkgs, lib, nixpkgs, legacyPackages }:

let

  # Import a test expression, passing the shared package sets through.
  callTest = path:
    import path {
      pkgs = testPkgs;
      inherit nixpkgs localPackages legacyPackages;
    };

  # ./solo5 evaluates to a list of tests, so it is concatenated rather
  # than mapped over.
  testFiles =
    map callTest [ ./log.nix ./posix.nix ./vmm_arm.nix ./vmm_x86.nix ./x86.nix ]
    ++ (callTest ./solo5);

  testPkgs = genodepkgs;

  qemu' = localPackages.qemu;

  # Select the QEMU system emulator matching the Genode host platform.
  # Throws (attribute missing) for any other platform.
  qemuBinary = qemuPkg:
    {
      aarch64-genode = "${qemuPkg}/bin/qemu-system-aarch64";
      x86_64-genode = "${qemuPkg}/bin/qemu-system-x86_64";
    }.${genodepkgs.stdenv.hostPlatform.system};

  # One entry per supported kernel/platform combination:
  #  - prefix:       prepended to every test name built for this core
  #  - specs:        spec strings a test's `constraints` predicate may check
  #  - platforms:    host platforms this core can run on
  #  - basePackages: packages every image for this core needs
  #  - makeImage:    vmName -> env -> config -> bootable image derivation
  #  - startVM:      vmName -> image -> shell script text that boots it
  cores = [
    {
      prefix = "hw-pc-";
      specs = [ "x86" "hw" ];
      platforms = [ "x86_64-genode" ];
      basePackages = [ testPkgs.base-hw-pc ]
        ++ map testPkgs.genodeSources.depot [ "rtc_drv" ];
      # Link/load addresses passed to lib.hwImage for the base-hw PC kernel.
      makeImage =
        lib.hwImage "0xffffffc000000000" "0x00200000" testPkgs.base-hw-pc;
      startVM = vmName: image: ''
        #! ${localPackages.runtimeShell}
        exec ${qemuBinary qemu'} \
          -name ${vmName} \
          -machine q35 \
          -m 384 \
          -netdev user,id=net0 \
          -device virtio-net-pci,netdev=net0 \
          -kernel "${testPkgs.bender}/bender" \
          -initrd "${image}/image.elf" \
          $QEMU_OPTS \
          "$@"
      '';
    }
    {
      prefix = "hw-virt_qemu-";
      specs = [ "aarch64" "hw" ];
      platforms = [ "aarch64-genode" ];
      basePackages = with testPkgs; [ base-hw-virt_qemu rtc-dummy ];
      makeImage =
        lib.hwImage "0xffffffc000000000" "0x40000000" testPkgs.base-hw-virt_qemu;
      startVM = vmName: image: ''
        #! ${localPackages.runtimeShell}
        exec ${qemuBinary qemu'} \
          -name ${vmName} \
          -M virt,virtualization=true,gic_version=3 \
          -cpu cortex-a53 \
          -smp 4 \
          -m 384 \
          -kernel "${image}/image.elf" \
          $QEMU_OPTS \
          "$@"
      '';
    }
    {
      prefix = "nova-";
      specs = [ "x86" "nova" ];
      platforms = [ "x86_64-genode" ];
      basePackages = [ testPkgs.base-nova ]
        ++ map testPkgs.genodeSources.depot [ "rtc_drv" ];
      makeImage = lib.novaImage;
      startVM = vmName: image: ''
        #! ${localPackages.runtimeShell}
        exec ${qemuBinary qemu'} \
          -name ${vmName} \
          -machine q35 \
          -m 384 \
          -kernel "${testPkgs.bender}/bender" \
          -initrd "${testPkgs.NOVA}/hypervisor-x86_64 arg=iommu novpid serial,${image}/image.elf" \
          $QEMU_OPTS \
          "$@"
      '';
    }
  ];

  # Keep only the cores that can run on the current host platform.
  cores' = builtins.filter
    (core:
      builtins.any (x: x == genodepkgs.stdenv.hostPlatform.system)
      core.platforms)
    cores;

  # The Python test driver, linted and type-checked at build time.
  testDriver = with localPackages;
    let testDriverScript = ./test-driver/test-driver.py;
    in stdenv.mkDerivation {
      name = "nixos-test-driver";
      nativeBuildInputs = [ makeWrapper ];
      buildInputs = [ (python3.withPackages (p: [ p.ptpython ])) ];
      checkInputs = with python3Packages; [ pylint mypy ];
      dontUnpack = true;
      preferLocalBuild = true;
      doCheck = true;
      checkPhase = ''
        mypy --disallow-untyped-defs \
             --no-implicit-optional \
             --ignore-missing-imports ${testDriverScript}
        pylint --errors-only ${testDriverScript}
      '';
      installPhase = ''
        mkdir -p $out/bin
        cp ${testDriverScript} $out/bin/nixos-test-driver
        chmod u+x $out/bin/nixos-test-driver
        # TODO: copy user script part into this file (append)
        wrapProgram $out/bin/nixos-test-driver \
          --prefix PATH : "${lib.makeBinPath [ qemu' coreutils ]}" \
      '';
    };

  # Used when a test file does not supply its own testScript.
  defaultTestScript = ''
    start_all()
    machine.wait_until_serial_output('child "init" exited with exit value 0')
  '';

  # makeTest core testAttrs: build the VMs, the driver wrapper, and the
  # derivation that actually runs the test.
  makeTest = with localPackages;
    { prefix, specs, platforms, basePackages, makeImage, startVM }:
    { name ? "unnamed", testScript ? defaultTestScript,
      # Skip linting (mainly intended for faster dev cycles)
      skipLint ? false, ... }@t:

    let
      testDriverName = "genode-test-driver-${name}";

      # Build one VM node: image, run script, and rendered configs.
      buildVM = vmName:
        { config, inputs, env ? { }, extraPaths ? [ ] }:
        let
          # Tarball of every store path the test closure needs.
          storeTarball = localPackages.runCommand "store" { } ''
            mkdir -p $out
            tar cf "$out/store.tar" --absolute-names ${toString inputs} ${
              toString extraPaths
            }
          '';
          # Attach a Dhall manifest (file name -> store path map) to a
          # derivation, including its "lib" output when present.
          addManifest = drv:
            drv // {
              manifest =
                nixpkgs.runCommand "${drv.name}.dhall" { inherit drv; } ''
                  set -eu
                  echo -n '[' >> $out
                  find $drv/ -type f -printf ',{mapKey= "%f",mapValue="%p"}' >> $out
                  ${if builtins.elem "lib" drv.outputs then
                    ''
                      find ${drv.lib}/ -type f -printf ',{mapKey= "%f",mapValue="%p"}' >> $out''
                  else
                    ""}
                  echo -n ']' >> $out
                '';
            };

          storeManifest = lib.mergeManifests (map addManifest inputs);
          manifest = lib.mergeManifests (map addManifest (basePackages
            ++ [ testPkgs.sotest-producer storeTarball ]
            ++ map testPkgs.genodeSources.depot [ "init" "vfs" "cached_fs_rom" ]));

          # Apply the Dhall test wrapper to the test config, the tarball
          # size, and both manifests.
          config' = "${
              ./test-wrapper.dhall
            } (${config}) $(stat --format '%s' ${storeTarball}/store.tar) ${storeManifest} ${manifest}";

          env' = {
            DHALL_GENODE = "${testPkgs.dhallGenode}/source.dhall";
            DHALL_GENODE_TEST = "${./test.dhall}";
          } // env;

          image = makeImage vmName env' config';
          startVM' = startVM vmName image;
        in {
          script = localPackages.writeScriptBin "run-${vmName}-vm" startVM';
          config = lib.runDhallCommand (name + ".dhall") env' ''
            ${apps.dhall.program} <<< "${config'}" > $out
          '';
          store = storeTarball;
          xml = lib.runDhallCommand (name + ".config") env'
            ''${apps.render-init.program} <<< "(${config'}).config" > $out'';
        };

      nodes = lib.mapAttrs buildVM
        (t.nodes or (if t ? machine then { machine = t.machine; } else { }));

      testScript' =
        # Call the test script with the computed nodes.
        if lib.isFunction testScript then
          testScript { inherit nodes; }
        else
          testScript;

      vms = map (node: node.script) (lib.attrValues nodes);

      # Generate convenience wrappers for running the test driver
      # interactively with the specified network, and for starting the
      # VMs from the command line.
      driver =
        let
          warn = if skipLint then lib.warn "Linting is disabled!" else lib.id;
        in warn (runCommand testDriverName {
          buildInputs = [ makeWrapper ];
          testScript = testScript';
          preferLocalBuild = true;
          testName = name;
        } ''
          mkdir -p $out/bin
          echo -n "$testScript" > $out/test-script
          ${lib.optionalString (!skipLint) ''
            ${python3Packages.black}/bin/black --check --quiet --diff $out/test-script
          ''}
          ln -s ${testDriver}/bin/nixos-test-driver $out/bin/
          vms=($(for i in ${toString vms}; do echo $i/bin/run-*-vm; done))
          wrapProgram $out/bin/nixos-test-driver \
            --add-flags "''${vms[*]}" \
            --run "export testScript=\"\$(${coreutils}/bin/cat $out/test-script)\""
          ln -s ${testDriver}/bin/nixos-test-driver $out/bin/nixos-run-vms
          wrapProgram $out/bin/nixos-run-vms \
            --add-flags "''${vms[*]}" \
            --set tests 'start_all(); join_all();'
        ''); # "

      # Merge the test file's meta (if any) into a derivation's meta.
      passMeta = drv:
        drv // lib.optionalAttrs (t ? meta) {
          meta = (drv.meta or { }) // t.meta;
        };

      # Run an automated test suite in the given virtual network.
      # `driver' is the script that runs the network.
      runTests = driver:
        stdenv.mkDerivation {
          name = "test-run-${driver.testName}";
          buildCommand = ''
            mkdir -p $out
            LOGFILE=/dev/null tests='exec(os.environ["testScript"])' ${driver}/bin/nixos-test-driver
          '';
        };

      test = passMeta (runTests driver);

      nodeNames = builtins.attrNames nodes;
      # Node names become Python identifiers in the test script, so they
      # must be alphanumeric/underscore.  Note: the explicit A-Za-z
      # classes are deliberate — the former "[A-z]" range also matched
      # the characters "[ \ ] ^ `" and let invalid names through.
      invalidNodeNames = lib.filter
        (node: builtins.match "^[A-Za-z_]([A-Za-z0-9_]+)?$" node == null)
        nodeNames;

    in if lib.length invalidNodeNames > 0 then
      throw ''
        Cannot create machines out of (${
          lib.concatStringsSep ", " invalidNodeNames
        })! All machines are referenced as python variables in the testing framework which will break the script when special characters are used. Please stick to alphanumeric chars and underscores as separation.
      ''
    else
      test // { inherit nodes driver test; };

  # Cross product of runnable cores and test files; entries whose
  # `constraints` predicate rejects the core's specs become null.
  testList =
    let
      f = core:
        let makeTest' = makeTest core;
        in test:
        if (test.constraints or (_: true)) core.specs then {
          name = core.prefix + test.name;
          value = makeTest' test;
        } else
          null;
    in lib.lists.crossLists f [ cores' testFiles ];

in builtins.listToAttrs (builtins.filter (t: t != null) testList)

/* sotest = let
     hwTests = with hw; [ multi posix x86 ];
     novaTests = with nova; [ multi posix x86 vmm ];
     allTests = hwTests ++ novaTests;

     projectCfg.boot_items = (map (test: {
       inherit (test) name;
       exec = "bender";
       load = [ "${test.name}.image.elf" ];
     }) hwTests) ++ (map (test: {
       inherit (test) name;
       exec = "bender";
       load = [ "hypervisor serial novga iommu" test.image.name ];
     }) novaTests);

   in localPackages.stdenv.mkDerivation {
     name = "sotest";
     buildCommand = ''
       mkdir zip; cd zip
       cp "${testPkgs.bender}/bender" bender
       cp "${testPkgs.NOVA}/hypervisor-x86_64" hypervisor
       ${concatStringsSep "\n"
       (map (test: "cp ${test.image}/image.elf ${test.name}.image.elf")
         allTests)}
       mkdir -p $out/nix-support
       ${localPackages.zip}/bin/zip "$out/binaries.zip" *
       cat << EOF > "$out/project.json"
       ${builtins.toJSON projectCfg}
       EOF
       echo file sotest-binaries $out/binaries.zip >> "$out/nix-support/hydra-build-products"
       echo file sotest-config $out/project.json >> "$out/nix-support/hydra-build-products"
     '';
   };
*/