Format, move overlay to overlays to match ~/.config/nixpkgs/overlays/

Sandro - 2022-12-04 07:48:45 +01:00
parent a9590e3242
commit 695e095d44
Signed by: sandro
GPG Key ID: 3AF5A43A3EECC2E5
24 changed files with 332 additions and 288 deletions
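For context: the new directory name mirrors the convention Nix uses for per-user overlays, where every *.nix file under ~/.config/nixpkgs/overlays/ is expected to evaluate to an overlay function of the form final: prev: { ... }. A minimal sketch of such a file follows (hypothetical file name and package, not part of this repository):

  # ~/.config/nixpkgs/overlays/example.nix (hypothetical)
  final: prev: {
    # `prev` is the package set before this overlay, `final` the fixed point
    # after all overlays are applied; add or override attributes here.
    hello-with-note = prev.hello.overrideAttrs (old: {
      postInstall = (old.postInstall or "") + ''
        mkdir -p $out/share
        echo "built via overlay" > $out/share/overlay-note
      '';
    });
  }

In this repository only the directory name changes; the flake's overlay output keeps its shape (import ./overlays { ... } is still called with its extra inputs).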


@@ -202,7 +202,7 @@
   extraHostRegistry.hosts = import ./host-registry.nix;
   hostRegistry = lib.recursiveUpdate zwHostRegistry extraHostRegistry;
 in {
-  overlay = import ./overlay {
+  overlay = import ./overlays {
     inherit nixos-unstable;
     inherit (inputs) tracer bevy-mandelbrot bevy-julia;
   };


@@ -33,12 +33,14 @@ with final; {
     postFixup = ''
       patchelf --set-rpath ${lib.makeLibraryPath buildInputs} $out/bin/allcolors-rs
     '';
-    desktopItems = [ (makeDesktopItem {
-      name = "allcolors";
-      desktopName = "Polygon's allcolors-rs";
-      categories = [ "Game" ];
-      exec = "allcolors-rs";
-    }) ];
+    desktopItems = [
+      (makeDesktopItem {
+        name = "allcolors";
+        desktopName = "Polygon's allcolors-rs";
+        categories = [ "Game" ];
+        exec = "allcolors-rs";
+      })
+    ];
   };
   inherit (bevy-julia.packages.${system}) bevy_julia;
@@ -123,10 +125,10 @@ with final; {
       broken = true;
       reason = "haddock runs on affection for 10 hours and more";
     in
     if broken
     then throw "tracer-game: ${reason}"
     else tracer.packages.${system}.tracer-game;
   # vector-0.23 + mqtt-sink
-  vector = pkgs-unstable.callPackage ./vector {};
+  vector = pkgs-unstable.callPackage ./vector { };
 }


@@ -1,19 +1,19 @@
 {
   gis-distance = {
-    groups = ["default"];
-    platforms = [];
+    groups = [ "default" ];
+    platforms = [ ];
     source = {
-      remotes = ["https://rubygems.org"];
+      remotes = [ "https://rubygems.org" ];
       sha256 = "1kgv1scv25b65d9xfricj1ayd2iry7imgk7qw4mryd91mhriibaf";
       type = "gem";
     };
     version = "1.1.0";
   };
   influxdb = {
-    groups = ["default"];
-    platforms = [];
+    groups = [ "default" ];
+    platforms = [ ];
     source = {
-      remotes = ["https://rubygems.org"];
+      remotes = [ "https://rubygems.org" ];
       sha256 = "1l2sjf8kaw3adjjg3l7zg1j735yxdfldf04gl9kjc3hbpdcd7d4w";
       type = "gem";
     };


@@ -1,5 +1,7 @@
-{ fetchFromGitHub, rustPlatform
-, pkg-config, llvmPackages
+{ fetchFromGitHub
+, rustPlatform
+, pkg-config
+, llvmPackages
 , soapysdr-with-plugins
 }:


@@ -1,8 +1,18 @@
-{ naersk, fenix
-, curl, nodejs, rustPlatform
-, stdenv, fetchFromGitHub, buildEnv, fetchCrate
-, pkg-config, gettext, wasm-pack, binaryen
-, openssl, postgresql
+{ naersk
+, fenix
+, curl
+, nodejs
+, rustPlatform
+, stdenv
+, fetchFromGitHub
+, buildEnv
+, fetchCrate
+, pkg-config
+, gettext
+, wasm-pack
+, binaryen
+, openssl
+, postgresql
 }:
 let
@@ -80,7 +90,8 @@ let
     inherit src version;
     nativeBuildInputs = [
-      pkg-config gettext
+      pkg-config
+      gettext
       wasm-bindgen-cli
     ];
     buildInputs = [
@@ -115,7 +126,10 @@ let
     pname = "plume-front";
     root = src;
     nativeBuildInputs = [
-      gettext wasm-pack wasm-bindgen-cli binaryen
+      gettext
+      wasm-pack
+      wasm-bindgen-cli
+      binaryen
     ];
     CARGO_BUILD_TARGET = "wasm32-unknown-unknown";
     cargoBuildOptions = x:
@@ -133,7 +147,8 @@ let
       '';
     };
   };
-in buildEnv {
+in
+buildEnv {
   name = "plume-env";
   paths = [ plume plume-front plm ];
   passthru = { inherit plume plm; };


@@ -1,8 +1,13 @@
-{ stdenv, fetchFromGitHub
-, pkg-config, protobufc
-, ncurses, rrdtool, libusb1
-, libbladeRF, librtlsdr
-# , libad9361, libiio
+{ stdenv
+, fetchFromGitHub
+, pkg-config
+, protobufc
+, ncurses
+, rrdtool
+, libusb1
+, libbladeRF
+, librtlsdr
+# , libad9361, libiio
 }:
 stdenv.mkDerivation rec {
   pname = "readsb-protobuf";
@@ -17,12 +22,16 @@ stdenv.mkDerivation rec {
   };
   nativeBuildInputs = [
-    pkg-config protobufc
+    pkg-config
+    protobufc
   ];
   buildInputs = [
-    ncurses rrdtool libusb1
-    libbladeRF librtlsdr
+    ncurses
+    rrdtool
+    libusb1
+    libbladeRF
+    librtlsdr
     # libad9361 libiio
   ];


@@ -51,7 +51,7 @@ rustPlatform.buildRustPackage {
   TZDIR = "${tzdata}/share/zoneinfo";
   # needed to dynamically link rdkafka
-  CARGO_FEATURE_DYNAMIC_LINKING=1;
+  CARGO_FEATURE_DYNAMIC_LINKING = 1;
   buildNoDefaultFeatures = true;
   buildFeatures = features;


@@ -5,301 +5,317 @@ let
     let
       hostConf = hostRegistry.hosts."${name}";
     in
     if hostConf ? ip4
     then hostConf.ip4
     else if hostConf ? ip6
     then hostConf.ip6
     else throw "Host ${name} has no ip4 or ip6 address";
   # all the input flakes for `nix copy` to the build machine,
   # allowing --override-input
   inputPaths = lib.escapeShellArgs (builtins.attrValues inputs);
 in
-lib.attrsets.mapAttrs (system: pkgs:
-  let overlayPkgs = builtins.intersectAttrs (self.overlay {} {}) pkgs;
-  in overlayPkgs //
+lib.attrsets.mapAttrs
+  (system: pkgs:
+    let
+      overlayPkgs = builtins.intersectAttrs (self.overlay { } { }) pkgs;
+    in
+    overlayPkgs //
     {
-      host-registry = pkgs.runCommand "host-registry" {
+      host-registry = pkgs.runCommand "host-registry"
+        {
           src = builtins.toFile "host-registry.nix" (
-            lib.generators.toPretty {} hostRegistry
+            lib.generators.toPretty { } hostRegistry
          );
-        } ''
+        }
+        ''
           ln -s $src $out
         '';
       list-upgradable = pkgs.writeScriptBin "list-upgradable" ''
         #! ${pkgs.runtimeShell}
         NORMAL="\033[0m"
         RED="\033[0;31m"
         YELLOW="\033[0;33m"
         GREEN="\033[0;32m"
         ${pkgs.lib.concatMapStringsSep "\n" (name:
           let
             addr = getHostAddr name;
           in lib.optionalString (addr != null) ''
             echo -n -e "${name}: $RED"
             RUNNING=$(ssh -o PreferredAuthentications=publickey -o StrictHostKeyChecking=accept-new root@"${addr}" "readlink /run/current-system")
             if [ $? = 0 ] && [ -n "$RUNNING" ]; then
               CURRENT=$(nix eval --raw ".#nixosConfigurations.${name}.config.system.build.toplevel" 2>/dev/null)
               RUNNING_VER=$(basename $RUNNING|rev|cut -d - -f 1|rev)
               RUNNING_DATE=$(echo $RUNNING_VER|cut -d . -f 3)
               CURRENT_VER=$(basename $CURRENT|rev|cut -d - -f 1|rev)
               CURRENT_DATE=$(echo $CURRENT_VER|cut -d . -f 3)
               if [ "$RUNNING" = "$CURRENT" ]; then
                 echo -e "$GREEN"current"$NORMAL $RUNNING_VER"
               elif [ $RUNNING_DATE -gt $CURRENT_DATE ]; then
                 echo -e "$GREEN"newer"$NORMAL $RUNNING_VER > $CURRENT_VER"
               elif [ "$RUNNING_VER" = "$CURRENT_VER" ]; then
                 echo -e "$YELLOW"modified"$NORMAL $RUNNING_VER"
               elif [ -n "$RUNNING_VER" ]; then
                 echo -e "$RED"outdated"$NORMAL $RUNNING_VER < $CURRENT_VER"
               else
                 echo -e "$RED"error"$NORMAL $RUNNING_VER"
               fi
             fi
             echo -n -e "$NORMAL"
           '') (builtins.attrNames self.nixosConfigurations)}
       '';
-      prebuild-all = pkgs.runCommand "prebuild-all" {
+      prebuild-all = pkgs.runCommand "prebuild-all"
+        {
           preferLocalBuild = true;
-        } ''
+        }
+        ''
           mkdir $out
           ${pkgs.lib.concatMapStrings (name: ''
             ln -s ${self.nixosConfigurations."${name}".config.system.build.toplevel} name
           '') (builtins.attrNames self.nixosConfigurations)}
         '';
       prebuild-all-remote = pkgs.writeScriptBin "prebuild-all" ''
         #!${pkgs.runtimeShell} -e
         nix copy --no-check-sigs --to ssh-ng://$1 ${inputPaths}
         set -x
         ssh $1 -- nix build -L --no-link ${
           pkgs.lib.concatMapStringsSep " " (name:
             "${self}#nixosConfigurations.${name}.config.system.build.toplevel"
           ) (builtins.attrNames self.nixosConfigurations)
         }
       '';
     } //
-    builtins.foldl' (result: host: result // {
+    builtins.foldl'
+      (result: host: result // {
         # TODO: check if the ethernet address is reachable and if not,
         # execute wol on a machine in HQ.
         "${host}-wake" = pkgs.writeScriptBin "${host}-wake" ''
           #!${pkgs.runtimeShell}
           exec ${pkgs.wol}/bin/wol ${hostRegistry.hosts."${host}".ether}
         '';
-    }) {} (builtins.attrNames (lib.filterAttrs (_: { wol ? false, ... }: wol) hostRegistry.hosts)) //
+      })
+      { }
+      (builtins.attrNames (lib.filterAttrs (_: { wol ? false, ... }: wol) hostRegistry.hosts)) //
-    builtins.foldl' (result: name:
+    builtins.foldl'
+      (result: name:
         let
           host = getHostAddr name;
           target = ''root@"${host}"'';
           rebuildArg = "--flake ${self}#${name} --option extra-substituters https://nix-serve.hq.c3d2.de";
           hostConfig = self.nixosConfigurations."${name}".config;
           # let /var/lib/microvm/*/flake point to the flake-update branch so that
           # `microvm -u $NAME` updates to what hydra built today.
           selfRef = "git+https://gitea.c3d2.de/c3d2/nix-config?ref=flake-update";
-      in result // {
+        in
+        result // {
           # Generate a small script for copying this flake to the
           # remote machine and bulding and switching there.
           # Can be run with `nix run c3d2#…-nixos-rebuild switch`
           "${name}-nixos-rebuild" = pkgs.writeScriptBin "${name}-nixos-rebuild" ''
             #!${pkgs.runtimeShell} -ex
             [[ $(ssh ${target} cat /etc/hostname) == ${name} ]]
             nix copy --no-check-sigs --to ssh-ng://${target} ${inputPaths}
             ssh ${target} nixos-rebuild ${rebuildArg} "$@"
           '';
"${name}-nixos-rebuild-hydra" = pkgs.writeScriptBin "${name}-nixos-rebuild" '' "${name}-nixos-rebuild-hydra" = pkgs.writeScriptBin "${name}-nixos-rebuild" ''
#!${pkgs.runtimeShell} -e #!${pkgs.runtimeShell} -e
echo Copying Flakes echo Copying Flakes
nix copy --no-check-sigs --to ssh-ng://root@hydra.serv.zentralwerk.org ${inputPaths} nix copy --no-check-sigs --to ssh-ng://root@hydra.serv.zentralwerk.org ${inputPaths}
echo Building on Hydra echo Building on Hydra
ssh root@hydra.serv.zentralwerk.org -- \ ssh root@hydra.serv.zentralwerk.org -- \
nix build -L -o /tmp/nixos-system-${name} \ nix build -L -o /tmp/nixos-system-${name} \
${self}#nixosConfigurations.${name}.config.system.build.toplevel ${self}#nixosConfigurations.${name}.config.system.build.toplevel
echo Built. Obtaining link to data echo Built. Obtaining link to data
TOPLEVEL=$(ssh root@hydra.serv.zentralwerk.org \ TOPLEVEL=$(ssh root@hydra.serv.zentralwerk.org \
readlink /tmp/nixos-system-${name}) readlink /tmp/nixos-system-${name})
echo Checking target ${name} echo Checking target ${name}
ssh ${target} -- bash -e <<EOF ssh ${target} -- bash -e <<EOF
[[ \$(cat /etc/hostname) == ${name} ]] [[ \$(cat /etc/hostname) == ${name} ]]
echo Copying data from Hydra to ${name} echo Copying data from Hydra to ${name}
nix copy --from https://nix-serve.hq.c3d2.de \ nix copy --from https://nix-serve.hq.c3d2.de \
$TOPLEVEL $TOPLEVEL
echo Activation on ${name}: "$@" echo Activation on ${name}: "$@"
nix-env -p /nix/var/nix/profiles/system --set $TOPLEVEL nix-env -p /nix/var/nix/profiles/system --set $TOPLEVEL
$TOPLEVEL/bin/switch-to-configuration "$@" $TOPLEVEL/bin/switch-to-configuration "$@"
EOF EOF
''; '';
"${name}-nixos-rebuild-local" = pkgs.writeScriptBin "${name}-nixos-rebuild" '' "${name}-nixos-rebuild-local" = pkgs.writeScriptBin "${name}-nixos-rebuild" ''
#!${pkgs.runtimeShell} -ex #!${pkgs.runtimeShell} -ex
[[ $1 == build || $(ssh ${target} cat /etc/hostname) == ${name} ]] [[ $1 == build || $(ssh ${target} cat /etc/hostname) == ${name} ]]
${pkgs.nixos-rebuild}/bin/nixos-rebuild ${rebuildArg} --target-host ${target} --use-remote-sudo "$@" ${pkgs.nixos-rebuild}/bin/nixos-rebuild ${rebuildArg} --target-host ${target} --use-remote-sudo "$@"
''; '';
"${name}-cleanup" = pkgs.writeScriptBin "${name}-cleanup" '' "${name}-cleanup" = pkgs.writeScriptBin "${name}-cleanup" ''
#!${pkgs.runtimeShell} -ex #!${pkgs.runtimeShell} -ex
ssh ${target} "time nix-collect-garbage -d && time nix-store --optimise" ssh ${target} "time nix-collect-garbage -d && time nix-store --optimise"
''; '';
"microvm-update-${name}" = pkgs.writeScriptBin "microvm-update-${name}" '' "microvm-update-${name}" = pkgs.writeScriptBin "microvm-update-${name}" ''
#!${pkgs.runtimeShell} -e #!${pkgs.runtimeShell} -e
${lib.optionalString (! builtins.elem (hostConfig.c3d2.deployment.server or null) [ "server9" "server10" ]) '' ${lib.optionalString (! builtins.elem (hostConfig.c3d2.deployment.server or null) [ "server9" "server10" ]) ''
echo "MicroVM must be configured to proper server" >&2 echo "MicroVM must be configured to proper server" >&2
exit 1 exit 1
''} ''}
${hostConfig.system.build.copyToServer} ${inputPaths} ${hostConfig.system.build.copyToServer} ${inputPaths}
${hostConfig.system.build.runOnServer} bash -e <<END ${hostConfig.system.build.runOnServer} bash -e <<END
mkdir -p /var/lib/microvms/${name} mkdir -p /var/lib/microvms/${name}
cd /var/lib/microvms/${name} cd /var/lib/microvms/${name}
chown root:kvm . chown root:kvm .
chmod 0775 . chmod 0775 .
rm -f old rm -f old
[ -e current ] && cp --no-dereference current old [ -e current ] && cp --no-dereference current old
nix build -L \ nix build -L \
-o current \ -o current \
${self}#nixosConfigurations.${name}.config.microvm.declaredRunner ${self}#nixosConfigurations.${name}.config.microvm.declaredRunner
echo '${selfRef}' > flake echo '${selfRef}' > flake
[ -e old ] && nix store diff-closures ./old ./current [ -e old ] && nix store diff-closures ./old ./current
ln -sfT \$PWD/current /nix/var/nix/gcroots/microvm/${name} ln -sfT \$PWD/current /nix/var/nix/gcroots/microvm/${name}
ln -sfT \$PWD/booted /nix/var/nix/gcroots/microvm/booted-${name} ln -sfT \$PWD/booted /nix/var/nix/gcroots/microvm/booted-${name}
ln -sfT \$PWD/old /nix/var/nix/gcroots/microvm/old-${name} ln -sfT \$PWD/old /nix/var/nix/gcroots/microvm/old-${name}
systemctl restart microvm@${name}.service systemctl restart microvm@${name}.service
END END
''; '';
"microvm-update-${name}-local" = pkgs.writeScriptBin "microvm-update-${name}" '' "microvm-update-${name}-local" = pkgs.writeScriptBin "microvm-update-${name}" ''
#!${pkgs.runtimeShell} -e #!${pkgs.runtimeShell} -e
${lib.optionalString (! builtins.elem (hostConfig.c3d2.deployment.server or null) [ "server9" "server10" ]) '' ${lib.optionalString (! builtins.elem (hostConfig.c3d2.deployment.server or null) [ "server9" "server10" ]) ''
echo "MicroVM must be configured to proper server" >&2 echo "MicroVM must be configured to proper server" >&2
exit 1 exit 1
''} ''}
${hostConfig.system.build.copyToServer} ${hostConfig.microvm.declaredRunner} ${hostConfig.system.build.copyToServer} ${hostConfig.microvm.declaredRunner}
${hostConfig.system.build.runOnServer} bash -e <<END ${hostConfig.system.build.runOnServer} bash -e <<END
mkdir -p /var/lib/microvms/${name} mkdir -p /var/lib/microvms/${name}
cd /var/lib/microvms/${name} cd /var/lib/microvms/${name}
chown root:kvm . chown root:kvm .
chmod 0775 . chmod 0775 .
rm -f old rm -f old
[ -e current ] && cp --no-dereference current old [ -e current ] && cp --no-dereference current old
ln -sfT ${hostConfig.microvm.declaredRunner} current ln -sfT ${hostConfig.microvm.declaredRunner} current
echo '${selfRef}' > flake echo '${selfRef}' > flake
[ -e old ] && nix store diff-closures ./old ./current [ -e old ] && nix store diff-closures ./old ./current
ln -sfT \$PWD/current /nix/var/nix/gcroots/microvm/${name} ln -sfT \$PWD/current /nix/var/nix/gcroots/microvm/${name}
ln -sfT \$PWD/booted /nix/var/nix/gcroots/microvm/booted-${name} ln -sfT \$PWD/booted /nix/var/nix/gcroots/microvm/booted-${name}
ln -sfT \$PWD/old /nix/var/nix/gcroots/microvm/old-${name} ln -sfT \$PWD/old /nix/var/nix/gcroots/microvm/old-${name}
systemctl restart microvm@${name}.service systemctl restart microvm@${name}.service
END END
''; '';
"nomad-${name}" = pkgs.writeScriptBin "nomad-${name}" '' "nomad-${name}" = pkgs.writeScriptBin "nomad-${name}" ''
#!${pkgs.runtimeShell} -e #!${pkgs.runtimeShell} -e
${lib.optionalString (hostConfig.c3d2.deployment.server or null == "nomad") '' ${lib.optionalString (hostConfig.c3d2.deployment.server or null == "nomad") ''
echo "MicroVM must be configured for nomad" >&2 echo "MicroVM must be configured for nomad" >&2
exit 1 exit 1
''} ''}
echo Copying Flakes echo Copying Flakes
nix copy --no-check-sigs --to ssh-ng://root@hydra.serv.zentralwerk.org ${secrets} ${self} nix copy --no-check-sigs --to ssh-ng://root@hydra.serv.zentralwerk.org ${secrets} ${self}
echo Building on Hydra echo Building on Hydra
ssh root@hydra.serv.zentralwerk.org -- \ ssh root@hydra.serv.zentralwerk.org -- \
nix build -L -o /tmp/microvm-${name}.job \ nix build -L -o /tmp/microvm-${name}.job \
${self}#nixosConfigurations.${name}.config.system.build.nomadJob ${self}#nixosConfigurations.${name}.config.system.build.nomadJob
echo -n Built. Obtaining path... echo -n Built. Obtaining path...
JOB=$(ssh root@hydra.serv.zentralwerk.org -- \ JOB=$(ssh root@hydra.serv.zentralwerk.org -- \
readlink /tmp/microvm-${name}.job) readlink /tmp/microvm-${name}.job)
echo \ $JOB echo \ $JOB
for h in server9 server10 ; do for h in server9 server10 ; do
echo Sharing with $h echo Sharing with $h
ssh root@$h.cluster.zentralwerk.org -- \ ssh root@$h.cluster.zentralwerk.org -- \
bash -e <<EOF & bash -e <<EOF &
nix copy --from https://nix-serve.hq.c3d2.de $JOB nix copy --from https://nix-serve.hq.c3d2.de $JOB
mkdir -p /glusterfs/fast/microvms/${name} mkdir -p /glusterfs/fast/microvms/${name}
chown microvm:kvm /glusterfs/fast/microvms/${name} chown microvm:kvm /glusterfs/fast/microvms/${name}
chmod 0775 /glusterfs/fast/microvms/${name} chmod 0775 /glusterfs/fast/microvms/${name}
mkdir -p /nix/var/nix/gcroots/microvm mkdir -p /nix/var/nix/gcroots/microvm
rm -f /nix/var/nix/gcroots/microvm/${name} rm -f /nix/var/nix/gcroots/microvm/${name}
ln -sfT $JOB /nix/var/nix/gcroots/microvm/${name} ln -sfT $JOB /nix/var/nix/gcroots/microvm/${name}
EOF EOF
done done
wait wait
echo Now starting the job echo Now starting the job
ssh root@hydra.serv.zentralwerk.org -- \ ssh root@hydra.serv.zentralwerk.org -- \
nomad run -detach $JOB nomad run -detach $JOB
''; '';
-    }) {} (builtins.attrNames self.nixosConfigurations) //
+      })
+      { }
+      (builtins.attrNames self.nixosConfigurations) //
-    builtins.foldl' (result: host:
+    builtins.foldl'
+      (result: host:
         let
           inherit (self.nixosConfigurations.${host}) config;
         in
         result // {
           # boot any machine in a microvm
-          "${host}-vm" = (self.nixosConfigurations.${host}
-            .extendModules {
-              modules = [ {
+          "${host}-vm" = (self.nixosConfigurations.${host}.extendModules {
+            modules = [{
               microvm = {
                 mem = lib.mkForce 2048;
                 hypervisor = lib.mkForce "qemu";
                 socket = lib.mkForce null;
-                shares = lib.mkForce [ {
+                shares = lib.mkForce [{
                   tag = "ro-store";
                   source = "/nix/store";
                   mountPoint = "/nix/.ro-store";
-                } ];
-                interfaces = lib.mkForce [ {
+                }];
+                interfaces = lib.mkForce [{
                   type = "user";
                   id = "eth0";
                   mac = "02:23:de:ad:be:ef";
-                } ];
+                }];
               };
               boot.isContainer = lib.mkForce false;
               users.users.root.password = "";
               fileSystems."/".fsType = lib.mkForce "tmpfs";
               services.getty.helpLine = ''
                 Log in as "root" with an empty password.
                 Use "reboot" to shut qemu down.
               '';
-            } ] ++ lib.optionals (! config ? microvm) [
+            }] ++ lib.optionals (! config ? microvm) [
               microvm.nixosModules.microvm
             ];
-          })
-            .config.microvm.declaredRunner;
+          }).config.microvm.declaredRunner;
           "${host}-tftproot" =
             if config.system.build ? tftproot
             then config.system.build.tftproot
             else lib.trace "No tftproot for ${host}" null;
         }
-    ) {} (builtins.attrNames self.nixosConfigurations)
-  ) self.legacyPackages
+      )
+      { }
+      (builtins.attrNames self.nixosConfigurations)
+  )
+  self.legacyPackages