Merge branch 'master' into staging-next

Fixed trivial conflicts caused by removing rec.
nixos-unstable
Jan Tojnar 3 years ago
commit cdf426488b
Changed files (lines changed in parentheses):

  1. doc/functions.xml (1)
  2. doc/functions/ocitools.xml (76)
  3. doc/stdenv.xml (43)
  4. nixos/doc/manual/configuration/customizing-packages.xml (6)
  5. nixos/doc/manual/release-notes/rl-1703.xml (2)
  6. nixos/doc/manual/release-notes/rl-1909.xml (22)
  7. nixos/maintainers/scripts/ec2/amazon-image.nix (28)
  8. nixos/maintainers/scripts/ec2/create-amis.sh (525)
  9. nixos/modules/config/gtk/gtk-icon-cache.nix (2)
  10. nixos/modules/installer/tools/nix-fallback-paths.nix (8)
  11. nixos/modules/module-list.nix (1)
  12. nixos/modules/programs/plotinus.nix (2)
  13. nixos/modules/programs/plotinus.xml (4)
  14. nixos/modules/rename.nix (1)
  15. nixos/modules/services/cluster/kubernetes/addon-manager.nix (85)
  16. nixos/modules/services/cluster/kubernetes/addons/dashboard.nix (36)
  17. nixos/modules/services/cluster/kubernetes/apiserver.nix (48)
  18. nixos/modules/services/cluster/kubernetes/controller-manager.nix (39)
  19. nixos/modules/services/cluster/kubernetes/default.nix (25)
  20. nixos/modules/services/cluster/kubernetes/flannel.nix (73)
  21. nixos/modules/services/cluster/kubernetes/kubelet.nix (93)
  22. nixos/modules/services/cluster/kubernetes/pki.nix (166)
  23. nixos/modules/services/cluster/kubernetes/proxy.nix (37)
  24. nixos/modules/services/cluster/kubernetes/scheduler.nix (34)
  25. nixos/modules/services/databases/postgresql.nix (4)
  26. nixos/modules/services/editors/emacs.xml (6)
  27. nixos/modules/services/misc/zookeeper.nix (1)
  28. nixos/modules/services/network-filesystems/ceph.nix (110)
  29. nixos/modules/services/web-servers/darkhttpd.nix (2)
  30. nixos/modules/services/x11/desktop-managers/enlightenment.nix (4)
  31. nixos/modules/services/x11/desktop-managers/mate.nix (2)
  32. nixos/modules/services/x11/desktop-managers/xfce.nix (6)
  33. nixos/modules/services/x11/desktop-managers/xfce4-14.nix (4)
  34. nixos/modules/virtualisation/amazon-image.nix (14)
  35. nixos/modules/virtualisation/amazon-options.nix (9)
  36. nixos/modules/virtualisation/railcar.nix (125)
  37. nixos/release.nix (16)
  38. nixos/tests/ceph.nix (52)
  39. nixos/tests/kubernetes/base.nix (5)
  40. nixos/tests/kubernetes/dns.nix (3)
  41. nixos/tests/kubernetes/rbac.nix (4)
  42. nixos/tests/quake3.nix (4)
  43. pkgs/applications/audio/cozy-audiobooks/default.nix (2)
  44. pkgs/applications/audio/pavucontrol/default.nix (2)
  45. pkgs/applications/audio/quodlibet/default.nix (4)
  46. pkgs/applications/audio/sonata/default.nix (2)
  47. pkgs/applications/editors/leafpad/default.nix (2)
  48. pkgs/applications/editors/vscode/vscode.nix (6)
  49. pkgs/applications/editors/vscode/vscodium.nix (6)
  50. pkgs/applications/graphics/gcolor2/default.nix (2)
  51. pkgs/applications/graphics/geeqie/default.nix (4)
  52. pkgs/applications/graphics/mcomix/default.nix (2)
  53. pkgs/applications/graphics/mtpaint/default.nix (4)
  54. pkgs/applications/misc/calibre/default.nix (8)
  55. pkgs/applications/misc/clipit/default.nix (2)
  56. pkgs/applications/misc/epdfview/default.nix (8)
  57. pkgs/applications/misc/font-manager/default.nix (4)
  58. pkgs/applications/misc/girara/default.nix (2)
  59. pkgs/applications/misc/gksu/default.nix (2)
  60. pkgs/applications/misc/gmrun/default.nix (2)
  61. pkgs/applications/misc/grip/default.nix (2)
  62. pkgs/applications/misc/gtk2fontsel/default.nix (4)
  63. pkgs/applications/misc/michabo/default.nix (53)
  64. pkgs/applications/misc/orca/default.nix (2)
  65. pkgs/applications/misc/pcmanfm/default.nix (2)
  66. pkgs/applications/misc/pcmanx-gtk2/default.nix (2)
  67. pkgs/applications/misc/stupidterm/default.nix (5)
  68. pkgs/applications/misc/taskell/default.nix (4)
  69. pkgs/applications/misc/zathura/wrapper.nix (2)
  70. pkgs/applications/networking/browsers/midori/default.nix (2)
  71. pkgs/applications/networking/browsers/surf/default.nix (4)
  72. pkgs/applications/networking/browsers/vivaldi/default.nix (4)
  73. pkgs/applications/networking/cluster/kubernetes/default.nix (14)
  74. pkgs/applications/networking/cluster/kubernetes/mk-docker-opts.sh (113)
  75. pkgs/applications/networking/cluster/terraform/default.nix (4)
  76. pkgs/applications/networking/corebird/default.nix (2)
  77. pkgs/applications/networking/instant-messengers/dino/default.nix (2)
  78. pkgs/applications/networking/instant-messengers/discord/default.nix (6)
  79. pkgs/applications/networking/instant-messengers/signal-desktop/default.nix (7)
  80. pkgs/applications/networking/irc/weechat/default.nix (6)
  81. pkgs/applications/networking/irc/weechat/scripts/default.nix (12)
  82. pkgs/applications/networking/irc/weechat/scripts/wee-slack/default.nix (6)
  83. pkgs/applications/networking/irc/weechat/scripts/weechat-otr/default.nix (64)
  84. pkgs/applications/networking/irc/weechat/scripts/weechat-otr/libpath.patch (13)
  85. pkgs/applications/networking/irc/weechat/scripts/weechat-xmpp/default.nix (36)
  86. pkgs/applications/networking/irc/weechat/scripts/weechat-xmpp/libpath.patch (16)
  87. pkgs/applications/networking/irc/weechat/wrapper.nix (6)
  88. pkgs/applications/networking/mailreaders/astroid/default.nix (2)
  89. pkgs/applications/networking/modem-manager-gui/default.nix (2)
  90. pkgs/applications/networking/newsreaders/pan/default.nix (2)
  91. pkgs/applications/networking/p2p/transmission/default.nix (2)
  92. pkgs/applications/networking/remote/remmina/default.nix (2)
  93. pkgs/applications/office/planner/default.nix (2)
  94. pkgs/applications/science/astronomy/gpredict/default.nix (2)
  95. pkgs/applications/science/biology/xenomapper/default.nix (23)
  96. pkgs/applications/search/catfish/default.nix (2)
  97. pkgs/applications/video/celluloid/default.nix (2)
  98. pkgs/applications/video/handbrake/default.nix (2)
  99. pkgs/applications/video/mkvtoolnix/default.nix (4)
  100. pkgs/applications/video/subtitleeditor/default.nix (4)

Some files were not shown because too many files have changed in this diff.

@ -20,4 +20,5 @@
<xi:include href="functions/appimagetools.xml" />
<xi:include href="functions/prefer-remote-fetch.xml" />
<xi:include href="functions/nix-gitignore.xml" />
<xi:include href="functions/ocitools.xml" />
</chapter>

@ -0,0 +1,76 @@
<section xmlns="http://docbook.org/ns/docbook"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xi="http://www.w3.org/2001/XInclude"
xml:id="sec-pkgs-ociTools">
<title>pkgs.ociTools</title>
<para>
<varname>pkgs.ociTools</varname> is a set of functions for creating
containers according to the
<link xlink:href="https://github.com/opencontainers/runtime-spec">OCI
container specification v1.0.0</link>. Beyond that it makes no assumptions
about the container runner you use to run the created container.
</para>
<section xml:id="ssec-pkgs-ociTools-buildContainer">
<title>buildContainer</title>
<para>
This function creates a simple OCI container that runs a single command
inside of it. An OCI container consists of a <varname>config.json</varname>
and a rootfs directory. The Nix store of the container will contain all
referenced dependencies of the given command.
</para>
<para>
The parameters of <varname>buildContainer</varname> with an example value
are described below:
</para>
<example xml:id='ex-ociTools-buildContainer'>
<title>Build Container</title>
<programlisting>
buildContainer {
cmd = with pkgs; writeScript "run.sh" ''
#!${bash}/bin/bash
${coreutils}/bin/exec ${bash}/bin/bash
''; <co xml:id='ex-ociTools-buildContainer-1' />
mounts = {
"/data" = {
type = "none";
source = "/var/lib/mydata";
options = [ "bind" ];
};
};<co xml:id='ex-ociTools-buildContainer-2' />
readonly = false; <co xml:id='ex-ociTools-buildContainer-3' />
}
</programlisting>
<calloutlist>
<callout arearefs='ex-ociTools-buildContainer-1'>
<para>
<varname>cmd</varname> specifies the program to run inside the container.
This is the only required argument for <varname>buildContainer</varname>.
All referenced packages inside the derivation will be made available
inside the container.
</para>
</callout>
<callout arearefs='ex-ociTools-buildContainer-2'>
<para>
<varname>mounts</varname> specifies additional mount points chosen by the
user. By default only a minimal set of necessary filesystems are mounted
into the container (e.g. procfs, cgroupfs).
</para>
</callout>
<callout arearefs='ex-ociTools-buildContainer-3'>
<para>
<varname>readonly</varname> makes the container's rootfs read-only if it is set to true.
The default value is <literal>false</literal>.
</para>
</callout>
</calloutlist>
</example>
</section>
</section>
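
For illustration (not part of this diff): the result of buildContainer is an ordinary derivation whose output contains the config.json and rootfs described above, so it can be built like any other Nix expression. A minimal sketch, assuming pkgs.ociTools.buildContainer exactly as documented in this hunk; the script contents are illustrative:

{ pkgs ? import <nixpkgs> { } }:

# Minimal sketch: cmd is the only required argument; readonly defaults to false.
pkgs.ociTools.buildContainer {
  cmd = pkgs.writeScript "run.sh" ''
    #!${pkgs.bash}/bin/bash
    exec ${pkgs.coreutils}/bin/uname -a
  '';
  readonly = true;
}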

@ -2714,6 +2714,49 @@ nativeBuildInputs = [ breakpointHook ];
</note>
</listitem>
</varlistentry>
<varlistentry>
<term>
installShellFiles
</term>
<listitem>
<para>
This hook helps with installing manpages and shell completion files. It
exposes 2 shell functions <literal>installManPage</literal> and
<literal>installShellCompletion</literal> that can be used from your
<literal>postInstall</literal> hook.
</para>
<para>
The <literal>installManPage</literal> function takes one or more paths
to manpages to install. The manpages must have a section suffix, and may
optionally be compressed (with <literal>.gz</literal> suffix). This
function will place them into the correct directory.
</para>
<para>
The <literal>installShellCompletion</literal> function takes one or more
paths to shell completion files. By default it will autodetect the shell
type from the completion file extension, but you may also specify it by
passing one of <literal>--bash</literal>, <literal>--fish</literal>, or
<literal>--zsh</literal>. These flags apply to all paths listed after
them (up until another shell flag is given). Each path may also be given a
custom installation name by passing <literal>--name NAME</literal> before
the path. If this flag is not provided, zsh
completions will be renamed automatically such that
<literal>foobar.zsh</literal> becomes <literal>_foobar</literal>.
<programlisting>
nativeBuildInputs = [ installShellFiles ];
postInstall = ''
installManPage doc/foobar.1 doc/barfoo.3
# explicit behavior
installShellCompletion --bash --name foobar.bash share/completions.bash
installShellCompletion --fish --name foobar.fish share/completions.fish
installShellCompletion --zsh --name _foobar share/completions.zsh
# implicit behavior
installShellCompletion share/completions/foobar.{bash,fish,zsh}
'';
</programlisting>
</para>
</listitem>
</varlistentry>
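
For context (not part of this diff): the postInstall fragment above is meant to sit inside a package expression. A hedged sketch of a complete derivation using the hook; the package name and file paths are illustrative:

{ stdenv, installShellFiles }:

stdenv.mkDerivation {
  pname = "foobar";   # illustrative, matching the example above
  version = "1.0";
  src = ./.;

  # the hook provides installManPage and installShellCompletion at build time
  nativeBuildInputs = [ installShellFiles ];

  postInstall = ''
    installManPage doc/foobar.1
    installShellCompletion --zsh --name _foobar share/completions.zsh
  '';
}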
<varlistentry>
<term>
libiconv, libintl

@ -24,8 +24,8 @@
<para>
Apart from high-level options, it’s possible to tweak a package in almost
arbitrary ways, such as changing or disabling dependencies of a package. For
instance, the Emacs package in Nixpkgs by default has a dependency on GTK+ 2.
If you want to build it against GTK+ 3, you can specify that as follows:
instance, the Emacs package in Nixpkgs by default has a dependency on GTK 2.
If you want to build it against GTK 3, you can specify that as follows:
<programlisting>
<xref linkend="opt-environment.systemPackages"/> = [ (pkgs.emacs.override { gtk = pkgs.gtk3; }) ];
</programlisting>
@ -33,7 +33,7 @@
function that produces Emacs, with the original arguments amended by the set
of arguments specified by you. So here the function argument
<varname>gtk</varname> gets the value <literal>pkgs.gtk3</literal>, causing
Emacs to depend on GTK+ 3. (The parentheses are necessary because in Nix,
Emacs to depend on GTK 3. (The parentheses are necessary because in Nix,
function application binds more weakly than list construction, so without
them, <xref linkend="opt-environment.systemPackages"/> would be a list with
two elements.)
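
As an aside (not part of this diff): the same tweak can be expressed as an overlay instead of an inline override. A sketch assuming the standard nixpkgs.overlays NixOS option, with the emacs/gtk3 names taken from the example above:

{
  nixpkgs.overlays = [
    (self: super: {
      # build Emacs against GTK 3 for every consumer of pkgs.emacs
      emacs = super.emacs.override { gtk = self.gtk3; };
    })
  ];
}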

@ -730,7 +730,7 @@ in
</listitem>
<listitem>
<para>
<literal>jre</literal> now uses the GTK UI by default. This improves
<literal>jre</literal> now defaults to GTK UI by default. This improves
visual consistency and makes Java follow system font style, improving the
situation on HighDPI displays. This has a cost of increased closure size;
for server and other headless workloads it's recommended to use

@ -422,6 +422,12 @@
It was not useful except for debugging purposes and was confusingly set as default in some circumstances.
</para>
</listitem>
<listitem>
<para>
The WeeChat plugin <literal>pkgs.weechatScripts.weechat-xmpp</literal> has been removed as it doesn't receive
any updates from upstream and depends on outdated Python2-based modules.
</para>
</listitem>
</itemizedlist>
</section>
@ -710,6 +716,22 @@
<literal>nix-shell -p altcoins.dogecoin</literal>, etc.
</para>
</listitem>
<listitem>
<para>
Ceph has been upgraded to v14.2.1.
See the <link xlink:href="https://ceph.com/releases/v14-2-0-nautilus-released/">release notes</link> for details.
The mgr dashboard, as well as OSDs backed by loop devices, are no longer explicitly supported by the package and module.
Note: There have been some issues with python-cherrypy, which is used by the dashboard
and prometheus mgr modules (and possibly others), hence 0000-dont-check-cherrypy-version.patch.
</para>
</listitem>
<listitem>
<para>
<literal>pkgs.weechat</literal> is now compiled against <literal>pkgs.python3</literal>.
WeeChat also recommends <link xlink:href="https://weechat.org/scripts/python3/">using Python 3</link>
in its documentation.
</para>
</listitem>
</itemizedlist>
</section>
</section>

@ -17,7 +17,7 @@ in {
name = mkOption {
type = types.str;
description = "The name of the generated derivation";
default = "nixos-disk-image";
default = "nixos-amazon-image-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}";
};
contents = mkOption {
@ -42,7 +42,7 @@ in {
format = mkOption {
type = types.enum [ "raw" "qcow2" "vpc" ];
default = "qcow2";
default = "vpc";
description = "The image format to output";
};
};
@ -51,7 +51,9 @@ in {
inherit lib config;
inherit (cfg) contents format name;
pkgs = import ../../../.. { inherit (pkgs) system; }; # ensure we use the regular qemu-kvm package
partitionTableType = if config.ec2.hvm then "legacy" else "none";
partitionTableType = if config.ec2.efi then "efi"
else if config.ec2.hvm then "legacy"
else "none";
diskSize = cfg.sizeMB;
fsType = "ext4";
configFile = pkgs.writeText "configuration.nix"
@ -61,7 +63,27 @@ in {
${optionalString config.ec2.hvm ''
ec2.hvm = true;
''}
${optionalString config.ec2.efi ''
ec2.efi = true;
''}
}
'';
postVM = ''
extension=''${diskImage##*.}
friendlyName=$out/${cfg.name}.$extension
mv "$diskImage" "$friendlyName"
diskImage=$friendlyName
mkdir -p $out/nix-support
echo "file ${cfg.format} $diskImage" >> $out/nix-support/hydra-build-products
${pkgs.jq}/bin/jq -n \
--arg label ${lib.escapeShellArg config.system.nixos.label} \
--arg system ${lib.escapeShellArg pkgs.stdenv.hostPlatform.system} \
--arg logical_bytes "$(${pkgs.qemu}/bin/qemu-img info --output json "$diskImage" | ${pkgs.jq}/bin/jq '."virtual-size"')" \
--arg file "$diskImage" \
'$ARGS.named' \
> $out/nix-support/image-info.json
'';
};
}

@ -1,279 +1,296 @@
#!/usr/bin/env nix-shell
#! nix-shell -i bash -p qemu ec2_ami_tools jq ec2_api_tools awscli
#!nix-shell -p awscli -p jq -p qemu -i bash
# Uploads and registers NixOS images built from the
# <nixos/release.nix> amazonImage attribute. Images are uploaded and
# registered via a home region, and then copied to other regions.
# The home region requires an s3 bucket, and a "vmimport" IAM role
# with access to the S3 bucket. Configuration of the vmimport role is
# documented in
# https://docs.aws.amazon.com/vm-import/latest/userguide/vmimport-image-import.html
# set -x
set -euo pipefail
# configuration
state_dir=/home/deploy/amis/ec2-images
home_region=eu-west-1
bucket=nixos-amis
regions=(eu-west-1 eu-west-2 eu-west-3 eu-central-1
us-east-1 us-east-2 us-west-1 us-west-2
ca-central-1
ap-southeast-1 ap-southeast-2 ap-northeast-1 ap-northeast-2
ap-south-1 ap-east-1
sa-east-1)
log() {
echo "$@" >&2
}
if [ -z "$1" ]; then
log "Usage: ./upload-amazon-image.sh IMAGE_OUTPUT"
exit 1
fi
# result of the amazon-image from nixos/release.nix
store_path=$1
if [ ! -e "$store_path" ]; then
log "Store path: $store_path does not exist, fetching..."
nix-store --realise "$store_path"
fi
if [ ! -d "$store_path" ]; then
log "store_path: $store_path is not a directory. aborting"
exit 1
fi
read_image_info() {
if [ ! -e "$store_path/nix-support/image-info.json" ]; then
log "Image missing metadata"
exit 1
fi
jq -r "$1" "$store_path/nix-support/image-info.json"
}
# We handle a single image per invocation, store all attributes in
# globals for convenience.
image_label=$(read_image_info .label)
image_system=$(read_image_info .system)
image_file=$(read_image_info .file)
image_logical_bytes=$(read_image_info .logical_bytes)
# Derived attributes
image_logical_gigabytes=$((($image_logical_bytes-1)/1024/1024/1024+1)) # Round to the next GB
case "$image_system" in
aarch64-linux)
amazon_arch=arm64
;;
x86_64-linux)
amazon_arch=x86_64
;;
*)
log "Unknown system: $image_system"
exit 1
esac
image_name="NixOS-${image_label}-${image_system}"
image_description="NixOS ${image_label} ${image_system}"
log "Image Details:"
log " Name: $image_name"
log " Description: $image_description"
log " Size (gigabytes): $image_logical_gigabytes"
log " System: $image_system"
log " Amazon Arch: $amazon_arch"
read_state() {
local state_key=$1
local type=$2
cat "$state_dir/$state_key.$type" 2>/dev/null || true
}
write_state() {
local state_key=$1
local type=$2
local val=$3
mkdir -p $state_dir
echo "$val" > "$state_dir/$state_key.$type"
}
wait_for_import() {
local region=$1
local task_id=$2
local state snapshot_id
log "Waiting for import task $task_id to be completed"
while true; do
read state progress snapshot_id < <(
aws ec2 describe-import-snapshot-tasks --region $region --import-task-ids "$task_id" | \
jq -r '.ImportSnapshotTasks[].SnapshotTaskDetail | "\(.Status) \(.Progress) \(.SnapshotId)"'
)
log " ... state=$state progress=$progress snapshot_id=$snapshot_id"
case "$state" in
active)
sleep 10
;;
completed)
echo "$snapshot_id"
return
;;
*)
log "Unexpected snapshot import state: '${state}'"
exit 1
;;
esac
done
}
wait_for_image() {
local region=$1
local ami_id=$2
local state
log "Waiting for image $ami_id to be available"
while true; do
read state < <(
aws ec2 describe-images --image-ids "$ami_id" --region $region | \
jq -r ".Images[].State"
)
log " ... state=$state"
case "$state" in
pending)
sleep 10
;;
available)
return
;;
*)
log "Unexpected AMI state: '${state}'"
exit 1
;;
esac
done
}
make_image_public() {
local region=$1
local ami_id=$2
# To start with do: nix-shell -p awscli --run "aws configure"
wait_for_image $region "$ami_id"
set -e
set -o pipefail
log "Making image $ami_id public"
version=$(nix-instantiate --eval --strict '<nixpkgs>' -A lib.version | sed s/'"'//g)
major=${version:0:5}
echo "NixOS version is $version ($major)"
aws ec2 modify-image-attribute \
--image-id "$ami_id" --region "$region" --launch-permission 'Add={Group=all}' >&2
}
stateDir=/home/deploy/amis/ec2-image-$version
echo "keeping state in $stateDir"
mkdir -p $stateDir
upload_image() {
local region=$1
rm -f ec2-amis.nix
local aws_path=${image_file#/}
types="hvm"
stores="ebs"
regions="eu-west-1 eu-west-2 eu-west-3 eu-central-1 us-east-1 us-east-2 us-west-1 us-west-2 ca-central-1 ap-southeast-1 ap-southeast-2 ap-northeast-1 ap-northeast-2 sa-east-1 ap-south-1"
local state_key="$region.$image_label.$image_system"
local task_id=$(read_state "$state_key" task_id)
local snapshot_id=$(read_state "$state_key" snapshot_id)
local ami_id=$(read_state "$state_key" ami_id)
for type in $types; do
link=$stateDir/$type
imageFile=$link/nixos.qcow2
system=x86_64-linux
arch=x86_64
if [ -z "$task_id" ]; then
log "Checking for image on S3"
if ! aws s3 ls --region "$region" "s3://${bucket}/${aws_path}" >&2; then
log "Image missing from aws, uploading"
aws s3 cp --region $region "$image_file" "s3://${bucket}/${aws_path}" >&2
fi
# Build the image.
if ! [ -L $link ]; then
if [ $type = pv ]; then hvmFlag=false; else hvmFlag=true; fi
log "Importing image from S3 path s3://$bucket/$aws_path"
echo "building image type '$type'..."
nix-build -o $link \
'<nixpkgs/nixos>' \
-A config.system.build.amazonImage \
--arg configuration "{ imports = [ <nixpkgs/nixos/maintainers/scripts/ec2/amazon-image.nix> ]; ec2.hvm = $hvmFlag; }"
task_id=$(aws ec2 import-snapshot --disk-container "{
\"Description\": \"nixos-image-${image_label}-${image_system}\",
\"Format\": \"vhd\",
\"UserBucket\": {
\"S3Bucket\": \"$bucket\",
\"S3Key\": \"$aws_path\"
}
}" --region $region | jq -r '.ImportTaskId')
write_state "$state_key" task_id "$task_id"
fi
for store in $stores; do
bucket=nixos-amis
bucketDir="$version-$type-$store"
prevAmi=
prevRegion=
for region in $regions; do
name=nixos-$version-$arch-$type-$store
description="NixOS $system $version ($type-$store)"
amiFile=$stateDir/$region.$type.$store.ami-id
if ! [ -e $amiFile ]; then
echo "doing $name in $region..."
if [ -n "$prevAmi" ]; then
ami=$(aws ec2 copy-image \
--region "$region" \
--source-region "$prevRegion" --source-image-id "$prevAmi" \
--name "$name" --description "$description" | jq -r '.ImageId')
if [ "$ami" = null ]; then break; fi
else
if [ $store = s3 ]; then
# Bundle the image.
imageDir=$stateDir/$type-bundled
# Convert the image to raw format.
rawFile=$stateDir/$type.raw
if ! [ -e $rawFile ]; then
qemu-img convert -f qcow2 -O raw $imageFile $rawFile.tmp
mv $rawFile.tmp $rawFile
fi
if ! [ -d $imageDir ]; then
rm -rf $imageDir.tmp
mkdir -p $imageDir.tmp
ec2-bundle-image \
-d $imageDir.tmp \
-i $rawFile --arch $arch \
--user "$AWS_ACCOUNT" -c "$EC2_CERT" -k "$EC2_PRIVATE_KEY"
mv $imageDir.tmp $imageDir
fi
# Upload the bundle to S3.
if ! [ -e $imageDir/uploaded ]; then
echo "uploading bundle to S3..."
ec2-upload-bundle \
-m $imageDir/$type.raw.manifest.xml \
-b "$bucket/$bucketDir" \
-a "$AWS_ACCESS_KEY_ID" -s "$AWS_SECRET_ACCESS_KEY" \
--location EU
touch $imageDir/uploaded
fi
extraFlags="--image-location $bucket/$bucketDir/$type.raw.manifest.xml"
else
# Convert the image to vhd format so we don't have
# to upload a huge raw image.
vhdFile=$stateDir/$type.vhd
if ! [ -e $vhdFile ]; then
qemu-img convert -f qcow2 -O vpc $imageFile $vhdFile.tmp
mv $vhdFile.tmp $vhdFile
fi
vhdFileLogicalBytes="$(qemu-img info "$vhdFile" | grep ^virtual\ size: | cut -f 2 -d \( | cut -f 1 -d \ )"
vhdFileLogicalGigaBytes=$(((vhdFileLogicalBytes-1)/1024/1024/1024+1)) # Round to the next GB
echo "Disk size is $vhdFileLogicalBytes bytes. Will be registered as $vhdFileLogicalGigaBytes GB."
taskId=$(cat $stateDir/$region.$type.task-id 2> /dev/null || true)
volId=$(cat $stateDir/$region.$type.vol-id 2> /dev/null || true)
snapId=$(cat $stateDir/$region.$type.snap-id 2> /dev/null || true)
# Import the VHD file.
if [ -z "$snapId" -a -z "$volId" -a -z "$taskId" ]; then
echo "importing $vhdFile..."
taskId=$(ec2-import-volume $vhdFile --no-upload -f vhd \
-O "$AWS_ACCESS_KEY_ID" -W "$AWS_SECRET_ACCESS_KEY" \
-o "$AWS_ACCESS_KEY_ID" -w "$AWS_SECRET_ACCESS_KEY" \
--region "$region" -z "${region}a" \
--bucket "$bucket" --prefix "$bucketDir/" \
| tee /dev/stderr \
| sed 's/.*\(import-vol-[0-9a-z]\+\).*/\1/ ; t ; d')
echo -n "$taskId" > $stateDir/$region.$type.task-id
fi
if [ -z "$snapId" -a -z "$volId" ]; then
ec2-resume-import $vhdFile -t "$taskId" --region "$region" \
-O "$AWS_ACCESS_KEY_ID" -W "$AWS_SECRET_ACCESS_KEY" \
-o "$AWS_ACCESS_KEY_ID" -w "$AWS_SECRET_ACCESS_KEY"
fi
# Wait for the volume creation to finish.
if [ -z "$snapId" -a -z "$volId" ]; then
echo "waiting for import to finish..."
while true; do
volId=$(aws ec2 describe-conversion-tasks --conversion-task-ids "$taskId" --region "$region" | jq -r .ConversionTasks[0].ImportVolume.Volume.Id)
if [ "$volId" != null ]; then break; fi
sleep 10
done
echo -n "$volId" > $stateDir/$region.$type.vol-id
fi
# Delete the import task.
if [ -n "$volId" -a -n "$taskId" ]; then
echo "removing import task..."
ec2-delete-disk-image -t "$taskId" --region "$region" \
-O "$AWS_ACCESS_KEY_ID" -W "$AWS_SECRET_ACCESS_KEY" \
-o "$AWS_ACCESS_KEY_ID" -w "$AWS_SECRET_ACCESS_KEY" || true
rm -f $stateDir/$region.$type.task-id
fi
# Create a snapshot.
if [ -z "$snapId" ]; then
echo "creating snapshot..."
# FIXME: this can fail with InvalidVolume.NotFound. Eventual consistency yay.
snapId=$(aws ec2 create-snapshot --volume-id "$volId" --region "$region" --description "$description" | jq -r .SnapshotId)
if [ "$snapId" = null ]; then exit 1; fi
echo -n "$snapId" > $stateDir/$region.$type.snap-id
fi
# Wait for the snapshot to finish.
echo "waiting for snapshot to finish..."
while true; do
status=$(aws ec2 describe-snapshots --snapshot-ids "$snapId" --region "$region" | jq -r .Snapshots[0].State)
if [ "$status" = completed ]; then break; fi
sleep 10
done
# Delete the volume.
if [ -n "$volId" ]; then
echo "deleting volume..."
aws ec2 delete-volume --volume-id "$volId" --region "$region" || true
rm -f $stateDir/$region.$type.vol-id
fi
blockDeviceMappings="DeviceName=/dev/sda1,Ebs={SnapshotId=$snapId,VolumeSize=$vhdFileLogicalGigaBytes,DeleteOnTermination=true,VolumeType=gp2}"
extraFlags=""
if [ $type = pv ]; then
extraFlags+=" --root-device-name /dev/sda1"
else
extraFlags+=" --root-device-name /dev/sda1"
extraFlags+=" --sriov-net-support simple"
extraFlags+=" --ena-support"
fi
blockDeviceMappings+=" DeviceName=/dev/sdb,VirtualName=ephemeral0"
blockDeviceMappings+=" DeviceName=/dev/sdc,VirtualName=ephemeral1"
blockDeviceMappings+=" DeviceName=/dev/sdd,VirtualName=ephemeral2"
blockDeviceMappings+=" DeviceName=/dev/sde,VirtualName=ephemeral3"
fi
if [ $type = hvm ]; then
extraFlags+=" --sriov-net-support simple"
extraFlags+=" --ena-support"
fi
# Register the AMI.
if [ $type = pv ]; then
kernel=$(aws ec2 describe-images --owner amazon --filters "Name=name,Values=pv-grub-hd0_1.05-$arch.gz" | jq -r .Images[0].ImageId)
if [ "$kernel" = null ]; then break; fi
echo "using PV-GRUB kernel $kernel"
extraFlags+=" --virtualization-type paravirtual --kernel $kernel"
else
extraFlags+=" --virtualization-type hvm"
fi
ami=$(aws ec2 register-image \
--name "$name" \
--description "$description" \
--region "$region" \
--architecture "$arch" \
--block-device-mappings $blockDeviceMappings \
$extraFlags | jq -r .ImageId)
if [ "$ami" = null ]; then break; fi
fi
echo -n "$ami" > $amiFile
echo "created AMI $ami of type '$type' in $region..."
else
ami=$(cat $amiFile)
fi
echo "region = $region, type = $type, store = $store, ami = $ami"
if [ -z "$prevAmi" ]; then
prevAmi="$ami"
prevRegion="$region"
fi
done
if [ -z "$snapshot_id" ]; then
snapshot_id=$(wait_for_import "$region" "$task_id")
write_state "$state_key" snapshot_id "$snapshot_id"
fi
done
if [ -z "$ami_id" ]; then
log "Registering snapshot $snapshot_id as AMI"
local block_device_mappings=(
"DeviceName=/dev/sda1,Ebs={SnapshotId=$snapshot_id,VolumeSize=$image_logical_gigabytes,DeleteOnTermination=true,VolumeType=gp2}"
)
local extra_flags=(
--root-device-name /dev/sda1
--sriov-net-support simple
--ena-support
--virtualization-type hvm
)
block_device_mappings+=(DeviceName=/dev/sdb,VirtualName=ephemeral0)
block_device_mappings+=(DeviceName=/dev/sdc,VirtualName=ephemeral1)
block_device_mappings+=(DeviceName=/dev/sdd,VirtualName=ephemeral2)
block_device_mappings+=(DeviceName=/dev/sde,VirtualName=ephemeral3)
ami_id=$(
aws ec2 register-image \
--name "$image_name" \
--description "$image_description" \
--region $region \
--architecture $amazon_arch \
--block-device-mappings "${block_device_mappings[@]}" \
"${extra_flags[@]}" \
| jq -r '.ImageId'
)
write_state "$state_key" ami_id "$ami_id"
fi
done
make_image_public $region "$ami_id"
for type in $types; do
link=$stateDir/$type
system=x86_64-linux
arch=x86_64
echo "$ami_id"
}
for store in $stores; do
copy_to_region() {
local region=$1
local from_region=$2
local from_ami_id=$3
for region in $regions; do
state_key="$region.$image_label.$image_system"
ami_id=$(read_state "$state_key" ami_id)
name=nixos-$version-$arch-$type-$store
amiFile=$stateDir/$region.$type.$store.ami-id
ami=$(cat $amiFile)
if [ -z "$ami_id" ]; then
log "Copying $from_ami_id to $region"
ami_id=$(
aws ec2 copy-image \
--region "$region" \
--source-region "$from_region" \
--source-image-id "$from_ami_id" \
--name "$image_name" \
--description "$image_description" \
| jq -r '.ImageId'
)
echo "region = $region, type = $type, store = $store, ami = $ami"
write_state "$state_key" ami_id "$ami_id"
fi
echo -n "waiting for AMI..."
while true; do
status=$(aws ec2 describe-images --image-ids "$ami" --region "$region" | jq -r .Images[0].State)
if [ "$status" = available ]; then break; fi
sleep 10
echo -n '.'
done
echo
make_image_public $region "$ami_id"
echo "$ami_id"
}
# Make the image public.
aws ec2 modify-image-attribute \
--image-id "$ami" --region "$region" --launch-permission 'Add={Group=all}'
upload_all() {
home_image_id=$(upload_image "$home_region")
jq -n \
--arg key "$home_region.$image_system" \
--arg value "$home_image_id" \
'$ARGS.named'
echo " \"$major\".$region.$type-$store = \"$ami\";" >> ec2-amis.nix
done
for region in "${regions[@]}"; do
if [ "$region" = "$home_region" ]; then
continue
fi
copied_image_id=$(copy_to_region "$region" "$home_region" "$home_image_id")
jq -n \
--arg key "$region.$image_system" \
--arg value "$copied_image_id" \
'$ARGS.named'
done
}
done
upload_all | jq --slurp from_entries

@ -7,7 +7,7 @@ with lib;
type = types.bool;
default = config.services.xserver.enable;
description = ''
Whether to build icon theme caches for GTK+ applications.
Whether to build icon theme caches for GTK applications.
'';
};
};

@ -1,6 +1,6 @@
{
x86_64-linux = "/nix/store/hbhdjn5ik3byg642d1m11k3k3s0kn3py-nix-2.2.2";
i686-linux = "/nix/store/fz5cikwvj3n0a6zl44h6l2z3cin64mda-nix-2.2.2";
aarch64-linux = "/nix/store/2gba4cyl4wvxzfbhmli90jy4n5aj0kjj-nix-2.2.2";
x86_64-darwin = "/nix/store/87i4fp46jfw9yl8c7i9gx75m5yph7irl-nix-2.2.2";
x86_64-linux = "/nix/store/3ds3cgji9vjxdbgp10av6smyym1126d1-nix-2.3";
i686-linux = "/nix/store/ln1ndqvfpc9cdl03vqxi6kvlxm9wfv9g-nix-2.3";
aarch64-linux = "/nix/store/n8a1rwzrp20qcr2c4hvyn6c5q9zx8csw-nix-2.3";
x86_64-darwin = "/nix/store/jq6npmpld02sz4rgniz0qrsdfnm6j17a-nix-2.3";
}

@ -948,6 +948,7 @@
./virtualisation/openvswitch.nix
./virtualisation/parallels-guest.nix
./virtualisation/qemu-guest-agent.nix
./virtualisation/railcar.nix
./virtualisation/rkt.nix
./virtualisation/virtualbox-guest.nix
./virtualisation/virtualbox-host.nix

@ -18,7 +18,7 @@ in
enable = mkOption {
default = false;
description = ''
Whether to enable the Plotinus GTK+3 plugin. Plotinus provides a
Whether to enable the Plotinus GTK 3 plugin. Plotinus provides a
popup (triggered by Ctrl-Shift-P) to search the menus of a
compatible application.
'';

@ -13,10 +13,10 @@
<link xlink:href="https://github.com/p-e-w/plotinus"/>
</para>
<para>
Plotinus is a searchable command palette in every modern GTK+ application.
Plotinus is a searchable command palette in every modern GTK application.
</para>
<para>
When in a GTK+3 application and Plotinus is enabled, you can press
When in a GTK 3 application and Plotinus is enabled, you can press
<literal>Ctrl+Shift+P</literal> to open the command palette. The command
palette provides a searchable list of all menu items in the application.
</para>
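
For reference (not part of this diff): enabling the plugin is a one-line NixOS setting, matching the option defined in the plotinus.nix hunk above:

{
  # makes the Ctrl+Shift+P command palette available in GTK 3 applications
  programs.plotinus.enable = true;
}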

@ -34,6 +34,7 @@ with lib;
(mkRenamedOptionModule [ "services" "kubernetes" "etcd" "caFile" ] [ "services" "kubernetes" "apiserver" "etcd" "caFile" ])
(mkRemovedOptionModule [ "services" "kubernetes" "kubelet" "applyManifests" ] "")
(mkRemovedOptionModule [ "services" "kubernetes" "kubelet" "cadvisorPort" ] "")
(mkRemovedOptionModule [ "services" "kubernetes" "kubelet" "allowPrivileged" ] "")
(mkRenamedOptionModule [ "services" "kubernetes" "proxy" "address" ] ["services" "kubernetes" "proxy" "bindAddress"])
(mkRemovedOptionModule [ "services" "kubernetes" "verbose" ] "")
(mkRenamedOptionModule [ "services" "logstash" "address" ] [ "services" "logstash" "listenAddress" ])

@ -62,50 +62,19 @@ in
'';
};
enable = mkEnableOption "Kubernetes addon manager";
kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes addon manager";
bootstrapAddonsKubeconfig = top.lib.mkKubeConfigOptions "Kubernetes addon manager bootstrap";
enable = mkEnableOption "Whether to enable Kubernetes addon manager.";
};
###### implementation
config = let
addonManagerPaths = filter (a: a != null) [
cfg.kubeconfig.caFile
cfg.kubeconfig.certFile
cfg.kubeconfig.keyFile
];
bootstrapAddonsPaths = filter (a: a != null) [
cfg.bootstrapAddonsKubeconfig.caFile
cfg.bootstrapAddonsKubeconfig.certFile
cfg.bootstrapAddonsKubeconfig.keyFile
];
in mkIf cfg.enable {
config = mkIf cfg.enable {
environment.etc."kubernetes/addons".source = "${addons}/";
#TODO: Get rid of kube-addon-manager in the future for the following reasons
# - it is basically just a shell script wrapped around kubectl
# - it assumes that it is clusterAdmin or can gain clusterAdmin rights through serviceAccount
# - it is designed to be used with k8s system components only
# - it would be better with a more Nix-oriented way of managing addons
systemd.services.kube-addon-manager = {
description = "Kubernetes addon manager";
wantedBy = [ "kubernetes.target" ];
after = [ "kube-node-online.target" ];
before = [ "kubernetes.target" ];
environment = {
ADDON_PATH = "/etc/kubernetes/addons/";
KUBECONFIG = top.lib.mkKubeConfig "kube-addon-manager" cfg.kubeconfig;
};
path = with pkgs; [ gawk kubectl ];
preStart = ''
until kubectl -n kube-system get serviceaccounts/default 2>/dev/null; do
echo kubectl -n kube-system get serviceaccounts/default: exit status $?
sleep 2
done
'';
after = [ "kube-apiserver.service" ];
environment.ADDON_PATH = "/etc/kubernetes/addons/";
path = [ pkgs.gawk ];
serviceConfig = {
Slice = "kubernetes.slice";
ExecStart = "${top.package}/bin/kube-addons";
@ -115,52 +84,8 @@ in
Restart = "on-failure";
RestartSec = 10;
};
unitConfig.ConditionPathExists = addonManagerPaths;
};
systemd.paths.kube-addon-manager = {
wantedBy = [ "kube-addon-manager.service" ];
pathConfig = {
PathExists = addonManagerPaths;
PathChanged = addonManagerPaths;
};
};
services.kubernetes.addonManager.kubeconfig.server = mkDefault top.apiserverAddress;
systemd.services.kube-addon-manager-bootstrap = mkIf (top.apiserver.enable && top.addonManager.bootstrapAddons != {}) {
wantedBy = [ "kube-control-plane-online.target" ];
after = [ "kube-apiserver.service" ];
before = [ "kube-control-plane-online.target" ];
path = [ pkgs.kubectl ];
environment = {
KUBECONFIG = top.lib.mkKubeConfig "kube-addon-manager-bootstrap" cfg.bootstrapAddonsKubeconfig;
};
preStart = with pkgs; let
files = mapAttrsToList (n: v: writeText "${n}.json" (builtins.toJSON v))
cfg.bootstrapAddons;
in ''
until kubectl auth can-i '*' '*' -q 2>/dev/null; do
echo kubectl auth can-i '*' '*': exit status $?
sleep 2
done
kubectl apply -f ${concatStringsSep " \\\n -f " files}
'';
script = "echo Ok";
unitConfig.ConditionPathExists = bootstrapAddonsPaths;
};
systemd.paths.kube-addon-manager-bootstrap = {
wantedBy = [ "kube-addon-manager-bootstrap.service" ];
pathConfig = {
PathExists = bootstrapAddonsPaths;
PathChanged = bootstrapAddonsPaths;
};
};
services.kubernetes.addonManager.bootstrapAddonsKubeconfig.server = mkDefault top.apiserverAddress;
services.kubernetes.addonManager.bootstrapAddons = mkIf isRBACEnabled
(let
name = system:kube-addon-manager;

@ -169,23 +169,6 @@ in {
};
};
kubernetes-dashboard-cm = {
apiVersion = "v1";
kind = "ConfigMap";
metadata = {
labels = {
k8s-app = "kubernetes-dashboard";
# Allows editing resource and makes sure it is created first.
"addonmanager.kubernetes.io/mode" = "EnsureExists";
};
name = "kubernetes-dashboard-settings";
namespace = "kube-system";
};
};
};
services.kubernetes.addonManager.bootstrapAddons = mkMerge [{
kubernetes-dashboard-sa = {
apiVersion = "v1";
kind = "ServiceAccount";
@ -227,9 +210,20 @@ in {
};
type = "Opaque";
};
}
(optionalAttrs cfg.rbac.enable
kubernetes-dashboard-cm = {
apiVersion = "v1";
kind = "ConfigMap";
metadata = {
labels = {
k8s-app = "kubernetes-dashboard";
# Allows editing resource and makes sure it is created first.
"addonmanager.kubernetes.io/mode" = "EnsureExists";
};
name = "kubernetes-dashboard-settings";
namespace = "kube-system";
};
};
} // (optionalAttrs cfg.rbac.enable
(let
subjects = [{
kind = "ServiceAccount";
@ -329,6 +323,6 @@ in {
inherit subjects;
};
})
))];
));
};
}

@ -290,32 +290,11 @@ in
###### implementation
config = mkMerge [
(let
apiserverPaths = filter (a: a != null) [
cfg.clientCaFile
cfg.etcd.caFile
cfg.etcd.certFile
cfg.etcd.keyFile
cfg.kubeletClientCaFile
cfg.kubeletClientCertFile
cfg.kubeletClientKeyFile
cfg.serviceAccountKeyFile
cfg.tlsCertFile
cfg.tlsKeyFile
];
etcdPaths = filter (a: a != null) [
config.services.etcd.trustedCaFile
config.services.etcd.certFile
config.services.etcd.keyFile
];
in mkIf cfg.enable {
(mkIf cfg.enable {
systemd.services.kube-apiserver = {
description = "Kubernetes APIServer Service";
wantedBy = [ "kube-control-plane-online.target" ];
after = [ "certmgr.service" ];
before = [ "kube-control-plane-online.target" ];
wantedBy = [ "kubernetes.target" ];
after = [ "network.target" ];
serviceConfig = {
Slice = "kubernetes.slice";
ExecStart = ''${top.package}/bin/kube-apiserver \
@ -386,15 +365,6 @@ in
Restart = "on-failure";
RestartSec = 5;
};
unitConfig.ConditionPathExists = apiserverPaths;
};
systemd.paths.kube-apiserver = mkIf top.apiserver.enable {
wantedBy = [ "kube-apiserver.service" ];
pathConfig = {
PathExists = apiserverPaths;
PathChanged = apiserverPaths;
};
};
services.etcd = {
@ -408,18 +378,6 @@ in
initialAdvertisePeerUrls = mkDefault ["https://${top.masterAddress}:2380"];
};
systemd.services.etcd = {
unitConfig.ConditionPathExists = etcdPaths;
};
systemd.paths.etcd = {
wantedBy = [ "etcd.service" ];
pathConfig = {
PathExists = etcdPaths;
PathChanged = etcdPaths;
};
};
services.kubernetes.addonManager.bootstrapAddons = mkIf isRBACEnabled {
apiserver-kubelet-api-admin-crb = {

@ -104,31 +104,11 @@ in
};
###### implementation
config = let
controllerManagerPaths = filter (a: a != null) [
cfg.kubeconfig.caFile
cfg.kubeconfig.certFile
cfg.kubeconfig.keyFile
cfg.rootCaFile
cfg.serviceAccountKeyFile
cfg.tlsCertFile
cfg.tlsKeyFile
];
in mkIf cfg.enable {
systemd.services.kube-controller-manager = rec {
config = mkIf cfg.enable {
systemd.services.kube-controller-manager = {
description = "Kubernetes Controller Manager Service";
wantedBy = [ "kube-control-plane-online.target" ];
wantedBy = [ "kubernetes.target" ];
after = [ "kube-apiserver.service" ];
before = [ "kube-control-plane-online.target" ];
environment.KUBECONFIG = top.lib.mkKubeConfig "kube-controller-manager" cfg.kubeconfig;
preStart = ''
until kubectl auth can-i get /api -q 2>/dev/null; do
echo kubectl auth can-i get /api: exit status $?
sleep 2
done
'';
serviceConfig = {
RestartSec = "30s";
Restart = "on-failure";
@ -140,7 +120,7 @@ in
"--cluster-cidr=${cfg.clusterCidr}"} \
${optionalString (cfg.featureGates != [])
"--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
--kubeconfig=${environment.KUBECONFIG} \
--kubeconfig=${top.lib.mkKubeConfig "kube-controller-manager" cfg.kubeconfig} \
--leader-elect=${boolToString cfg.leaderElect} \
${optionalString (cfg.rootCaFile!=null)
"--root-ca-file=${cfg.rootCaFile}"} \
@ -161,16 +141,7 @@ in
User = "kubernetes";
Group = "kubernetes";
};
path = top.path ++ [ pkgs.kubectl ];
unitConfig.ConditionPathExists = controllerManagerPaths;
};
systemd.paths.kube-controller-manager = {
wantedBy = [ "kube-controller-manager.service" ];
pathConfig = {
PathExists = controllerManagerPaths;
PathChanged = controllerManagerPaths;
};
path = top.path;
};
services.kubernetes.pki.certs = with top.lib; {

@ -256,29 +256,6 @@ in {
wantedBy = [ "multi-user.target" ];
};
systemd.targets.kube-control-plane-online = {
wantedBy = [ "kubernetes.target" ];
before = [ "kubernetes.target" ];
};
systemd.services.kube-control-plane-online = {
description = "Kubernetes control plane is online";
wantedBy = [ "kube-control-plane-online.target" ];
after = [ "kube-scheduler.service" "kube-controller-manager.service" ];
before = [ "kube-control-plane-online.target" ];
path = [ pkgs.curl ];
preStart = ''
until curl -Ssf ${cfg.apiserverAddress}/healthz do
echo curl -Ssf ${cfg.apiserverAddress}/healthz: exit status $?
sleep 3
done
'';