diff --git a/nixos/doc/manual/release-notes/rl-2405.section.md b/nixos/doc/manual/release-notes/rl-2405.section.md index e2f99c20cfc8b..ca301141c186f 100644 --- a/nixos/doc/manual/release-notes/rl-2405.section.md +++ b/nixos/doc/manual/release-notes/rl-2405.section.md @@ -8,7 +8,10 @@ In addition to numerous new and upgraded packages, this release has the followin -- Create the first release note entry in this section! +- `amazonImage`, `amazonImageZfs`, `amazonImageAutomaticSize` have been removed from nixpkgs; this effectively means that no AMI will be published + for this release by the release managers of 24.05 and further versions of NixOS. This has been done because our Amazon images actually receive very little + maintenance and it has become a burden for the release management community to try to get someone involved. To re-introduce them, consider reverting the PRs + and adding yourself as a maintainer and reaching out to the release management community. ## New Services {#sec-release-24.05-new-services} diff --git a/nixos/maintainers/scripts/ec2/amazon-image-zfs.nix b/nixos/maintainers/scripts/ec2/amazon-image-zfs.nix deleted file mode 100644 index 32dd96a7cb7e4..0000000000000 --- a/nixos/maintainers/scripts/ec2/amazon-image-zfs.nix +++ /dev/null @@ -1,12 +0,0 @@ -{ - imports = [ ./amazon-image.nix ]; - ec2.zfs = { - enable = true; - datasets = { - "tank/system/root".mount = "/"; - "tank/system/var".mount = "/var"; - "tank/local/nix".mount = "/nix"; - "tank/user/home".mount = "/home"; - }; - }; -} diff --git a/nixos/maintainers/scripts/ec2/amazon-image.nix b/nixos/maintainers/scripts/ec2/amazon-image.nix deleted file mode 100644 index d12339bca1f8f..0000000000000 --- a/nixos/maintainers/scripts/ec2/amazon-image.nix +++ /dev/null @@ -1,160 +0,0 @@ -{ config, lib, pkgs, ... 
}: - -with lib; - -let - cfg = config.amazonImage; - amiBootMode = if config.ec2.efi then "uefi" else "legacy-bios"; - -in { - - imports = [ ../../../modules/virtualisation/amazon-image.nix ]; - - # Amazon recommends setting this to the highest possible value for a good EBS - # experience, which prior to 4.15 was 255. - # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nvme-ebs-volumes.html#timeout-nvme-ebs-volumes - config.boot.kernelParams = - let timeout = - if pkgs.lib.versionAtLeast config.boot.kernelPackages.kernel.version "4.15" - then "4294967295" - else "255"; - in [ "nvme_core.io_timeout=${timeout}" ]; - - options.amazonImage = { - name = mkOption { - type = types.str; - description = lib.mdDoc "The name of the generated derivation"; - default = "nixos-amazon-image-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}"; - }; - - contents = mkOption { - example = literalExpression '' - [ { source = pkgs.memtest86 + "/memtest.bin"; - target = "boot/memtest.bin"; - } - ] - ''; - default = []; - description = lib.mdDoc '' - This option lists files to be copied to fixed locations in the - generated image. Glob patterns work. - ''; - }; - - sizeMB = mkOption { - type = with types; either (enum [ "auto" ]) int; - default = 3072; - example = 8192; - description = lib.mdDoc "The size in MB of the image"; - }; - - format = mkOption { - type = types.enum [ "raw" "qcow2" "vpc" ]; - default = "vpc"; - description = lib.mdDoc "The image format to output"; - }; - }; - - config.system.build.amazonImage = let - configFile = pkgs.writeText "configuration.nix" - '' - { modulesPath, ... 
}: { - imports = [ "''${modulesPath}/virtualisation/amazon-image.nix" ]; - ${optionalString config.ec2.efi '' - ec2.efi = true; - ''} - ${optionalString config.ec2.zfs.enable '' - ec2.zfs.enable = true; - networking.hostId = "${config.networking.hostId}"; - ''} - } - ''; - - zfsBuilder = import ../../../lib/make-multi-disk-zfs-image.nix { - inherit lib config configFile; - inherit (cfg) contents format name; - pkgs = import ../../../.. { inherit (pkgs) system; }; # ensure we use the regular qemu-kvm package - - includeChannel = true; - - bootSize = 1000; # 1G is the minimum EBS volume - - rootSize = cfg.sizeMB; - rootPoolProperties = { - ashift = 12; - autoexpand = "on"; - }; - - datasets = config.ec2.zfs.datasets; - - postVM = '' - extension=''${rootDiskImage##*.} - friendlyName=$out/${cfg.name} - rootDisk="$friendlyName.root.$extension" - bootDisk="$friendlyName.boot.$extension" - mv "$rootDiskImage" "$rootDisk" - mv "$bootDiskImage" "$bootDisk" - - mkdir -p $out/nix-support - echo "file ${cfg.format} $bootDisk" >> $out/nix-support/hydra-build-products - echo "file ${cfg.format} $rootDisk" >> $out/nix-support/hydra-build-products - - ${pkgs.jq}/bin/jq -n \ - --arg system_label ${lib.escapeShellArg config.system.nixos.label} \ - --arg system ${lib.escapeShellArg pkgs.stdenv.hostPlatform.system} \ - --arg root_logical_bytes "$(${pkgs.qemu_kvm}/bin/qemu-img info --output json "$rootDisk" | ${pkgs.jq}/bin/jq '."virtual-size"')" \ - --arg boot_logical_bytes "$(${pkgs.qemu_kvm}/bin/qemu-img info --output json "$bootDisk" | ${pkgs.jq}/bin/jq '."virtual-size"')" \ - --arg boot_mode "${amiBootMode}" \ - --arg root "$rootDisk" \ - --arg boot "$bootDisk" \ - '{} - | .label = $system_label - | .boot_mode = $boot_mode - | .system = $system - | .disks.boot.logical_bytes = $boot_logical_bytes - | .disks.boot.file = $boot - | .disks.root.logical_bytes = $root_logical_bytes - | .disks.root.file = $root - ' > $out/nix-support/image-info.json - ''; - }; - - extBuilder = import 
../../../lib/make-disk-image.nix { - inherit lib config configFile; - - inherit (cfg) contents format name; - pkgs = import ../../../.. { inherit (pkgs) system; }; # ensure we use the regular qemu-kvm package - - fsType = "ext4"; - partitionTableType = if config.ec2.efi then "efi" else "legacy+gpt"; - - diskSize = cfg.sizeMB; - - postVM = '' - extension=''${diskImage##*.} - friendlyName=$out/${cfg.name}.$extension - mv "$diskImage" "$friendlyName" - diskImage=$friendlyName - - mkdir -p $out/nix-support - echo "file ${cfg.format} $diskImage" >> $out/nix-support/hydra-build-products - - ${pkgs.jq}/bin/jq -n \ - --arg system_label ${lib.escapeShellArg config.system.nixos.label} \ - --arg system ${lib.escapeShellArg pkgs.stdenv.hostPlatform.system} \ - --arg logical_bytes "$(${pkgs.qemu_kvm}/bin/qemu-img info --output json "$diskImage" | ${pkgs.jq}/bin/jq '."virtual-size"')" \ - --arg boot_mode "${amiBootMode}" \ - --arg file "$diskImage" \ - '{} - | .label = $system_label - | .boot_mode = $boot_mode - | .system = $system - | .logical_bytes = $logical_bytes - | .file = $file - | .disks.root.logical_bytes = $logical_bytes - | .disks.root.file = $file - ' > $out/nix-support/image-info.json - ''; - }; - in if config.ec2.zfs.enable then zfsBuilder else extBuilder; -} diff --git a/nixos/maintainers/scripts/ec2/create-amis.sh b/nixos/maintainers/scripts/ec2/create-amis.sh deleted file mode 100755 index 0c1656efaf1ca..0000000000000 --- a/nixos/maintainers/scripts/ec2/create-amis.sh +++ /dev/null @@ -1,362 +0,0 @@ -#!/usr/bin/env nix-shell -#!nix-shell -p awscli -p jq -p qemu -i bash -# shellcheck shell=bash -# -# Future Deprecation? -# This entire thing should probably be replaced with a generic terraform config - -# Uploads and registers NixOS images built from the -# amazonImage attribute. Images are uploaded and -# registered via a home region, and then copied to other regions. 
- -# The home region requires an s3 bucket, and an IAM role named "vmimport" -# (by default) with access to the S3 bucket. The name can be -# configured with the "service_role_name" variable. Configuration of the -# vmimport role is documented in -# https://docs.aws.amazon.com/vm-import/latest/userguide/vmimport-image-import.html - -# set -x -set -euo pipefail - -var () { true; } - -# configuration -var ${state_dir:=$HOME/amis/ec2-images} -var ${home_region:=eu-west-1} -var ${bucket:=nixos-amis} -var ${service_role_name:=vmimport} - -# Output of the command: -# > aws ec2 describe-regions --all-regions --query "Regions[].{Name:RegionName}" --output text | sort -var ${regions:= - af-south-1 - ap-east-1 - ap-northeast-1 - ap-northeast-2 - ap-northeast-3 - ap-south-1 - ap-southeast-1 - ap-southeast-2 - ap-southeast-3 - ca-central-1 - eu-central-1 - eu-north-1 - eu-south-1 - eu-west-1 - eu-west-2 - eu-west-3 - me-south-1 - sa-east-1 - us-east-1 - us-east-2 - us-west-1 - us-west-2 - } - -regions=($regions) - -log() { - echo "$@" >&2 -} - -if [ "$#" -ne 1 ]; then - log "Usage: ./upload-amazon-image.sh IMAGE_OUTPUT" - exit 1 -fi - -# result of the amazon-image from nixos/release.nix -store_path=$1 - -if [ ! -e "$store_path" ]; then - log "Store path: $store_path does not exist, fetching..." - nix-store --realise "$store_path" -fi - -if [ ! -d "$store_path" ]; then - log "store_path: $store_path is not a directory. aborting" - exit 1 -fi - -read_image_info() { - if [ ! -e "$store_path/nix-support/image-info.json" ]; then - log "Image missing metadata" - exit 1 - fi - jq -r "$1" "$store_path/nix-support/image-info.json" -} - -# We handle a single image per invocation, store all attributes in -# globals for convenience. 
-zfs_disks=$(read_image_info .disks) -is_zfs_image= -if jq -e .boot <<< "$zfs_disks"; then - is_zfs_image=1 - zfs_boot=".disks.boot" -fi -image_label="$(read_image_info .label)${is_zfs_image:+-ZFS}" -image_system=$(read_image_info .system) -image_files=( $(read_image_info ".disks.root.file") ) - -image_logical_bytes=$(read_image_info "${zfs_boot:-.disks.root}.logical_bytes") - -if [[ -n "$is_zfs_image" ]]; then - image_files+=( $(read_image_info .disks.boot.file) ) -fi - -# Derived attributes - -image_logical_gigabytes=$(((image_logical_bytes-1)/1024/1024/1024+1)) # Round to the next GB - -case "$image_system" in - aarch64-linux) - amazon_arch=arm64 - ;; - x86_64-linux) - amazon_arch=x86_64 - ;; - *) - log "Unknown system: $image_system" - exit 1 -esac - -image_name="NixOS-${image_label}-${image_system}" -image_description="NixOS ${image_label} ${image_system}" - -log "Image Details:" -log " Name: $image_name" -log " Description: $image_description" -log " Size (gigabytes): $image_logical_gigabytes" -log " System: $image_system" -log " Amazon Arch: $amazon_arch" - -read_state() { - local state_key=$1 - local type=$2 - - cat "$state_dir/$state_key.$type" 2>/dev/null || true -} - -write_state() { - local state_key=$1 - local type=$2 - local val=$3 - - mkdir -p "$state_dir" - echo "$val" > "$state_dir/$state_key.$type" -} - -wait_for_import() { - local region=$1 - local task_id=$2 - local state snapshot_id - log "Waiting for import task $task_id to be completed" - while true; do - read -r state message snapshot_id < <( - aws ec2 describe-import-snapshot-tasks --region "$region" --import-task-ids "$task_id" | \ - jq -r '.ImportSnapshotTasks[].SnapshotTaskDetail | "\(.Status) \(.StatusMessage) \(.SnapshotId)"' - ) - log " ... 
state=$state message=$message snapshot_id=$snapshot_id" - case "$state" in - active) - sleep 10 - ;; - completed) - echo "$snapshot_id" - return - ;; - *) - log "Unexpected snapshot import state: '${state}'" - log "Full response: " - aws ec2 describe-import-snapshot-tasks --region "$region" --import-task-ids "$task_id" >&2 - exit 1 - ;; - esac - done -} - -wait_for_image() { - local region=$1 - local ami_id=$2 - local state - log "Waiting for image $ami_id to be available" - - while true; do - read -r state < <( - aws ec2 describe-images --image-ids "$ami_id" --region "$region" | \ - jq -r ".Images[].State" - ) - log " ... state=$state" - case "$state" in - pending) - sleep 10 - ;; - available) - return - ;; - *) - log "Unexpected AMI state: '${state}'" - exit 1 - ;; - esac - done -} - - -make_image_public() { - local region=$1 - local ami_id=$2 - - wait_for_image "$region" "$ami_id" - - log "Making image $ami_id public" - - aws ec2 modify-image-attribute \ - --image-id "$ami_id" --region "$region" --launch-permission 'Add={Group=all}' >&2 -} - -upload_image() { - local region=$1 - - for image_file in "${image_files[@]}"; do - local aws_path=${image_file#/} - - if [[ -n "$is_zfs_image" ]]; then - local suffix=${image_file%.*} - suffix=${suffix##*.} - fi - - local state_key="$region.$image_label${suffix:+.${suffix}}.$image_system" - local task_id - task_id=$(read_state "$state_key" task_id) - local snapshot_id - snapshot_id=$(read_state "$state_key" snapshot_id) - local ami_id - ami_id=$(read_state "$state_key" ami_id) - - if [ -z "$task_id" ]; then - log "Checking for image on S3" - if ! 
aws s3 ls --region "$region" "s3://${bucket}/${aws_path}" >&2; then - log "Image missing from aws, uploading" - aws s3 cp --region "$region" "$image_file" "s3://${bucket}/${aws_path}" >&2 - fi - - log "Importing image from S3 path s3://$bucket/$aws_path" - - task_id=$(aws ec2 import-snapshot --role-name "$service_role_name" --disk-container "{ - \"Description\": \"nixos-image-${image_label}-${image_system}\", - \"Format\": \"vhd\", - \"UserBucket\": { - \"S3Bucket\": \"$bucket\", - \"S3Key\": \"$aws_path\" - } - }" --region "$region" | jq -r '.ImportTaskId') - - write_state "$state_key" task_id "$task_id" - fi - - if [ -z "$snapshot_id" ]; then - snapshot_id=$(wait_for_import "$region" "$task_id") - write_state "$state_key" snapshot_id "$snapshot_id" - fi - done - - if [ -z "$ami_id" ]; then - log "Registering snapshot $snapshot_id as AMI" - - local block_device_mappings=( - "DeviceName=/dev/xvda,Ebs={SnapshotId=$snapshot_id,VolumeSize=$image_logical_gigabytes,DeleteOnTermination=true,VolumeType=gp3}" - ) - - if [[ -n "$is_zfs_image" ]]; then - local root_snapshot_id=$(read_state "$region.$image_label.root.$image_system" snapshot_id) - - local root_image_logical_bytes=$(read_image_info ".disks.root.logical_bytes") - local root_image_logical_gigabytes=$(((root_image_logical_bytes-1)/1024/1024/1024+1)) # Round to the next GB - - block_device_mappings+=( - "DeviceName=/dev/xvdb,Ebs={SnapshotId=$root_snapshot_id,VolumeSize=$root_image_logical_gigabytes,DeleteOnTermination=true,VolumeType=gp3}" - ) - fi - - - local extra_flags=( - --root-device-name /dev/xvda - --sriov-net-support simple - --ena-support - --virtualization-type hvm - ) - - block_device_mappings+=("DeviceName=/dev/sdb,VirtualName=ephemeral0") - block_device_mappings+=("DeviceName=/dev/sdc,VirtualName=ephemeral1") - block_device_mappings+=("DeviceName=/dev/sdd,VirtualName=ephemeral2") - block_device_mappings+=("DeviceName=/dev/sde,VirtualName=ephemeral3") - - ami_id=$( - aws ec2 register-image \ - --name 
"$image_name" \ - --description "$image_description" \ - --region "$region" \ - --architecture $amazon_arch \ - --block-device-mappings "${block_device_mappings[@]}" \ - --boot-mode $(read_image_info .boot_mode) \ - "${extra_flags[@]}" \ - | jq -r '.ImageId' - ) - - write_state "$state_key" ami_id "$ami_id" - fi - - [[ -v PRIVATE ]] || make_image_public "$region" "$ami_id" - - echo "$ami_id" -} - -copy_to_region() { - local region=$1 - local from_region=$2 - local from_ami_id=$3 - - state_key="$region.$image_label.$image_system" - ami_id=$(read_state "$state_key" ami_id) - - if [ -z "$ami_id" ]; then - log "Copying $from_ami_id to $region" - ami_id=$( - aws ec2 copy-image \ - --region "$region" \ - --source-region "$from_region" \ - --source-image-id "$from_ami_id" \ - --name "$image_name" \ - --description "$image_description" \ - | jq -r '.ImageId' - ) - - write_state "$state_key" ami_id "$ami_id" - fi - - [[ -v PRIVATE ]] || make_image_public "$region" "$ami_id" - - echo "$ami_id" -} - -upload_all() { - home_image_id=$(upload_image "$home_region") - jq -n \ - --arg key "$home_region.$image_system" \ - --arg value "$home_image_id" \ - '$ARGS.named' - - for region in "${regions[@]}"; do - if [ "$region" = "$home_region" ]; then - continue - fi - copied_image_id=$(copy_to_region "$region" "$home_region" "$home_image_id") - - jq -n \ - --arg key "$region.$image_system" \ - --arg value "$copied_image_id" \ - '$ARGS.named' - done -} - -upload_all | jq --slurp from_entries diff --git a/nixos/release-combined.nix b/nixos/release-combined.nix index 9b4b92be6f3ac..760c0efd17145 100644 --- a/nixos/release-combined.nix +++ b/nixos/release-combined.nix @@ -49,7 +49,6 @@ in rec { [ "nixos.channel" ] (onFullSupported "nixos.dummy") (onAllSupported "nixos.iso_minimal") - (onSystems ["x86_64-linux" "aarch64-linux"] "nixos.amazonImage") (onFullSupported "nixos.iso_plasma5") (onFullSupported "nixos.iso_gnome") (onFullSupported "nixos.manual") diff --git a/nixos/release-small.nix 
b/nixos/release-small.nix index 6204dc731ad96..4e80e62df0315 100644 --- a/nixos/release-small.nix +++ b/nixos/release-small.nix @@ -32,7 +32,7 @@ let in rec { nixos = { - inherit (nixos') channel manual options iso_minimal amazonImage dummy; + inherit (nixos') channel manual options iso_minimal dummy; tests = { inherit (nixos'.tests) acme @@ -113,7 +113,6 @@ in rec { (map onSupported [ "nixos.dummy" "nixos.iso_minimal" - "nixos.amazonImage" "nixos.manual" "nixos.tests.acme" "nixos.tests.boot.uefiCdrom" diff --git a/nixos/release.nix b/nixos/release.nix index a1b4508ca40b6..e12b67a8215ea 100644 --- a/nixos/release.nix +++ b/nixos/release.nix @@ -263,55 +263,6 @@ in rec { }).config.system.build.tarball) ); - # A disk image that can be imported to Amazon EC2 and registered as an AMI - amazonImage = forMatchingSystems [ "x86_64-linux" "aarch64-linux" ] (system: - - with import ./.. { inherit system; }; - - hydraJob ((import lib/eval-config.nix { - inherit system; - modules = - [ configuration - versionModule - ./maintainers/scripts/ec2/amazon-image.nix - ]; - }).config.system.build.amazonImage) - - ); - amazonImageZfs = forMatchingSystems [ "x86_64-linux" "aarch64-linux" ] (system: - - with import ./.. { inherit system; }; - - hydraJob ((import lib/eval-config.nix { - inherit system; - modules = - [ configuration - versionModule - ./maintainers/scripts/ec2/amazon-image-zfs.nix - ]; - }).config.system.build.amazonImage) - - ); - - - # Test job for https://github.com/NixOS/nixpkgs/issues/121354 to test - # automatic sizing without blocking the channel. - amazonImageAutomaticSize = forMatchingSystems [ "x86_64-linux" "aarch64-linux" ] (system: - - with import ./.. { inherit system; }; - - hydraJob ((import lib/eval-config.nix { - inherit system; - modules = - [ configuration - versionModule - ./maintainers/scripts/ec2/amazon-image.nix - ({ ... 
}: { amazonImage.sizeMB = "auto"; }) - ]; - }).config.system.build.amazonImage) - - ); - # An image that can be imported into lxd and used for container creation lxdContainerImage = forMatchingSystems [ "x86_64-linux" "aarch64-linux" ] (system: diff --git a/nixos/tests/all-tests.nix b/nixos/tests/all-tests.nix index 44e99203856d5..7e8981e4c487a 100644 --- a/nixos/tests/all-tests.nix +++ b/nixos/tests/all-tests.nix @@ -255,8 +255,6 @@ in { drbd = handleTest ./drbd.nix {}; earlyoom = handleTestOn ["x86_64-linux"] ./earlyoom.nix {}; early-mount-options = handleTest ./early-mount-options.nix {}; - ec2-config = (handleTestOn ["x86_64-linux"] ./ec2.nix {}).boot-ec2-config or {}; - ec2-nixops = (handleTestOn ["x86_64-linux"] ./ec2.nix {}).boot-ec2-nixops or {}; ecryptfs = handleTest ./ecryptfs.nix {}; fscrypt = handleTest ./fscrypt.nix {}; fastnetmon-advanced = runTest ./fastnetmon-advanced.nix; diff --git a/nixos/tests/ec2.nix b/nixos/tests/ec2.nix deleted file mode 100644 index e649761d029df..0000000000000 --- a/nixos/tests/ec2.nix +++ /dev/null @@ -1,156 +0,0 @@ -{ system ? builtins.currentSystem, - config ? {}, - pkgs ? import ../.. { inherit system config; } -}: - -with import ../lib/testing-python.nix { inherit system pkgs; }; -with pkgs.lib; - -with import common/ec2.nix { inherit makeTest pkgs; }; - -let - imageCfg = (import ../lib/eval-config.nix { - inherit system; - modules = [ - ../maintainers/scripts/ec2/amazon-image.nix - ../modules/testing/test-instrumentation.nix - ../modules/profiles/qemu-guest.nix - { - # Hack to make the partition resizing work in QEMU. - boot.initrd.postDeviceCommands = mkBefore '' - ln -s vda /dev/xvda - ln -s vda1 /dev/xvda1 - ''; - - # In a NixOS test the serial console is occupied by the "backdoor" - # (see testing/test-instrumentation.nix) and is incompatible with - # the configuration in virtualisation/amazon-image.nix. 
- systemd.services."serial-getty@ttyS0".enable = mkForce false; - - # Needed by nixos-rebuild due to the lack of network - # access. Determined by trial and error. - system.extraDependencies = with pkgs; ( [ - # Needed for a nixos-rebuild. - busybox - cloud-utils - desktop-file-utils - libxslt.bin - mkinitcpio-nfs-utils - stdenv - stdenvNoCC - texinfo - unionfs-fuse - xorg.lndir - - # These are used in the configure-from-userdata tests - # for EC2. Httpd and valgrind are requested by the - # configuration. - apacheHttpd - apacheHttpd.doc - apacheHttpd.man - valgrind.doc - ]); - } - ]; - }).config; - image = "${imageCfg.system.build.amazonImage}/${imageCfg.amazonImage.name}.vhd"; - - sshKeys = import ./ssh-keys.nix pkgs; - snakeOilPrivateKey = sshKeys.snakeOilPrivateKey.text; - snakeOilPrivateKeyFile = pkgs.writeText "private-key" snakeOilPrivateKey; - snakeOilPublicKey = sshKeys.snakeOilPublicKey; - -in { - boot-ec2-nixops = makeEc2Test { - name = "nixops-userdata"; - inherit image; - sshPublicKey = snakeOilPublicKey; # That's right folks! My user's key is also the host key! 
- - userData = '' - SSH_HOST_ED25519_KEY_PUB:${snakeOilPublicKey} - SSH_HOST_ED25519_KEY:${replaceStrings ["\n"] ["|"] snakeOilPrivateKey} - ''; - script = '' - machine.start() - machine.wait_for_file("/etc/ec2-metadata/user-data") - machine.wait_for_unit("sshd.service") - - machine.succeed("grep unknown /etc/ec2-metadata/ami-manifest-path") - - # We have no keys configured on the client side yet, so this should fail - machine.fail("ssh -o BatchMode=yes localhost exit") - - # Let's install our client private key - machine.succeed("mkdir -p ~/.ssh") - - machine.copy_from_host_via_shell( - "${snakeOilPrivateKeyFile}", "~/.ssh/id_ed25519" - ) - machine.succeed("chmod 600 ~/.ssh/id_ed25519") - - # We haven't configured the host key yet, so this should still fail - machine.fail("ssh -o BatchMode=yes localhost exit") - - # Add the host key; ssh should finally succeed - machine.succeed( - "echo localhost,127.0.0.1 ${snakeOilPublicKey} > ~/.ssh/known_hosts" - ) - machine.succeed("ssh -o BatchMode=yes localhost exit") - - # Test whether the root disk was resized. - blocks, block_size = map(int, machine.succeed("stat -c %b:%S -f /").split(":")) - GB = 1024 ** 3 - assert 9.7 * GB <= blocks * block_size <= 10 * GB - - # Just to make sure resizing is idempotent. - machine.shutdown() - machine.start() - machine.wait_for_file("/etc/ec2-metadata/user-data") - ''; - }; - - boot-ec2-config = makeEc2Test { - name = "config-userdata"; - meta.broken = true; # amazon-init wants to download from the internet while building the system - inherit image; - sshPublicKey = snakeOilPublicKey; - - # ### https://nixos.org/channels/nixos-unstable nixos - userData = '' - { pkgs, ... 
}: - - { - imports = [ - - - - ]; - environment.etc.testFile = { - text = "whoa"; - }; - - networking.hostName = "ec2-test-vm"; # required by services.httpd - - services.httpd = { - enable = true; - adminAddr = "test@example.org"; - virtualHosts.localhost.documentRoot = "''${pkgs.valgrind.doc}/share/doc/valgrind/html"; - }; - networking.firewall.allowedTCPPorts = [ 80 ]; - } - ''; - script = '' - machine.start() - - # amazon-init must succeed. if it fails, make the test fail - # immediately instead of timing out in wait_for_file. - machine.wait_for_unit("amazon-init.service") - - machine.wait_for_file("/etc/testFile") - assert "whoa" in machine.succeed("cat /etc/testFile") - - machine.wait_for_unit("httpd.service") - assert "Valgrind" in machine.succeed("curl http://localhost") - ''; - }; -}