Introduce a framework for automated VM tests.

r-vdp 2023-04-23 03:00:58 +02:00 committed by Ramses
parent 2f8a9ba967
commit 22684b6ed6
17 changed files with 2194 additions and 350 deletions
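To give a feel for the new framework, here is a minimal sketch of how a test might be declared through the make-vm-test helper added below; the helper is presumably exposed as self.lib.make-vm-test (mirroring the self.lib.backdoor reference in this diff), and the node name, node body and testScript shown here are assumptions based on the vendored NixOS test-driver API rather than code from this commit:

self.lib.make-vm-test {
  system = "x86_64-linux";
  modules = [
    {
      # Placeholder node: the real options (virtualisation settings, the
      # system-manager configuration to test, ...) are defined by the
      # modules under test/nix/test-driver/modules.
      nodes.node1 = { };
      # Python snippet executed by the vendored NixOS test driver.
      testScript = ''
        node1.wait_for_unit("multi-user.target")
      '';
    }
  ];
}

The resulting derivation runs the test headlessly when built, while the runVM passthru attribute at the end of lib.nix wraps the same driver invocation with --interactive for manual debugging.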

@@ -13,13 +13,11 @@ in
,
}:
let
pkgs = nixpkgs.legacyPackages.${system};
inherit (self.packages.${system}) system-manager;
# Module that sets additional module arguments
extraArgsModule = { lib, config, pkgs, ... }: {
_file = "lib.nix: extraArgsModule";
_module.args = {
pkgs = nixpkgs.legacyPackages.${system};
pkgs = nixpkgs.legacyPackages.${config.nixpkgs.hostPlatform};
utils = import "${nixos}/lib/utils.nix" {
inherit lib config pkgs;
};
@@ -34,7 +32,10 @@ in
] ++ modules;
}).config;
# Get the system as it was defined in the modules.
system = config.nixpkgs.hostPlatform;
pkgs = nixpkgs.legacyPackages.${system};
inherit (self.packages.${system}) system-manager;
returnIfNoAssertions = drv:
let
@@ -173,4 +174,229 @@ in
(linkFarmBinEntryFromDrv preActivationAssertionScript)
]
);
images = {
ubuntu_22_10_cloudimg = {
name = "ubuntu-22.10-server-cloudimg-amd64.img";
release = "20230302";
hash = "sha256-9hjGjlUQoXZfAYTwsEjHE3Zawd6qqrVc6VXshthNS44=";
};
ubuntu_20_04_cloudimg = {
name = "ubuntu-20.04-server-cloudimg-amd64.img";
release = "";
hash = "";
};
};
# Careful: we do not have the nix store yet when this service runs, so we
# cannot use e.g. pkgs.writeText or pkgs.writeShellScript for the commands here,
# since their results would refer to the store.
mount_store = { pkgs }:
pkgs.writeText "mount-store.service" ''
[Service]
Type = oneshot
ExecStart = mkdir -p /nix/.ro-store
ExecStart = mount -t 9p -o defaults,trans=virtio,version=9p2000.L,cache=loose nix-store /nix/.ro-store
ExecStart = mkdir -p -m 0755 /nix/.rw-store/ /nix/store
ExecStart = mount -t tmpfs tmpfs /nix/.rw-store
ExecStart = mkdir -p -m 0755 /nix/.rw-store/store /nix/.rw-store/work
ExecStart = mount -t overlay overlay /nix/store -o lowerdir=/nix/.ro-store,upperdir=/nix/.rw-store/store,workdir=/nix/.rw-store/work
[Install]
WantedBy = multi-user.target
'';
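# Note: the "nix-store" tag mounted above matches the mount_tag of the
# read-only /nix/store 9p share that runVmScript passes to QEMU further down
# in this file; that share is what makes the host store visible to the VM.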
# Backdoor service that exposes a root shell through a socket to the test instrumentation framework
backdoor = { pkgs }:
pkgs.writeText "backdoor.service" ''
[Unit]
Requires = dev-hvc0.device dev-ttyS0.device mount-store.service
After = dev-hvc0.device dev-ttyS0.device mount-store.service
[Service]
ExecStart = ${pkgs.writeShellScript "backdoor-start-script" ''
set -euo pipefail
export USER=root
export HOME=/root
export DISPLAY=:0.0
# TODO: do we actually need to source /etc/profile ?
# Unbound vars cause the service to crash
#source /etc/profile
# Don't use a pager when executing backdoor
# actions. Because we use a tty, commands like systemctl
# or nix-store get confused into thinking they're running
# interactively.
export PAGER=
cd /tmp
exec < /dev/hvc0 > /dev/hvc0
while ! exec 2> /dev/ttyS0; do sleep 0.1; done
echo "connecting to host..." >&2
stty -F /dev/hvc0 raw -echo # prevent nl -> cr/nl conversion
# This line is essential since it signals to the test driver that the
# shell is ready.
# See: the connect method in the Machine class.
echo "Spawning backdoor root shell..."
# Passing the terminal device makes bash run non-interactively.
# Otherwise we get errors on the terminal because bash tries to
# setup things like job control.
PS1= exec /usr/bin/env bash --norc /dev/hvc0
''}
KillSignal = SIGHUP
[Install]
WantedBy = multi-user.target
'';
prepareUbuntuImage = { hostPkgs, nodeConfig, image }:
let
pkgs = hostPkgs;
img = pkgs.fetchurl {
inherit (image) hash;
url = "https://cloud-images.ubuntu.com/releases/kinetic/release-${image.release}/${image.name}";
};
in
pkgs.runCommand "configure-vm" { } ''
# We will modify the VM image, so we need a mutable copy
install -m777 ${img} ./img.qcow2
# Copy the service files here, since otherwise they end up in the VM
# with their paths including the nix hash
cp ${self.lib.backdoor { inherit pkgs; }} backdoor.service
cp ${self.lib.mount_store { inherit pkgs; }} mount-store.service
${lib.concatStringsSep " \\\n" [
"${pkgs.guestfs-tools}/bin/virt-customize"
"-a ./img.qcow2"
"--smp 2"
"--memsize 256"
"--no-network"
"--copy-in backdoor.service:/etc/systemd/system"
"--copy-in mount-store.service:/etc/systemd/system"
''--link ${nodeConfig.systemConfig}:/system-manager-profile''
"--run"
(pkgs.writeShellScript "run-script" ''
# Clear the root password
passwd -d root
# Don't spawn ttys on these devices, they are used for test instrumentation
systemctl mask serial-getty@ttyS0.service
systemctl mask serial-getty@hvc0.service
# Speed up the boot process
systemctl mask snapd.service
systemctl mask snapd.socket
systemctl mask snapd.seeded.service
systemctl enable backdoor.service
'')
]};
cp ./img.qcow2 $out
'';
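# Illustrative sketch (assumed usage, not shown in this diff): the test-driver
# modules presumably set a node's root image roughly as
#   virtualisation.rootImage = self.lib.prepareUbuntuImage {
#     hostPkgs = nixpkgs.legacyPackages.x86_64-linux;
#     image = self.lib.images.ubuntu_22_10_cloudimg;
#     nodeConfig = node; # provides the systemConfig profile linked above
#   };
# before runVmScript below hands that image to QEMU via -drive.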
make-vm-test =
{ system
, modules
}:
let
hostPkgs = nixpkgs.legacyPackages.${system};
config = (lib.evalModules {
specialArgs = { system-manager = self; };
modules = [
../test/nix/test-driver/modules
{
_file = "inline module in lib.nix";
inherit hostPkgs;
}
] ++ modules;
}).config;
nodes = map runVmScript (lib.attrValues config.nodes);
runVmScript = node:
# The test driver extracts the name of the node from the name of the
# VM script, so it's important here to stick to the naming scheme expected
# by the test driver.
hostPkgs.writeShellScript "run-${node.system.name}-vm" ''
set -eo pipefail
export PATH=${lib.makeBinPath [ hostPkgs.coreutils ]}''${PATH:+:}$PATH
# Create a directory for storing temporary data of the running VM.
if [ -z "$TMPDIR" ] || [ -z "$USE_TMPDIR" ]; then
TMPDIR=$(mktemp -d nix-vm.XXXXXXXXXX --tmpdir)
fi
# Create a directory for exchanging data with the VM.
mkdir -p "$TMPDIR/xchg"
cd "$TMPDIR"
# Start QEMU.
# We might need to be smarter about the QEMU binary to run when we want to
# support architectures other than x86_64.
# See qemu-common.nix in nixpkgs.
${lib.concatStringsSep "\\\n " [
"exec ${lib.getBin hostPkgs.qemu_test}/bin/qemu-kvm"
"-device virtio-rng-pci"
"-cpu max"
"-name ${node.system.name}"
"-m ${toString node.virtualisation.memorySize}"
"-smp ${toString node.virtualisation.cpus}"
"-drive file=${node.virtualisation.rootImage},format=qcow2"
"-device virtio-net-pci,netdev=net0"
"-netdev user,id=net0"
"-virtfs local,security_model=passthrough,id=fsdev1,path=/nix/store,readonly=on,mount_tag=nix-store"
(lib.concatStringsSep "\\\n "
(lib.mapAttrsToList
(tag: share: "-virtfs local,path=${share.source},security_model=none,mount_tag=${tag}")
node.virtualisation.sharedDirectories))
"-snapshot"
"-nographic"
"$QEMU_OPTS"
"$@"
]};
'';
# We vendor the test-driver until github.com/NixOS/nixpkgs#228220 gets merged
#test-driver = pkgs.callPackage "${nixpkgs}/nixos/lib/test-driver" { };
test-driver = hostPkgs.callPackage ../test/nixos-test-driver { };
runTest = { nodes, vlans, testScript, extraDriverArgs }: ''
${lib.getBin test-driver}/bin/nixos-test-driver \
${extraDriverArgs} \
--start-scripts ${lib.concatStringsSep " " nodes} \
--vlans ${lib.concatStringsSep " " vlans} \
-- ${hostPkgs.writeText "test-script" testScript}
'';
defaultTest = { extraDriverArgs ? "" }: runTest {
inherit extraDriverArgs nodes;
inherit (config) testScript;
vlans = [ "1" ];
};
in
hostPkgs.stdenv.mkDerivation (finalAttrs: {
name = "system-manager-vm-test";
requiredSystemFeatures = [ "kvm" "nixos-test" ];
buildCommand = ''
${defaultTest {}}
touch $out
'';
passthru = {
runVM = hostPkgs.writeShellScriptBin "run-vm"
(defaultTest {
extraDriverArgs = "--interactive";
});
};
});
}

@@ -20,6 +20,7 @@
hostPlatform = lib.mkOption {
type = types.str;
example = "x86_64-linux";
default = throw "the option nixpkgs.hostPlatform needs to be set.";
};
};
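Since the default now throws, every configuration has to set nixpkgs.hostPlatform explicitly; a minimal sketch of the required setting, with the surrounding module structure assumed:

{
  nixpkgs.hostPlatform = "x86_64-linux";
}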
@@ -131,5 +132,19 @@
'';
};
};
# TODO: make sure that this does not get relaunched when activating a new
# profile, otherwise we get an infinite loop.
systemd.services.reactivate-system-manager = {
enable = false;
# TODO should we activate earlier?
wantedBy = [ "multi-user.target" ];
serviceConfig = {
Type = "oneshot";
};
script = ''
/nix/var/nix/profiles/system-manager-profiles/system-manager/bin/activate
'';
};
};
}

@@ -125,6 +125,10 @@ in
config = {
systemd = {
targets.system-manager = {
wantedBy = [ "default.target" ];
};
timers =
lib.mapAttrs
(name: service: