Merge pull request #87 from numtide/nix-vm-test

Use nix-vm-test for our tests
Ramses 2024-04-29 14:32:22 +02:00 committed by GitHub
commit 36577b4627
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
7 changed files with 186 additions and 584 deletions

.gitignore vendored (1 change)

@@ -8,3 +8,4 @@
 result
 .pre-commit-config.yaml
 .direnv
+**/.nixos-test-history

flake.lock generated (22 changes)

@@ -114,6 +114,27 @@
         "type": "github"
       }
     },
+    "nix-vm-test": {
+      "inputs": {
+        "nixpkgs": [
+          "nixpkgs"
+        ]
+      },
+      "locked": {
+        "lastModified": 1714393121,
+        "narHash": "sha256-A6PosvO4ideG5ACRlqW9/Z4cQUFVCrrHBXQxMoB3sTM=",
+        "owner": "numtide",
+        "repo": "nix-vm-test",
+        "rev": "95e2b50b0544a44dea3c85d33d6cff492ac8ee44",
+        "type": "github"
+      },
+      "original": {
+        "owner": "numtide",
+        "ref": "extra-paths-to-register",
+        "repo": "nix-vm-test",
+        "type": "github"
+      }
+    },
     "nixpkgs": {
       "locked": {
         "lastModified": 1712439257,
@@ -177,6 +198,7 @@
         "crane": "crane",
         "devshell": "devshell",
         "flake-utils": "flake-utils_2",
+        "nix-vm-test": "nix-vm-test",
         "nixpkgs": "nixpkgs",
         "pre-commit-hooks": "pre-commit-hooks",
         "rust-overlay": "rust-overlay",

flake.nix

@@ -32,6 +32,11 @@
       url = "github:numtide/treefmt-nix";
       inputs.nixpkgs.follows = "nixpkgs";
     };
+    nix-vm-test = {
+      url = "github:numtide/nix-vm-test/extra-paths-to-register";
+      inputs.nixpkgs.follows = "nixpkgs";
+    };
   };

   outputs =
@@ -43,8 +48,8 @@
     , devshell
     , treefmt-nix
     , pre-commit-hooks
-    ,
-    }:
+    , ...
+    }@inputs:
     {
       lib = import ./nix/lib.nix {
         inherit nixpkgs self;
@@ -248,6 +253,7 @@
           pkgs.lib.optionalAttrs enableVmTests (import ./test/nix/modules {
             inherit system;
             inherit (pkgs) lib;
+            inherit (inputs) nix-vm-test;
             system-manager = self;
           });
         })
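An aside for readers new to the `}@inputs:` pattern used in the second hunk: binding the whole argument set to a name is what lets the flake forward an input it never destructures, as `inherit (inputs) nix-vm-test;` does in the last hunk above. A minimal standalone sketch (illustrative only, not code from this repo):

    # Destructure a few arguments, but also bind the complete set to `inputs`.
    { self, nixpkgs, ... }@inputs:
    {
      # `inputs.nix-vm-test` is reachable here even though it was never
      # listed by name above; `...` accepts it, `@inputs` keeps it.
      vmTestInput = inputs.nix-vm-test;
    }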

nix/lib.nix

@@ -91,259 +91,28 @@ in
     in
     returnIfNoAssertions toplevel;

-  # Careful since we do not have the nix store yet when this service runs,
-  # so we cannot use pkgs.writeTest or pkgs.writeShellScript for instance,
-  # since their results would refer to the store
-  mount_store = { pkgs, pathsToRegister }:
-    let
-      pathRegistrationInfo = "${pkgs.closureInfo { rootPaths = pathsToRegister; }}/registration";
-    in
-    pkgs.writeText "mount-store.service" ''
-      [Service]
-      Type = oneshot
-      ExecStart = mkdir -p /nix/.ro-store
-      ExecStart = mount -t 9p -o defaults,trans=virtio,version=9p2000.L,cache=loose,msize=${toString (256 * 1024 * 1024)} nix-store /nix/.ro-store
-      ExecStart = mkdir -p -m 0755 /nix/.rw-store/ /nix/store
-      ExecStart = mount -t tmpfs tmpfs /nix/.rw-store
-      ExecStart = mkdir -p -m 0755 /nix/.rw-store/store /nix/.rw-store/work
-      ExecStart = mount -t overlay overlay /nix/store -o lowerdir=/nix/.ro-store,upperdir=/nix/.rw-store/store,workdir=/nix/.rw-store/work
-
-      # Register the required paths in the nix DB.
-      # The store has been mounted at this point, so we can use writeShellScript now.
-      ExecStart = ${pkgs.writeShellScript "execstartpost-script" ''
-        ${lib.getBin pkgs.nix}/bin/nix-store --load-db < ${pathRegistrationInfo}
-      ''}
-
-      [Install]
-      WantedBy = multi-user.target
-    '';
-
-  # Backdoor service that exposes a root shell through a socket to the test instrumentation framework
-  backdoor = { pkgs }:
-    pkgs.writeText "backdoor.service" ''
-      [Unit]
-      Requires = dev-hvc0.device dev-ttyS0.device mount-store.service
-      After = dev-hvc0.device dev-ttyS0.device mount-store.service
-      # Keep this unit active when we switch to rescue mode for instance
-      IgnoreOnIsolate = true
-
-      [Service]
-      ExecStart = ${pkgs.writeShellScript "backdoor-start-script" ''
-        set -euo pipefail
-
-        export USER=root
-        export HOME=/root
-        export DISPLAY=:0.0
-
-        # TODO: do we actually need to source /etc/profile ?
-        # Unbound vars cause the service to crash
-        #source /etc/profile
-
-        # Don't use a pager when executing backdoor
-        # actions. Because we use a tty, commands like systemctl
-        # or nix-store get confused into thinking they're running
-        # interactively.
-        export PAGER=
-
-        cd /tmp
-        exec < /dev/hvc0 > /dev/hvc0
-        while ! exec 2> /dev/ttyS0; do sleep 0.1; done
-        echo "connecting to host..." >&2
-        stty -F /dev/hvc0 raw -echo # prevent nl -> cr/nl conversion
-
-        # This line is essential since it signals to the test driver that the
-        # shell is ready.
-        # See: the connect method in the Machine class.
-        echo "Spawning backdoor root shell..."
-
-        # Passing the terminal device makes bash run non-interactively.
-        # Otherwise we get errors on the terminal because bash tries to
-        # setup things like job control.
-        PS1= exec /usr/bin/env bash --norc /dev/hvc0
-      ''}
-      KillSignal = SIGHUP
-
-      [Install]
-      WantedBy = multi-user.target
-    '';
-
-  prepareUbuntuImage = { hostPkgs, nodeConfig, image, extraPathsToRegister ? [ ] }:
-    let
-      pkgs = hostPkgs;
-      img = pkgs.fetchurl {
-        inherit (image) hash;
-        url = "https://cloud-images.ubuntu.com/releases/${image.releaseName}/release-${image.releaseTimeStamp}/${image.name}";
-      };
-      resultImg = "./image.qcow2";
-      # The nix store paths that need to be added to the nix DB for this node.
-      pathsToRegister = [ nodeConfig.systemConfig ] ++ extraPathsToRegister;
-    in
-    pkgs.runCommand "${image.name}-system-manager-vm-test.qcow2" { } ''
-      # We will modify the VM image, so we need a mutable copy
-      install -m777 ${img} ${resultImg}
-
-      # Copy the service files here, since otherwise they end up in the VM
-      # with their paths including the nix hash
-      cp ${self.lib.backdoor { inherit pkgs; }} backdoor.service
-      cp ${self.lib.mount_store { inherit pkgs pathsToRegister; }} mount-store.service
-
-      #export LIBGUESTFS_DEBUG=1 LIBGUESTFS_TRACE=1
-      ${lib.concatStringsSep " \\\n" [
-        "${pkgs.guestfs-tools}/bin/virt-customize"
-        "-a ${resultImg}"
-        "--smp 2"
-        "--memsize 256"
-        "--no-network"
-        "--copy-in backdoor.service:/etc/systemd/system"
-        "--copy-in mount-store.service:/etc/systemd/system"
-        ''--link ${nodeConfig.systemConfig}:/system-manager-profile''
-        "--run"
-        (pkgs.writeShellScript "run-script" ''
-          # Clear the root password
-          passwd -d root
-
-          # Don't spawn ttys on these devices, they are used for test instrumentation
-          systemctl mask serial-getty@ttyS0.service
-          systemctl mask serial-getty@hvc0.service
-
-          # Speed up the boot process
-          systemctl mask snapd.service
-          systemctl mask snapd.socket
-          systemctl mask snapd.seeded.service
-
-          # We have no network in the test VMs, avoid an error on bootup
-          systemctl mask ssh.service
-          systemctl mask ssh.socket
-
-          systemctl enable backdoor.service
-        '')
-      ]};
-
-      cp ${resultImg} $out
-    '';
   mkTestPreamble =
     { node
     , profile
     , action
     }: ''
-      ${node}.succeed("/${profile}/bin/${action} 2>&1 | tee /tmp/output.log")
+      ${node}.succeed("${profile}/bin/${action} 2>&1 | tee /tmp/output.log")
       ${node}.succeed("! grep -F 'ERROR' /tmp/output.log")
     '';

-  activateProfileSnippet = { node, profile ? "system-manager-profile" }:
+  activateProfileSnippet = { node, profile }:
     self.lib.mkTestPreamble {
       inherit node profile;
       action = "activate";
     };

-  deactivateProfileSnippet = { node, profile ? "system-manager-profile" }:
+  deactivateProfileSnippet = { node, profile }:
     self.lib.mkTestPreamble {
       inherit node profile;
       action = "deactivate";
     };

-  prepopulateProfileSnippet = { node, profile ? "system-manager-profile" }:
+  prepopulateProfileSnippet = { node, profile }:
     self.lib.mkTestPreamble {
       inherit node profile;
       action = "prepopulate";
     };
-  make-vm-test =
-    name:
-    { system
-    , modules
-    }:
-    let
-      hostPkgs = nixpkgs.legacyPackages.${system};
-
-      config = (lib.evalModules {
-        specialArgs = { system-manager = self; };
-        modules = [
-          ../test/nix/test-driver/modules
-          {
-            _file = "${self.lib.printAttrPos (builtins.unsafeGetAttrPos "a" { a = null; })}: inline module";
-            inherit hostPkgs;
-          }
-        ] ++ modules;
-      }).config;
-
-      nodes = map runVmScript (lib.attrValues config.nodes);
-
-      runVmScript = node:
-        # The test driver extracts the name of the node from the name of the
-        # VM script, so it's important here to stick to the naming scheme expected
-        # by the test driver.
-        hostPkgs.writeShellScript "run-${node.system.name}-vm" ''
-          set -eo pipefail
-
-          export PATH=${lib.makeBinPath [ hostPkgs.coreutils ]}''${PATH:+:}$PATH
-
-          # Create a directory for storing temporary data of the running VM.
-          if [ -z "$TMPDIR" ] || [ -z "$USE_TMPDIR" ]; then
-            TMPDIR=$(mktemp -d nix-vm.XXXXXXXXXX --tmpdir)
-          fi
-
-          # Create a directory for exchanging data with the VM.
-          mkdir -p "$TMPDIR/xchg"
-
-          cd "$TMPDIR"
-
-          # Start QEMU.
-          # We might need to be smarter about the QEMU binary to run when we want to
-          # support architectures other than x86_64.
-          # See qemu-common.nix in nixpkgs.
-          ${lib.concatStringsSep "\\\n " [
-            "exec ${lib.getBin hostPkgs.qemu_test}/bin/qemu-kvm"
-            "-device virtio-rng-pci"
-            "-cpu max"
-            "-name ${node.system.name}"
-            "-m ${toString node.virtualisation.memorySize}"
-            "-smp ${toString node.virtualisation.cpus}"
-            "-drive file=${node.virtualisation.rootImage},format=qcow2"
-            "-device virtio-net-pci,netdev=net0"
-            "-netdev user,id=net0"
-            "-virtfs local,security_model=passthrough,id=fsdev1,path=/nix/store,readonly=on,mount_tag=nix-store"
-            (lib.concatStringsSep "\\\n "
-              (lib.mapAttrsToList
-                (tag: share: "-virtfs local,path=${share.source},security_model=none,mount_tag=${tag}")
-                node.virtualisation.sharedDirectories))
-            "-snapshot"
-            "-nographic"
-            "$QEMU_OPTS"
-            "$@"
-          ]};
-        '';
-
-      test-driver = hostPkgs.callPackage "${nixpkgs}/nixos/lib/test-driver" { };
-
-      runTest = { nodes, vlans, testScript, extraDriverArgs }: ''
-        ${lib.getBin test-driver}/bin/nixos-test-driver \
-          ${extraDriverArgs} \
-          --start-scripts ${lib.concatStringsSep " " nodes} \
-          --vlans ${lib.concatStringsSep " " vlans} \
-          -- ${hostPkgs.writeText "test-script" config.testScript}
-      '';
-
-      defaultTest = { extraDriverArgs ? "" }: runTest {
-        inherit extraDriverArgs nodes;
-        inherit (config) testScript;
-        vlans = [ "1" ];
-      };
-    in
-    hostPkgs.stdenv.mkDerivation (finalAttrs: {
-      inherit name;
-
-      requiredSystemFeatures = [ "kvm" "nixos-test" ];
-
-      buildCommand = ''
-        ${defaultTest {}}
-        touch $out
-      '';
-
-      passthru = {
-        driverInteractive = hostPkgs.writeShellScriptBin "run-vm"
-          (defaultTest {
-            extraDriverArgs = "--interactive";
-          });
-      };
-    });
 }
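Note on the snippet helpers kept above: with the `/system-manager-profile` link gone (it was created by the deleted `prepareUbuntuImage` via `--link`), `profile` no longer has a default and callers pass a built configuration explicitly. A minimal sketch of the new call shape, assuming `toplevel` is a `makeSystemConfig` result as in the test modules below:

    { system-manager, toplevel }:
    system-manager.lib.activateProfileSnippet {
      node = "vm";        # name of the test VM exposed by the driver
      profile = toplevel; # previously defaulted to "system-manager-profile"
    }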

test/nix/images.json

@@ -1,62 +0,0 @@
-{
-  "ubuntu": {
-    "x86_64-linux": {
-      "ubuntu_23_04_cloudimg": {
-        "name": "ubuntu-23.04-server-cloudimg-amd64.img",
-        "releaseName": "lunar",
-        "releaseTimeStamp": "20230829",
-        "hash": "sha256-t51lSk9M7jaR+72i70iWtbutrkDGCgdjxd385rIe/08="
-      },
-      "ubuntu_22_10_cloudimg": {
-        "name": "ubuntu-22.10-server-cloudimg-amd64.img",
-        "releaseName": "kinetic",
-        "releaseTimeStamp": "20230716",
-        "hash": "sha256-ZMgnM7t0bhOSxU957aWF0VMwQIAa/RPU8EvfIYAkVno="
-      },
-      "ubuntu_22_04_cloudimg": {
-        "name": "ubuntu-22.04-server-cloudimg-amd64.img",
-        "releaseName": "jammy",
-        "releaseTimeStamp": "20230828",
-        "hash": "sha256-C9onYpn8XxiXGOVvZVeRc0deyl/8ImPWgBuJfISLKPg="
-      },
-      "ubuntu_20_04_cloudimg": {
-        "name": "ubuntu-20.04-server-cloudimg-amd64.img",
-        "releaseName": "focal",
-        "releaseTimeStamp": "20230908",
-        "hash": "sha256-N/folU4v6aAGy62vIfj3M70STZb/TOTzP1fw+mS7xiE="
-      }
-    },
-    "aarch64-linux": {
-      "ubuntu_23_04_cloudimg": {
-        "name": "ubuntu-23.04-server-cloudimg-arm64.img",
-        "releaseName": "lunar",
-        "releaseTimeStamp": "20230502",
-        "hash": ""
-      },
-      "ubuntu_22_10_cloudimg": {
-        "name": "ubuntu-22.10-server-cloudimg-arm64.img",
-        "releaseName": "kinetic",
-        "releaseTimeStamp": "20230716",
-        "hash": ""
-      },
-      "ubuntu_22_04_cloudimg": {
-        "name": "ubuntu-22.04-server-cloudimg-arm64.img",
-        "releaseName": "jammy",
-        "releaseTimeStamp": "20230427",
-        "hash": "sha256-9vkeg5VumVBxj4TaLd0SgJEWjw11pcP7SBz5zd1V0EE="
-      },
-      "ubuntu_20_04_cloudimg": {
-        "name": "ubuntu-20.04-server-cloudimg-arm64.img",
-        "releaseName": "focal",
-        "releaseTimeStamp": "20230420",
-        "hash": "sha256-YUtW3oMHz4Hw7WeIu6ksx+/mUfxp7cCSSETvY6KGwU4="
-      }
-    }
-  }
-}

test/nix/modules/default.nix

@@ -1,11 +1,44 @@
 { lib
 , system-manager
 , system
+, nix-vm-test
 }:
 let
-  images = lib.importJSON ../images.json;
-  forEachUbuntuImage = lib.flip lib.mapAttrs' images.ubuntu.${system};
+  forEachUbuntuImage =
+    name:
+    { modules
+    , testScriptFunction
+    , extraPathsToRegister ? [ ]
+    , projectTest ? test: test.sandboxed
+    }:
+    let
+      ubuntu = nix-vm-test.lib.${system}.ubuntu;
+    in
+    lib.listToAttrs (lib.flip map (lib.attrNames ubuntu.images)
+      (imageVersion:
+        let
+          toplevel = (system-manager.lib.makeSystemConfig {
+            modules = modules ++ [
+              ({ lib, pkgs, ... }: {
+                options.hostPkgs = lib.mkOption { type = lib.types.raw; readOnly = true; };
+                config.hostPkgs = pkgs;
+              })
+            ];
+          });
+          inherit (toplevel.config) hostPkgs;
+        in
+        lib.nameValuePair "ubuntu-${imageVersion}-${name}"
+          (projectTest
+            (ubuntu.${imageVersion} {
+              testScript = testScriptFunction { inherit toplevel hostPkgs; };
+              extraPathsToRegister = extraPathsToRegister ++ [
+                toplevel
+              ];
+              sharedDirs = { };
+            }))
+      )
+    );

   # To test reload and restart, we include two services, one that can be reloaded
   # and one that cannot.
@@ -84,212 +117,149 @@ let
       })
     ];
   };
 in
-forEachUbuntuImage
-  (imgName: image: lib.nameValuePair
-    "vm-test-example-${imgName}"
-    (system-manager.lib.make-vm-test "vm-test-example-${imgName}" {
-      inherit system;
-      modules = [
-        ({ config, ... }:
-          let
-            inherit (config) hostPkgs;
-          in
-          {
-            nodes = {
-              node1 = { config, ... }: {
-                modules = [
-                  (testModule "old")
-                  ../../../examples/example.nix
-                ];
-                virtualisation.rootImage = system-manager.lib.prepareUbuntuImage {
-                  inherit hostPkgs image;
-                  nodeConfig = config;
-                };
-              };
-            };
-            testScript = ''
-              # Start all machines in parallel
-              start_all()
-              node1.wait_for_unit("default.target")
-              node1.succeed("touch /etc/foo_test")
-              node1.succeed("/system-manager-profile/bin/activate 2>&1 | tee /tmp/output.log")
-              node1.succeed("grep -F 'Error while creating file in /etc: Unmanaged path already exists in filesystem, please remove it and run system-manager again: /etc/foo_test' /tmp/output.log")
-              node1.succeed("rm /etc/foo_test")
-              ${system-manager.lib.activateProfileSnippet { node = "node1"; }}
-              node1.wait_for_unit("system-manager.target")
-              node1.succeed("systemctl status service-9.service")
-              node1.succeed("test -f /etc/baz/bar/foo2")
-              node1.succeed("test -f /etc/a/nested/example/foo3")
-              node1.succeed("test -f /etc/foo.conf")
-              node1.succeed("grep -F 'launch_the_rockets = true' /etc/foo.conf")
-              node1.fail("grep -F 'launch_the_rockets = false' /etc/foo.conf")
-              node1.succeed("test -d /var/tmp/system-manager")
-              ${system-manager.lib.activateProfileSnippet { node = "node1"; profile = newConfig; }}
-              node1.succeed("systemctl status new-service.service")
-              node1.fail("systemctl status service-9.service")
-              node1.fail("test -f /etc/a/nested/example/foo3")
-              node1.fail("test -f /etc/baz/bar/foo2")
-              node1.fail("test -f /etc/systemd/system/nginx.service")
-              node1.succeed("test -f /etc/foo_new")
-              node1.succeed("test -d /var/tmp/system-manager")
-              node1.succeed("touch /var/tmp/system-manager/foo1")
-              # Simulate a reboot, to check that the services defined with
-              # system-manager start correctly after a reboot.
-              # TODO: can we find an easy way to really reboot the VM and not
-              # lose the root FS state?
-              node1.systemctl("isolate rescue.target")
-              # We need to send a return character to dismiss the rescue-mode prompt
-              node1.send_key("ret")
-              node1.systemctl("isolate default.target")
-              node1.wait_for_unit("default.target")
-              node1.succeed("systemctl status new-service.service")
-              node1.fail("systemctl status service-9.service")
-              node1.fail("test -f /etc/a/nested/example/foo3")
-              node1.fail("test -f /etc/baz/bar/foo2")
-              node1.succeed("test -f /etc/foo_new")
-              ${system-manager.lib.deactivateProfileSnippet { node = "node1"; profile = newConfig; }}
-              node1.fail("systemctl status new-service.service")
-              node1.fail("test -f /etc/foo_new")
-              #node1.fail("test -f /var/tmp/system-manager/foo1")
-            '';
-          })
-      ];
-    })
-  )
-//
-forEachUbuntuImage
-  (imgName: image: lib.nameValuePair
-    "vm-test-prepopulate-${imgName}"
-    (system-manager.lib.make-vm-test "vm-test-prepopulate-${imgName}" {
-      inherit system;
-      modules = [
-        ({ config, ... }:
-          let
-            inherit (config) hostPkgs;
-          in
-          {
-            nodes = {
-              node1 = { config, ... }: {
-                modules = [
-                  ../../../examples/example.nix
-                ];
-                virtualisation.rootImage = system-manager.lib.prepareUbuntuImage {
-                  inherit hostPkgs image;
-                  nodeConfig = config;
-                };
-              };
-            };
-            testScript = ''
-              # Start all machines in parallel
-              start_all()
-              node1.wait_for_unit("default.target")
-              ${system-manager.lib.prepopulateProfileSnippet { node = "node1"; }}
-              node1.systemctl("daemon-reload")
-              # Simulate a reboot, to check that the services defined with
-              # system-manager start correctly after a reboot.
-              # TODO: can we find an easy way to really reboot the VM and not
-              # lose the root FS state?
-              node1.systemctl("isolate rescue.target")
-              # We need to send a return character to dismiss the rescue-mode prompt
-              node1.send_key("ret")
-              node1.systemctl("isolate default.target")
-              node1.wait_for_unit("system-manager.target")
-              node1.succeed("systemctl status service-9.service")
-              node1.succeed("test -f /etc/baz/bar/foo2")
-              node1.succeed("test -f /etc/a/nested/example/foo3")
-              node1.succeed("test -f /etc/foo.conf")
-              node1.succeed("grep -F 'launch_the_rockets = true' /etc/foo.conf")
-              node1.fail("grep -F 'launch_the_rockets = false' /etc/foo.conf")
-              ${system-manager.lib.activateProfileSnippet { node = "node1"; profile = newConfig; }}
-              node1.succeed("systemctl status new-service.service")
-              node1.fail("systemctl status service-9.service")
-              node1.fail("test -f /etc/a/nested/example/foo3")
-              node1.fail("test -f /etc/baz/bar/foo2")
-              node1.succeed("test -f /etc/foo_new")
-              ${system-manager.lib.deactivateProfileSnippet { node = "node1"; profile = newConfig; }}
-              node1.fail("systemctl status new-service.service")
-              node1.fail("test -f /etc/foo_new")
-            '';
-          })
-      ];
-    })
-  )
-//
-forEachUbuntuImage
-  (imgName: image: lib.nameValuePair
-    "vm-test-system-path-${imgName}"
-    (system-manager.lib.make-vm-test "vm-test-system-path-${imgName}" {
-      inherit system;
-      modules = [
-        ({ config, ... }:
-          let
-            inherit (config) hostPkgs;
-          in
-          {
-            nodes = {
-              node1 = { config, ... }: {
-                modules = [
-                  (testModule "old")
-                  ../../../examples/example.nix
-                ];
-                virtualisation.rootImage = system-manager.lib.prepareUbuntuImage {
-                  inherit hostPkgs image;
-                  nodeConfig = config;
-                };
-              };
-            };
-            testScript = ''
-              # Start all machines in parallel
-              start_all()
-              node1.wait_for_unit("default.target")
-              node1.fail("bash --login -c '$(which rg)'")
-              node1.fail("bash --login -c '$(which fd)'")
-              ${system-manager.lib.activateProfileSnippet { node = "node1"; }}
-              node1.wait_for_unit("system-manager.target")
-              node1.wait_for_unit("system-manager-path.service")
-              node1.fail("bash --login -c '$(which fish)'")
-              node1.succeed("bash --login -c 'realpath $(which rg) | grep -F ${hostPkgs.ripgrep}/bin/rg'")
-              node1.succeed("bash --login -c 'realpath $(which fd) | grep -F ${hostPkgs.fd}/bin/fd'")
-              ${system-manager.lib.activateProfileSnippet { node = "node1"; profile = newConfig; }}
-              node1.fail("bash --login -c '$(which rg)'")
-              node1.fail("bash --login -c '$(which fd)'")
-              node1.succeed("bash --login -c 'realpath $(which fish) | grep -F ${hostPkgs.fish}/bin/fish'")
-            '';
-          })
-      ];
-    })
-  )
+forEachUbuntuImage "example"
+  {
+    modules = [
+      (testModule "old")
+      ../../../examples/example.nix
+    ];
+    extraPathsToRegister = [ newConfig ];
+    testScriptFunction = { toplevel, ... }: ''
+      # Start all machines in parallel
+      start_all()
+      vm.wait_for_unit("default.target")
+      vm.succeed("touch /etc/foo_test")
+      vm.succeed("${toplevel}/bin/activate 2>&1 | tee /tmp/output.log")
+      vm.succeed("grep -F 'Error while creating file in /etc: Unmanaged path already exists in filesystem, please remove it and run system-manager again: /etc/foo_test' /tmp/output.log")
+      vm.succeed("rm /etc/foo_test")
+      ${system-manager.lib.activateProfileSnippet { node = "vm"; profile = toplevel; }}
+      vm.wait_for_unit("system-manager.target")
+      vm.succeed("systemctl status service-9.service")
+      vm.succeed("test -f /etc/baz/bar/foo2")
+      vm.succeed("test -f /etc/a/nested/example/foo3")
+      vm.succeed("test -f /etc/foo.conf")
+      vm.succeed("grep -F 'launch_the_rockets = true' /etc/foo.conf")
+      vm.fail("grep -F 'launch_the_rockets = false' /etc/foo.conf")
+      vm.succeed("test -d /var/tmp/system-manager")
+      ${system-manager.lib.activateProfileSnippet { node = "vm"; profile = newConfig; }}
+      vm.succeed("systemctl status new-service.service")
+      vm.fail("systemctl status service-9.service")
+      vm.fail("test -f /etc/a/nested/example/foo3")
+      vm.fail("test -f /etc/baz/bar/foo2")
+      vm.fail("test -f /etc/systemd/system/nginx.service")
+      vm.succeed("test -f /etc/foo_new")
+      vm.succeed("test -d /var/tmp/system-manager")
+      vm.succeed("touch /var/tmp/system-manager/foo1")
+      # Simulate a reboot, to check that the services defined with
+      # system-manager start correctly after a reboot.
+      # TODO: can we find an easy way to really reboot the VM and not
+      # lose the root FS state?
+      vm.systemctl("isolate rescue.target")
+      # We need to send a return character to dismiss the rescue-mode prompt
+      vm.send_key("ret")
+      vm.systemctl("isolate default.target")
+      vm.wait_for_unit("default.target")
+      vm.succeed("systemctl status new-service.service")
+      vm.fail("systemctl status service-9.service")
+      vm.fail("test -f /etc/a/nested/example/foo3")
+      vm.fail("test -f /etc/baz/bar/foo2")
+      vm.succeed("test -f /etc/foo_new")
+      ${system-manager.lib.deactivateProfileSnippet { node = "vm"; profile = newConfig; }}
+      vm.fail("systemctl status new-service.service")
+      vm.fail("test -f /etc/foo_new")
+      #vm.fail("test -f /var/tmp/system-manager/foo1")
+    '';
+  }
+//
+forEachUbuntuImage "prepopulate" {
+  modules = [
+    ../../../examples/example.nix
+  ];
+  extraPathsToRegister = [ newConfig ];
+  testScriptFunction = { toplevel, ... }: ''
+    # Start all machines in parallel
+    start_all()
+    vm.wait_for_unit("default.target")
+    ${system-manager.lib.prepopulateProfileSnippet { node = "vm"; profile = toplevel; }}
+    vm.systemctl("daemon-reload")
+    # Simulate a reboot, to check that the services defined with
+    # system-manager start correctly after a reboot.
+    # TODO: can we find an easy way to really reboot the VM and not
+    # lose the root FS state?
+    vm.systemctl("isolate rescue.target")
+    # We need to send a return character to dismiss the rescue-mode prompt
+    vm.send_key("ret")
+    vm.systemctl("isolate default.target")
+    vm.wait_for_unit("system-manager.target")
+    vm.succeed("systemctl status service-9.service")
+    vm.succeed("test -f /etc/baz/bar/foo2")
+    vm.succeed("test -f /etc/a/nested/example/foo3")
+    vm.succeed("test -f /etc/foo.conf")
+    vm.succeed("grep -F 'launch_the_rockets = true' /etc/foo.conf")
+    vm.fail("grep -F 'launch_the_rockets = false' /etc/foo.conf")
+    ${system-manager.lib.activateProfileSnippet { node = "vm"; profile = newConfig; }}
+    vm.succeed("systemctl status new-service.service")
+    vm.fail("systemctl status service-9.service")
+    vm.fail("test -f /etc/a/nested/example/foo3")
+    vm.fail("test -f /etc/baz/bar/foo2")
+    vm.succeed("test -f /etc/foo_new")
+    ${system-manager.lib.deactivateProfileSnippet { node = "vm"; profile = newConfig; }}
+    vm.fail("systemctl status new-service.service")
+    vm.fail("test -f /etc/foo_new")
+  '';
+}
+//
+forEachUbuntuImage "system-path" {
+  modules = [
+    (testModule "old")
+    ../../../examples/example.nix
+  ];
+  extraPathsToRegister = [ newConfig ];
+  testScriptFunction = { toplevel, hostPkgs, ... }: ''
+    # Start all machines in parallel
+    start_all()
+    vm.wait_for_unit("default.target")
+    vm.fail("bash --login -c '$(which rg)'")
+    vm.fail("bash --login -c '$(which fd)'")
+    ${system-manager.lib.activateProfileSnippet { node = "vm"; profile = toplevel; }}
+    vm.wait_for_unit("system-manager.target")
+    vm.wait_for_unit("system-manager-path.service")
+    #vm.fail("bash --login -c '$(which fish)'")
+    vm.succeed("bash --login -c 'realpath $(which rg) | grep -F ${hostPkgs.ripgrep}/bin/rg'")
+    vm.succeed("bash --login -c 'realpath $(which fd) | grep -F ${hostPkgs.fd}/bin/fd'")
+    ${system-manager.lib.activateProfileSnippet { node = "vm"; profile = newConfig; }}
+    vm.fail("bash --login -c '$(which rg)'")
+    vm.fail("bash --login -c '$(which fd)'")
+    vm.succeed("bash --login -c 'realpath $(which fish) | grep -F ${hostPkgs.fish}/bin/fish'")
+  '';
+}
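One knob added above but not exercised by these tests is `projectTest`, which defaults to `test: test.sandboxed`. A hedged sketch of overriding it to keep the raw nix-vm-test value instead of its sandboxed projection (whether nix-vm-test exposes further flavours is an assumption, not something this diff shows):

    forEachUbuntuImage "example-unprojected" {
      modules = [ ../../../examples/example.nix ];
      testScriptFunction = { toplevel, ... }: ''
        vm.wait_for_unit("default.target")
      '';
      # Skip the default `test: test.sandboxed` projection.
      projectTest = test: test;
    }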

test/nix/test-driver/modules/default.nix

@@ -1,104 +0,0 @@
-{ lib, system-manager, ... }:
-let
-  inherit (lib) types;
-
-  pkgsType = lib.mkOptionType {
-    name = "nixpkgs";
-    description = "An evaluation of Nixpkgs; the top level attribute set of packages";
-    check = builtins.isAttrs;
-  };
-
-  nodeOptions = { config, name, ... }: {
-    options = {
-      system.name = lib.mkOption {
-        type = types.str;
-        default = name;
-      };
-
-      modules = lib.mkOption {
-        type = types.listOf types.deferredModule;
-      };
-
-      systemConfig = lib.mkOption {
-        type = types.package;
-        internal = true;
-        readOnly = true;
-      };
-
-      virtualisation = {
-        rootImage = lib.mkOption {
-          type = types.package;
-        };
-
-        memorySize = lib.mkOption {
-          type = types.ints.between 256 (1024 * 128);
-          default = 1024;
-        };
-
-        cpus = lib.mkOption {
-          type = types.ints.between 1 1024;
-          default = 2;
-        };
-
-        # TODO: implement this properly, or remove the option
-        # See: nixos/lib/testing/network.nix
-        vlans = lib.mkOption {
-          type = types.ints.between 1 1024;
-          default = 1;
-        };
-
-        sharedDirectories = lib.mkOption {
-          type = types.attrsOf
-            (types.submodule {
-              options = {
-                source = lib.mkOption {
-                  type = types.str;
-                };
-                target = lib.mkOption {
-                  type = types.str;
-                };
-              };
-            });
-          default = { };
-        };
-      };
-    };
-
-    config = {
-      # Include these shared directories by default, they are used by the test driver.
-      virtualisation.sharedDirectories = {
-        xchg = {
-          source = ''"$TMPDIR"/xchg'';
-          target = "/tmp/xchg";
-        };
-        shared = {
-          source = ''"''${SHARED_DIR:-$TMPDIR/xchg}"'';
-          target = "/tmp/shared";
-        };
-      };
-
-      systemConfig = system-manager.lib.makeSystemConfig {
-        inherit (config) modules;
-      };
-    };
-  };
-in
-{
-  options = {
-    hostPkgs = lib.mkOption {
-      type = pkgsType;
-    };
-
-    nodes = lib.mkOption {
-      type = types.attrsOf (types.submodule nodeOptions);
-      default = { };
-    };
-
-    testScript = lib.mkOption {
-      type = types.str;
-    };
-  };
-}
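The per-node options deleted here (root image, memory, CPUs, shared directories) are now nix-vm-test's responsibility. For orientation, a minimal sketch of driving nix-vm-test directly, based only on the call sites added in test/nix/modules above; the "22_04" image key is an assumption, as the real keys come from `ubuntu.images`:

    { nix-vm-test, system ? "x86_64-linux" }:
    let
      ubuntu = nix-vm-test.lib.${system}.ubuntu;
    in
    ubuntu."22_04" {
      # Python test script executed by the test driver on the host.
      testScript = ''
        vm.wait_for_unit("default.target")
      '';
      # From the extra-paths-to-register branch pinned in flake.nix above.
      extraPathsToRegister = [ ];
      sharedDirs = { };
    }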