Add a pre-populate subcommand. (#11)

This commit is contained in:
Ramses 2023-05-08 18:35:43 +02:00 committed by GitHub
parent 9254ace18f
commit cead991210
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
5 changed files with 237 additions and 60 deletions

View file

@ -110,6 +110,12 @@ in
"$@"
'';
prepopulateScript = pkgs.writeShellScript "prepopulate" ''
${system-manager}/bin/system-manager pre-populate \
--store-path "$(dirname $(realpath $(dirname ''${0})))" \
"$@"
'';
deactivationScript = pkgs.writeShellScript "deactivate" ''
${system-manager}/bin/system-manager deactivate "$@"
'';
@ -170,6 +176,7 @@ in
(linkFarmEntryFromDrv servicesPath)
(linkFarmEntryFromDrv etcPath)
(linkFarmBinEntryFromDrv activationScript)
(linkFarmBinEntryFromDrv prepopulateScript)
(linkFarmBinEntryFromDrv deactivationScript)
(linkFarmBinEntryFromDrv registerProfileScript)
(linkFarmBinEntryFromDrv preActivationAssertionScript)
@ -311,19 +318,22 @@ in
inherit (image) hash;
url = "https://cloud-images.ubuntu.com/releases/${image.releaseName}/release-${image.releaseTimeStamp}/${image.name}";
};
resultImg = "./image.qcow2";
in
pkgs.runCommand "configure-vm" { } ''
pkgs.runCommand "vm-image.qcow2" { } ''
# We will modify the VM image, so we need a mutable copy
install -m777 ${img} ./img.qcow2
install -m777 ${img} ${resultImg}
# Copy the service files here, since otherwise they end up in the VM
# wwith their paths including the nix hash
# with their paths including the nix hash
cp ${self.lib.backdoor { inherit pkgs; }} backdoor.service
cp ${self.lib.mount_store { inherit pkgs; }} mount-store.service
#export LIBGUESTFS_DEBUG=1 LIBGUESTFS_TRACE=1
${lib.concatStringsSep " \\\n" [
"${pkgs.guestfs-tools}/bin/virt-customize"
"-a ./img.qcow2"
"-a ${resultImg}"
"--smp 2"
"--memsize 256"
"--no-network"
@ -347,7 +357,7 @@ in
'')
]};
cp ./img.qcow2 $out
cp ${resultImg} $out
'';
make-vm-test =

View file

@ -104,7 +104,51 @@ pub fn activate(store_path: &StorePath, ephemeral: bool) -> Result<()> {
}
}
.write_to_file(state_file)?;
Ok(())
}
/// Pre-populate a system-manager profile: run the pre-activation assertions,
/// lay down the etc files, and record the set of active services in the state
/// file — without starting/stopping any systemd units.
///
/// Errors carrying a partial result (`ActivationError::WithPartialResult`) are
/// logged and the partial state is persisted, so a later run can reconcile.
pub fn prepopulate(store_path: &StorePath, ephemeral: bool) -> Result<()> {
    log::info!("Pre-populating system-manager profile: {store_path}");
    if ephemeral {
        log::info!("Running in ephemeral mode");
    }

    log::info!("Running pre-activation assertions...");
    if !run_preactivation_assertions(store_path)?.success() {
        anyhow::bail!("Failure in pre-activation assertions.");
    }

    let state_file = &get_state_file()?;
    let old_state = State::from_file(state_file)?;

    log::info!("Activating etc files...");
    let new_state = match etc_files::activate(store_path, old_state.file_tree, ephemeral) {
        Ok(etc_tree) => {
            log::info!("Registering systemd services...");
            // On failure we still get a partial service set back; keep it so the
            // state file reflects what was actually registered.
            let services = match services::get_active_services(store_path, old_state.services) {
                Ok(services) => services,
                Err(ActivationError::WithPartialResult { result, source }) => {
                    log::error!("Error during activation: {source:?}");
                    result
                }
            };
            State {
                file_tree: etc_tree,
                services,
            }
        }
        Err(ActivationError::WithPartialResult { result, source }) => {
            // etc activation failed: persist the partial file tree, keep the
            // previously recorded services untouched.
            log::error!("Error during activation: {source:?}");
            State {
                file_tree: result,
                ..old_state
            }
        }
    };

    new_state.write_to_file(state_file)?;
    Ok(())
}

View file

@ -30,14 +30,10 @@ fn print_services(services: &Services) -> String {
out
}
pub fn activate(
pub fn get_active_services(
store_path: &StorePath,
old_services: Services,
ephemeral: bool,
) -> ServiceActivationResult {
verify_systemd_dir(ephemeral)
.map_err(|e| ActivationError::with_partial_result(old_services.clone(), e))?;
log::info!("Reading new service definitions...");
let file = fs::File::open(
Path::new(&store_path.store_path)
@ -49,6 +45,18 @@ pub fn activate(
let services: Services = serde_json::from_reader(reader)
.map_err(|e| ActivationError::with_partial_result(old_services.clone(), e))?;
log::debug!("{}", print_services(&services));
Ok(services)
}
pub fn activate(
store_path: &StorePath,
old_services: Services,
ephemeral: bool,
) -> ServiceActivationResult {
verify_systemd_dir(ephemeral)
.map_err(|e| ActivationError::with_partial_result(old_services.clone(), e))?;
let services = get_active_services(store_path, old_services.clone())?;
let services_to_stop = old_services.clone().relative_complement(services.clone());
let services_to_reload = get_services_to_reload(services.clone(), old_services.clone());

View file

@ -30,14 +30,14 @@ struct Args {
#[derive(clap::Args, Debug)]
struct BuildArgs {
#[arg(long = "flake", name = "FLAKE_URI")]
/// The flake defining the system-manager profile
/// The flake URI defining the system-manager profile
flake_uri: String,
}
#[derive(clap::Args, Debug)]
struct GenerateArgs {
#[arg(long = "flake", name = "FLAKE_URI")]
/// The flake defining the system-manager profile
/// The flake URI defining the system-manager profile
flake_uri: Option<String>,
#[arg(long)]
@ -71,6 +71,13 @@ enum Action {
#[command(flatten)]
activation_args: ActivationArgs,
},
PrePopulate {
#[arg(long)]
/// The store path containing the system-manager profile to pre-populate
store_path: StorePath,
#[command(flatten)]
activation_args: ActivationArgs,
},
/// Build a new system-manager profile without registering it as a nix profile
Build {
#[command(flatten)]
@ -117,6 +124,13 @@ fn go(args: Args) -> Result<()> {
copy_closure(&store_path, &target_host)?;
activate(&store_path, ephemeral, &target_host, use_remote_sudo)
}
Action::PrePopulate {
store_path,
activation_args: ActivationArgs { ephemeral },
} => {
copy_closure(&store_path, &target_host)?;
prepopulate(&store_path, ephemeral, &target_host, use_remote_sudo)
}
Action::Build {
build_args: BuildArgs { flake_uri },
} => build(&flake_uri, &target_host),
@ -225,6 +239,26 @@ fn activate(
}
}
/// CLI dispatcher for the `pre-populate` action.
///
/// With a `target_host`, the profile's closure is assumed to already be on the
/// remote machine; we invoke its bundled `pre-populate` wrapper script there.
/// Without one, we require root and call straight into the library.
fn prepopulate(
    store_path: &StorePath,
    ephemeral: bool,
    target_host: &Option<String>,
    use_remote_sudo: bool,
) -> Result<()> {
    match target_host {
        Some(host) => {
            invoke_remote_script(&store_path.store_path, "pre-populate", host, use_remote_sudo)?;
            Ok(())
        }
        None => {
            check_root()?;
            system_manager::activate::prepopulate(store_path, ephemeral)
        }
    }
}
fn deactivate(
store_path: Option<StorePath>,
target_host: &Option<String>,

View file

@ -3,13 +3,14 @@
, system
}:
lib.flip lib.mapAttrs' system-manager.lib.images.ubuntu.${system} (imgName: image:
let
forEachUbuntuImage = lib.flip lib.mapAttrs' system-manager.lib.images.ubuntu.${system};
newConfig = system-manager.lib.makeSystemConfig {
modules = [
({ lib, pkgs, ... }: {
config = {
nixpkgs.hostPlatform = "x86_64-linux";
nixpkgs.hostPlatform = system;
services.nginx.enable = true;
@ -20,6 +21,7 @@ let
'';
};
};
systemd.services.new-service = {
enable = true;
description = "new-service";
@ -28,7 +30,7 @@ let
RemainAfterExit = true;
ExecReload = "${lib.getBin pkgs.coreutils}/bin/true";
};
wantedBy = [ "system-manager.target" ];
wantedBy = [ "system-manager.target" "default.target" ];
script = ''
sleep 2
'';
@ -38,7 +40,9 @@ let
];
};
in
lib.nameValuePair
forEachUbuntuImage
(imgName: image: lib.nameValuePair
"example-${imgName}"
(system-manager.lib.make-vm-test {
inherit system;
@ -84,6 +88,22 @@ lib.nameValuePair
node1.wait_until_fails("cat /etc/baz/bar/foo2")
node1.wait_for_file("/etc/foo_new")
# Simulate a reboot, to check that the services defined with
# system-manager start correctly after a reboot.
# TODO: can we find an easy way to really reboot the VM and not
# lose the root FS state?
node1.systemctl("isolate rescue.target")
# We need to send a return character to dismiss the rescue-mode prompt
node1.send_key("ret")
node1.systemctl("isolate default.target")
node1.wait_for_unit("default.target")
node1.wait_for_unit("new-service.service")
node1.wait_until_fails("systemctl status service-9.service")
node1.wait_until_fails("cat /etc/a/nested/example/foo3")
node1.wait_until_fails("cat /etc/baz/bar/foo2")
node1.wait_for_file("/etc/foo_new")
node1.execute("${newConfig}/bin/deactivate")
node1.wait_until_fails("systemctl status new-service.service")
node1.wait_until_fails("cat /etc/foo_new")
@ -91,4 +111,65 @@ lib.nameValuePair
})
];
})
)
)
//
forEachUbuntuImage
(imgName: image: lib.nameValuePair
"prepopulate-${imgName}"
(system-manager.lib.make-vm-test {
inherit system;
modules = [
({ config, ... }:
let
inherit (config) hostPkgs;
in
{
nodes = {
node1 = { config, ... }: {
modules = [
../../../examples/example.nix
];
virtualisation.rootImage = system-manager.lib.prepareUbuntuImage {
inherit hostPkgs image;
nodeConfig = config;
};
};
};
testScript = ''
# Start all machines in parallel
start_all()
node1.wait_for_unit("default.target")
node1.execute("/system-manager-profile/bin/prepopulate")
node1.systemctl("daemon-reload")
node1.systemctl("start default.target")
node1.wait_for_unit("system-manager.target")
node1.wait_for_unit("service-9.service")
node1.wait_for_file("/etc/baz/bar/foo2")
node1.wait_for_file("/etc/a/nested/example/foo3")
node1.wait_for_file("/etc/foo.conf")
node1.succeed("grep -F 'launch_the_rockets = true' /etc/foo.conf")
node1.fail("grep -F 'launch_the_rockets = false' /etc/foo.conf")
node1.execute("${newConfig}/bin/activate")
node1.wait_for_unit("new-service.service")
node1.wait_until_fails("systemctl status service-9.service")
node1.wait_until_fails("cat /etc/a/nested/example/foo3")
node1.wait_until_fails("cat /etc/baz/bar/foo2")
node1.wait_for_file("/etc/foo_new")
node1.execute("${newConfig}/bin/deactivate")
node1.wait_until_fails("systemctl status new-service.service")
node1.wait_until_fails("cat /etc/foo_new")
'';
}
)
];
})
)