Merge pull request #171382 from Patryk27/nixos/improve-lxd-tests

nixos/lxd: improve tests
main
Arnout Engelen (2 years ago), committed via GitHub
commit 3275c08534
  1. nixos/maintainers/scripts/lxd/lxd-image.nix (2 changed lines)
  2. nixos/tests/all-tests.nix (1 changed line)
  3. nixos/tests/common/lxd/config.yaml (24 changed lines)
  4. nixos/tests/lxd-image-server.nix (63 changed lines)
  5. nixos/tests/lxd-image.nix (89 changed lines)
  6. nixos/tests/lxd.nix (134 changed lines)
  7. pkgs/tools/admin/lxd/default.nix (1 changed line)
  8. pkgs/tools/virtualization/lxd-image-server/default.nix (3 changed lines)
  9. pkgs/top-level/release-lib.nix (2 changed lines)

nixos/maintainers/scripts/lxd/lxd-image.nix
@@ -27,7 +27,7 @@ with lib;
networking.useDHCP = false;
networking.interfaces.eth0.useDHCP = true;
# As this is intended as a stadalone image, undo some of the minimal profile stuff
# As this is intended as a standalone image, undo some of the minimal profile stuff
documentation.enable = true;
documentation.nixos.enable = true;
environment.noXlibs = false;

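The corrected comment above also documents the intent of this block: the image deliberately re-enables options that the minimal profile turns off. A minimal sketch of that override pattern, assuming a module that imports the minimal profile directly and that the profile declares its settings with mkDefault; the option names and values are the ones from the hunk above:

    # Hypothetical standalone-image module illustrating the pattern;
    # not the actual lxd-image.nix.
    { modulesPath, ... }:
    {
      imports = [ "${modulesPath}/profiles/minimal.nix" ];

      # Undo some of the minimal profile for a standalone image:
      documentation.enable = true;
      documentation.nixos.enable = true;
      environment.noXlibs = false;
    }
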
nixos/tests/all-tests.nix
@@ -284,7 +284,6 @@ in
loki = handleTest ./loki.nix {};
lvm2 = handleTest ./lvm2 {};
lxd = handleTest ./lxd.nix {};
lxd-image = handleTest ./lxd-image.nix {};
lxd-nftables = handleTest ./lxd-nftables.nix {};
lxd-image-server = handleTest ./lxd-image-server.nix {};
#logstash = handleTest ./logstash.nix {};

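The entries above are what make the merged test reachable from the top level (the dedicated lxd-image entry disappears together with its file). A small sketch of building it from a checkout; pkgs.nixosTests is the same attribute set the passthru.tests hunks further down refer to, and the <nixpkgs> reference is an assumption about how the checkout is exposed:

    # build-lxd-test.nix -- hypothetical helper, built with `nix-build build-lxd-test.nix`
    let
      pkgs = import <nixpkgs> { };
    in
      # Evaluates to the VM-test derivation registered as `lxd` above.
      pkgs.nixosTests.lxd
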
nixos/tests/common/lxd/config.yaml (new file)
@@ -0,0 +1,24 @@
storage_pools:
- name: default
driver: dir
config:
source: /var/lxd-pool
networks:
- name: lxdbr0
type: bridge
config:
ipv4.address: auto
ipv6.address: none
profiles:
- name: default
devices:
eth0:
name: eth0
network: lxdbr0
type: nic
root:
path: /
pool: default
type: disk

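This new file factors out the preseed that was previously pasted into each test (see the removed writeText blocks below). A minimal sketch of how a test consumes it, assembled from the hunks that follow; the relative path assumes the test file lives in nixos/tests/:

    # Fragment of a NixOS test; only the relevant testScript lines are shown.
    { ... }:
    {
      testScript = ''
        # lxd expects the pool's directory (declared in the preseed) to already exist
        machine.succeed("mkdir /var/lxd-pool")
        machine.succeed(
            "cat ${./common/lxd/config.yaml} | lxd init --preseed"
        )
      '';
    }
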
nixos/tests/lxd-image-server.nix
@@ -1,54 +1,21 @@
import ./make-test-python.nix ({ pkgs, ...} :
import ./make-test-python.nix ({ pkgs, lib, ... } :
let
# Since we don't have access to the internet during the tests, we have to
# pre-fetch lxd containers beforehand.
#
# I've chosen to import Alpine Linux, because its image is turbo-tiny and,
# generally, sufficient for our tests.
alpine-meta = pkgs.fetchurl {
url = "https://tarballs.nixos.org/alpine/3.12/lxd.tar.xz";
hash = "sha256-1tcKaO9lOkvqfmG/7FMbfAEToAuFy2YMewS8ysBKuLA=";
};
alpine-rootfs = pkgs.fetchurl {
url = "https://tarballs.nixos.org/alpine/3.12/rootfs.tar.xz";
hash = "sha256-Tba9sSoaiMtQLY45u7p5DMqXTSDgs/763L/SQp0bkCA=";
lxd-image = import ../release.nix {
configuration = {
# Building documentation makes the test unnecessarily take a longer time:
documentation.enable = lib.mkForce false;
};
};
lxd-config = pkgs.writeText "config.yaml" ''
storage_pools:
- name: default
driver: dir
config:
source: /var/lxd-pool
networks:
- name: lxdbr0
type: bridge
config:
ipv4.address: auto
ipv6.address: none
profiles:
- name: default
devices:
eth0:
name: eth0
network: lxdbr0
type: nic
root:
path: /
pool: default
type: disk
'';
lxd-image-metadata = lxd-image.lxdMeta.${pkgs.system};
lxd-image-rootfs = lxd-image.lxdImage.${pkgs.system};
in {
name = "lxd-image-server";
meta = with pkgs.lib.maintainers; {
maintainers = [ mkg20001 ];
maintainers = [ mkg20001 patryk27 ];
};
nodes.machine = { lib, ... }: {
@@ -100,20 +67,20 @@ in {
# lxd expects the pool's directory to already exist
machine.succeed("mkdir /var/lxd-pool")
machine.succeed(
"cat ${lxd-config} | lxd init --preseed"
"cat ${./common/lxd/config.yaml} | lxd init --preseed"
)
machine.succeed(
"lxc image import ${alpine-meta} ${alpine-rootfs} --alias alpine"
"lxc image import ${lxd-image-metadata}/*/*.tar.xz ${lxd-image-rootfs}/*/*.tar.xz --alias nixos"
)
loc = "/var/www/simplestreams/images/iats/alpine/amd64/default/v1"
loc = "/var/www/simplestreams/images/iats/nixos/amd64/default/v1"
with subtest("push image to server"):
machine.succeed("lxc launch alpine test")
machine.succeed("lxc stop test")
machine.succeed("lxc launch nixos test")
machine.sleep(5)
machine.succeed("lxc stop -f test")
machine.succeed("lxc publish --public test --alias=testimg")
machine.succeed("lxc image export testimg")
machine.succeed("ls >&2")

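The core of this change: instead of pre-fetched Alpine tarballs, the test now builds a NixOS container image through the release expression and imports it into lxd. A condensed sketch of the pattern, assembled from the hunk above; the ../release.nix path assumes a file under nixos/tests/, and the globs match the tarballs inside the lxdMeta and lxdImage output directories:

    { pkgs, lib, ... }:
    let
      lxd-image = import ../release.nix {
        configuration = {
          # Skipping the manual keeps the image build short.
          documentation.enable = lib.mkForce false;
        };
      };
      lxd-image-metadata = lxd-image.lxdMeta.${pkgs.system};
      lxd-image-rootfs = lxd-image.lxdImage.${pkgs.system};
    in
    {
      testScript = ''
        machine.succeed(
            "lxc image import ${lxd-image-metadata}/*/*.tar.xz ${lxd-image-rootfs}/*/*.tar.xz --alias nixos"
        )
      '';
    }
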
nixos/tests/lxd-image.nix (deleted)
@@ -1,89 +0,0 @@
# This test ensures that the nixOS lxd images builds and functions properly
# It has been extracted from `lxd.nix` to seperate failures of just the image and the lxd software
import ./make-test-python.nix ({ pkgs, ...} : let
release = import ../release.nix {
/* configuration = {
environment.systemPackages = with pkgs; [ stdenv ]; # inject stdenv so rebuild test works
}; */
};
metadata = release.lxdMeta.${pkgs.system};
image = release.lxdImage.${pkgs.system};
lxd-config = pkgs.writeText "config.yaml" ''
storage_pools:
- name: default
driver: dir
config:
source: /var/lxd-pool
networks:
- name: lxdbr0
type: bridge
config:
ipv4.address: auto
ipv6.address: none
profiles:
- name: default
devices:
eth0:
name: eth0
network: lxdbr0
type: nic
root:
path: /
pool: default
type: disk
'';
in {
name = "lxd-image";
meta = with pkgs.lib.maintainers; {
maintainers = [ mkg20001 ];
};
nodes.machine = { lib, ... }: {
virtualisation = {
# disk full otherwise
diskSize = 2048;
lxc.lxcfs.enable = true;
lxd.enable = true;
};
};
testScript = ''
machine.wait_for_unit("sockets.target")
machine.wait_for_unit("lxd.service")
machine.wait_for_file("/var/lib/lxd/unix.socket")
# It takes additional second for lxd to settle
machine.sleep(1)
# lxd expects the pool's directory to already exist
machine.succeed("mkdir /var/lxd-pool")
machine.succeed(
"cat ${lxd-config} | lxd init --preseed"
)
# TODO: test custom built container aswell
with subtest("importing container works"):
machine.succeed("lxc image import ${metadata}/*/*.tar.xz ${image}/*/*.tar.xz --alias nixos")
with subtest("launching container works"):
machine.succeed("lxc launch nixos machine -c security.nesting=true")
# make sure machine boots up properly
machine.sleep(5)
with subtest("container shell works"):
machine.succeed("echo true | lxc exec machine /run/current-system/sw/bin/bash -")
machine.succeed("lxc exec machine /run/current-system/sw/bin/true")
# with subtest("rebuilding works"):
# machine.succeed("lxc exec machine /run/current-system/sw/bin/nixos-rebuild switch")
'';
})

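The coverage of this deleted file (import the built image, launch it, exec into it) moves into lxd.nix below. One reading aid for those checks: commands are addressed through the container's system profile path, rather than relying on PATH being set up inside the NixOS container when lxc exec starts the binary. A stripped-down fragment in that style; "container" is a placeholder name:

    {
      testScript = ''
        # Address binaries through the system profile instead of relying on PATH.
        machine.succeed("echo true | lxc exec container /run/current-system/sw/bin/bash -")
        machine.succeed("lxc exec container /run/current-system/sw/bin/true")
      '';
    }
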
nixos/tests/lxd.nix
@@ -1,79 +1,18 @@
import ./make-test-python.nix ({ pkgs, ...} :
import ./make-test-python.nix ({ pkgs, lib, ... } :
let
# Since we don't have access to the internet during the tests, we have to
# pre-fetch lxd containers beforehand.
#
# I've chosen to import Alpine Linux, because its image is turbo-tiny and,
# generally, sufficient for our tests.
alpine-meta-x86 = pkgs.fetchurl {
url = "https://tarballs.nixos.org/alpine/3.12/lxd.tar.xz";
hash = "sha256-1tcKaO9lOkvqfmG/7FMbfAEToAuFy2YMewS8ysBKuLA=";
};
alpine-meta-for = arch: pkgs.stdenv.mkDerivation {
name = "alpine-meta-${arch}";
version = "3.12";
unpackPhase = "true";
buildPhase = ''
runHook preBuild
tar xvf ${alpine-meta-x86}
sed -i 's/architecture: .*/architecture: ${arch}/' metadata.yaml
runHook postBuild
'';
installPhase = ''
runHook preInstall
tar czRf $out *
runHook postInstall
'';
};
lxd-image = import ../release.nix {
configuration = {
# Building documentation makes the test unnecessarily take a longer time:
documentation.enable = lib.mkForce false;
alpine-meta = {
x86_64-linux = alpine-meta-x86;
aarch64-linux = alpine-meta-for "aarch64";
}.${pkgs.system} or (throw "Unsupported system: ${pkgs.system}");
alpine-rootfs = {
x86_64-linux = pkgs.fetchurl {
url = "https://tarballs.nixos.org/alpine/3.12/rootfs.tar.xz";
hash = "sha256-Tba9sSoaiMtQLY45u7p5DMqXTSDgs/763L/SQp0bkCA=";
};
aarch64-linux = pkgs.fetchurl {
url = "https://dl-cdn.alpinelinux.org/alpine/v3.15/releases/aarch64/alpine-minirootfs-3.15.4-aarch64.tar.gz";
hash = "sha256-9kBz8Jwmo8XepJhTMt5zilCaHHpflnUH7y9+0To39Us=";
# Our tests require `grep` & friends:
environment.systemPackages = with pkgs; [ busybox ];
};
}.${pkgs.system} or (throw "Unsupported system: ${pkgs.system}");
lxd-config = pkgs.writeText "config.yaml" ''
storage_pools:
- name: default
driver: dir
config:
source: /var/lxd-pool
networks:
- name: lxdbr0
type: bridge
config:
ipv4.address: auto
ipv6.address: none
profiles:
- name: default
devices:
eth0:
name: eth0
network: lxdbr0
type: nic
root:
path: /
pool: default
type: disk
'';
};
lxd-image-metadata = lxd-image.lxdMeta.${pkgs.system};
lxd-image-rootfs = lxd-image.lxdImage.${pkgs.system};
in {
name = "lxd";
@@ -84,6 +23,8 @@ in {
nodes.machine = { lib, ... }: {
virtualisation = {
diskSize = 2048;
# Since we're testing `limits.cpu`, we've gotta have a known number of
# cores to lean on
cores = 2;
@@ -108,61 +49,66 @@ in {
machine.succeed("mkdir /var/lxd-pool")
machine.succeed(
"cat ${lxd-config} | lxd init --preseed"
"cat ${./common/lxd/config.yaml} | lxd init --preseed"
)
machine.succeed(
"lxc image import ${alpine-meta} ${alpine-rootfs} --alias alpine"
"lxc image import ${lxd-image-metadata}/*/*.tar.xz ${lxd-image-rootfs}/*/*.tar.xz --alias nixos"
)
with subtest("Containers can be launched and destroyed"):
machine.succeed("lxc launch alpine test")
machine.succeed("lxc exec test true")
machine.succeed("lxc delete -f test")
with subtest("Container can be managed"):
machine.succeed("lxc launch nixos container")
machine.sleep(5)
machine.succeed("echo true | lxc exec container /run/current-system/sw/bin/bash -")
machine.succeed("lxc exec container true")
machine.succeed("lxc delete -f container")
with subtest("Containers are being mounted with lxcfs inside"):
machine.succeed("lxc launch alpine test")
with subtest("Container is mounted with lxcfs inside"):
machine.succeed("lxc launch nixos container")
machine.sleep(5)
## ---------- ##
## limits.cpu ##
machine.succeed("lxc config set test limits.cpu 1")
machine.succeed("lxc restart test")
machine.succeed("lxc config set container limits.cpu 1")
machine.succeed("lxc restart container")
machine.sleep(5)
# Since Alpine doesn't have `nproc` pre-installed, we've gotta resort
# to the primal methods
assert (
"1"
== machine.succeed("lxc exec test grep -- -c ^processor /proc/cpuinfo").strip()
== machine.succeed("lxc exec container grep -- -c ^processor /proc/cpuinfo").strip()
)
machine.succeed("lxc config set test limits.cpu 2")
machine.succeed("lxc restart test")
machine.succeed("lxc config set container limits.cpu 2")
machine.succeed("lxc restart container")
machine.sleep(5)
assert (
"2"
== machine.succeed("lxc exec test grep -- -c ^processor /proc/cpuinfo").strip()
== machine.succeed("lxc exec container grep -- -c ^processor /proc/cpuinfo").strip()
)
## ------------- ##
## limits.memory ##
machine.succeed("lxc config set test limits.memory 64MB")
machine.succeed("lxc restart test")
machine.succeed("lxc config set container limits.memory 64MB")
machine.succeed("lxc restart container")
machine.sleep(5)
assert (
"MemTotal: 62500 kB"
== machine.succeed("lxc exec test grep -- MemTotal /proc/meminfo").strip()
== machine.succeed("lxc exec container grep -- MemTotal /proc/meminfo").strip()
)
machine.succeed("lxc config set test limits.memory 128MB")
machine.succeed("lxc restart test")
machine.succeed("lxc config set container limits.memory 128MB")
machine.succeed("lxc restart container")
machine.sleep(5)
assert (
"MemTotal: 125000 kB"
== machine.succeed("lxc exec test grep -- MemTotal /proc/meminfo").strip()
== machine.succeed("lxc exec container grep -- MemTotal /proc/meminfo").strip()
)
machine.succeed("lxc delete -f test")
machine.succeed("lxc delete -f container")
'';
})

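The MemTotal assertions above hide a small unit conversion: limits.memory is given in decimal megabytes, while /proc/meminfo reports binary kilobytes (labelled kB). A worked check of the two expected strings, as plain Nix arithmetic:

    # Pure arithmetic; no lxd involved.
    {
      # 64 MB  = 64 * 1000 * 1000 bytes, and /proc/meminfo divides by 1024:
      memTotal64MB  = 64 * 1000 * 1000 / 1024;   # = 62500  -> "MemTotal: 62500 kB"
      # 128 MB = 128 * 1000 * 1000 bytes:
      memTotal128MB = 128 * 1000 * 1000 / 1024;  # = 125000 -> "MemTotal: 125000 kB"
    }
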
pkgs/tools/admin/lxd/default.nix
@@ -48,6 +48,7 @@ buildGo118Package rec {
'';
passthru.tests.lxd = nixosTests.lxd;
passthru.tests.lxd-nftables = nixosTests.lxd-nftables;
nativeBuildInputs = [ installShellFiles pkg-config makeWrapper ];
buildInputs = [ lxc acl libcap dqlite.dev raft-canonical.dev

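With the line added above, both VM tests hang off the lxd package itself, so they can be built without knowing the nixosTests attribute path. A small sketch of consuming them; the <nixpkgs> reference is an assumption about how the checkout is exposed:

    let
      pkgs = import <nixpkgs> { };
    in
    {
      # The same derivations as pkgs.nixosTests.lxd and pkgs.nixosTests.lxd-nftables,
      # reached via the package's passthru, e.g. for `nix-build -A lxd.tests.lxd`.
      inherit (pkgs.lxd.tests) lxd lxd-nftables;
    }
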
pkgs/tools/virtualization/lxd-image-server/default.nix
@@ -3,6 +3,7 @@
, rsync
, python3
, fetchFromGitHub
, nixosTests
}:
python3.pkgs.buildPythonApplication rec {
@@ -37,6 +38,8 @@ python3.pkgs.buildPythonApplication rec {
doCheck = false;
passthru.tests.lxd-image-server = nixosTests.lxd-image-server;
meta = with lib; {
description = "Creates and manages a simplestreams lxd image server on top of nginx";
homepage = "https://github.com/Avature/lxd-image-server";

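The lxd-image-server package follows the same convention: nixosTests is taken as a function argument (callPackage fills it in) and the matching VM test is exposed under passthru.tests. A hypothetical minimal derivation showing just that wiring; the dummy runCommand body stands in for the real build:

    { runCommand, nixosTests }:

    runCommand "lxd-image-server-passthru-sketch"
      {
        # The key line: attach the NixOS VM test to the package.
        passthru.tests.lxd-image-server = nixosTests.lxd-image-server;
      }
      "touch $out"
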
pkgs/top-level/release-lib.nix
@@ -103,7 +103,7 @@ rec {
forAllSystems = genAttrs supportedSystems;
# Generate attributes for all sytems matching at least one of the given
# Generate attributes for all systems matching at least one of the given
# patterns
forMatchingSystems = metaPatterns: genAttrs (supportedMatches metaPatterns);

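For context on the comment being fixed: forAllSystems and forMatchingSystems are thin wrappers around lib.genAttrs, producing one attribute per system. A small illustration of the shape they return; the system list here is an example, not the real supportedSystems value:

    let
      lib = (import <nixpkgs> { }).lib;
      supportedSystems = [ "x86_64-linux" "aarch64-linux" ];  # example value
      forAllSystems = lib.genAttrs supportedSystems;
    in
      forAllSystems (system: "job for ${system}")
      # => { aarch64-linux = "job for aarch64-linux";
      #      x86_64-linux  = "job for x86_64-linux"; }
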