* nixos/k3s: simplify config expression * nixos/k3s: add config assertions and trim unneeded bits * nixos/k3s: add a test that k3s works; minor module improvements This is a single-node test. Eventually we should also have a multi-node test to verify the agent bit works, but that one's more involved. * nixos/k3s: add option description * nixos/k3s: add defaults for token/serveraddr Now that the assertion enforces their presence, we don't need to use the typesystem for it. * nixos/k3s: remove unneeded sudo in test * nixos/k3s: add to test list
parent
a7b3a6982a
commit
bc138f407f
@ -0,0 +1,101 @@ |
||||
{ config, lib, pkgs, ... }:

with lib;
let
  cfg = config.services.k3s;
in
{
  # interface
  options.services.k3s = {
    enable = mkEnableOption "k3s";

    package = mkOption {
      type = types.package;
      default = pkgs.k3s;
      defaultText = "pkgs.k3s";
      example = literalExample "pkgs.k3s";
      description = "Package that should be used for k3s";
    };

    role = mkOption {
      description = ''
        Whether k3s should run as a server or agent.
        Note that the server, by default, also runs as an agent.
      '';
      default = "server";
      type = types.enum [ "server" "agent" ];
    };

    serverAddr = mkOption {
      type = types.str;
      description = "The k3s server to connect to. This option only makes sense for an agent.";
      example = "https://10.0.0.10:6443";
      default = "";
    };

    token = mkOption {
      type = types.str;
      description = "The k3s token to use when connecting to the server. This option only makes sense for an agent.";
      default = "";
    };

    docker = mkOption {
      type = types.bool;
      default = false;
      description = "Use docker to run containers rather than the built-in containerd.";
    };

    extraFlags = mkOption {
      # Fix: this option previously declared no type, so the module system
      # could neither type-check it nor merge multiple definitions of it.
      type = types.str;
      description = "Extra flags to pass to the k3s command.";
      default = "";
      example = "--no-deploy traefik --cluster-cidr 10.24.0.0/16";
    };

    disableAgent = mkOption {
      type = types.bool;
      default = false;
      description = "Only run the server. This option only makes sense for a server.";
    };
  };

  # implementation

  config = mkIf cfg.enable {
    # Fail evaluation early when an agent is missing its connection details;
    # serverAddr/token default to "" and are only meaningful for agents.
    assertions = [
      {
        assertion = cfg.role == "agent" -> cfg.serverAddr != "";
        message = "serverAddr should be set if role is 'agent'";
      }
      {
        assertion = cfg.role == "agent" -> cfg.token != "";
        message = "token should be set if role is 'agent'";
      }
    ];

    # Only pull in docker when the user opted into the docker runtime;
    # mkDefault lets other modules override the docker configuration.
    virtualisation.docker = mkIf cfg.docker {
      enable = mkDefault true;
    };

    systemd.services.k3s = {
      description = "k3s service";
      after = mkIf cfg.docker [ "docker.service" ];
      wantedBy = [ "multi-user.target" ];
      serviceConfig = {
        # Taken from https://github.com/rancher/k3s/blob/v1.17.4+k3s1/contrib/ansible/roles/k3s/node/templates/k3s.service.j2
        Type = "notify";
        KillMode = "process";
        Delegate = "yes";
        Restart = "always";
        RestartSec = "5s";
        # Assemble the command line; agent-only flags are gated on the role.
        ExecStart = concatStringsSep " \\\n " (
          [
            "${cfg.package}/bin/k3s ${cfg.role}"
          ] ++ (optional cfg.docker "--docker")
          ++ (optional cfg.disableAgent "--disable-agent")
          ++ (optional (cfg.role == "agent") "--server ${cfg.serverAddr} --token ${cfg.token}")
          ++ [ cfg.extraFlags ]
        );
      };
    };
  };
}
@ -0,0 +1,78 @@ |
||||
import ./make-test-python.nix ({ pkgs, ... }:

let
  # Minimal container image that doubles as the k3s pause image and as the
  # image for the test pod below.
  pauseContainer = pkgs.dockerTools.buildImage {
    name = "test.local/pause";
    tag = "local";
    contents = with pkgs; [ tini coreutils busybox ];
    config.Entrypoint = [ "/bin/tini" "--" "/bin/sleep" "inf" ];
  };

  # Manifest for the service account and pod the test applies.
  podManifest = pkgs.writeText "test.yml" ''
    # Don't use the default service account because there's a race where it may
    # not be created yet; make our own instead.
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: test
    ---
    apiVersion: v1
    kind: Pod
    metadata:
      name: test
    spec:
      serviceAccountName: test
      containers:
      - name: test
        image: test.local/pause:local
        imagePullPolicy: Never
        command: ["sh", "-c", "sleep inf"]
  '';
in
{
  name = "k3s";
  meta.maintainers = [ pkgs.stdenv.lib.maintainers.euank ];

  nodes.k3s = { pkgs, ... }: {
    environment.systemPackages = [ pkgs.k3s pkgs.gzip ];

    # k3s uses enough resources the default vm fails.
    virtualisation.memorySize = pkgs.lib.mkDefault 1536;
    virtualisation.diskSize = pkgs.lib.mkDefault 4096;

    services.k3s = {
      enable = true;
      role = "server";
      package = pkgs.k3s;
      # Slightly reduce resource usage
      extraFlags = "--no-deploy coredns,servicelb,traefik,local-storage,metrics-server --pause-image test.local/pause:local";
    };

    # Unprivileged user used to check that k3s is not world-accessible.
    users.users.noprivs = {
      isNormalUser = true;
      description = "Can't access k3s by default";
      password = "*";
    };
  };

  testScript = ''
    start_all()

    k3s.wait_for_unit("k3s")
    k3s.succeed("k3s kubectl cluster-info")
    k3s.fail("sudo -u noprivs k3s kubectl cluster-info")
    # k3s.succeed("k3s check-config") # fails with the current nixos kernel config, uncomment once this passes

    k3s.succeed(
        "zcat ${pauseContainer} | k3s ctr image import -"
    )

    k3s.succeed("k3s kubectl apply -f ${podManifest}")
    k3s.succeed("k3s kubectl wait --for 'condition=Ready' pod/test")
  '';
})
Loading…
Reference in new issue